Dataset schema (per-record fields):
  repo_name : string, length 5-100
  path      : string, length 4-375
  copies    : string, 991 distinct values
  size      : string, length 4-7
  content   : string, length 666-1M
  license   : string, 15 distinct values
repo_name: nevir/plexability
path: extern/depot_tools/third_party/logilab/common/proc.py
copies: 117
size: 9352
content:
# copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:[email protected]
#
# This file is part of logilab-common.
#
# logilab-common is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 2.1 of the License, or (at your option) any
# later version.
#
# logilab-common is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with logilab-common. If not, see <http://www.gnu.org/licenses/>.
"""module providing:
* process information (linux specific: rely on /proc)
* a class for resource control (memory / time / cpu time)

This module doesn't work on windows platforms (only tested on linux)

:organization: Logilab
"""

__docformat__ = "restructuredtext en"

import os
import stat
from resource import getrlimit, setrlimit, RLIMIT_CPU, RLIMIT_AS
from signal import signal, SIGXCPU, SIGKILL, SIGUSR2, SIGUSR1
from threading import Timer, currentThread, Thread, Event
from time import time

from logilab.common.tree import Node


class NoSuchProcess(Exception):
    pass


def proc_exists(pid):
    """check that a pid is registered in /proc

    raise NoSuchProcess exception if not
    """
    if not os.path.exists('/proc/%s' % pid):
        raise NoSuchProcess()

PPID = 3
UTIME = 13
STIME = 14
CUTIME = 15
CSTIME = 16
VSIZE = 22


class ProcInfo(Node):
    """provide access to process information found in /proc"""

    def __init__(self, pid):
        self.pid = int(pid)
        Node.__init__(self, self.pid)
        proc_exists(self.pid)
        self.file = '/proc/%s/stat' % self.pid
        self.ppid = int(self.status()[PPID])

    def memory_usage(self):
        """return the memory usage of the process (vsize field of
        /proc/<pid>/stat)"""
        try:
            return int(self.status()[VSIZE])
        except IOError:
            return 0

    def lineage_memory_usage(self):
        return self.memory_usage() + sum([child.lineage_memory_usage()
                                          for child in self.children])

    def time(self, children=0):
        """return the number of jiffies that this process has been scheduled
        in user and kernel mode"""
        status = self.status()
        time = int(status[UTIME]) + int(status[STIME])
        if children:
            time += int(status[CUTIME]) + int(status[CSTIME])
        return time

    def status(self):
        """return the list of fields found in /proc/<pid>/stat"""
        return open(self.file).read().split()

    def name(self):
        """return the process name found in /proc/<pid>/stat"""
        return self.status()[1].strip('()')

    def age(self):
        """return the age of the process"""
        return os.stat(self.file)[stat.ST_MTIME]


class ProcInfoLoader:
    """manage process information"""

    def __init__(self):
        self._loaded = {}

    def list_pids(self):
        """return a list of existent process ids"""
        for subdir in os.listdir('/proc'):
            if subdir.isdigit():
                yield int(subdir)

    def load(self, pid):
        """get a ProcInfo object for a given pid"""
        pid = int(pid)
        try:
            return self._loaded[pid]
        except KeyError:
            procinfo = ProcInfo(pid)
            procinfo.manager = self
            self._loaded[pid] = procinfo
            return procinfo

    def load_all(self):
        """load all processes information"""
        for pid in self.list_pids():
            try:
                procinfo = self.load(pid)
                if procinfo.parent is None and procinfo.ppid:
                    pprocinfo = self.load(procinfo.ppid)
                    pprocinfo.append(procinfo)
            except NoSuchProcess:
                pass


try:
    # BaseException is only available from Python 2.5 on
    class ResourceError(BaseException):
        """Error raised when a resource limit is reached"""
        limit = "Unknown Resource Limit"
except NameError:
    class ResourceError(Exception):
        """Error raised when a resource limit is reached"""
        limit = "Unknown Resource Limit"


class XCPUError(ResourceError):
    """Error raised when the CPU time limit is reached"""
    limit = "CPU Time"


class LineageMemoryError(ResourceError):
    """Error raised when the total amount of memory used by a process and
    its children is reached"""
    limit = "Lineage total Memory"


class TimeoutError(ResourceError):
    """Error raised when the process has been running for too much time"""
    limit = "Real Time"

# MemoryError can't be made a ResourceError subclass, since the interpreter
# itself raises the built-in MemoryError; catch both through this tuple
RESOURCE_LIMIT_EXCEPTION = (ResourceError, MemoryError)


class MemorySentinel(Thread):
    """A class checking that a process doesn't use too much memory, in a
    separate daemonic thread
    """
    def __init__(self, interval, memory_limit, gpid=os.getpid()):
        Thread.__init__(self, target=self._run, name="Test.Sentinel")
        self.memory_limit = memory_limit
        self._stop = Event()
        self.interval = interval
        self.setDaemon(True)
        self.gpid = gpid

    def stop(self):
        """stop the sentinel"""
        self._stop.set()

    def _run(self):
        pil = ProcInfoLoader()
        while not self._stop.isSet():
            if self.memory_limit <= pil.load(self.gpid).lineage_memory_usage():
                os.killpg(self.gpid, SIGUSR1)
            self._stop.wait(self.interval)


class ResourceController:

    def __init__(self, max_cpu_time=None, max_time=None, max_memory=None,
                 max_reprieve=60):
        if SIGXCPU == -1:
            raise RuntimeError("Unsupported platform")
        self.max_time = max_time
        self.max_memory = max_memory
        self.max_cpu_time = max_cpu_time
        self._reprieve = max_reprieve
        self._timer = None
        self._msentinel = None
        self._old_max_memory = None
        self._old_usr1_hdlr = None
        self._old_max_cpu_time = None
        self._old_usr2_hdlr = None
        self._old_sigxcpu_hdlr = None
        self._limit_set = 0
        self._abort_try = 0
        self._start_time = None
        self._elapse_time = 0

    def _hangle_sig_timeout(self, sig, frame):
        raise TimeoutError()

    def _hangle_sig_memory(self, sig, frame):
        if self._abort_try < self._reprieve:
            self._abort_try += 1
            raise LineageMemoryError("Memory limit reached")
        else:
            os.killpg(os.getpid(), SIGKILL)

    def _handle_sigxcpu(self, sig, frame):
        if self._abort_try < self._reprieve:
            self._abort_try += 1
            raise XCPUError("Soft CPU time limit reached")
        else:
            os.killpg(os.getpid(), SIGKILL)

    def _time_out(self):
        if self._abort_try < self._reprieve:
            self._abort_try += 1
            os.killpg(os.getpid(), SIGUSR2)
            if self._limit_set > 0:
                self._timer = Timer(1, self._time_out)
                self._timer.start()
        else:
            os.killpg(os.getpid(), SIGKILL)

    def setup_limit(self):
        """set up the process limit"""
        assert currentThread().getName() == 'MainThread'
        os.setpgrp()
        if self._limit_set <= 0:
            if self.max_time is not None:
                self._old_usr2_hdlr = signal(SIGUSR2, self._hangle_sig_timeout)
                self._timer = Timer(max(1, int(self.max_time) - self._elapse_time),
                                    self._time_out)
                self._start_time = int(time())
                self._timer.start()
            if self.max_cpu_time is not None:
                self._old_max_cpu_time = getrlimit(RLIMIT_CPU)
                cpu_limit = (int(self.max_cpu_time), self._old_max_cpu_time[1])
                self._old_sigxcpu_hdlr = signal(SIGXCPU, self._handle_sigxcpu)
                setrlimit(RLIMIT_CPU, cpu_limit)
            if self.max_memory is not None:
                self._msentinel = MemorySentinel(1, int(self.max_memory))
                self._old_max_memory = getrlimit(RLIMIT_AS)
                self._old_usr1_hdlr = signal(SIGUSR1, self._hangle_sig_memory)
                as_limit = (int(self.max_memory), self._old_max_memory[1])
                setrlimit(RLIMIT_AS, as_limit)
                self._msentinel.start()
        self._limit_set += 1

    def clean_limit(self):
        """reinstall the old process limit"""
        if self._limit_set > 0:
            if self.max_time is not None:
                self._timer.cancel()
                self._elapse_time += int(time()) - self._start_time
                self._timer = None
                signal(SIGUSR2, self._old_usr2_hdlr)
            if self.max_cpu_time is not None:
                setrlimit(RLIMIT_CPU, self._old_max_cpu_time)
                signal(SIGXCPU, self._old_sigxcpu_hdlr)
            if self.max_memory is not None:
                self._msentinel.stop()
                self._msentinel = None
                setrlimit(RLIMIT_AS, self._old_max_memory)
                signal(SIGUSR1, self._old_usr1_hdlr)
        self._limit_set -= 1
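
# A minimal usage sketch (not part of the original module), assuming a Linux
# host with /proc mounted; _demo_work is a hypothetical stand-in for the
# workload being constrained.
if __name__ == '__main__':
    def _demo_work():
        return sum(xrange(10 ** 7))

    rc = ResourceController(max_cpu_time=5, max_time=30)
    rc.setup_limit()
    try:
        _demo_work()
    except RESOURCE_LIMIT_EXCEPTION, exc:
        # ResourceError subclasses carry a human-readable `limit` attribute
        print 'resource limit reached:', getattr(exc, 'limit', 'unknown')
    finally:
        # always restore the previous limits and signal handlers
        rc.clean_limit()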
license: gpl-2.0

repo_name: stefansommer/jetflows
path: code/run_two_jets.py
copies: 1
size: 1970
content:
#!/usr/bin/python
#
# This file is part of jetflows.
#
# Copyright (C) 2014, Henry O. Jacobs ([email protected]), Stefan Sommer ([email protected])
# https://github.com/nefan/jetflows.git
#
# jetflows is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# jetflows is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with jetflows. If not, see <http://www.gnu.org/licenses/>.
#

import two_jets as tj
import numpy as np

DIM = tj.DIM = 2
N = tj.N = 4
SIGMA = tj.SIGMA = 1.0

def d2zip(grid):
    return np.dstack(grid).reshape([-1, 2])

q = SIGMA*2*np.random.randn(N, DIM)
#q = SIGMA*2*np.mgrid[-1.5:1.5:np.complex(0,np.sqrt(N)),-1.5:1.5:np.complex(0,np.sqrt(N))] # particles in regular grid
#q = d2zip(q)
q_1 = np.outer(np.ones(N), np.eye(DIM)).reshape([N, DIM, DIM])
q_2 = np.zeros([N, DIM, DIM, DIM])
p = SIGMA*np.random.randn(N, DIM)
mu_1 = SIGMA*np.random.randn(N, DIM, DIM)
mu_2 = np.zeros([N, DIM, DIM, DIM])
for i in range(0, N):
    for d in range(0, DIM):
        store = (SIGMA**2)*np.random.randn(DIM, DIM)
        mu_2[i, d] = 0.5*(store + store.T)

#q = np.array([[-1.0 , 0.0],[1.0,0.0]])
#p = np.zeros([N,DIM])
#mu_1 = np.zeros([N,DIM,DIM])
#mu_2 = np.zeros([N,DIM,DIM,DIM])

#tj.test_functions(0)

(t_span, y_span) = tj.integrate(tj.weinstein_darboux_to_state(q, q_1, q_2, p, mu_1, mu_2), T=1.)

print 'initial energy was \n' + str(tj.energy(y_span[0]))
print 'final energy is \n' + str(tj.energy(y_span[-1]))

# save result
np.save('output/state_data', y_span)
np.save('output/time_data', t_span)
np.save('output/setup', [N, DIM, SIGMA])
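
# Interpretive note on the two energy printouts above: the jet-particle flow
# is Hamiltonian, so a correct integration should leave tj.energy roughly
# unchanged; a large difference between the printed initial and final values
# signals an integration problem.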
license: agpl-3.0

repo_name: mkeilman/sirepo
path: tests/template/srw_import_data/srx.py
copies: 2
size: 35248
content:
# -*- coding: utf-8 -*- ############################################################################# # SRWLIB Example: Virtual Beamline: a set of utilities and functions allowing to simulate # operation of an SR Beamline. # The standard use of this script is from command line, with some optional arguments, # e.g. for calculation (with default parameter values) of: # UR Spectrum Through a Slit (Flux within a default aperture): # python SRWLIB_VirtBL_*.py --sm # Single-Electron UR Spectrum (Flux per Unit Surface): # python SRWLIB_VirtBL_*.py --ss # UR Power Density (at the first optical element): # python SRWLIB_VirtBL_*.py --pw # Input Single-Electron UR Intensity Distribution (at the first optical element): # python SRWLIB_VirtBL_*.py --si # Single-Electron Wavefront Propagation: # python SRWLIB_VirtBL_*.py --ws # Multi-Electron Wavefront Propagation: # Sequential Mode: # python SRWLIB_VirtBL_*.py --wm # Parallel Mode (using MPI / mpi4py), e.g.: # mpiexec -n 6 python SRWLIB_VirtBL_*.py --wm # For changing parameters of all these calculaitons from the default valuse, see the definition # of all options in the list at the end of the script. # v 0.04 ############################################################################# from __future__ import print_function #Python 2.7 compatibility from srwl_bl import * try: import cPickle as pickle except: import pickle #import time #*********************************Setting Up Optical Elements and Propagation Parameters def set_optics(_v): """This function describes optical layout of the Coherent Hoard X-ray (CHX) beamline of NSLS-II. Such function has to be written for every beamline to be simulated; it is specific to a particular beamline. :param _v: structure containing all parameters allowed to be varied for that particular beamline """ #---Nominal Positions of Optical Elements [m] (with respect to straight section center) zS0 = 33.1798 #White Beam Slits (S0) zHFM = 34.2608 #Horizontally-Focusing Mirror M1 (HFM) zS1 = 35.6678 #Pink Beam Slits (S1) zDCM = 36.4488 #Horizontall-Deflecting Double-Crystal Monochromator (DCM) zBPM1 = 38.6904 #BPM-1 zBPM2 = 50.3872 #BPM-2 zSSA = 50.6572 #Secondary Source Aperture (SSA) zEA = 61.9611 #Energy Absorber (EA) zDBPM1 = 62.272 #Diamond BPM-1 zKBFV = 62.663 #High-Flux Vertically-Focusing KB Mirror M2 zKBFH = 63.0 #High-Flux Horizontally-Focusing KB Mirror M3 zSF = 63.3 #High-Flux Sample position (focus of KBF) zDBPM2 = 65.9178 #Diamond BPM-2 zKBRV = 66.113 #High-Resolution Vertically-Focusing KB Mirror M4 zKBRH = 66.220 #High-Resolution Horizontally-Focusing KB Mirror M5 zSR = 63.3 #High-Resolution Sample position (focus of KBR) #zD = 65 #Detector position (?) 
#---Instantiation of the Optical Elements arElNamesAll_01 = ['S0', 'S1', 'S1_DCM', 'DCM', 'DCM_SSA', 'SSA', 'SSA_KBFV', 'KBFV', 'KBFV_KBFH', 'KBFH', 'KBFH_zSF'] arElNamesAll_02 = ['S0', 'S0_HFM', 'HFM', 'HFM_S1', 'S1', 'S1_DCM', 'DCM', 'DCM_SSA', 'SSA', 'SSA_KBRV', 'KBRV', 'KBRV_KBRH', 'KBRH', 'KBRH_zSR'] arElNamesAll_03 = ['S0', 'S0_HFM', 'HFM', 'HFM_S1', 'S1', 'S1_DCM', 'DCM', 'DCM_SSA', 'SSA', 'SSA_DBPM2', 'DBPM2_KBRV', 'KBRV', 'KBRV_KBRH', 'KBRH', 'KBRH_zSR'] arElNamesAll_04 = ['S0', 'S0_HFM', 'HFM', 'HFM_S1', 'S1', 'S1_SSA', 'SSA', 'SSA_DBPM2', 'DBPM2_KBRV', 'KBRV', 'KBRV_KBRH', 'KBRH', 'KBRH_zSR'] arElNamesAll = arElNamesAll_01 if(_v.op_BL == 2): arElNamesAll = arElNamesAll_02 elif(_v.op_BL == 3): arElNamesAll = arElNamesAll_03 elif(_v.op_BL == 4): arElNamesAll = arElNamesAll_04 ''' #Treat beamline sub-cases / alternative configurations if(len(_v.op_fin) > 0): if(_v.op_fin not in arElNamesAll): raise Exception('Optical element with the name specified in the "op_fin" option is not present in this beamline') #Could be made more general ''' arElNames = []; for i in range(len(arElNamesAll)): arElNames.append(arElNamesAll[i]) if(len(_v.op_fin) > 0): if(arElNamesAll[i] == _v.op_fin): break el = []; pp = [] #lists of SRW optical element objects and their corresponding propagation parameters #S0 (primary slit) if('S0' in arElNames): el.append(SRWLOptA('r', 'a', _v.op_S0_dx, _v.op_S0_dy)); pp.append(_v.op_S0_pp) #Drift S0 -> HFM if('S0_HFM' in arElNames): el.append(SRWLOptD(zHFM - zS0)); pp.append(_v.op_S0_HFM_pp) #HDM (Height Profile Error) if('HFM' in arElNames): lenHFM = 0.95 #Length [m] horApHFM = lenHFM*_v.op_HFM_ang #Projected dimensions verApHFM = 5.e-03 #? el.append(SRWLOptA('r', 'a', horApHFM, verApHFM)); pp.append(_v.op_HFMA_pp) if(_v.op_HFM_f != 0.): el.append(SRWLOptL(_Fx=_v.op_HFM_f)); pp.append(_v.op_HFML_pp) #To treat Reflectivity (maybe by Planar Mirror?) #elif(_v.op_HFM_r != 0.): #Setup Cylindrical Mirror, take into account Reflectivity #Height Profile Error ifnHFM = os.path.join(_v.fdir, _v.op_HFM_ifn) if len(_v.op_HFM_ifn) > 0 else '' if(len(ifnHFM) > 0): #hProfDataHFM = srwl_uti_read_data_cols(ifnHFM, '\t', 0, 1) hProfDataHFM = srwl_uti_read_data_cols(ifnHFM, '\t') opHFM = srwl_opt_setup_surf_height_2d(hProfDataHFM, 'x', _ang=_v.op_HFM_ang, _amp_coef=_v.op_HFM_amp, _nx=1500, _ny=200) ofnHFM = os.path.join(_v.fdir, _v.op_HFM_ofn) if len(_v.op_HFM_ofn) > 0 else '' if(len(ofnHFM) > 0): pathDifHFM = opHFM.get_data(3, 3) srwl_uti_save_intens_ascii(pathDifHFM, opHFM.mesh, ofnHFM, 0, ['', 'Horizontal Position', 'Vertical Position', 'Opt. Path Dif.'], _arUnits=['', 'm', 'm', 'm']) el.append(opHFM); pp.append(_v.op_HFMT_pp) #Drift HFM -> S1 if('HFM_S1' in arElNames): el.append(SRWLOptD(zS1 - zHFM + _v.op_S1_dz)); pp.append(_v.op_HFM_S1_pp) #S1 slit if('S1' in arElNames): el.append(SRWLOptA('r', 'a', _v.op_S1_dx, _v.op_S1_dy)); pp.append(_v.op_S1_pp) #Drift S1 -> DCM if('S1_DCM' in arElNames): el.append(SRWLOptD(zDCM - zS1 - _v.op_S1_dz)); pp.append(_v.op_S1_DCM_pp) #Drift S1 -> SSA if('S1_SSA' in arElNames): el.append(SRWLOptD(zSSA - zS1 - _v.op_S1_dz + _v.op_SSA_dz)); pp.append(_v.op_S1_SSA_pp) #Double-Crystal Monochromator if('DCM' in arElNames): tc = 1e-02 # [m] crystal thickness angAs = 0.*pi/180. 
# [rad] asymmetry angle hc = [1,1,1] if(_v.op_DCM_r == '311'): hc = [3,1,1] dc = srwl_uti_cryst_pl_sp(hc, 'Si') #print('DCM Interplannar dist.:', dc) psi = srwl_uti_cryst_pol_f(_v.op_DCM_e0, hc, 'Si') #MR15032016: replaced "op_DCM_e" by "op_DCM_e0" to test the import in Sirepo #print('DCM Fourier Components:', psi) #---------------------- DCM Crystal #1 opCr1 = SRWLOptCryst(_d_sp=dc, _psi0r=psi[0], _psi0i=psi[1], _psi_hr=psi[2], _psi_hi=psi[3], _psi_hbr=psi[2], _psi_hbi=psi[3], _tc=tc, _ang_as=angAs, _ang_roll=1.5707963, _e_avg=_v.op_DCM_e0) #Find appropriate orientation of the Crystal #1 and the Output Beam Frame (using a member-function in SRWLOptCryst): orientDataCr1 = opCr1.find_orient(_en=_v.op_DCM_e0, _ang_dif_pl=1.5707963) # Horizontally-deflecting #MR15032016: replaced "op_DCM_e" by "op_DCM_e0" to test the import in Sirepo #Crystal #1 Orientation found: orientCr1 = orientDataCr1[0] tCr1 = orientCr1[0] #Tangential Vector to Crystal surface sCr1 = orientCr1[1] nCr1 = orientCr1[2] #Normal Vector to Crystal surface # print('DCM Crystal #1 Orientation (original):') # print(' t =', tCr1, 's =', orientCr1[1], 'n =', nCr1) import uti_math if(_v.op_DCM_ac1 != 0): #Small rotation of DCM Crystal #1: rot = uti_math.trf_rotation([0,1,0], _v.op_DCM_ac1, [0,0,0]) tCr1 = uti_math.matr_prod(rot[0], tCr1) sCr1 = uti_math.matr_prod(rot[0], sCr1) nCr1 = uti_math.matr_prod(rot[0], nCr1) #Set the Crystal #1 orientation: opCr1.set_orient(nCr1[0], nCr1[1], nCr1[2], tCr1[0], tCr1[1]) #Orientation of the Outgoing Beam Frame being found: orientCr1OutFr = orientDataCr1[1] rxCr1 = orientCr1OutFr[0] #Horizontal Base Vector of the Output Beam Frame ryCr1 = orientCr1OutFr[1] #Vertical Base Vector of the Output Beam Frame rzCr1 = orientCr1OutFr[2] #Longitudinal Base Vector of the Output Beam Frame # print('DCM Crystal #1 Outgoing Beam Frame:') # print(' ex =', rxCr1, 'ey =', ryCr1, 'ez =', rzCr1) #Incoming/Outgoing beam frame transformation matrix for the DCM Crystal #1 TCr1 = [rxCr1, ryCr1, rzCr1] # print('Total transformation matrix after DCM Crystal #1:') # uti_math.matr_print(TCr1) #print(' ') el.append(opCr1); pp.append(_v.op_DCMC1_pp) #---------------------- DCM Crystal #2 opCr2 = SRWLOptCryst(_d_sp=dc, _psi0r=psi[0], _psi0i=psi[1], _psi_hr=psi[2], _psi_hi=psi[3], _psi_hbr=psi[2], _psi_hbi=psi[3], _tc=tc, _ang_as=angAs, _ang_roll=-1.5707963, _e_avg=_v.op_DCM_e0) #Find appropriate orientation of the Crystal #2 and the Output Beam Frame orientDataCr2 = opCr2.find_orient(_en=_v.op_DCM_e0, _ang_dif_pl=-1.5707963) #MR15032016: replaced "op_DCM_e" by "op_DCM_e0" to test the import in Sirepo #Crystal #2 Orientation found: orientCr2 = orientDataCr2[0] tCr2 = orientCr2[0] #Tangential Vector to Crystal surface sCr2 = orientCr2[1] nCr2 = orientCr2[2] #Normal Vector to Crystal surface # print('Crystal #2 Orientation (original):') # print(' t =', tCr2, 's =', sCr2, 'n =', nCr2) if(_v.op_DCM_ac2 != 0): #Small rotation of DCM Crystal #2: rot = uti_math.trf_rotation([0,1,0], _v.op_DCM_ac2, [0,0,0]) tCr2 = uti_math.matr_prod(rot[0], tCr2) sCr2 = uti_math.matr_prod(rot[0], sCr2) nCr2 = uti_math.matr_prod(rot[0], nCr2) #Set the Crystal #2 orientation opCr2.set_orient(nCr2[0], nCr2[1], nCr2[2], tCr2[0], tCr2[1]) #Orientation of the Outgoing Beam Frame being found: orientCr2OutFr = orientDataCr2[1] rxCr2 = orientCr2OutFr[0] #Horizontal Base Vector of the Output Beam Frame ryCr2 = orientCr2OutFr[1] #Vertical Base Vector of the Output Beam Frame rzCr2 = orientCr2OutFr[2] #Longitudinal Base Vector of the Output Beam Frame # 
print('DCM Crystal #2 Outgoing Beam Frame:') # print(' ex =', rxCr2, 'ey =', ryCr2, 'ez =',rzCr2) #Incoming/Outgoing beam transformation matrix for the DCM Crystal #2 TCr2 = [rxCr2, ryCr2, rzCr2] Ttot = uti_math.matr_prod(TCr2, TCr1) # print('Total transformation matrix after DCM Crystal #2:') # uti_math.matr_print(Ttot) #print(' ') el.append(opCr2); pp.append(_v.op_DCMC2_pp) #Drift DCM -> SSA if('DCM_SSA' in arElNames): el.append(SRWLOptD(zSSA - zDCM + _v.op_SSA_dz)); pp.append(_v.op_DCM_SSA_pp) #SSA slit if('SSA' in arElNames): el.append(SRWLOptA('r', 'a', _v.op_SSA_dx, _v.op_SSA_dy)); pp.append(_v.op_SSA_pp) #Drift SSA -> DBPM2 if('SSA_DBPM2' in arElNames): el.append(SRWLOptD(zDBPM2 - zSSA - _v.op_SSA_dz + _v.op_DBPM2_dz)); pp.append(_v.op_SSA_DBPM2_pp) ###############To continue ## #Sample ## if('SMP' in arElNames): ## ifnSMP = os.path.join(v.fdir, v.op_SMP_ifn) if len(v.op_SMP_ifn) > 0 else '' ## if(len(ifnSMP) > 0): ## ifSMP = open(ifnSMP, 'rb') ## opSMP = pickle.load(ifSMP) ## ofnSMP = os.path.join(v.fdir, v.op_SMP_ofn) if len(v.op_SMP_ofn) > 0 else '' ## if(len(ofnSMP) > 0): ## pathDifSMP = opSMP.get_data(3, 3) ## srwl_uti_save_intens_ascii(pathDifSMP, opSMP.mesh, ofnSMP, 0, ['', 'Horizontal Position', 'Vertical Position', 'Opt. Path Dif.'], _arUnits=['', 'm', 'm', 'm']) ## el.append(opSMP); pp.append(v.op_SMP_pp) ## ifSMP.close() ## #Drift Sample -> Detector ## if('SMP_D' in arElNames): ## el.append(SRWLOptD(zD - zSample + v.op_D_dz)); pp.append(v.op_SMP_D_pp) pp.append(_v.op_fin_pp) return SRWLOptC(el, pp) #*********************************List of Parameters allowed to be varied #---List of supported options / commands / parameters allowed to be varied for this Beamline (comment-out unnecessary): varParam = [ #---Data Folder ['fdir', 's', os.path.join(os.getcwd(), 'data_SRX'), 'folder (directory) name for reading-in input and saving output data files'], #---Electron Beam ['ebm_nm', 's', 'NSLS-II Low Beta ', 'standard electron beam name'], ['ebm_nms', 's', 'Day1', 'standard electron beam name suffix: e.g. 
can be Day1, Final'], ['ebm_i', 'f', 0.5, 'electron beam current [A]'], #['ebeam_e', 'f', 3., 'electron beam avarage energy [GeV]'], ['ebm_de', 'f', 0., 'electron beam average energy deviation [GeV]'], ['ebm_x', 'f', 0., 'electron beam initial average horizontal position [m]'], ['ebm_y', 'f', 0., 'electron beam initial average vertical position [m]'], ['ebm_xp', 'f', 0., 'electron beam initial average horizontal angle [rad]'], ['ebm_yp', 'f', 0., 'electron beam initial average vertical angle [rad]'], ['ebm_z', 'f', 0., 'electron beam initial average longitudinal position [m]'], ['ebm_dr', 'f', 0., 'electron beam longitudinal drift [m] to be performed before a required calculation'], ['ebm_ens', 'f', -1, 'electron beam relative energy spread'], ['ebm_emx', 'f', -1, 'electron beam horizontal emittance [m]'], ['ebm_emy', 'f', -1, 'electron beam vertical emittance [m]'], #---Undulator ['und_per', 'f', 0.021, 'undulator period [m]'], ['und_len', 'f', 1.5, 'undulator length [m]'], ['und_b', 'f', 0.88770981, 'undulator vertical peak magnetic field [T]'], #['und_bx', 'f', 0., 'undulator horizontal peak magnetic field [T]'], #['und_by', 'f', 1., 'undulator vertical peak magnetic field [T]'], #['und_phx', 'f', 1.5708, 'undulator horizontal magnetic field phase [rad]'], #['und_phy', 'f', 0., 'undulator vertical magnetic field phase [rad]'], ['und_sx', 'i', 1.0, 'undulator horizontal magnetic field symmetry vs longitudinal position'], #['und_sy', 'i', -1, 'undulator vertical magnetic field symmetry vs longitudinal position'], ['und_zc', 'f', 1.305, 'undulator center longitudinal position [m]'], ['und_mdir', 's', 'magn_meas', 'name of magnetic measurements sub-folder'], ['und_mfs', 's', 'ivu21_srx_sum.txt', 'name of magnetic measurements for different gaps summary file'], #['und_g', 'f', 0., 'undulator gap [mm] (assumes availability of magnetic measurement or simulation data)'], #---Calculation Types #Electron Trajectory ['tr', '', '', 'calculate electron trajectory', 'store_true'], ['tr_cti', 'f', 0., 'initial time moment (c*t) for electron trajectory calculation [m]'], ['tr_ctf', 'f', 0., 'final time moment (c*t) for electron trajectory calculation [m]'], ['tr_np', 'f', 50000, 'number of points for trajectory calculation'], ['tr_mag', 'i', 1, 'magnetic field to be used for trajectory calculation: 1- approximate, 2- accurate'], ['tr_fn', 's', 'res_trj.dat', 'file name for saving calculated trajectory data'], ['tr_pl', 's', 'xxpyypz', 'plot the resulting trajectiry in graph(s): ""- dont plot, otherwise the string should list the trajectory components to plot'], #Single-Electron Spectrum vs Photon Energy ['ss', '', '', 'calculate single-e spectrum vs photon energy', 'store_true'], ['ss_ei', 'f', 100., 'initial photon energy [eV] for single-e spectrum vs photon energy calculation'], ['ss_ef', 'f', 20000., 'final photon energy [eV] for single-e spectrum vs photon energy calculation'], ['ss_ne', 'i', 10000, 'number of points vs photon energy for single-e spectrum vs photon energy calculation'], ['ss_x', 'f', 0., 'horizontal position [m] for single-e spectrum vs photon energy calculation'], ['ss_y', 'f', 0., 'vertical position [m] for single-e spectrum vs photon energy calculation'], ['ss_meth', 'i', 1, 'method to use for single-e spectrum vs photon energy calculation: 0- "manual", 1- "auto-undulator", 2- "auto-wiggler"'], ['ss_prec', 'f', 0.01, 'relative precision for single-e spectrum vs photon energy calculation (nominal value is 0.01)'], ['ss_pol', 'i', 6, 'polarization component to extract after 
spectrum vs photon energy calculation: 0- Linear Horizontal, 1- Linear Vertical, 2- Linear 45 degrees, 3- Linear 135 degrees, 4- Circular Right, 5- Circular Left, 6- Total'], ['ss_mag', 'i', 1, 'magnetic field to be used for single-e spectrum vs photon energy calculation: 1- approximate, 2- accurate'], ['ss_fn', 's', 'res_spec_se.dat', 'file name for saving calculated single-e spectrum vs photon energy'], ['ss_pl', 's', 'e', 'plot the resulting single-e spectrum in a graph: ""- dont plot, "e"- show plot vs photon energy'], #Multi-Electron Spectrum vs Photon Energy (taking into account e-beam emittance, energy spread and collection aperture size) ['sm', '', '', 'calculate multi-e spectrum vs photon energy', 'store_true'], ['sm_ei', 'f', 100., 'initial photon energy [eV] for multi-e spectrum vs photon energy calculation'], ['sm_ef', 'f', 20000., 'final photon energy [eV] for multi-e spectrum vs photon energy calculation'], ['sm_ne', 'i', 10000, 'number of points vs photon energy for multi-e spectrum vs photon energy calculation'], ['sm_x', 'f', 0., 'horizontal center position [m] for multi-e spectrum vs photon energy calculation'], ['sm_rx', 'f', 0.001, 'range of horizontal position / horizontal aperture size [m] for multi-e spectrum vs photon energy calculation'], ['sm_nx', 'i', 1, 'number of points vs horizontal position for multi-e spectrum vs photon energy calculation'], ['sm_y', 'f', 0., 'vertical center position [m] for multi-e spectrum vs photon energy calculation'], ['sm_ry', 'f', 0.001, 'range of vertical position / vertical aperture size [m] for multi-e spectrum vs photon energy calculation'], ['sm_ny', 'i', 1, 'number of points vs vertical position for multi-e spectrum vs photon energy calculation'], ['sm_mag', 'i', 1, 'magnetic field to be used for calculation of multi-e spectrum spectrum or intensity distribution: 1- approximate, 2- accurate'], ['sm_hi', 'i', 1, 'initial UR spectral harmonic to be taken into accountfor multi-e spectrum vs photon energy calculation'], ['sm_hf', 'i', 15, 'final UR spectral harmonic to be taken into accountfor multi-e spectrum vs photon energy calculation'], ['sm_prl', 'f', 1., 'longitudinal integration precision parameter for multi-e spectrum vs photon energy calculation'], ['sm_pra', 'f', 1., 'azimuthal integration precision parameter for multi-e spectrum vs photon energy calculation'], ['sm_type', 'i', 1, 'calculate flux (=1) or flux per unit surface (=2)'], ['sm_pol', 'i', 6, 'polarization component to extract after calculation of multi-e flux or intensity: 0- Linear Horizontal, 1- Linear Vertical, 2- Linear 45 degrees, 3- Linear 135 degrees, 4- Circular Right, 5- Circular Left, 6- Total'], ['sm_fn', 's', 'res_spec_me.dat', 'file name for saving calculated milti-e spectrum vs photon energy'], ['sm_pl', 's', 'e', 'plot the resulting spectrum-e spectrum in a graph: ""- dont plot, "e"- show plot vs photon energy'], #to add options for the multi-e calculation from "accurate" magnetic field #Power Density Distribution vs horizontal and vertical position ['pw', '', '', 'calculate SR power density distribution', 'store_true'], ['pw_x', 'f', 0., 'central horizontal position [m] for calculation of power density distribution vs horizontal and vertical position'], ['pw_rx', 'f', 0.015, 'range of horizontal position [m] for calculation of power density distribution vs horizontal and vertical position'], ['pw_nx', 'i', 100, 'number of points vs horizontal position for calculation of power density distribution'], ['pw_y', 'f', 0., 'central vertical position 
[m] for calculation of power density distribution vs horizontal and vertical position'], ['pw_ry', 'f', 0.015, 'range of vertical position [m] for calculation of power density distribution vs horizontal and vertical position'], ['pw_ny', 'i', 100, 'number of points vs vertical position for calculation of power density distribution'], ['pw_pr', 'f', 1., 'precision factor for calculation of power density distribution'], ['pw_meth', 'i', 1, 'power density computation method (1- "near field", 2- "far field")'], ['pw_zi', 'f', 0., 'initial longitudinal position along electron trajectory of power density distribution (effective if pow_sst < pow_sfi)'], ['pw_zf', 'f', 0., 'final longitudinal position along electron trajectory of power density distribution (effective if pow_sst < pow_sfi)'], ['pw_mag', 'i', 1, 'magnetic field to be used for power density calculation: 1- approximate, 2- accurate'], ['pw_fn', 's', 'res_pow.dat', 'file name for saving calculated power density distribution'], ['pw_pl', 's', 'xy', 'plot the resulting power density distribution in a graph: ""- dont plot, "x"- vs horizontal position, "y"- vs vertical position, "xy"- vs horizontal and vertical position'], #Single-Electron Intensity distribution vs horizontal and vertical position ['si', '', '', 'calculate single-e intensity distribution (without wavefront propagation through a beamline) vs horizontal and vertical position', 'store_true'], #Single-Electron Wavefront Propagation ['ws', '', '', 'calculate single-electron (/ fully coherent) wavefront propagation', 'store_true'], #Multi-Electron (partially-coherent) Wavefront Propagation ['wm', '', '', 'calculate multi-electron (/ partially coherent) wavefront propagation', 'store_true'], ['w_e', 'f', 9000., 'photon energy [eV] for calculation of intensity distribution vs horizontal and vertical position'], ['w_ef', 'f', -1., 'final photon energy [eV] for calculation of intensity distribution vs horizontal and vertical position'], ['w_ne', 'i', 1, 'number of points vs photon energy for calculation of intensity distribution'], ['w_x', 'f', 0., 'central horizontal position [m] for calculation of intensity distribution'], ['w_rx', 'f', 2.4e-03, 'range of horizontal position [m] for calculation of intensity distribution'], ['w_nx', 'i', 100, 'number of points vs horizontal position for calculation of intensity distribution'], ['w_y', 'f', 0., 'central vertical position [m] for calculation of intensity distribution vs horizontal and vertical position'], ['w_ry', 'f', 2.0e-03, 'range of vertical position [m] for calculation of intensity distribution vs horizontal and vertical position'], ['w_ny', 'i', 100, 'number of points vs vertical position for calculation of intensity distribution'], ['w_smpf', 'f', 1., 'sampling factor for calculation of intensity distribution vs horizontal and vertical position'], ['w_meth', 'i', 1, 'method to use for calculation of intensity distribution vs horizontal and vertical position'], ['w_prec', 'f', 0.01, 'relative precision for calculation of intensity distribution vs horizontal and vertical position'], ['w_mag', 'i', 1, 'magnetic field to be used for calculation of intensity distribution vs horizontal and vertical position: 1- approximate, 2- accurate'], ['si_pol', 'i', 6, 'polarization component to extract after calculation of intensity distribution: 0- Linear Horizontal, 1- Linear Vertical, 2- Linear 45 degrees, 3- Linear 135 degrees, 4- Circular Right, 5- Circular Left, 6- Total'], ['si_type', 'i', 0, 'type of a characteristic to be extracted 
after calculation of intensity distribution: 0- Single-Electron Intensity, 1- Multi-Electron Intensity, 2- Single-Electron Flux, 3- Multi-Electron Flux, 4- Single-Electron Radiation Phase, 5- Re(E): Real part of Single-Electron Electric Field, 6- Im(E): Imaginary part of Single-Electron Electric Field, 7- Single-Electron Intensity, integrated over Time or Photon Energy'], ['si_fn', 's', 'res_int_se.dat', 'file name for saving calculated single-e intensity distribution (without wavefront propagation through a beamline) vs horizontal and vertical position'], ['ws_fni', 's', 'res_int_pr_se.dat', 'file name for saving propagated single-e intensity distribution vs horizontal and vertical position'], ['ws_pl', 's', 'xy', 'plot the propagated radiaiton intensity distributions in graph(s): ""- dont plot, "x"- vs horizontal position, "y"- vs vertical position, "xy"- vs horizontal and vertical position'], ['ws_ap', 'i', 0, 'switch specifying representation of the resulting Stokes parameters (/ Intensity distribution): coordinate (0) or angular (1)'], ['si_pl', 's', 'xy', 'plot the input intensity distributions in graph(s): ""- dont plot, "x"- vs horizontal position, "y"- vs vertical position, "xy"- vs horizontal and vertical position'], ['wm_nm', 'i', 100000, 'number of macro-electrons (coherent wavefronts) for calculation of multi-electron wavefront propagation'], ['wm_na', 'i', 5, 'number of macro-electrons (coherent wavefronts) to average on each node at parallel (MPI-based) calculation of multi-electron wavefront propagation'], ['wm_ns', 'i', 5, 'saving periodicity (in terms of macro-electrons / coherent wavefronts) for intermediate intensity at multi-electron wavefront propagation calculation'], ['wm_ch', 'i', 0, 'type of a characteristic to be extracted after calculation of multi-electron wavefront propagation: #0- intensity (s0); 1- four Stokes components; 2- mutual intensity cut vs x; 3- mutual intensity cut vs y'], ['wm_ap', 'i', 0, 'switch specifying representation of the resulting Stokes parameters: coordinate (0) or angular (1)'], ['wm_x0', 'f', 0, 'horizontal center position for mutual intensity cut calculation'], ['wm_y0', 'f', 0, 'vertical center position for mutual intensity cut calculation'], ['wm_ei', 'i', 0, 'integration over photon energy is required (1) or not (0); if the integration is required, the limits are taken from w_e, w_ef'], ['wm_rm', 'i', 1, 'method for generation of pseudo-random numbers for e-beam phase-space integration: 1- standard pseudo-random number generator, 2- Halton sequences, 3- LPtau sequences (to be implemented)'], ['wm_fni', 's', 'res_int_pr_me.dat', 'file name for saving propagated multi-e intensity distribution vs horizontal and vertical position'], #['ws_fn', 's', '', 'file name for saving single-e (/ fully coherent) wavefront data'], #['wm_fn', 's', '', 'file name for saving multi-e (/ partially coherent) wavefront data'], #to add options ['op_r', 'f', 33.1798, 'longitudinal position of the first optical element [m]'], ['op_fin', 's', 'S3_SMP', 'name of the final optical element wavefront has to be propagated through'], #NOTE: the above option/variable names (fdir, ebm*, und*, ss*, sm*, pw*, is*, ws*, wm*) should be the same in all beamline scripts #on the other hand, the beamline optics related options below (op*) are specific to a particular beamline (and can be differ from beamline to beamline). #However, the default values of all the options/variables (above and below) can differ from beamline to beamline. 
#---Beamline Optics ['op_BL', 'f', 1, 'beamline version/option number'], ['op_S0_dx', 'f', 2.375e-03, 'slit S0: horizontal size [m]'], ['op_S0_dy', 'f', 2.0e-03, 'slit S0: vertical size [m]'], ['op_HFM_f', 'f', 11.0893, 'mirror HFM: focal length [m] (effective if op_HFM_f != 0)'], ['op_HFM_r', 'f', 8.924e+03, 'mirror HFM: radius of curvature [m] (effective if op_HFM_r != 0 and op_HFM_f == 0)'], ['op_HFM_ang', 'f', 2.5e-03, 'mirror HFM: angle of incidence [rad]'], ['op_HFM_mat', 's', '', 'mirror HFM: coating material; possible options: Si, Cr, Rh, Pt'], ['op_HFM_ifn', 's', 'mir_metro/SRX_HFM_height_prof.dat', 'mirror HFM: input file name of height profile data'], #['op_HFM_ifn', 's', '', 'mirror HFM: input file name of height profile data'], ['op_HFM_amp', 'f', 1., 'mirror HFM: amplification coefficient for height profile data'], ['op_HFM_ofn', 's', 'res_SRX_HFM_opt_path_dif.dat', 'mirror HCM: output file name of optical path difference data'], ['op_S1_dz', 'f', 0., 'S1: offset of longitudinal position [m]'], ['op_S1_dx', 'f', 2.375e-03, 'slit S1: horizontal size [m]'], ['op_S1_dy', 'f', 10.0e-03, 'slit S1: vertical size [m]'], ['op_DCM_e0', 'f', 8999., 'DCM: central photon energy DCM is tuned to [eV]'], #MR15032016: replaced "op_DCM_e" by "op_DCM_e0" to test the import in Sirepo ['op_DCM_r', 's', '111', 'DCM: reflection type (can be either "111" or "311")'], ['op_DCM_ac1', 'f', 0., 'DCM: angular deviation of 1st crystal from exact Bragg angle [rad]'], ['op_DCM_ac2', 'f', 0., 'DCM: angular deviation of 2nd crystal from exact Bragg angle [rad]'], ['op_SSA_dz', 'f', 0., 'slit SSA: offset of longitudinal position [m]'], ['op_SSA_dx', 'f', 3.0e-03, 'slit SSA: horizontal size [m]'], ['op_SSA_dy', 'f', 3.0e-03, 'slit SSA: vertical size [m]'], ['op_DBPM2_dz', 'f', 0., 'slit DBPM2: offset of longitudinal position [m]'], ###############To continue ## ['op_SMP_dz', 'f', 0., 'sample: offset of longitudinal position [m]'], ## ['op_SMP_ifn', 's', 'CHX_SMP_CDI_001.pickle', 'sample: model file name (binary "dumped" SRW transmission object)'], ## ['op_SMP_ofn', 's', 'res_CHX_SMP_opt_path_dif.dat', 'sample: output file name of optical path difference data'], ## ['op_D_dz', 'f', 0., 'detector: offset of longitudinal position [m]'], #to add options for different beamline cases, etc. 
#Propagation Param.: [0][1][2][3][4] [5] [6] [7] [8] [9][10][11] #['op_S0_pp', 'f', [0, 0, 1, 0, 0, 4.5, 5.0, 1.5, 2.5, 0, 0, 0], 'slit S0: propagation parameters'], #['op_S0_pp', 'f', [0, 0, 1, 0, 0, 2.2, 6.0, 3.0, 15.0, 0, 0, 0], 'slit S0: propagation parameters'], #['op_S0_pp', 'f', [0, 0, 1, 0, 0, 2.0, 15.0,1.5, 15.0,0, 0, 0], 'slit S0: propagation parameters'], ['op_S0_pp', 'f', [0, 0, 1, 0, 0, 1.0, 1.0, 1.0, 1.0, 0, 0, 0], 'slit S0: propagation parameters'], ['op_S0_HFM_pp', 'f', [0, 0, 1, 1, 0, 1.0, 1.0, 1.0, 1.0, 0, 0, 0], 'drift S0 -> HFM: propagation parameters'], ['op_HFMA_pp', 'f', [0, 0, 1, 0, 0, 1.0, 1.0, 1.0, 1.0, 0, 0, 0], 'mirror HCM: Aperture propagation parameters'], ['op_HFML_pp', 'f', [0, 0, 1, 0, 0, 1.0, 1.0, 1.0, 1.0, 0, 0, 0], 'mirror HCM: Lens propagation parameters'], ['op_HFMT_pp', 'f', [0, 0, 1, 0, 0, 1.0, 1.0, 1.0, 1.0, 0, 0, 0], 'mirror HCM: Transmission propagation parameters'], ['op_HFM_S1_pp', 'f', [0, 0, 1, 1, 0, 1.0, 1.0, 1.0, 1.0, 0, 0, 0], 'drift HDM -> S1: propagation parameters'], ['op_S1_pp', 'f', [0, 0, 1, 0, 0, 1.0, 1.0, 1.0, 1.0, 0, 0, 0], 'slit S1: propagation parameters'], ['op_S1_SSA_pp', 'f', [0, 0, 1, 1, 0, 1.0, 1.0, 1.0, 1.0, 0, 0, 0], 'drift S1 -> SSA: propagation parameters'], ['op_S1_DCM_pp', 'f', [0, 0, 1, 1, 0, 1.0, 1.0, 1.0, 1.0, 0, 0, 0], 'drift S1 -> DCM: propagation parameters'], ['op_DCMC1_pp', 'f', [0, 0, 1, 0, 0, 1.0, 1.0, 1.0, 1.0, 0, 0, 0], 'DCM C1: propagation parameters'], ['op_DCMC2_pp', 'f', [0, 0, 1, 0, 0, 1.0, 1.0, 1.0, 1.0, 0, 0, 0], 'DCM C2: propagation parameters'], ['op_DCM_SSA_pp', 'f', [0, 0, 1, 1, 0, 1.0, 1.0, 1.0, 1.0, 0, 0, 0], 'drift DCM -> SSA: propagation parameters'], ['op_SSA_pp', 'f', [0, 0, 1, 0, 0, 1.0, 1.0, 1.0, 1.0, 0, 0, 0], 'slit SSA: propagation parameters'], ['op_SSA_DBPM2_pp', 'f',[0, 0, 1, 1, 0, 1.0, 1.0, 1.0, 1.0, 0, 0, 0], 'drift SSA -> DBPM2: propagation parameters'], ###############To continue ## ['op_S3_SMP_pp', 'f', [0, 0, 1, 0, 0, 1.0, 1.0, 1.0, 1.0, 0, 0, 0], 'drift S3 -> sample: propagation parameters'], ## ['op_SMP_pp', 'f', [0, 0, 1, 0, 0, 1.0, 1.0, 1.0, 1.0, 0, 0, 0], 'sample: propagation parameters'], ## ['op_SMP_D_pp', 'f', [0, 0, 1, 3, 0, 1.0, 1.0, 1.0, 1.0, 0, 0, 0], 'sample -> detector: propagation parameters'], #['op_fin_pp', 'f', [0, 0, 1, 0, 1, 0.1, 5.0, 1.0, 1.5, 0, 0, 0], 'final post-propagation (resize) parameters'], ['op_fin_pp', 'f', [0, 0, 1, 0, 0, 1.0, 1.0, 1.0, 1.0, 0, 0, 0], 'final post-propagation (resize) parameters'], #[ 0]: Auto-Resize (1) or not (0) Before propagation #[ 1]: Auto-Resize (1) or not (0) After propagation #[ 2]: Relative Precision for propagation with Auto-Resizing (1. is nominal) #[ 3]: Allow (1) or not (0) for semi-analytical treatment of the quadratic (leading) phase terms at the propagation #[ 4]: Do any Resizing on Fourier side, using FFT, (1) or not (0) #[ 5]: Horizontal Range modification factor at Resizing (1. 
means no modification) #[ 6]: Horizontal Resolution modification factor at Resizing #[ 7]: Vertical Range modification factor at Resizing #[ 8]: Vertical Resolution modification factor at Resizing #[ 9]: Type of wavefront Shift before Resizing (not yet implemented) #[10]: New Horizontal wavefront Center position after Shift (not yet implemented) #[11]: New Vertical wavefront Center position after Shift (not yet implemented) #[12]: Optional: Orientation of the Output Optical Axis vector in the Incident Beam Frame: Horizontal Coordinate #[13]: Optional: Orientation of the Output Optical Axis vector in the Incident Beam Frame: Vertical Coordinate #[14]: Optional: Orientation of the Output Optical Axis vector in the Incident Beam Frame: Longitudinal Coordinate #[15]: Optional: Orientation of the Horizontal Base vector of the Output Frame in the Incident Beam Frame: Horizontal Coordinate #[16]: Optional: Orientation of the Horizontal Base vector of the Output Frame in the Incident Beam Frame: Vertical Coordinate ] varParam = srwl_uti_ext_options(varParam) #Adding other default options #*********************************Entry if __name__ == "__main__": #---Parse options, defining Beamline elements and running calculations v = srwl_uti_parse_options(varParam) #---Add some constant "parameters" (not allowed to be varied) for the beamline v.und_per = 0.021 #['und_per', 'f', 0.021, 'undulator period [m]'], v.und_len = 1.5 #['und_len', 'f', 1.5, 'undulator length [m]'], v.und_zc = 1.305 #['und_zc', 'f', 1.305, 'undulator center longitudinal position [m]'], v.und_sy = -1 #['und_sy', 'i', -1, 'undulator horizontal magnetic field symmetry vs longitudinal position'], v.und_sx = 1 #['und_sx', 'i', 1, 'undulator vertical magnetic field symmetry vs longitudinal position'], #---Setup optics only if Wavefront Propagation is required: v.ws = True op = set_optics(v) if(v.ws or v.wm) else None #---Run all requested calculations SRWLBeamline('SRX beamline').calc_all(v, op)
license: apache-2.0

repo_name: Tatsh/youtube-dl
path: youtube_dl/extractor/ondemandkorea.py
copies: 62
size: 2036
content:
# coding: utf-8
from __future__ import unicode_literals

from .common import InfoExtractor
from ..utils import (
    ExtractorError,
    js_to_json,
)


class OnDemandKoreaIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?ondemandkorea\.com/(?P<id>[^/]+)\.html'
    _GEO_COUNTRIES = ['US', 'CA']

    _TEST = {
        'url': 'http://www.ondemandkorea.com/ask-us-anything-e43.html',
        'info_dict': {
            'id': 'ask-us-anything-e43',
            'ext': 'mp4',
            'title': 'Ask Us Anything : E43',
            'thumbnail': r're:^https?://.*\.jpg$',
        },
        'params': {
            'skip_download': 'm3u8 download'
        }
    }

    def _real_extract(self, url):
        video_id = self._match_id(url)

        webpage = self._download_webpage(url, video_id, fatal=False)

        if not webpage:
            # Page sometimes returns captcha page with HTTP 403
            raise ExtractorError(
                'Unable to access page. You may have been blocked.',
                expected=True)

        if 'msg_block_01.png' in webpage:
            self.raise_geo_restricted(
                msg='This content is not available in your region',
                countries=self._GEO_COUNTRIES)

        if 'This video is only available to ODK PLUS members.' in webpage:
            raise ExtractorError(
                'This video is only available to ODK PLUS members.',
                expected=True)

        title = self._og_search_title(webpage)

        jw_config = self._parse_json(
            self._search_regex(
                r'(?s)jwplayer\(([\'"])(?:(?!\1).)+\1\)\.setup\s*\((?P<options>.+?)\);',
                webpage, 'jw config', group='options'),
            video_id, transform_source=js_to_json)
        info = self._parse_jwplayer_data(
            jw_config, video_id, require_title=False, m3u8_id='hls',
            base_url=url)

        info.update({
            'title': title,
            'thumbnail': self._og_search_thumbnail(webpage),
        })
        return info
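
# The _TEST case above is exercised by youtube-dl's standard download test
# harness; from a source checkout, the usual invocation is:
#
#   python test/test_download.py TestDownload.test_OnDemandKorea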
license: unlicense

repo_name: luiseduardohdbackup/odoo
path: addons/auth_signup/res_config.py
copies: 445
size: 2860
content:
# -*- coding: utf-8 -*-
##############################################################################
#
#    OpenERP, Open Source Management Solution
#    Copyright (C) 2012-today OpenERP SA (<http://www.openerp.com>)
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#    GNU Affero General Public License for more details
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################

from openerp.osv import osv, fields
from openerp.tools.safe_eval import safe_eval

class base_config_settings(osv.TransientModel):
    _inherit = 'base.config.settings'

    _columns = {
        'auth_signup_reset_password': fields.boolean('Enable password reset from Login page',
            help="This allows users to trigger a password reset from the Login page."),
        'auth_signup_uninvited': fields.boolean('Allow external users to sign up',
            help="If unchecked, only invited users may sign up."),
        'auth_signup_template_user_id': fields.many2one('res.users',
            string='Template user for new users created through signup'),
    }

    def get_default_auth_signup_template_user_id(self, cr, uid, fields, context=None):
        icp = self.pool.get('ir.config_parameter')
        # we use safe_eval on the result, since the value of the parameter is a nonempty string
        return {
            'auth_signup_reset_password': safe_eval(icp.get_param(cr, uid, 'auth_signup.reset_password', 'False')),
            'auth_signup_uninvited': safe_eval(icp.get_param(cr, uid, 'auth_signup.allow_uninvited', 'False')),
            'auth_signup_template_user_id': safe_eval(icp.get_param(cr, uid, 'auth_signup.template_user_id', 'False')),
        }

    def set_auth_signup_template_user_id(self, cr, uid, ids, context=None):
        config = self.browse(cr, uid, ids[0], context=context)
        icp = self.pool.get('ir.config_parameter')
        # we store the repr of the values, since the value of the parameter is a required string
        icp.set_param(cr, uid, 'auth_signup.reset_password', repr(config.auth_signup_reset_password))
        icp.set_param(cr, uid, 'auth_signup.allow_uninvited', repr(config.auth_signup_uninvited))
        icp.set_param(cr, uid, 'auth_signup.template_user_id', repr(config.auth_signup_template_user_id.id))
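
# Background note on the naming convention used above (standard for
# res.config.settings models in OpenERP 7/8): default_get() collects values
# from methods named get_default_<name>, and applying the settings form
# invokes methods named set_<name>; that is how the three fields above
# round-trip through ir.config_parameter.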
license: agpl-3.0

repo_name: imjonsnooow/synapse
path: synapse/tests/test_async.py
copies: 1
size: 3507
content:
import time
import unittest
import threading

import synapse.async as s_async
import synapse.lib.threads as s_threads

from synapse.tests.common import *

class AsyncTests(SynTest):

    def test_async_basics(self):

        boss = s_async.Boss()

        data = {}
        def jobmeth(x, y=20):
            return x + y

        def jobdork(x, y=20):
            raise Exception('hi')

        def jobdone(job):
            name = job[1].get('name')
            data[name] = job

        jid1 = s_async.jobid()
        jid2 = s_async.jobid()

        task1 = (jobmeth, (3,), {})
        task2 = (jobdork, (3,), {})

        job1 = boss.initJob(jid1, task=task1, name='job1', ondone=jobdone)
        job2 = boss.initJob(jid2, task=task2, name='job2', ondone=jobdone)

        self.assertEqual( job1[0], jid1 )

        self.assertEqual( len(boss.jobs()), 2 )

        boss._runJob(job1)
        self.assertEqual( len(boss.jobs()), 1 )

        boss._runJob(job2)
        self.assertEqual( len(boss.jobs()), 0 )

        ret1 = data.get('job1')
        self.assertIsNotNone(ret1)
        self.assertEqual( ret1[1]['ret'], 23 )

        ret2 = data.get('job2')
        self.assertIsNotNone(ret2)
        self.assertEqual( ret2[1]['err'], 'Exception' )

        boss.fini()

    def test_async_pool_basics(self):

        boss = s_async.Boss()
        boss.runBossPool(3)

        data = {}
        def jobmeth(x, y=20):
            return x + y

        def jobdork(x, y=20):
            raise Exception('hi')

        def jobdone(job):
            name = job[1].get('name')
            data[name] = job

        jid1 = s_async.jobid()
        jid2 = s_async.jobid()

        task1 = (jobmeth, (3,), {})
        task2 = (jobdork, (3,), {})

        job1 = boss.initJob(jid1, task=task1, name='job1', ondone=jobdone)
        job2 = boss.initJob(jid2, task=task2, name='job2', ondone=jobdone)

        self.assertEqual( job1[0], jid1 )

        boss.wait(jid1, timeout=1)
        boss.wait(jid2, timeout=1)

        ret1 = data.get('job1')
        self.assertIsNotNone(ret1)
        self.assertEqual( ret1[1]['ret'], 23 )

        ret2 = data.get('job2')
        self.assertIsNotNone(ret2)
        self.assertEqual( ret2[1]['err'], 'Exception' )

        boss.fini()

    def test_async_timeout(self):

        boss = s_async.Boss()

        def myjob():
            time.sleep(0.2)

        jid = s_async.jobid()
        job = boss.initJob(jid, task=(myjob,(),{}), timeout=0.01)

        boss.wait(jid)

        self.assertEqual( job[1]['err'], 'HitMaxTime' )

        boss.fini()

    def test_async_ondone(self):

        boss = s_async.Boss()
        boss.runBossPool(3)

        data = {}
        evt = threading.Event()

        def ondone(job):
            data['job'] = job
            evt.set()

        def woot():
            return 10

        jid = s_async.jobid()
        task = s_async.newtask(woot)
        boss.initJob(jid, task=task, ondone=ondone)

        self.assertTrue( evt.wait(timeout=1) )

        job = data.get('job')
        self.assertEqual( job[1].get('ret'), 10 )

        boss.fini()

    def test_async_wait_timeout(self):

        def longtime():
            time.sleep(0.1)

        boss = s_async.Boss()
        boss.runBossPool(1)

        jid = s_async.jobid()
        task = s_async.newtask(longtime)

        boss.initJob(jid, task=task)

        self.assertFalse( boss.wait(jid,timeout=0.01) )

        self.assertTrue( boss.wait(jid,timeout=1) )

        boss.fini()
license: apache-2.0

repo_name: losfair/android_kernel_sony_u8500_3_4
path: scripts/rt-tester/rt-tester.py
copies: 11005
size: 5307
content:
#!/usr/bin/python
#
# rt-mutex tester
#
# (C) 2006 Thomas Gleixner <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
import os
import sys
import getopt
import shutil
import string

# Globals
quiet = 0
test = 0
comments = 0

sysfsprefix = "/sys/devices/system/rttest/rttest"
statusfile = "/status"
commandfile = "/command"

# Command opcodes
cmd_opcodes = {
    "schedother"    : "1",
    "schedfifo"     : "2",
    "lock"          : "3",
    "locknowait"    : "4",
    "lockint"       : "5",
    "lockintnowait" : "6",
    "lockcont"      : "7",
    "unlock"        : "8",
    "signal"        : "11",
    "resetevent"    : "98",
    "reset"         : "99",
    }

test_opcodes = {
    "prioeq"      : ["P", "eq", None],
    "priolt"      : ["P", "lt", None],
    "priogt"      : ["P", "gt", None],
    "nprioeq"     : ["N", "eq", None],
    "npriolt"     : ["N", "lt", None],
    "npriogt"     : ["N", "gt", None],
    "unlocked"    : ["M", "eq", 0],
    "trylock"     : ["M", "eq", 1],
    "blocked"     : ["M", "eq", 2],
    "blockedwake" : ["M", "eq", 3],
    "locked"      : ["M", "eq", 4],
    "opcodeeq"    : ["O", "eq", None],
    "opcodelt"    : ["O", "lt", None],
    "opcodegt"    : ["O", "gt", None],
    "eventeq"     : ["E", "eq", None],
    "eventlt"     : ["E", "lt", None],
    "eventgt"     : ["E", "gt", None],
    }

# Print usage information
def usage():
    print "rt-tester.py <-c -h -q -t> <testfile>"
    print "  -c    display comments after first command"
    print "  -h    help"
    print "  -q    quiet mode"
    print "  -t    test mode (syntax check)"
    print "  testfile: read test specification from testfile"
    print "            otherwise from stdin"
    return

# Print progress when not in quiet mode
def progress(str):
    if not quiet:
        print str

# Analyse a status value
def analyse(val, top, arg):

    intval = int(val)

    if top[0] == "M":
        intval = intval / (10 ** int(arg))
        intval = intval % 10
        argval = top[2]
    elif top[0] == "O":
        argval = int(cmd_opcodes.get(arg, arg))
    else:
        argval = int(arg)

    # progress("%d %s %d" %(intval, top[1], argval))

    if top[1] == "eq" and intval == argval:
        return 1
    if top[1] == "lt" and intval < argval:
        return 1
    if top[1] == "gt" and intval > argval:
        return 1
    return 0

# Parse the commandline
try:
    (options, arguments) = getopt.getopt(sys.argv[1:],'chqt')
except getopt.GetoptError, ex:
    usage()
    sys.exit(1)

# Parse commandline options
for option, value in options:
    if option == "-c":
        comments = 1
    elif option == "-q":
        quiet = 1
    elif option == "-t":
        test = 1
    elif option == '-h':
        usage()
        sys.exit(0)

# Select the input source
if arguments:
    try:
        fd = open(arguments[0])
    except Exception,ex:
        sys.stderr.write("File not found %s\n" %(arguments[0]))
        sys.exit(1)
else:
    fd = sys.stdin

linenr = 0

# Read the test patterns
while 1:

    linenr = linenr + 1
    line = fd.readline()
    if not len(line):
        break

    line = line.strip()
    parts = line.split(":")

    if not parts or len(parts) < 1:
        continue

    if len(parts[0]) == 0:
        continue

    if parts[0].startswith("#"):
        if comments > 1:
            progress(line)
        continue

    if comments == 1:
        comments = 2

    progress(line)

    cmd = parts[0].strip().lower()
    opc = parts[1].strip().lower()
    tid = parts[2].strip()
    dat = parts[3].strip()

    try:
        # Test or wait for a status value
        if cmd == "t" or cmd == "w":
            testop = test_opcodes[opc]

            fname = "%s%s%s" %(sysfsprefix, tid, statusfile)
            if test:
                print fname
                continue

            while 1:
                query = 1
                fsta = open(fname, 'r')
                status = fsta.readline().strip()
                fsta.close()
                stat = status.split(",")
                for s in stat:
                    s = s.strip()
                    if s.startswith(testop[0]):
                        # Separate status value
                        val = s[2:].strip()
                        query = analyse(val, testop, dat)
                        break
                if query or cmd == "t":
                    break

                progress("   " + status)

            if not query:
                sys.stderr.write("Test failed in line %d\n" %(linenr))
                sys.exit(1)

        # Issue a command to the tester
        elif cmd == "c":
            cmdnr = cmd_opcodes[opc]
            # Build command string and sys filename
            cmdstr = "%s:%s" %(cmdnr, dat)
            fname = "%s%s%s" %(sysfsprefix, tid, commandfile)
            if test:
                print fname
                continue
            fcmd = open(fname, 'w')
            fcmd.write(cmdstr)
            fcmd.close()

    except Exception,ex:
        sys.stderr.write(str(ex))
        sys.stderr.write("\nSyntax error in line %d\n" %(linenr))
        if not test:
            fd.close()
            sys.exit(1)

# Normal exit pass
print "Pass"
sys.exit(0)
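
# A hypothetical test specification in the format parsed above
# ("command : opcode : threadid : data", with names taken from the
# cmd_opcodes and test_opcodes tables):
#
#   C: schedfifo:  0: 80
#   C: locknowait: 0: 0
#   W: locked:     0: 0
#   C: unlock:     0: 0
#   T: unlocked:   0: 0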
license: gpl-2.0

repo_name: rob356/SickRage
path: lib/unidecode/x06e.py
copies: 252
size: 4640
content:
# Transliteration table for the Unicode block U+6E00-U+6EFF; entries are
# indexed by the low byte of the codepoint, eight per row.
data = (
    'Ben ', 'Yuan ', 'Wen ', 'Re ', 'Fei ', 'Qing ', 'Yuan ', 'Ke ',          # 0x00-0x07
    'Ji ', 'She ', 'Yuan ', 'Shibui ', 'Lu ', 'Zi ', 'Du ', '[?] ',           # 0x08-0x0f
    'Jian ', 'Min ', 'Pi ', 'Tani ', 'Yu ', 'Yuan ', 'Shen ', 'Shen ',        # 0x10-0x17
    'Rou ', 'Huan ', 'Zhu ', 'Jian ', 'Nuan ', 'Yu ', 'Qiu ', 'Ting ',        # 0x18-0x1f
    'Qu ', 'Du ', 'Feng ', 'Zha ', 'Bo ', 'Wo ', 'Wo ', 'Di ',                # 0x20-0x27
    'Wei ', 'Wen ', 'Ru ', 'Xie ', 'Ce ', 'Wei ', 'Ge ', 'Gang ',             # 0x28-0x2f
    'Yan ', 'Hong ', 'Xuan ', 'Mi ', 'Ke ', 'Mao ', 'Ying ', 'Yan ',          # 0x30-0x37
    'You ', 'Hong ', 'Miao ', 'Xing ', 'Mei ', 'Zai ', 'Hun ', 'Nai ',        # 0x38-0x3f
    'Kui ', 'Shi ', 'E ', 'Pai ', 'Mei ', 'Lian ', 'Qi ', 'Qi ',              # 0x40-0x47
    'Mei ', 'Tian ', 'Cou ', 'Wei ', 'Can ', 'Tuan ', 'Mian ', 'Hui ',        # 0x48-0x4f
    'Mo ', 'Xu ', 'Ji ', 'Pen ', 'Jian ', 'Jian ', 'Hu ', 'Feng ',            # 0x50-0x57
    'Xiang ', 'Yi ', 'Yin ', 'Zhan ', 'Shi ', 'Jie ', 'Cheng ', 'Huang ',     # 0x58-0x5f
    'Tan ', 'Yu ', 'Bi ', 'Min ', 'Shi ', 'Tu ', 'Sheng ', 'Yong ',           # 0x60-0x67
    'Qu ', 'Zhong ', 'Suei ', 'Jiu ', 'Jiao ', 'Qiou ', 'Yin ', 'Tang ',      # 0x68-0x6f
    'Long ', 'Huo ', 'Yuan ', 'Nan ', 'Ban ', 'You ', 'Quan ', 'Chui ',       # 0x70-0x77
    'Liang ', 'Chan ', 'Yan ', 'Chun ', 'Nie ', 'Zi ', 'Wan ', 'Shi ',        # 0x78-0x7f
    'Man ', 'Ying ', 'Ratsu ', 'Kui ', '[?] ', 'Jian ', 'Xu ', 'Lu ',         # 0x80-0x87
    'Gui ', 'Gai ', '[?] ', '[?] ', 'Po ', 'Jin ', 'Gui ', 'Tang ',           # 0x88-0x8f
    'Yuan ', 'Suo ', 'Yuan ', 'Lian ', 'Yao ', 'Meng ', 'Zhun ', 'Sheng ',    # 0x90-0x97
    'Ke ', 'Tai ', 'Da ', 'Wa ', 'Liu ', 'Gou ', 'Sao ', 'Ming ',             # 0x98-0x9f
    'Zha ', 'Shi ', 'Yi ', 'Lun ', 'Ma ', 'Pu ', 'Wei ', 'Li ',               # 0xa0-0xa7
    'Cai ', 'Wu ', 'Xi ', 'Wen ', 'Qiang ', 'Ze ', 'Shi ', 'Su ',             # 0xa8-0xaf
    'Yi ', 'Zhen ', 'Sou ', 'Yun ', 'Xiu ', 'Yin ', 'Rong ', 'Hun ',          # 0xb0-0xb7
    'Su ', 'Su ', 'Ni ', 'Ta ', 'Shi ', 'Ru ', 'Wei ', 'Pan ',                # 0xb8-0xbf
    'Chu ', 'Chu ', 'Pang ', 'Weng ', 'Cang ', 'Mie ', 'He ', 'Dian ',        # 0xc0-0xc7
    'Hao ', 'Huang ', 'Xi ', 'Zi ', 'Di ', 'Zhi ', 'Ying ', 'Fu ',            # 0xc8-0xcf
    'Jie ', 'Hua ', 'Ge ', 'Zi ', 'Tao ', 'Teng ', 'Sui ', 'Bi ',             # 0xd0-0xd7
    'Jiao ', 'Hui ', 'Gun ', 'Yin ', 'Gao ', 'Long ', 'Zhi ', 'Yan ',         # 0xd8-0xdf
    'She ', 'Man ', 'Ying ', 'Chun ', 'Lu ', 'Lan ', 'Luan ', '[?] ',         # 0xe0-0xe7
    'Bin ', 'Tan ', 'Yu ', 'Sou ', 'Hu ', 'Bi ', 'Biao ', 'Zhi ',             # 0xe8-0xef
    'Jiang ', 'Kou ', 'Shen ', 'Shang ', 'Di ', 'Mi ', 'Ao ', 'Lu ',          # 0xf0-0xf7
    'Hu ', 'Hu ', 'You ', 'Chan ', 'Fan ', 'Yong ', 'Gun ', 'Man ',           # 0xf8-0xff
)
gpl-3.0
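The tuple above is the transliteration table for Unicode code points U+6E00 through U+6EFF: entry 0xNN holds the ASCII replacement for U+6ENN. A minimal sketch of how such a table is consulted (illustrative only; unidecode itself resolves the right xNN module from the code point's high byte at runtime):

def transliterate(char):
    # High byte selects the table (0x6E here); low byte indexes into it.
    codepoint = ord(char)
    assert codepoint >> 8 == 0x6E, 'this table only covers U+6E00..U+6EFF'
    return data[codepoint & 0xFF]

print(transliterate(u'\u6e29'))  # U+6E29 (wen, "warm") -> 'Wen ' at index 0x29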
akshayaurora/kivy
kivy/uix/colorpicker.py
3
16388
''' Color Picker ============ .. versionadded:: 1.7.0 .. warning:: This widget is experimental. Its use and API can change at any time until this warning is removed. .. image:: images/colorpicker.png :align: right The ColorPicker widget allows a user to select a color from a chromatic wheel where pinch and zoom can be used to change the wheel's saturation. Sliders and TextInputs are also provided for entering the RGBA/HSV/HEX values directly. Usage:: clr_picker = ColorPicker() parent.add_widget(clr_picker) # To monitor changes, we can bind to color property changes def on_color(instance, value): print "RGBA = ", str(value) # or instance.color print "HSV = ", str(instance.hsv) print "HEX = ", str(instance.hex_color) clr_picker.bind(color=on_color) ''' __all__ = ('ColorPicker', 'ColorWheel') from kivy.uix.relativelayout import RelativeLayout from kivy.uix.widget import Widget from kivy.properties import (NumericProperty, BoundedNumericProperty, ListProperty, ObjectProperty, ReferenceListProperty, StringProperty, AliasProperty) from kivy.clock import Clock from kivy.graphics import Mesh, InstructionGroup, Color from kivy.utils import get_color_from_hex, get_hex_from_color from kivy.logger import Logger from math import cos, sin, pi, sqrt, atan from colorsys import rgb_to_hsv, hsv_to_rgb def distance(pt1, pt2): return sqrt((pt1[0] - pt2[0]) ** 2. + (pt1[1] - pt2[1]) ** 2.) def polar_to_rect(origin, r, theta): return origin[0] + r * cos(theta), origin[1] + r * sin(theta) def rect_to_polar(origin, x, y): if x == origin[0]: if y == origin[1]: return (0, 0) elif y > origin[1]: return (y - origin[1], pi / 2.) else: return (origin[1] - y, 3 * pi / 2.) t = atan(float((y - origin[1])) / (x - origin[0])) if x - origin[0] < 0: t += pi if t < 0: t += 2 * pi return (distance((x, y), origin), t) class ColorWheel(Widget): '''Chromatic wheel for the ColorPicker. .. versionchanged:: 1.7.1 `font_size`, `font_name` and `foreground_color` have been removed. The sizing is now the same as others widget, based on 'sp'. Orientation is also automatically determined according to the width/height ratio. ''' r = BoundedNumericProperty(0, min=0, max=1) '''The Red value of the color currently selected. :attr:`r` is a :class:`~kivy.properties.BoundedNumericProperty` and can be a value from 0 to 1. It defaults to 0. ''' g = BoundedNumericProperty(0, min=0, max=1) '''The Green value of the color currently selected. :attr:`g` is a :class:`~kivy.properties.BoundedNumericProperty` and can be a value from 0 to 1. ''' b = BoundedNumericProperty(0, min=0, max=1) '''The Blue value of the color currently selected. :attr:`b` is a :class:`~kivy.properties.BoundedNumericProperty` and can be a value from 0 to 1. ''' a = BoundedNumericProperty(0, min=0, max=1) '''The Alpha value of the color currently selected. :attr:`a` is a :class:`~kivy.properties.BoundedNumericProperty` and can be a value from 0 to 1. ''' color = ReferenceListProperty(r, g, b, a) '''The holds the color currently selected. :attr:`color` is a :class:`~kivy.properties.ReferenceListProperty` and contains a list of `r`, `g`, `b`, `a` values. 
''' _origin = ListProperty((100, 100)) _radius = NumericProperty(100) _piece_divisions = NumericProperty(10) _pieces_of_pie = NumericProperty(16) _inertia_slowdown = 1.25 _inertia_cutoff = .25 _num_touches = 0 _pinch_flag = False _hsv = ListProperty([1, 1, 1, 0]) def __init__(self, **kwargs): super(ColorWheel, self).__init__(**kwargs) pdv = self._piece_divisions self.sv_s = [(float(x) / pdv, 1) for x in range(pdv)] + [ (1, float(y) / pdv) for y in reversed(range(pdv))] def on__origin(self, instance, value): self.init_wheel(None) def on__radius(self, instance, value): self.init_wheel(None) def init_wheel(self, dt): # initialize list to hold all meshes self.canvas.clear() self.arcs = [] self.sv_idx = 0 pdv = self._piece_divisions ppie = self._pieces_of_pie for r in range(pdv): for t in range(ppie): self.arcs.append( _ColorArc( self._radius * (float(r) / float(pdv)), self._radius * (float(r + 1) / float(pdv)), 2 * pi * (float(t) / float(ppie)), 2 * pi * (float(t + 1) / float(ppie)), origin=self._origin, color=(float(t) / ppie, self.sv_s[self.sv_idx + r][0], self.sv_s[self.sv_idx + r][1], 1))) self.canvas.add(self.arcs[-1]) def recolor_wheel(self): ppie = self._pieces_of_pie for idx, segment in enumerate(self.arcs): segment.change_color( sv=self.sv_s[int(self.sv_idx + idx / ppie)]) def change_alpha(self, val): for idx, segment in enumerate(self.arcs): segment.change_color(a=val) def inertial_incr_sv_idx(self, dt): # if its already zoomed all the way out, cancel the inertial zoom if self.sv_idx == len(self.sv_s) - self._piece_divisions: return False self.sv_idx += 1 self.recolor_wheel() if dt * self._inertia_slowdown > self._inertia_cutoff: return False else: Clock.schedule_once(self.inertial_incr_sv_idx, dt * self._inertia_slowdown) def inertial_decr_sv_idx(self, dt): # if its already zoomed all the way in, cancel the inertial zoom if self.sv_idx == 0: return False self.sv_idx -= 1 self.recolor_wheel() if dt * self._inertia_slowdown > self._inertia_cutoff: return False else: Clock.schedule_once(self.inertial_decr_sv_idx, dt * self._inertia_slowdown) def on_touch_down(self, touch): r = self._get_touch_r(touch.pos) if r > self._radius: return False # code is still set up to allow pinch to zoom, but this is # disabled for now since it was fiddly with small wheels. # Comment out these lines and adjust on_touch_move to reenable # this. if self._num_touches != 0: return False touch.grab(self) self._num_touches += 1 touch.ud['anchor_r'] = r touch.ud['orig_sv_idx'] = self.sv_idx touch.ud['orig_time'] = Clock.get_time() def on_touch_move(self, touch): if touch.grab_current is not self: return r = self._get_touch_r(touch.pos) goal_sv_idx = (touch.ud['orig_sv_idx'] - int((r - touch.ud['anchor_r']) / (float(self._radius) / self._piece_divisions))) if ( goal_sv_idx != self.sv_idx and goal_sv_idx >= 0 and goal_sv_idx <= len(self.sv_s) - self._piece_divisions ): # this is a pinch to zoom self._pinch_flag = True self.sv_idx = goal_sv_idx self.recolor_wheel() def on_touch_up(self, touch): if touch.grab_current is not self: return touch.ungrab(self) self._num_touches -= 1 if self._pinch_flag: if self._num_touches == 0: # user was pinching, and now both fingers are up. 
Return # to normal if self.sv_idx > touch.ud['orig_sv_idx']: Clock.schedule_once( self.inertial_incr_sv_idx, (Clock.get_time() - touch.ud['orig_time']) / (self.sv_idx - touch.ud['orig_sv_idx'])) if self.sv_idx < touch.ud['orig_sv_idx']: Clock.schedule_once( self.inertial_decr_sv_idx, (Clock.get_time() - touch.ud['orig_time']) / (self.sv_idx - touch.ud['orig_sv_idx'])) self._pinch_flag = False return else: # user was pinching, and at least one finger remains. We # don't want to treat the remaining fingers as touches return else: r, theta = rect_to_polar(self._origin, *touch.pos) # if touch up is outside the wheel, ignore if r >= self._radius: return # compute which ColorArc is being touched (they aren't # widgets so we don't get collide_point) and set # _hsv based on the selected ColorArc piece = int((theta / (2 * pi)) * self._pieces_of_pie) division = int((r / self._radius) * self._piece_divisions) hsva = list( self.arcs[self._pieces_of_pie * division + piece].color) self.color = list(hsv_to_rgb(*hsva[:3])) + hsva[-1:] def _get_touch_r(self, pos): return distance(pos, self._origin) class _ColorArc(InstructionGroup): def __init__(self, r_min, r_max, theta_min, theta_max, color=(0, 0, 1, 1), origin=(0, 0), **kwargs): super(_ColorArc, self).__init__(**kwargs) self.origin = origin self.r_min = r_min self.r_max = r_max self.theta_min = theta_min self.theta_max = theta_max self.color = color self.color_instr = Color(*color, mode='hsv') self.add(self.color_instr) self.mesh = self.get_mesh() self.add(self.mesh) def __str__(self): return "r_min: %s r_max: %s theta_min: %s theta_max: %s color: %s" % ( self.r_min, self.r_max, self.theta_min, self.theta_max, self.color ) def get_mesh(self): v = [] # first calculate the distance between endpoints of the outer # arc, so we know how many steps to use when calculating # vertices theta_step_outer = 0.1 theta = self.theta_max - self.theta_min d_outer = int(theta / theta_step_outer) theta_step_outer = theta / d_outer if self.r_min == 0: for x in range(0, d_outer, 2): v += (polar_to_rect(self.origin, self.r_max, self.theta_min + x * theta_step_outer ) * 2) v += polar_to_rect(self.origin, 0, 0) * 2 v += (polar_to_rect(self.origin, self.r_max, self.theta_min + (x + 1) * theta_step_outer ) * 2) if not d_outer & 1: # add a last point if d_outer is even v += (polar_to_rect(self.origin, self.r_max, self.theta_min + d_outer * theta_step_outer ) * 2) else: for x in range(d_outer + 1): v += (polar_to_rect(self.origin, self.r_min, self.theta_min + x * theta_step_outer ) * 2) v += (polar_to_rect(self.origin, self.r_max, self.theta_min + x * theta_step_outer ) * 2) return Mesh(vertices=v, indices=range(int(len(v) / 4)), mode='triangle_strip') def change_color(self, color=None, color_delta=None, sv=None, a=None): self.remove(self.color_instr) if color is not None: self.color = color elif color_delta is not None: self.color = [self.color[i] + color_delta[i] for i in range(4)] elif sv is not None: self.color = (self.color[0], sv[0], sv[1], self.color[3]) elif a is not None: self.color = (self.color[0], self.color[1], self.color[2], a) self.color_instr = Color(*self.color, mode='hsv') self.insert(0, self.color_instr) class ColorPicker(RelativeLayout): ''' See module documentation. ''' font_name = StringProperty('data/fonts/RobotoMono-Regular.ttf') '''Specifies the font used on the ColorPicker. :attr:`font_name` is a :class:`~kivy.properties.StringProperty` and defaults to 'data/fonts/RobotoMono-Regular.ttf'. 
''' color = ListProperty((1, 1, 1, 1)) '''The :attr:`color` holds the color currently selected in rgba format. :attr:`color` is a :class:`~kivy.properties.ListProperty` and defaults to (1, 1, 1, 1). ''' def _get_hsv(self): return rgb_to_hsv(*self.color[:3]) def _set_hsv(self, value): if self._updating_clr: return self.set_color(value) hsv = AliasProperty(_get_hsv, _set_hsv, bind=('color', )) '''The :attr:`hsv` holds the color currently selected in hsv format. :attr:`hsv` is a :class:`~kivy.properties.ListProperty` and defaults to (1, 1, 1). ''' def _get_hex(self): return get_hex_from_color(self.color) def _set_hex(self, value): if self._updating_clr: return self.set_color(get_color_from_hex(value)[:4]) hex_color = AliasProperty(_get_hex, _set_hex, bind=('color',), cache=True) '''The :attr:`hex_color` holds the currently selected color in hex. :attr:`hex_color` is an :class:`~kivy.properties.AliasProperty` and defaults to `#ffffffff`. ''' wheel = ObjectProperty(None) '''The :attr:`wheel` holds the color wheel. :attr:`wheel` is an :class:`~kivy.properties.ObjectProperty` and defaults to None. ''' _update_clr_ev = _update_hex_ev = None # now used only internally. foreground_color = ListProperty((1, 1, 1, 1)) def _trigger_update_clr(self, mode, clr_idx, text): if self._updating_clr: return self._updating_clr = True self._upd_clr_list = mode, clr_idx, text ev = self._update_clr_ev if ev is None: ev = self._update_clr_ev = Clock.create_trigger(self._update_clr) ev() def _update_clr(self, dt): # to prevent interaction between hsv/rgba, we work internaly using rgba mode, clr_idx, text = self._upd_clr_list try: text = min(255, max(0, float(text))) if mode == 'rgb': self.color[clr_idx] = float(text) / 255. else: hsv = list(self.hsv[:]) hsv[clr_idx] = float(text) / 255. self.color[:3] = hsv_to_rgb(*hsv) except ValueError: Logger.warning('ColorPicker: invalid value : {}'.format(text)) finally: self._updating_clr = False def _update_hex(self, dt): try: if len(self._upd_hex_list) != 9: return self._updating_clr = False self.hex_color = self._upd_hex_list finally: self._updating_clr = False def _trigger_update_hex(self, text): if self._updating_clr: return self._updating_clr = True self._upd_hex_list = text ev = self._update_hex_ev if ev is None: ev = self._update_hex_ev = Clock.create_trigger(self._update_hex) ev() def set_color(self, color): self._updating_clr = True if len(color) == 3: self.color[:3] = color else: self.color = color self._updating_clr = False def __init__(self, **kwargs): self._updating_clr = False super(ColorPicker, self).__init__(**kwargs) if __name__ in ('__android__', '__main__'): from kivy.app import App class ColorPickerApp(App): def build(self): cp = ColorPicker(pos_hint={'center_x': .5, 'center_y': .5}, size_hint=(1, 1)) return cp ColorPickerApp().run()
mit
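A minimal runnable sketch of the ColorPicker inside an app, binding to the color property as the module docstring describes (Python 3 print syntax assumed; the docstring's own example is Python 2):

from kivy.app import App
from kivy.uix.colorpicker import ColorPicker

class PickerApp(App):
    def build(self):
        picker = ColorPicker()
        # hsv and hex_color are alias properties that track color
        picker.bind(color=self.on_color)
        return picker

    def on_color(self, instance, value):
        print('RGBA =', value)
        print('HSV  =', instance.hsv)
        print('HEX  =', instance.hex_color)

if __name__ == '__main__':
    PickerApp().run()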
alhashash/odoomrp-wip
mrp_operations_time_control/models/operation_time.py
16
3343
# -*- encoding: utf-8 -*- ############################################################################## # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as published # by the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see http://www.gnu.org/licenses/. # ############################################################################## from openerp import api, models, fields class MrpProductionWorkcenterLine(models.Model): _inherit = 'mrp.production.workcenter.line' operation_time_lines = fields.One2many('operation.time.line', 'operation_time', string='Operation Time Lines') def _create_operation_line(self): self.env['operation.time.line'].create({ 'start_date': fields.Datetime.now(), 'operation_time': self.id, 'user': self.env.uid}) def _write_end_date_operation_line(self): self.operation_time_lines[-1].end_date = fields.Datetime.now() def action_start_working(self): result = super(MrpProductionWorkcenterLine, self).action_start_working() self._create_operation_line() return result def action_pause(self): result = super(MrpProductionWorkcenterLine, self).action_pause() self._write_end_date_operation_line() return result def action_resume(self): result = super(MrpProductionWorkcenterLine, self).action_resume() self._create_operation_line() return result def action_done(self): result = super(MrpProductionWorkcenterLine, self).action_done() self._write_end_date_operation_line() return result class OperationTimeLine(models.Model): _name = 'operation.time.line' _rec_name = 'operation_time' def _default_user(self): return self.env.uid start_date = fields.Datetime(string='Start Date') end_date = fields.Datetime(string='End Date') operation_time = fields.Many2one('mrp.production.workcenter.line') uptime = fields.Float(string='Uptime', compute='operation_uptime', store=True, digits=(12, 6)) production = fields.Many2one('mrp.production', related='operation_time.production_id', string='Production', store=True) user = fields.Many2one('res.users', string='User', default=_default_user) @api.one @api.depends('start_date', 'end_date') def operation_uptime(self): if self.end_date and self.start_date: timedelta = fields.Datetime.from_string(self.end_date) - \ fields.Datetime.from_string(self.start_date) self.uptime = timedelta.total_seconds() / 3600. else: self.uptime = 0
agpl-3.0
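The operation_uptime compute method above reduces to plain datetime arithmetic; a standalone sketch of the same calculation outside the Odoo ORM:

from datetime import datetime

def uptime_hours(start, end):
    # Mirrors operation_uptime: elapsed seconds converted to hours,
    # falling back to 0 when either bound is missing.
    if not (start and end):
        return 0
    return (end - start).total_seconds() / 3600.

print(uptime_hours(datetime(2015, 6, 1, 8, 0), datetime(2015, 6, 1, 12, 30)))  # 4.5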
milfeld/lammps
python/examples/mc.py
15
2535
#!/usr/bin/env python -i # preceeding line should have path for Python on your machine # mc.py # Purpose: mimic operation of example/MC/in.mc via Python # Syntax: mc.py in.mc # in.mc = LAMMPS input script import sys,random,math # set these parameters # make sure neigh skin (in in.mc) > 2*deltamove nloop = 3000 deltaperturb = 0.2 deltamove = 0.1 kT = 0.05 random.seed(27848) # parse command line argv = sys.argv if len(argv) != 2: print "Syntax: mc.py in.mc" sys.exit() infile = sys.argv[1] from lammps import lammps lmp = lammps() # run infile one line at a time # just sets up MC problem lines = open(infile,'r').readlines() for line in lines: lmp.command(line) lmp.command("variable e equal pe") # run 0 to get energy of perfect lattice # emin = minimum energy lmp.command("run 0") natoms = lmp.extract_global("natoms",0) emin = lmp.extract_compute("thermo_pe",0,0) / natoms lmp.command("variable emin equal $e") # disorder the system # estart = initial energy x = lmp.extract_atom("x",3) for i in xrange(natoms): x[i][0] += deltaperturb * (2*random.random()-1) x[i][1] += deltaperturb * (2*random.random()-1) lmp.command("variable elast equal $e") lmp.command("thermo_style custom step v_emin v_elast pe") lmp.command("run 0") x = lmp.extract_atom("x",3) lmp.command("variable elast equal $e") estart = lmp.extract_compute("thermo_pe",0,0) / natoms # loop over Monte Carlo moves # extract x after every run, in case reneighboring changed ptr in LAMMPS elast = estart naccept = 0 for i in xrange(nloop): iatom = random.randrange(0,natoms) x0 = x[iatom][0] y0 = x[iatom][1] x[iatom][0] += deltamove * (2*random.random()-1) x[iatom][1] += deltamove * (2*random.random()-1) lmp.command("run 1 pre no post no") x = lmp.extract_atom("x",3) e = lmp.extract_compute("thermo_pe",0,0) / natoms if e <= elast: elast = e lmp.command("variable elast equal $e") naccept += 1 elif random.random() <= math.exp(natoms*(elast-e)/kT): elast = e lmp.command("variable elast equal $e") naccept += 1 else: x[iatom][0] = x0 x[iatom][1] = y0 # final energy and stats lmp.command("variable nbuild equal nbuild") nbuild = lmp.extract_variable("nbuild",None,0) lmp.command("run 0") estop = lmp.extract_compute("thermo_pe",0,0) / natoms print "MC stats:" print " starting energy =",estart print " final energy =",estop print " minimum energy of perfect lattice =",emin print " accepted MC moves =",naccept print " neighbor list rebuilds =",nbuild
gpl-2.0
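The accept/reject test inside the Monte Carlo loop above is the standard Metropolis criterion applied to per-atom energies; isolated as a pure function it reads:

import math
import random

def metropolis_accept(e_last, e_new, natoms, kT):
    # Downhill moves are always accepted; uphill moves are accepted with
    # Boltzmann probability exp(-dE/kT), dE scaled back up to the full system.
    if e_new <= e_last:
        return True
    return random.random() <= math.exp(natoms * (e_last - e_new) / kT)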
linglung/ytdl
youtube_dl/extractor/morningstar.py
220
1732
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor class MorningstarIE(InfoExtractor): IE_DESC = 'morningstar.com' _VALID_URL = r'https?://(?:www\.)?morningstar\.com/[cC]over/video[cC]enter\.aspx\?id=(?P<id>[0-9]+)' _TEST = { 'url': 'http://www.morningstar.com/cover/videocenter.aspx?id=615869', 'md5': '6c0acface7a787aadc8391e4bbf7b0f5', 'info_dict': { 'id': '615869', 'ext': 'mp4', 'title': 'Get Ahead of the Curve on 2013 Taxes', 'description': "Vanguard's Joel Dickson on managing higher tax rates for high-income earners and fund capital-gain distributions in 2013.", 'thumbnail': r're:^https?://.*m(?:orning)?star\.com/.+thumb\.jpg$' } } def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) video_id = mobj.group('id') webpage = self._download_webpage(url, video_id) title = self._html_search_regex( r'<h1 id="titleLink">(.*?)</h1>', webpage, 'title') video_url = self._html_search_regex( r'<input type="hidden" id="hidVideoUrl" value="([^"]+)"', webpage, 'video URL') thumbnail = self._html_search_regex( r'<input type="hidden" id="hidSnapshot" value="([^"]+)"', webpage, 'thumbnail', fatal=False) description = self._html_search_regex( r'<div id="mstarDeck".*?>(.*?)</div>', webpage, 'description', fatal=False) return { 'id': video_id, 'title': title, 'url': video_url, 'thumbnail': thumbnail, 'description': description, }
unlicense
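A quick standalone sanity check of the _VALID_URL pattern against the extractor's own test URL (run outside youtube-dl's test harness):

import re

_VALID_URL = r'https?://(?:www\.)?morningstar\.com/[cC]over/video[cC]enter\.aspx\?id=(?P<id>[0-9]+)'
mobj = re.match(_VALID_URL, 'http://www.morningstar.com/cover/videocenter.aspx?id=615869')
print(mobj.group('id'))  # -> '615869'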
defionscode/ansible
test/units/modules/network/slxos/test_slxos_config.py
30
8136
# # (c) 2018 Extreme Networks Inc. # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # from __future__ import (absolute_import, division, print_function) __metaclass__ = type from units.compat.mock import patch from ansible.modules.network.slxos import slxos_config from units.modules.utils import set_module_args from .slxos_module import TestSlxosModule, load_fixture class TestSlxosConfigModule(TestSlxosModule): module = slxos_config def setUp(self): super(TestSlxosConfigModule, self).setUp() self.mock_get_config = patch('ansible.modules.network.slxos.slxos_config.get_config') self.get_config = self.mock_get_config.start() self.mock_load_config = patch('ansible.modules.network.slxos.slxos_config.load_config') self.load_config = self.mock_load_config.start() self.mock_run_commands = patch('ansible.modules.network.slxos.slxos_config.run_commands') self.run_commands = self.mock_run_commands.start() def tearDown(self): super(TestSlxosConfigModule, self).tearDown() self.mock_get_config.stop() self.mock_load_config.stop() self.mock_run_commands.stop() def load_fixtures(self, commands=None): config_file = 'slxos_config_config.cfg' self.get_config.return_value = load_fixture(config_file) self.load_config.return_value = None def test_slxos_config_unchanged(self): src = load_fixture('slxos_config_config.cfg') set_module_args(dict(src=src)) self.execute_module() def test_slxos_config_src(self): src = load_fixture('slxos_config_src.cfg') set_module_args(dict(src=src)) commands = ['hostname foo', 'interface Ethernet 0/0', 'no ip address'] self.execute_module(changed=True, commands=commands) def test_slxos_config_backup(self): set_module_args(dict(backup=True)) result = self.execute_module() self.assertIn('__backup__', result) def test_slxos_config_save_always(self): self.run_commands.return_value = "Hostname foo" set_module_args(dict(save_when='always')) self.execute_module(changed=True) self.assertEqual(self.run_commands.call_count, 1) self.assertEqual(self.get_config.call_count, 0) self.assertEqual(self.load_config.call_count, 0) args = self.run_commands.call_args[0][1] self.assertIn('copy running-config startup-config', args['command']) def test_slxos_config_save_changed_true(self): src = load_fixture('slxos_config_src.cfg') set_module_args(dict(src=src, save_when='changed')) commands = ['hostname foo', 'interface Ethernet 0/0', 'no ip address'] self.execute_module(changed=True, commands=commands) self.assertEqual(self.run_commands.call_count, 1) self.assertEqual(self.get_config.call_count, 1) self.assertEqual(self.load_config.call_count, 1) args = self.run_commands.call_args[0][1] self.assertIn('copy running-config startup-config', args['command']) def test_slxos_config_save_changed_false(self): set_module_args(dict(save_when='changed')) self.execute_module(changed=False) self.assertEqual(self.run_commands.call_count, 0) self.assertEqual(self.get_config.call_count, 0) 
self.assertEqual(self.load_config.call_count, 0) def test_slxos_config_lines_wo_parents(self): set_module_args(dict(lines=['hostname foo'])) commands = ['hostname foo'] self.execute_module(changed=True, commands=commands) def test_slxos_config_lines_w_parents(self): set_module_args(dict(lines=['shutdown'], parents=['interface Ethernet 0/0'])) commands = ['interface Ethernet 0/0', 'shutdown'] self.execute_module(changed=True, commands=commands) def test_slxos_config_before(self): set_module_args(dict(lines=['hostname foo'], before=['test1', 'test2'])) commands = ['test1', 'test2', 'hostname foo'] self.execute_module(changed=True, commands=commands, sort=False) def test_slxos_config_after(self): set_module_args(dict(lines=['hostname foo'], after=['test1', 'test2'])) commands = ['hostname foo', 'test1', 'test2'] self.execute_module(changed=True, commands=commands, sort=False) def test_slxos_config_before_after_no_change(self): set_module_args(dict(lines=['hostname router'], before=['test1', 'test2'], after=['test3', 'test4'])) self.execute_module() def test_slxos_config_config(self): config = 'hostname localhost' set_module_args(dict(lines=['hostname router'], config=config)) commands = ['hostname router'] self.execute_module(changed=True, commands=commands) def test_slxos_config_replace_block(self): lines = ['description test string', 'test string'] parents = ['interface Ethernet 0/0'] set_module_args(dict(lines=lines, replace='block', parents=parents)) commands = parents + lines self.execute_module(changed=True, commands=commands) def test_slxos_config_match_none(self): lines = ['hostname router'] set_module_args(dict(lines=lines, match='none')) self.execute_module(changed=True, commands=lines) def test_slxos_config_match_none(self): lines = ['ip address 1.2.3.4 255.255.255.0', 'description test string'] parents = ['interface Ethernet 0/0'] set_module_args(dict(lines=lines, parents=parents, match='none')) commands = parents + lines self.execute_module(changed=True, commands=commands, sort=False) def test_slxos_config_match_strict(self): lines = ['ip address 1.2.3.4 255.255.255.0', 'description test string', 'shutdown'] parents = ['interface Ethernet 0/0'] set_module_args(dict(lines=lines, parents=parents, match='strict')) commands = parents + ['shutdown'] self.execute_module(changed=True, commands=commands, sort=False) def test_slxos_config_match_exact(self): lines = ['ip address 1.2.3.4 255.255.255.0', 'description test string', 'shutdown'] parents = ['interface Ethernet 0/0'] set_module_args(dict(lines=lines, parents=parents, match='exact')) commands = parents + lines self.execute_module(changed=True, commands=commands, sort=False) def test_slxos_config_src_and_lines_fails(self): args = dict(src='foo', lines='foo') set_module_args(args) self.execute_module(failed=True) def test_slxos_config_src_and_parents_fails(self): args = dict(src='foo', parents='foo') set_module_args(args) self.execute_module(failed=True) def test_slxos_config_match_exact_requires_lines(self): args = dict(match='exact') set_module_args(args) self.execute_module(failed=True) def test_slxos_config_match_strict_requires_lines(self): args = dict(match='strict') set_module_args(args) self.execute_module(failed=True) def test_slxos_config_replace_block_requires_lines(self): args = dict(replace='block') set_module_args(args) self.execute_module(failed=True) def test_slxos_config_replace_config_requires_src(self): args = dict(replace='config') set_module_args(args) self.execute_module(failed=True)
gpl-3.0
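Note that test_slxos_config_match_none is defined twice in the class above, so the first definition is shadowed when the class body executes and only the parents variant ever runs. The setUp/tearDown patching pattern itself is stock mock usage; a minimal self-contained sketch of the same start/stop lifecycle (os.getcwd is just a hypothetical patch target):

from unittest import TestCase
from unittest.mock import patch

class PatchLifecycleDemo(TestCase):
    def setUp(self):
        # Start the patch manually so every test method sees the mock...
        self.mock_getcwd = patch('os.getcwd')
        self.getcwd = self.mock_getcwd.start()
        self.getcwd.return_value = '/tmp/fixture'

    def tearDown(self):
        # ...and stop it so the real function is restored afterwards.
        self.mock_getcwd.stop()

    def test_patched(self):
        import os
        self.assertEqual(os.getcwd(), '/tmp/fixture')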
felipedau/pyaxo
examples/axotor.py
2
24592
#!/usr/bin/env python import socket import threading import sys import os import curses import socks import stem.process from wh import WHMgr import random from binascii import a2b_base64 as a2b from binascii import b2a_base64 as b2a from getpass import getpass from smp import SMP from stem.control import Controller from stem.util import term from curses.textpad import Textbox from contextlib import contextmanager from pyaxo import Axolotl, hash_ from time import sleep """ Standalone chat script using libsodium for encryption with the Axolotl ratchet for key management. This version of the chat client makes connections over the tor network. The server creates an ephemeral hidden service and the client connects to the hidden service. You will need to load the following additional python modules for this to work: stem, pysocks, txtorcon, pysocks, and magic-wormhole. They are available on pypi via pip. Axotor also requires tor (>=2.9.1). Currently this is in the unstable branch. So you may need to update your distribution's tor repository accordingly. The only inputs required are the nicks of the conversants, as well as a master key. The master key must be of the form N-x where N is an integer (1-3 digits should be fine) and x is a lower-case alphanumeric string. The initial axolotl database configuration and credential exchange are derived from the master key. The master key should be exchanged out-of-band between the two conversants before communication can be established. An example master key might be 293-xyzzy (don't use this one). On start, the program initializes a tor server and exchanges axolotl and hidden service authentication credentials over tor using PAKE (password-authenticated key exchange). The specific implementation of PAKE used is https://github.com/warner/magic-wormhole. The server side creates an ephemeral hidden service that requires basic_auth to connect. The hidden service and axolotl credentials are ephemeral. They exist only in ram, and will be deleted upon exit from the program. Exit by typing .quit at the chat prompt. The client also does an authentication step using the Socialist Millionaire's Protocol. During startup, after a network connection is established, you will be prompted for a secret. If the secret matches that input by the other party, a chat is established and the input window text appears in green. If the secret does not match the other party's secret, you will be prompted whether or not to continue. If you continue, the input window text will appear in red to remind you that the session is unauthenticated. The Axolotl protocol is actually authenticated through the key agreement process when the credentials are created. You may wonder why the additional SMP authentication step is included. This SMP step is another way to assure you that the other party to your session is actually who you think it is. In axotor.py, no database or key is ever stored to disk. Usage: 1. One side starts the server with: axotor.py -s 2. The other side connects the client to the server with: axotor.py -c 3. .quit at the chat prompt will quit (don't forget the "dot") 4. .send <filename> will send a file to the other party. The file can be from anywhere in the filesystem on the sending computer (~/a/b/<filename> supported) and will be stored in the receiver's local directory. Axochat requires the Axolotl module at https://github.com/rxcomm/pyaxo Copyright (C) 2015-2017 by David R. 
Andersen <[email protected]> This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. """ TOR_SERVER_PORT = 9054 TOR_SERVER_CONTROL_PORT = 9055 TOR_CLIENT_PORT = 9154 TOR_CLIENT_CONTROL_PORT = 9155 CLIENT_FILE_TX_PORT = 2000 SERVER_FILE_TX_PORT = 2001 # An attempt to limit the damage from this bug in curses: # https://bugs.python.org/issue13051 # The input textbox is 8 rows high. So assuming a maximum # terminal width of 512 columns, we arrive at 8x512=4096. # Most terminal windows should be smaller than this. sys.setrecursionlimit(4096) @contextmanager def socketcontext(*args, **kwargs): s = socket.socket(*args, **kwargs) s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) s.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1) yield s s.close() @contextmanager def torcontext(): try: s = socks.socksocket() s.set_proxy(socks.SOCKS5, '127.0.0.1', TOR_CLIENT_PORT) yield s s.close() except socks.SOCKS5Error: print '' print 'You need to wait long enough for the Hidden Service' print 'at the server to be established. Try again in a' print 'minute or two.' class _Textbox(Textbox): """ curses.textpad.Textbox requires users to ^g on completion, which is sort of annoying for an interactive chat client such as this, which typically only reuquires an enter. This subclass fixes this problem by signalling completion on Enter as well as ^g. Also, map <Backspace> key to ^h. 
""" def __init__(*args, **kwargs): Textbox.__init__(*args, **kwargs) def do_command(self, ch): if ch == curses.KEY_RESIZE: resizeWindows() for i in range(8): # delete 8 input window lines Textbox.do_command(self, 1) Textbox.do_command(self, 11) Textbox.do_command(self, 16) return Textbox.do_command(self, 7) if ch == 10: # Enter return 0 if ch == 127: # Backspace return 8 return Textbox.do_command(self, ch) def validator(ch): """ Update screen if necessary and release the lock so receiveThread can run """ global screen_needs_update try: if screen_needs_update: curses.doupdate() screen_needs_update = False return ch finally: winlock.release() sleep(0.01) # let receiveThread in if necessary winlock.acquire() def windowFactory(): stdscr = curses.initscr() curses.noecho() curses.start_color() curses.use_default_colors() curses.init_pair(1, curses.COLOR_RED, -1) curses.init_pair(2, curses.COLOR_GREEN, -1) curses.init_pair(3, curses.COLOR_YELLOW, -1) curses.cbreak() curses.curs_set(1) (sizey, sizex) = stdscr.getmaxyx() input_win = curses.newwin(8, sizex, sizey-8, 0) output_win = curses.newwin(sizey-8, sizex, 0, 0) input_win.idlok(1) input_win.scrollok(1) input_win.nodelay(1) input_win.leaveok(0) input_win.timeout(100) output_win.idlok(1) output_win.scrollok(1) output_win.leaveok(0) return stdscr, input_win, output_win def closeWindows(stdscr): curses.nocbreak() stdscr.keypad(0) curses.echo() curses.endwin() def resizeWindows(): global stdscr, input_win, output_win, textpad, text_color temp_win = output_win yold, xold = output_win.getmaxyx() stdscr, input_win, output_win = windowFactory() stdscr.noutrefresh() ynew, xnew = output_win.getmaxyx() if yold > ynew: sminrow = yold - ynew dminrow = 0 else: sminrow = 0 dminrow = ynew - yold temp_win.overwrite(output_win, sminrow, 0, dminrow, 0, ynew-1, xnew-1) del temp_win output_win.move(ynew-1, 0) output_win.noutrefresh() input_win.attron(text_color) input_win.noutrefresh() curses.doupdate() textpad = _Textbox(input_win, insert_mode=True) textpad.stripspaces = True def usage(): print 'Usage: ' + sys.argv[0] + ' -(s,c)' print ' -s: start a chat in server mode' print ' -c: start a chat in client mode' print 'quit with .quit' print 'send file with .send <filename>' sys.exit(1) def reportTransferSocketError(): global output_win with winlock: output_win.addstr('Socket error: Something went wrong.\n', curses.color_pair(1)) output_win.refresh() sys.exit() def sendFile(s, filename, abort): global axolotl, output_win if abort: with cryptlock: data = axolotl.encrypt('ABORT') + 'EOP' s.send(data) try: s.recv(3, socket.MSG_WAITALL) except socket.error: pass sys.exit() else: with winlock: output_win.addstr('Sending file %s...\n' % filename, curses.color_pair(3)) output_win.refresh() with open(filename, 'rb') as f: data = f.read() if len(data) == 0: data = 'Sender tried to send a null file!' 
with cryptlock: data = axolotl.encrypt(data) s.send(data + 'EOP') try: s.recv(3, socket.MSG_WAITALL) except socket.error: pass def receiveFile(s, filename): global axolotl, output_win data = '' while data[-3:] != 'EOP': rcv = s.recv(4096) data = data + rcv if not rcv: with winlock: output_win.addstr('Receiving %s aborted...\n' % filename, curses.color_pair(1)) output_win.refresh() try: s.send('EOP') except socket.error: pass sys.exit() with cryptlock: data = axolotl.decrypt(data[:-3]) if data == 'ABORT': with winlock: output_win.addstr('Receiving %s aborted...\n' % filename, curses.color_pair(1)) output_win.refresh() sys.exit() with open(filename, 'wb') as f: f.write(data) try: s.send('EOP') except socket.error: pass def uploadThread(onion, command): global output_win with transferlock: filename = command.split(':> .send ')[1].strip() filename = os.path.expanduser(filename) if not os.path.exists(filename) or os.path.isdir(filename): with winlock: output_win.addstr('File %s does not exist...\n' % filename, curses.color_pair(1)) output_win.refresh() abort = True else: abort = False if onion is None: with socketcontext(socket.AF_INET, socket.SOCK_STREAM) as s: try: s.bind(('localhost', SERVER_FILE_TX_PORT)) s.listen(1) conn, addr = s.accept() except socket.error: reportTransferSocketError() sendFile(conn, filename, abort) else: with torcontext() as s: try: s.connect((onion, CLIENT_FILE_TX_PORT)) except socket.error: reportTransferSocketError() sendFile(s, filename, abort) with winlock: output_win.addstr('%s sent...\n' % filename, curses.color_pair(3)) output_win.refresh() def downloadThread(onion, command): global output_win with transferlock: filename = command.split(':> .send ')[1].strip().split('/').pop() with winlock: output_win.addstr('Receiving file %s...\n' % filename, curses.color_pair(3)) output_win.refresh() if onion is None: with socketcontext(socket.AF_INET, socket.SOCK_STREAM) as s: try: s.bind(('localhost', CLIENT_FILE_TX_PORT)) s.listen(1) conn, addr = s.accept() except socket.error: reportTransferSocketError() receiveFile(conn, filename) else: with torcontext() as s: try: s.connect((onion, SERVER_FILE_TX_PORT)) except socket.error: reportTransferSocketError() receiveFile(s, filename) with winlock: output_win.addstr('%s received...\n' % filename, curses.color_pair(3)) output_win.refresh() def receiveThread(sock, text_color, onion): global screen_needs_update, a, stdscr, input_win, output_win while True: data = '' while data[-3:] != 'EOP': rcv = sock.recv(1024) if not rcv: input_win.move(0, 0) input_win.addstr('Disconnected - Ctrl-C to exit!', text_color) input_win.refresh() sys.exit() data = data + rcv data_list = data.split('EOP') with winlock: (cursory, cursorx) = input_win.getyx() for data in data_list: if data != '': with cryptlock: data = axolotl.decrypt(data) if ':> .quit' in data: closeWindows(stdscr) print 'The other party exited the chat...' 
sleep(1.5) os._exit(0) if ':> .send ' in data: t = threading.Thread(target=downloadThread, args=(onion,data)) t.start() data = '' output_win.addstr(data) input_win.move(cursory, cursorx) input_win.cursyncup() input_win.noutrefresh() output_win.noutrefresh() screen_needs_update = True def chatThread(sock, smp_match, onion): global screen_needs_update, axolotl, stdscr, input_win, \ output_win, textpad, text_color stdscr, input_win, output_win = windowFactory() y, x = output_win.getmaxyx() output_win.move(y-1, 0) if smp_match: text_color = curses.color_pair(2) # green else: text_color = curses.color_pair(1) # red input_win.attron(text_color) input_win.addstr(0, 0, NICK + ':> ') textpad = _Textbox(input_win, insert_mode=True) textpad.stripspaces = True t = threading.Thread(target=receiveThread, args=(sock, text_color, onion)) t.daemon = True t.start() try: while True: with winlock: data = textpad.edit(validator) if len(data) != 0 and chr(127) not in data: input_win.clear() input_win.addstr(NICK+':> ') output_win.addstr(data.replace('\n', '') + '\n', text_color) output_win.noutrefresh() input_win.move(0, len(NICK)+3) input_win.cursyncup() input_win.noutrefresh() screen_needs_update = True data = data.replace('\n', '') + '\n' try: with cryptlock: sock.send(axolotl.encrypt(data) + 'EOP') except socket.error: input_win.addstr('Disconnected') input_win.refresh() closeWindows(stdscr) sys.exit() if NICK+':> .quit' in data: closeWindows(stdscr) print 'Notifying the other party that you are quitting...' sys.exit() elif NICK+':> .send ' in data: t = threading.Thread(target=uploadThread, args=(onion,data)) t.start() else: input_win.addstr(NICK+':> ') input_win.move(0, len(NICK)+3) input_win.cursyncup() input_win.noutrefresh() screen_needs_update = True if screen_needs_update: curses.doupdate() screen_needs_update = False except KeyboardInterrupt: closeWindows(stdscr) def tor(port, controlport, tor_dir, descriptor_cookie): tor_process = stem.process.launch_tor_with_config( tor_cmd = 'tor', config = { 'ControlPort': str(controlport), 'SocksPort' : str(port), 'Log' : [ 'NOTICE stdout', 'ERR file /tmp/tor_error_log', ], 'DataDirectory' : tor_dir, 'CookieAuthentication': '1', 'HidServAuth': descriptor_cookie, }, completion_percent = 100, take_ownership = True, timeout = 90, init_msg_handler = print_bootstrap_lines, ) return tor_process def print_bootstrap_lines(line): if 'Bootstrapped ' in line: print(term.format(line, term.Color.RED)) def clientController(descriptor_cookie, onion): controller = Controller.from_port(address='127.0.0.1', port=TOR_CLIENT_CONTROL_PORT) try: controller.authenticate() controller.set_options([ ('HidServAuth', onion + ' ' + descriptor_cookie), ]) except Exception as e: print e return controller def ephemeralHiddenService(): PORT = 50000 HOST = '127.0.0.1' print 'Waiting for Hidden Service descriptor to be published...' 
print ' (this may take some time)' controller = Controller.from_port(address='127.0.0.1', port=TOR_SERVER_CONTROL_PORT) controller.authenticate() try: hs = controller.create_ephemeral_hidden_service([PORT, CLIENT_FILE_TX_PORT, SERVER_FILE_TX_PORT], basic_auth={'axotor': None}, await_publication=True) except Exception as e: print e return controller, hs.client_auth['axotor'], hs.service_id +'.onion' def credentialsSend(mkey, cookie, ratchet_key, onion): w = WHMgr(unicode(mkey), cookie+'___'+ratchet_key+'___'+onion, u'tcp:127.0.0.1:'+unicode(TOR_SERVER_CONTROL_PORT)) w.send() w.run() return w.confirmed def credentialsReceive(mkey): w = WHMgr(unicode(mkey), None, u'tcp:127.0.0.1:'+unicode(TOR_CLIENT_CONTROL_PORT)) w.receive() w.run() return w.data def smptest(secret, sock, is_server): global axolotl # Create an SMP object with the calculated secret smp = SMP(secret) if is_server: # Do the SMP protocol buffer = sock.recv(2439, socket.MSG_WAITALL) padlength = ord(buffer[-1:]) buffer = axolotl.decrypt(buffer[:-padlength]) buffer = smp.step2(buffer) buffer = axolotl.encrypt(buffer) padlength = 4539-len(buffer) buffer = buffer+padlength*chr(padlength) # pad to fixed length sock.send(buffer) buffer = sock.recv(3469, socket.MSG_WAITALL) padlength = ord(buffer[-1:]) buffer = axolotl.decrypt(buffer[:-padlength]) buffer = smp.step4(buffer) buffer = axolotl.encrypt(buffer) padlength = 1369-len(buffer) buffer = buffer+padlength*chr(padlength) # pad to fixed length sock.send(buffer) else: # Do the SMP protocol buffer = smp.step1() buffer = axolotl.encrypt(buffer) padlength = 2439-len(buffer) buffer = buffer+padlength*chr(padlength) # pad to fixed length sock.send(buffer) buffer = sock.recv(4539, socket.MSG_WAITALL) padlength = ord(buffer[-1:]) buffer = axolotl.decrypt(buffer[:-padlength]) buffer = smp.step3(buffer) buffer = axolotl.encrypt(buffer) padlength = 3469-len(buffer) buffer = buffer+padlength*chr(padlength) # pad to fixed length sock.send(buffer) buffer = sock.recv(1369, socket.MSG_WAITALL) padlength = ord(buffer[-1:]) buffer = axolotl.decrypt(buffer[:-padlength]) smp.step5(buffer) # Check if the secrets match if smp.match: print 'Secrets Match!' sleep(1) smp_match = True else: print 'Secrets DO NOT Match!' smp_match = False return smp_match def doSMP(sock, is_server): global axolotl ans = raw_input('Run SMP authentication step? (y/N)? ') if not ans == 'y': ans = 'N' sock.send(axolotl.encrypt(ans)) data = sock.recv(125, socket.MSG_WAITALL) data = axolotl.decrypt(data) if ans == 'N' and data == 'y': print 'Other party requested SMP authentication' if ans == 'y' or data == 'y': print 'Performing per-session SMP authentication...' ans = getpass('Enter SMP secret: ') print 'Running SMP protocol...' secret = ans + axolotl.state['CONVid'] smp_match = smptest(secret, sock, is_server) if not smp_match: ans = raw_input('Continue? (y/N) ') if ans != 'y': print 'Exiting...' sys.exit() else: print 'OK - skipping SMP step and assuming ' + \ 'the other party is already authenticated...' smp_match = True sleep(2) return smp_match if __name__ == '__main__': global axolotl try: mode = sys.argv[1] except: usage() NICK = raw_input('Enter your nick: ') OTHER_NICK = 'x' winlock = threading.Lock() transferlock = threading.Lock() cryptlock = threading.Lock() screen_needs_update = False HOST = '127.0.0.1' PORT=50000 mkey = getpass('What is the masterkey (format: NNN-xxxx)? 
') if mode == '-s': axolotl = Axolotl(NICK, dbname=OTHER_NICK+'.db', dbpassphrase=None, nonthreaded_sql=False) axolotl.createState(other_name=OTHER_NICK, mkey=hash_(mkey), mode=False) tor_process = tor(TOR_SERVER_PORT, TOR_SERVER_CONTROL_PORT, '/tmp/tor.server', '') hs, cookie, onion = ephemeralHiddenService() print 'Exchanging credentials via tor...' if credentialsSend(mkey, cookie, b2a(axolotl.state['DHRs']).strip(), onion): pass else: sys.exit(1) print 'Credentials sent, waiting for the other party to connect...' with socketcontext(socket.AF_INET, socket.SOCK_STREAM) as s: s.bind((HOST, PORT)) s.listen(1) conn, addr = s.accept() print 'Connected...' smp_match = doSMP(conn, True) chatThread(conn, smp_match, None) elif mode == '-c': axolotl = Axolotl(NICK, dbname=OTHER_NICK+'.db', dbpassphrase=None, nonthreaded_sql=False) tor_process = tor(TOR_CLIENT_PORT, TOR_CLIENT_CONTROL_PORT, '/tmp/tor.client', '') print 'Exchanging credentials via tor...' creds = credentialsReceive(mkey) if not creds: print 'Master Key Mismatch!' print 'Exiting...' sys.exit() cookie, rkey, onion = creds.split('___') controller = clientController(cookie, onion) axolotl.createState(other_name=OTHER_NICK, mkey=hash_(mkey), mode=True, other_ratchetKey=a2b(rkey)) print 'Credentials received, connecting to the other party...' with torcontext() as s: s.connect((onion, PORT)) print 'Connected...' smp_match = doSMP(s, False) chatThread(s, smp_match, onion) else: usage()
gpl-3.0
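The smptest exchange above pads every encrypted SMP message to a fixed length so that observable message sizes reveal nothing about protocol state; the pad/unpad scheme it uses, isolated (Python 3 str semantics assumed here; the original script is Python 2):

def pad(buf, target_len):
    # The final character encodes the pad length, as in smptest above.
    n = target_len - len(buf)
    return buf + n * chr(n)

def unpad(buf):
    return buf[:-ord(buf[-1:])]

assert unpad(pad('smp step data', 64)) == 'smp step data'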
kawamuray/ganeti
lib/watcher/state.py
2
7572
# # # Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 Google Inc. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA # 02110-1301, USA. """Module keeping state for Ganeti watcher. """ import os import time import logging from ganeti import utils from ganeti import serializer from ganeti import errors # Delete any record that is older than 8 hours; this value is based on # the fact that the current retry counter is 5, and watcher runs every # 5 minutes, so it takes around half an hour to exceed the retry # counter, so 8 hours (16*1/2h) seems like a reasonable reset time RETRY_EXPIRATION = 8 * 3600 KEY_CLEANUP_COUNT = "cleanup_count" KEY_CLEANUP_WHEN = "cleanup_when" KEY_RESTART_COUNT = "restart_count" KEY_RESTART_WHEN = "restart_when" KEY_BOOT_ID = "bootid" def OpenStateFile(path): """Opens the state file and acquires a lock on it. @type path: string @param path: Path to state file """ # The two-step dance below is necessary to allow both opening existing # file read/write and creating if not existing. Vanilla open will truncate # an existing file -or- allow creating if not existing. statefile_fd = os.open(path, os.O_RDWR | os.O_CREAT) # Try to acquire lock on state file. If this fails, another watcher instance # might already be running or another program is temporarily blocking the # watcher from running. try: utils.LockFile(statefile_fd) except errors.LockError, err: logging.error("Can't acquire lock on state file %s: %s", path, err) return None return os.fdopen(statefile_fd, "w+") class WatcherState(object): """Interface to a state file recording restart attempts. """ def __init__(self, statefile): """Open, lock, read and parse the file. @type statefile: file @param statefile: State file object """ self.statefile = statefile try: state_data = self.statefile.read() if not state_data: self._data = {} else: self._data = serializer.Load(state_data) except Exception, msg: # pylint: disable=W0703 # Ignore errors while loading the file and treat it as empty self._data = {} logging.warning(("Invalid state file. Using defaults." " Error message: %s"), msg) if "instance" not in self._data: self._data["instance"] = {} if "node" not in self._data: self._data["node"] = {} self._orig_data = serializer.Dump(self._data) def Save(self, filename): """Save state to file, then unlock and close it. """ assert self.statefile serialized_form = serializer.Dump(self._data) if self._orig_data == serialized_form: logging.debug("Data didn't change, just touching status file") os.utime(filename, None) return # We need to make sure the file is locked before renaming it, otherwise # starting ganeti-watcher again at the same time will create a conflict. fd = utils.WriteFile(filename, data=serialized_form, prewrite=utils.LockFile, close=False) self.statefile = os.fdopen(fd, "w+") def Close(self): """Unlock configuration file and close it. 
""" assert self.statefile # Files are automatically unlocked when closing them self.statefile.close() self.statefile = None def GetNodeBootID(self, name): """Returns the last boot ID of a node or None. """ ndata = self._data["node"] if name in ndata and KEY_BOOT_ID in ndata[name]: return ndata[name][KEY_BOOT_ID] return None def SetNodeBootID(self, name, bootid): """Sets the boot ID of a node. """ assert bootid ndata = self._data["node"] ndata.setdefault(name, {})[KEY_BOOT_ID] = bootid def NumberOfRestartAttempts(self, instance_name): """Returns number of previous restart attempts. @type instance_name: string @param instance_name: the name of the instance to look up """ idata = self._data["instance"] if instance_name in idata: return idata[instance_name][KEY_RESTART_COUNT] return 0 def NumberOfCleanupAttempts(self, instance_name): """Returns number of previous cleanup attempts. @type instance_name: string @param instance_name: the name of the instance to look up """ idata = self._data["instance"] if instance_name in idata: return idata[instance_name][KEY_CLEANUP_COUNT] return 0 def MaintainInstanceList(self, instances): """Perform maintenance on the recorded instances. @type instances: list of string @param instances: the list of currently existing instances """ idict = self._data["instance"] # First, delete obsolete instances obsolete_instances = set(idict).difference(instances) for inst in obsolete_instances: logging.debug("Forgetting obsolete instance %s", inst) idict.pop(inst, None) # Second, delete expired records earliest = time.time() - RETRY_EXPIRATION expired_instances = [i for i in idict if idict[i][KEY_RESTART_WHEN] < earliest] for inst in expired_instances: logging.debug("Expiring record for instance %s", inst) idict.pop(inst, None) @staticmethod def _RecordAttempt(instances, instance_name, key_when, key_count): """Record an event. @type instances: dict @param instances: contains instance data indexed by instance_name @type instance_name: string @param instance_name: name of the instance involved in the event @type key_when: @param key_when: dict key for the information for when the event occurred @type key_count: int @param key_count: dict key for the information for how many times the event occurred """ instance = instances.setdefault(instance_name, {}) instance[key_when] = time.time() instance[key_count] = instance.get(key_count, 0) + 1 def RecordRestartAttempt(self, instance_name): """Record a restart attempt. @type instance_name: string @param instance_name: the name of the instance being restarted """ self._RecordAttempt(self._data["instance"], instance_name, KEY_RESTART_WHEN, KEY_RESTART_COUNT) def RecordCleanupAttempt(self, instance_name): """Record a cleanup attempt. @type instance_name: string @param instance_name: the name of the instance being cleaned up """ self._RecordAttempt(self._data["instance"], instance_name, KEY_CLEANUP_WHEN, KEY_CLEANUP_COUNT) def RemoveInstance(self, instance_name): """Update state to reflect that a machine is running. This method removes the record for a named instance (as we only track down instances). @type instance_name: string @param instance_name: the name of the instance to remove from books """ idata = self._data["instance"] idata.pop(instance_name, None)
gpl-2.0
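OpenStateFile's two-step dance (os.open, lock, then fdopen) is what allows open-or-create without truncating an existing state file; a standalone sketch of the same pattern using fcntl directly (ganeti's utils.LockFile wraps a similar non-blocking exclusive lock):

import fcntl
import os

def open_state_file(path):
    # O_RDWR|O_CREAT opens an existing file untouched or creates it;
    # a plain open(path, 'w+') would truncate existing contents.
    fd = os.open(path, os.O_RDWR | os.O_CREAT)
    fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)  # raises OSError if already locked
    return os.fdopen(fd, 'w+')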
imclab/CoECI-CMS-Healthcare-Fraud-Prevention
partnerclient/decision_module/decision_module/helper.py
3
1511
''' Copyright (C) 2013 TopCoder Inc., All Rights Reserved. ''' ''' This is the module that provides useful helper functions. Thread Safety: The implementation is thread safe. @author: TCSASSEMBLER @version: 1.0 ''' def log_entrance(logger, signature, parasMap): ''' Logs for entrance into public methods at DEBUG level. @param logger: the logger object @param signature: the method signature @param parasMap: the passed parameters ''' logger.debug('[Entering method ' + signature + ']') if parasMap != None and len(parasMap.items()) > 0: paraStr = '[Input parameters[' for (k,v) in parasMap.items(): paraStr += (str(k) + ':' + str(v) + ', ') paraStr += ']]' logger.debug(paraStr) def log_exit(logger, signature, parasList): ''' Logs for exit from public methods at DEBUG level. @param logger: the logger object @param signature: the method signature @param parasList: the objects to return ''' logger.debug('[Exiting method ' + signature + ']') if parasList != None and len(parasList) > 0: logger.debug('[Output parameter ' + str(parasList) + ']') def log_exception(logger, signature, e): ''' Logs exception at ERROR level. @param logger: the logger object @param signature: the method signature @param e: the error ''' # This will log the traceback. logger.error('[Error in method ' + signature + ': Details ' + str(e) + ']') return e
apache-2.0
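A minimal usage sketch of the three helpers above (assumes they are imported from the module; the logger setup is illustrative):

import logging

logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger('decision_module')

signature = 'helper.add(a, b)'
log_entrance(logger, signature, {'a': 1, 'b': 2})
try:
    result = 1 + 2
    log_exit(logger, signature, [result])
except Exception as e:
    raise log_exception(logger, signature, e)  # log_exception returns e, so re-raising works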
hoangt/tpzsimul.gem5
src/arch/x86/isa/insts/simd128/integer/logical/pand.py
91
3202
# Copyright (c) 2007 The Hewlett-Packard Development Company # All rights reserved. # # The license below extends only to copyright in the software and shall # not be construed as granting a license to any other intellectual # property including but not limited to intellectual property relating # to a hardware implementation of the functionality of the software # licensed hereunder. You may use the software subject to the license # terms below provided that you ensure that this notice is replicated # unmodified and in its entirety in all distributions of the software, # modified or unmodified, in source code or in binary form. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer; # redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution; # neither the name of the copyright holders nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # # Authors: Gabe Black microcode = ''' def macroop PAND_XMM_XMM { mand xmml, xmml, xmmlm mand xmmh, xmmh, xmmhm }; def macroop PAND_XMM_M { lea t1, seg, sib, disp, dataSize=asz ldfp ufp1, seg, [1, t0, t1], dataSize=8 ldfp ufp2, seg, [1, t0, t1], 8, dataSize=8 mand xmml, xmml, ufp1 mand xmmh, xmmh, ufp2 }; def macroop PAND_XMM_P { rdip t7 lea t1, seg, riprel, disp, dataSize=asz ldfp ufp1, seg, [1, t0, t1], dataSize=8 ldfp ufp2, seg, [1, t0, t1], 8, dataSize=8 mand xmml, xmml, ufp1 mand xmmh, xmmh, ufp2 }; def macroop PANDN_XMM_XMM { mandn xmml, xmml, xmmlm mandn xmmh, xmmh, xmmhm }; def macroop PANDN_XMM_M { lea t1, seg, sib, disp, dataSize=asz ldfp ufp1, seg, [1, t0, t1], dataSize=8 ldfp ufp2, seg, [1, t0, t1], 8, dataSize=8 mandn xmml, xmml, ufp1 mandn xmmh, xmmh, ufp2 }; def macroop PANDN_XMM_P { rdip t7 lea t1, seg, riprel, disp, dataSize=asz ldfp ufp1, seg, [1, t0, t1], dataSize=8 ldfp ufp2, seg, [1, t0, t1], 8, dataSize=8 mandn xmml, xmml, ufp1 mandn xmmh, xmmh, ufp2 }; '''
bsd-3-clause
arowla/csvkit
tests/test_unicsv.py
21
6571
#!/usr/bin/env python # -*- coding: utf-8 -*- import csv import os import six try: import unittest2 as unittest except ImportError: import unittest from csvkit import unicsv @unittest.skipIf(six.PY3, "Not supported in Python 3.") class TestUnicodeCSVReader(unittest.TestCase): def test_utf8(self): with open('examples/test_utf8.csv') as f: reader = unicsv.UnicodeCSVReader(f, encoding='utf-8') self.assertEqual(next(reader), ['a', 'b', 'c']) self.assertEqual(next(reader), ['1', '2', '3']) self.assertEqual(next(reader), ['4', '5', u'ʤ']) def test_latin1(self): with open('examples/test_latin1.csv') as f: reader = unicsv.UnicodeCSVReader(f, encoding='latin1') self.assertEqual(next(reader), ['a', 'b', 'c']) self.assertEqual(next(reader), ['1', '2', '3']) self.assertEqual(next(reader), ['4', '5', u'©']) def test_utf16_big(self): with open('examples/test_utf16_big.csv') as f: reader = unicsv.UnicodeCSVReader(f, encoding='utf-16') self.assertEqual(next(reader), ['a', 'b', 'c']) self.assertEqual(next(reader), ['1', '2', '3']) self.assertEqual(next(reader), ['4', '5', u'ʤ']) def test_utf16_little(self): with open('examples/test_utf16_little.csv') as f: reader = unicsv.UnicodeCSVReader(f, encoding='utf-16') self.assertEqual(next(reader), ['a', 'b', 'c']) self.assertEqual(next(reader), ['1', '2', '3']) self.assertEqual(next(reader), ['4', '5', u'ʤ']) @unittest.skipIf(six.PY3, "Not supported in Python 3.") class TestUnicodeCSVWriter(unittest.TestCase): def test_utf8(self): output = six.StringIO() writer = unicsv.UnicodeCSVWriter(output, encoding='utf-8') self.assertEqual(writer._eight_bit, True) writer.writerow(['a', 'b', 'c']) writer.writerow(['1', '2', '3']) writer.writerow(['4', '5', u'ʤ']) written = six.StringIO(output.getvalue()) reader = unicsv.UnicodeCSVReader(written, encoding='utf-8') self.assertEqual(next(reader), ['a', 'b', 'c']) self.assertEqual(next(reader), ['1', '2', '3']) self.assertEqual(next(reader), ['4', '5', u'ʤ']) def test_latin1(self): output = six.StringIO() writer = unicsv.UnicodeCSVWriter(output, encoding='latin1') self.assertEqual(writer._eight_bit, True) writer.writerow(['a', 'b', 'c']) writer.writerow(['1', '2', '3']) writer.writerow(['4', '5', u'©']) written = six.StringIO(output.getvalue()) reader = unicsv.UnicodeCSVReader(written, encoding='latin1') self.assertEqual(next(reader), ['a', 'b', 'c']) self.assertEqual(next(reader), ['1', '2', '3']) self.assertEqual(next(reader), ['4', '5', u'©']) def test_utf16_big(self): output = six.StringIO() writer = unicsv.UnicodeCSVWriter(output, encoding='utf-16-be') self.assertEqual(writer._eight_bit, False) writer.writerow(['a', 'b', 'c']) writer.writerow(['1', '2', '3']) writer.writerow(['4', '5', u'ʤ']) written = six.StringIO(output.getvalue()) reader = unicsv.UnicodeCSVReader(written, encoding='utf-16-be') self.assertEqual(next(reader), ['a', 'b', 'c']) self.assertEqual(next(reader), ['1', '2', '3']) self.assertEqual(next(reader), ['4', '5', u'\u02A4']) def test_utf16_little(self): output = six.StringIO() writer = unicsv.UnicodeCSVWriter(output, encoding='utf-16-le') self.assertEqual(writer._eight_bit, False) writer.writerow(['a', 'b', 'c']) writer.writerow(['1', '2', '3']) writer.writerow(['4', '5', u'ʤ']) written = six.StringIO(output.getvalue()) reader = unicsv.UnicodeCSVReader(written, encoding='utf-16-le') self.assertEqual(next(reader), ['a', 'b', 'c']) self.assertEqual(next(reader), ['1', '2', '3']) self.assertEqual(next(reader), ['4', '5', u'\u02A4']) @unittest.skipIf(six.PY3, "Not supported in Python 3.") class 
TestUnicodeCSVDictReader(unittest.TestCase): def setUp(self): self.f = open('examples/dummy.csv') def tearDown(self): self.f.close() def test_reader(self): reader = unicsv.UnicodeCSVDictReader(self.f) self.assertEqual(next(reader), { u'a': u'1', u'b': u'2', u'c': u'3' }) def test_latin1(self): with open('examples/test_latin1.csv') as f: reader = unicsv.UnicodeCSVDictReader(f, encoding='latin1') self.assertEqual(next(reader), { u'a': u'1', u'b': u'2', u'c': u'3' }) self.assertEqual(next(reader), { u'a': u'4', u'b': u'5', u'c': u'©' }) @unittest.skipIf(six.PY3, "Not supported in Python 3.") class TestUnicodeCSVDictWriter(unittest.TestCase): def setUp(self): self.output = six.StringIO() def tearDown(self): self.output.close() def test_writer(self): writer = unicsv.UnicodeCSVDictWriter(self.output, ['a', 'b', 'c'], lineterminator='\n') writer.writeheader() writer.writerow({ u'a': u'1', u'b': u'2', u'c': u'☃' }) result = self.output.getvalue() self.assertEqual(result, 'a,b,c\n1,2,☃\n') @unittest.skipIf(six.PY3, "Not supported in Python 3.") class TestMaxFieldSize(unittest.TestCase): def setUp(self): self.lim = csv.field_size_limit() with open('dummy.csv', 'w') as f: f.write('a' * 10) def tearDown(self): # Resetting limit to avoid failure in other tests. csv.field_size_limit(self.lim) os.remove('dummy.csv') def test_maxfieldsize(self): # Testing --maxfieldsize for failure. Creating data using str * int. with open('dummy.csv', 'r') as f: c = unicsv.UnicodeCSVReader(f, maxfieldsize=9) try: c.next() except unicsv.FieldSizeLimitError: pass else: raise AssertionError('Expected unicsv.FieldSizeLimitError') # Now testing higher --maxfieldsize. with open('dummy.csv', 'r') as f: c = unicsv.UnicodeCSVReader(f, maxfieldsize=11) self.assertEqual(['a' * 10], c.next())
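These tests all revolve around one write-then-read round trip; a condensed sketch of that pattern (Python 2, mirroring the imports above; the field values are placeholders):

import six
from csvkit import unicsv

out = six.StringIO()
writer = unicsv.UnicodeCSVWriter(out, encoding='utf-8')
writer.writerow([u'name', u'symbol'])
writer.writerow([u'ezh', u'\u0292'])

reader = unicsv.UnicodeCSVReader(six.StringIO(out.getvalue()), encoding='utf-8')
assert next(reader) == [u'name', u'symbol']
assert next(reader) == [u'ezh', u'\u0292']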
mit
shuangshuangwang/spark
python/pyspark/sql/window.py
23
12863
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import sys

from pyspark import since, SparkContext
from pyspark.sql.column import _to_seq, _to_java_column

__all__ = ["Window", "WindowSpec"]


def _to_java_cols(cols):
    sc = SparkContext._active_spark_context
    if len(cols) == 1 and isinstance(cols[0], list):
        cols = cols[0]
    return _to_seq(sc, cols, _to_java_column)


class Window(object):
    """
    Utility functions for defining window in DataFrames.

    .. versionadded:: 1.4

    Notes
    -----
    When ordering is not defined, an unbounded window frame (rowFrame,
    unboundedPreceding, unboundedFollowing) is used by default. When ordering is defined,
    a growing window frame (rangeFrame, unboundedPreceding, currentRow) is used by default.

    Examples
    --------
    >>> # ORDER BY date ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW
    >>> window = Window.orderBy("date").rowsBetween(Window.unboundedPreceding, Window.currentRow)

    >>> # PARTITION BY country ORDER BY date RANGE BETWEEN 3 PRECEDING AND 3 FOLLOWING
    >>> window = Window.orderBy("date").partitionBy("country").rangeBetween(-3, 3)
    """

    _JAVA_MIN_LONG = -(1 << 63)  # -9223372036854775808
    _JAVA_MAX_LONG = (1 << 63) - 1  # 9223372036854775807
    _PRECEDING_THRESHOLD = max(-sys.maxsize, _JAVA_MIN_LONG)
    _FOLLOWING_THRESHOLD = min(sys.maxsize, _JAVA_MAX_LONG)

    unboundedPreceding = _JAVA_MIN_LONG

    unboundedFollowing = _JAVA_MAX_LONG

    currentRow = 0

    @staticmethod
    @since(1.4)
    def partitionBy(*cols):
        """
        Creates a :class:`WindowSpec` with the partitioning defined.
        """
        sc = SparkContext._active_spark_context
        jspec = sc._jvm.org.apache.spark.sql.expressions.Window.partitionBy(_to_java_cols(cols))
        return WindowSpec(jspec)

    @staticmethod
    @since(1.4)
    def orderBy(*cols):
        """
        Creates a :class:`WindowSpec` with the ordering defined.
        """
        sc = SparkContext._active_spark_context
        jspec = sc._jvm.org.apache.spark.sql.expressions.Window.orderBy(_to_java_cols(cols))
        return WindowSpec(jspec)

    @staticmethod
    def rowsBetween(start, end):
        """
        Creates a :class:`WindowSpec` with the frame boundaries defined,
        from `start` (inclusive) to `end` (inclusive).

        Both `start` and `end` are relative positions from the current row.
        For example, "0" means "current row", while "-1" means the row before
        the current row, and "5" means the fifth row after the current row.

        We recommend users use ``Window.unboundedPreceding``, ``Window.unboundedFollowing``,
        and ``Window.currentRow`` to specify special boundary values, rather than using integral
        values directly.

        A row based boundary is based on the position of the row within the partition.
        An offset indicates the number of rows above or below the current row at which
        the frame for the current row starts or ends. For instance, given a row based
        sliding frame with a lower bound offset of -1 and an upper bound offset of +2,
        the frame for the row with index 5 would range from index 4 to index 7.

        .. versionadded:: 2.1.0

        Parameters
        ----------
        start : int
            boundary start, inclusive.
            The frame is unbounded if this is ``Window.unboundedPreceding``, or
            any value less than or equal to -9223372036854775808.
        end : int
            boundary end, inclusive.
            The frame is unbounded if this is ``Window.unboundedFollowing``, or
            any value greater than or equal to 9223372036854775807.

        Examples
        --------
        >>> from pyspark.sql import Window
        >>> from pyspark.sql import functions as func
        >>> from pyspark.sql import SQLContext
        >>> sc = SparkContext.getOrCreate()
        >>> sqlContext = SQLContext(sc)
        >>> tup = [(1, "a"), (1, "a"), (2, "a"), (1, "b"), (2, "b"), (3, "b")]
        >>> df = sqlContext.createDataFrame(tup, ["id", "category"])
        >>> window = Window.partitionBy("category").orderBy("id").rowsBetween(Window.currentRow, 1)
        >>> df.withColumn("sum", func.sum("id").over(window)).sort("id", "category", "sum").show()
        +---+--------+---+
        | id|category|sum|
        +---+--------+---+
        |  1|       a|  2|
        |  1|       a|  3|
        |  1|       b|  3|
        |  2|       a|  2|
        |  2|       b|  5|
        |  3|       b|  3|
        +---+--------+---+
        """
        if start <= Window._PRECEDING_THRESHOLD:
            start = Window.unboundedPreceding
        if end >= Window._FOLLOWING_THRESHOLD:
            end = Window.unboundedFollowing
        sc = SparkContext._active_spark_context
        jspec = sc._jvm.org.apache.spark.sql.expressions.Window.rowsBetween(start, end)
        return WindowSpec(jspec)

    @staticmethod
    def rangeBetween(start, end):
        """
        Creates a :class:`WindowSpec` with the frame boundaries defined,
        from `start` (inclusive) to `end` (inclusive).

        Both `start` and `end` are relative from the current row. For example,
        "0" means "current row", while "-1" means one unit of the ORDER BY value
        before the current row, and "5" means five units after the current row.

        We recommend users use ``Window.unboundedPreceding``, ``Window.unboundedFollowing``,
        and ``Window.currentRow`` to specify special boundary values, rather than using integral
        values directly.

        A range-based boundary is based on the actual value of the ORDER BY
        expression(s). An offset is used to alter the value of the ORDER BY expression, for
        instance if the current ORDER BY expression has a value of 10 and the lower bound offset
        is -3, the resulting lower bound for the current row will be 10 - 3 = 7. This however puts a
        number of constraints on the ORDER BY expressions: there can be only one expression and this
        expression must have a numerical data type. An exception can be made when the offset is
        unbounded, because no value modification is needed, in this case multiple and non-numeric
        ORDER BY expressions are allowed.

        .. versionadded:: 2.1.0

        Parameters
        ----------
        start : int
            boundary start, inclusive.
            The frame is unbounded if this is ``Window.unboundedPreceding``, or
            any value less than or equal to max(-sys.maxsize, -9223372036854775808).
        end : int
            boundary end, inclusive.
            The frame is unbounded if this is ``Window.unboundedFollowing``, or
            any value greater than or equal to min(sys.maxsize, 9223372036854775807).
Examples -------- >>> from pyspark.sql import Window >>> from pyspark.sql import functions as func >>> from pyspark.sql import SQLContext >>> sc = SparkContext.getOrCreate() >>> sqlContext = SQLContext(sc) >>> tup = [(1, "a"), (1, "a"), (2, "a"), (1, "b"), (2, "b"), (3, "b")] >>> df = sqlContext.createDataFrame(tup, ["id", "category"]) >>> window = Window.partitionBy("category").orderBy("id").rangeBetween(Window.currentRow, 1) >>> df.withColumn("sum", func.sum("id").over(window)).sort("id", "category").show() +---+--------+---+ | id|category|sum| +---+--------+---+ | 1| a| 4| | 1| a| 4| | 1| b| 3| | 2| a| 2| | 2| b| 5| | 3| b| 3| +---+--------+---+ """ if start <= Window._PRECEDING_THRESHOLD: start = Window.unboundedPreceding if end >= Window._FOLLOWING_THRESHOLD: end = Window.unboundedFollowing sc = SparkContext._active_spark_context jspec = sc._jvm.org.apache.spark.sql.expressions.Window.rangeBetween(start, end) return WindowSpec(jspec) class WindowSpec(object): """ A window specification that defines the partitioning, ordering, and frame boundaries. Use the static methods in :class:`Window` to create a :class:`WindowSpec`. .. versionadded:: 1.4.0 """ def __init__(self, jspec): self._jspec = jspec def partitionBy(self, *cols): """ Defines the partitioning columns in a :class:`WindowSpec`. .. versionadded:: 1.4.0 Parameters ---------- cols : str, :class:`Column` or list names of columns or expressions """ return WindowSpec(self._jspec.partitionBy(_to_java_cols(cols))) def orderBy(self, *cols): """ Defines the ordering columns in a :class:`WindowSpec`. .. versionadded:: 1.4.0 Parameters ---------- cols : str, :class:`Column` or list names of columns or expressions """ return WindowSpec(self._jspec.orderBy(_to_java_cols(cols))) def rowsBetween(self, start, end): """ Defines the frame boundaries, from `start` (inclusive) to `end` (inclusive). Both `start` and `end` are relative positions from the current row. For example, "0" means "current row", while "-1" means the row before the current row, and "5" means the fifth row after the current row. We recommend users use ``Window.unboundedPreceding``, ``Window.unboundedFollowing``, and ``Window.currentRow`` to specify special boundary values, rather than using integral values directly. .. versionadded:: 1.4.0 Parameters ---------- start : int boundary start, inclusive. The frame is unbounded if this is ``Window.unboundedPreceding``, or any value less than or equal to max(-sys.maxsize, -9223372036854775808). end : int boundary end, inclusive. The frame is unbounded if this is ``Window.unboundedFollowing``, or any value greater than or equal to min(sys.maxsize, 9223372036854775807). """ if start <= Window._PRECEDING_THRESHOLD: start = Window.unboundedPreceding if end >= Window._FOLLOWING_THRESHOLD: end = Window.unboundedFollowing return WindowSpec(self._jspec.rowsBetween(start, end)) def rangeBetween(self, start, end): """ Defines the frame boundaries, from `start` (inclusive) to `end` (inclusive). Both `start` and `end` are relative from the current row. For example, "0" means "current row", while "-1" means one off before the current row, and "5" means the five off after the current row. We recommend users use ``Window.unboundedPreceding``, ``Window.unboundedFollowing``, and ``Window.currentRow`` to specify special boundary values, rather than using integral values directly. .. versionadded:: 1.4.0 Parameters ---------- start : int boundary start, inclusive. 
The frame is unbounded if this is ``Window.unboundedPreceding``, or any value less than or equal to max(-sys.maxsize, -9223372036854775808). end : int boundary end, inclusive. The frame is unbounded if this is ``Window.unboundedFollowing``, or any value greater than or equal to min(sys.maxsize, 9223372036854775807). """ if start <= Window._PRECEDING_THRESHOLD: start = Window.unboundedPreceding if end >= Window._FOLLOWING_THRESHOLD: end = Window.unboundedFollowing return WindowSpec(self._jspec.rangeBetween(start, end)) def _test(): import doctest import pyspark.sql.window SparkContext('local[4]', 'PythonTest') globs = pyspark.sql.window.__dict__.copy() (failure_count, test_count) = doctest.testmod( pyspark.sql.window, globs=globs, optionflags=doctest.NORMALIZE_WHITESPACE) if failure_count: sys.exit(-1) if __name__ == "__main__": _test()
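The practical difference between rowsBetween and rangeBetween shows up on tied ORDER BY values; a short sketch using the same toy data as the docstrings above (requires a running Spark session):

from pyspark.sql import SparkSession, Window, functions as func

spark = SparkSession.builder.getOrCreate()
df = spark.createDataFrame([(1, "a"), (1, "a"), (2, "a")], ["id", "category"])

rows_win = Window.partitionBy("category").orderBy("id").rowsBetween(Window.currentRow, 1)
range_win = Window.partitionBy("category").orderBy("id").rangeBetween(Window.currentRow, 1)

# For the tied id=1 rows, rangeBetween includes every row whose id falls in
# [current, current + 1] (sum = 4), while rowsBetween counts physical rows
# only (sums 2 and 3).
df.select("id",
          func.sum("id").over(rows_win).alias("rows_sum"),
          func.sum("id").over(range_win).alias("range_sum")).show()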
apache-2.0
yencarnacion/jaikuengine
.google_appengine/lib/django-1.5/tests/regressiontests/admin_util/models.py
115
1303
from django.db import models from django.utils import six from django.utils.encoding import python_2_unicode_compatible class Article(models.Model): """ A simple Article model for testing """ site = models.ForeignKey('sites.Site', related_name="admin_articles") title = models.CharField(max_length=100) title2 = models.CharField(max_length=100, verbose_name="another name") created = models.DateTimeField() def test_from_model(self): return "nothing" def test_from_model_with_override(self): return "nothing" test_from_model_with_override.short_description = "not What you Expect" @python_2_unicode_compatible class Count(models.Model): num = models.PositiveSmallIntegerField() parent = models.ForeignKey('self', null=True) def __str__(self): return six.text_type(self.num) class Event(models.Model): date = models.DateTimeField(auto_now_add=True) class Location(models.Model): event = models.OneToOneField(Event, verbose_name='awesome event') class Guest(models.Model): event = models.OneToOneField(Event) name = models.CharField(max_length=255) class Meta: verbose_name = "awesome guest" class EventGuide(models.Model): event = models.ForeignKey(Event, on_delete=models.DO_NOTHING)
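A hedged sketch of how the short_description attribute set above surfaces in the admin (this ModelAdmin is hypothetical and not part of the test models):

from django.contrib import admin

class ArticleAdmin(admin.ModelAdmin):
    # the last column's header becomes "not What you Expect", taken from
    # test_from_model_with_override.short_description
    list_display = ('title', 'created', 'test_from_model_with_override')

admin.site.register(Article, ArticleAdmin)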
apache-2.0
Clyde-fare/scikit-learn
sklearn/metrics/ranking.py
79
25426
"""Metrics to assess performance on classification task given scores Functions named as ``*_score`` return a scalar value to maximize: the higher the better Function named as ``*_error`` or ``*_loss`` return a scalar value to minimize: the lower the better """ # Authors: Alexandre Gramfort <[email protected]> # Mathieu Blondel <[email protected]> # Olivier Grisel <[email protected]> # Arnaud Joly <[email protected]> # Jochen Wersdorfer <[email protected]> # Lars Buitinck <[email protected]> # Joel Nothman <[email protected]> # Noel Dawe <[email protected]> # License: BSD 3 clause from __future__ import division import warnings import numpy as np from scipy.sparse import csr_matrix from ..utils import check_consistent_length from ..utils import column_or_1d, check_array from ..utils.multiclass import type_of_target from ..utils.fixes import isclose from ..utils.fixes import bincount from ..utils.stats import rankdata from ..utils.sparsefuncs import count_nonzero from .base import _average_binary_score from .base import UndefinedMetricWarning def auc(x, y, reorder=False): """Compute Area Under the Curve (AUC) using the trapezoidal rule This is a general function, given points on a curve. For computing the area under the ROC-curve, see :func:`roc_auc_score`. Parameters ---------- x : array, shape = [n] x coordinates. y : array, shape = [n] y coordinates. reorder : boolean, optional (default=False) If True, assume that the curve is ascending in the case of ties, as for an ROC curve. If the curve is non-ascending, the result will be wrong. Returns ------- auc : float Examples -------- >>> import numpy as np >>> from sklearn import metrics >>> y = np.array([1, 1, 2, 2]) >>> pred = np.array([0.1, 0.4, 0.35, 0.8]) >>> fpr, tpr, thresholds = metrics.roc_curve(y, pred, pos_label=2) >>> metrics.auc(fpr, tpr) 0.75 See also -------- roc_auc_score : Computes the area under the ROC curve precision_recall_curve : Compute precision-recall pairs for different probability thresholds """ check_consistent_length(x, y) x = column_or_1d(x) y = column_or_1d(y) if x.shape[0] < 2: raise ValueError('At least 2 points are needed to compute' ' area under curve, but x.shape = %s' % x.shape) direction = 1 if reorder: # reorder the data points according to the x axis and using y to # break ties order = np.lexsort((y, x)) x, y = x[order], y[order] else: dx = np.diff(x) if np.any(dx < 0): if np.all(dx <= 0): direction = -1 else: raise ValueError("Reordering is not turned on, and " "the x array is not increasing: %s" % x) area = direction * np.trapz(y, x) return area def average_precision_score(y_true, y_score, average="macro", sample_weight=None): """Compute average precision (AP) from prediction scores This score corresponds to the area under the precision-recall curve. Note: this implementation is restricted to the binary classification task or multilabel classification task. Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`. Parameters ---------- y_true : array, shape = [n_samples] or [n_samples, n_classes] True binary labels in binary label indicators. y_score : array, shape = [n_samples] or [n_samples, n_classes] Target scores, can either be probability estimates of the positive class, confidence values, or binary decisions. average : string, [None, 'micro', 'macro' (default), 'samples', 'weighted'] If ``None``, the scores for each class are returned. 
Otherwise, this determines the type of averaging performed on the data: ``'micro'``: Calculate metrics globally by considering each element of the label indicator matrix as a label. ``'macro'``: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account. ``'weighted'``: Calculate metrics for each label, and find their average, weighted by support (the number of true instances for each label). ``'samples'``: Calculate metrics for each instance, and find their average. sample_weight : array-like of shape = [n_samples], optional Sample weights. Returns ------- average_precision : float References ---------- .. [1] `Wikipedia entry for the Average precision <http://en.wikipedia.org/wiki/Average_precision>`_ See also -------- roc_auc_score : Area under the ROC curve precision_recall_curve : Compute precision-recall pairs for different probability thresholds Examples -------- >>> import numpy as np >>> from sklearn.metrics import average_precision_score >>> y_true = np.array([0, 0, 1, 1]) >>> y_scores = np.array([0.1, 0.4, 0.35, 0.8]) >>> average_precision_score(y_true, y_scores) # doctest: +ELLIPSIS 0.79... """ def _binary_average_precision(y_true, y_score, sample_weight=None): precision, recall, thresholds = precision_recall_curve( y_true, y_score, sample_weight=sample_weight) return auc(recall, precision) return _average_binary_score(_binary_average_precision, y_true, y_score, average, sample_weight=sample_weight) def roc_auc_score(y_true, y_score, average="macro", sample_weight=None): """Compute Area Under the Curve (AUC) from prediction scores Note: this implementation is restricted to the binary classification task or multilabel classification task in label indicator format. Read more in the :ref:`User Guide <roc_metrics>`. Parameters ---------- y_true : array, shape = [n_samples] or [n_samples, n_classes] True binary labels in binary label indicators. y_score : array, shape = [n_samples] or [n_samples, n_classes] Target scores, can either be probability estimates of the positive class, confidence values, or binary decisions. average : string, [None, 'micro', 'macro' (default), 'samples', 'weighted'] If ``None``, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data: ``'micro'``: Calculate metrics globally by considering each element of the label indicator matrix as a label. ``'macro'``: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account. ``'weighted'``: Calculate metrics for each label, and find their average, weighted by support (the number of true instances for each label). ``'samples'``: Calculate metrics for each instance, and find their average. sample_weight : array-like of shape = [n_samples], optional Sample weights. Returns ------- auc : float References ---------- .. [1] `Wikipedia entry for the Receiver operating characteristic <http://en.wikipedia.org/wiki/Receiver_operating_characteristic>`_ See also -------- average_precision_score : Area under the precision-recall curve roc_curve : Compute Receiver operating characteristic (ROC) Examples -------- >>> import numpy as np >>> from sklearn.metrics import roc_auc_score >>> y_true = np.array([0, 0, 1, 1]) >>> y_scores = np.array([0.1, 0.4, 0.35, 0.8]) >>> roc_auc_score(y_true, y_scores) 0.75 """ def _binary_roc_auc_score(y_true, y_score, sample_weight=None): if len(np.unique(y_true)) != 2: raise ValueError("Only one class present in y_true. 
ROC AUC score " "is not defined in that case.") fpr, tpr, tresholds = roc_curve(y_true, y_score, sample_weight=sample_weight) return auc(fpr, tpr, reorder=True) return _average_binary_score( _binary_roc_auc_score, y_true, y_score, average, sample_weight=sample_weight) def _binary_clf_curve(y_true, y_score, pos_label=None, sample_weight=None): """Calculate true and false positives per binary classification threshold. Parameters ---------- y_true : array, shape = [n_samples] True targets of binary classification y_score : array, shape = [n_samples] Estimated probabilities or decision function pos_label : int, optional (default=None) The label of the positive class sample_weight : array-like of shape = [n_samples], optional Sample weights. Returns ------- fps : array, shape = [n_thresholds] A count of false positives, at index i being the number of negative samples assigned a score >= thresholds[i]. The total number of negative samples is equal to fps[-1] (thus true negatives are given by fps[-1] - fps). tps : array, shape = [n_thresholds <= len(np.unique(y_score))] An increasing count of true positives, at index i being the number of positive samples assigned a score >= thresholds[i]. The total number of positive samples is equal to tps[-1] (thus false negatives are given by tps[-1] - tps). thresholds : array, shape = [n_thresholds] Decreasing score values. """ check_consistent_length(y_true, y_score) y_true = column_or_1d(y_true) y_score = column_or_1d(y_score) if sample_weight is not None: sample_weight = column_or_1d(sample_weight) # ensure binary classification if pos_label is not specified classes = np.unique(y_true) if (pos_label is None and not (np.all(classes == [0, 1]) or np.all(classes == [-1, 1]) or np.all(classes == [0]) or np.all(classes == [-1]) or np.all(classes == [1]))): raise ValueError("Data is not binary and pos_label is not specified") elif pos_label is None: pos_label = 1. # make y_true a boolean vector y_true = (y_true == pos_label) # sort scores and corresponding truth values desc_score_indices = np.argsort(y_score, kind="mergesort")[::-1] y_score = y_score[desc_score_indices] y_true = y_true[desc_score_indices] if sample_weight is not None: weight = sample_weight[desc_score_indices] else: weight = 1. # y_score typically has many tied values. Here we extract # the indices associated with the distinct values. We also # concatenate a value for the end of the curve. # We need to use isclose to avoid spurious repeated thresholds # stemming from floating point roundoff errors. distinct_value_indices = np.where(np.logical_not(isclose( np.diff(y_score), 0)))[0] threshold_idxs = np.r_[distinct_value_indices, y_true.size - 1] # accumulate the true positives with decreasing threshold tps = (y_true * weight).cumsum()[threshold_idxs] if sample_weight is not None: fps = weight.cumsum()[threshold_idxs] - tps else: fps = 1 + threshold_idxs - tps return fps, tps, y_score[threshold_idxs] def precision_recall_curve(y_true, probas_pred, pos_label=None, sample_weight=None): """Compute precision-recall pairs for different probability thresholds Note: this implementation is restricted to the binary classification task. The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the number of true positives and ``fp`` the number of false positives. The precision is intuitively the ability of the classifier not to label as positive a sample that is negative. The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of true positives and ``fn`` the number of false negatives. 
The recall is intuitively the ability of the classifier to find all the positive samples. The last precision and recall values are 1. and 0. respectively and do not have a corresponding threshold. This ensures that the graph starts on the x axis. Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`. Parameters ---------- y_true : array, shape = [n_samples] True targets of binary classification in range {-1, 1} or {0, 1}. probas_pred : array, shape = [n_samples] Estimated probabilities or decision function. pos_label : int, optional (default=None) The label of the positive class sample_weight : array-like of shape = [n_samples], optional Sample weights. Returns ------- precision : array, shape = [n_thresholds + 1] Precision values such that element i is the precision of predictions with score >= thresholds[i] and the last element is 1. recall : array, shape = [n_thresholds + 1] Decreasing recall values such that element i is the recall of predictions with score >= thresholds[i] and the last element is 0. thresholds : array, shape = [n_thresholds <= len(np.unique(probas_pred))] Increasing thresholds on the decision function used to compute precision and recall. Examples -------- >>> import numpy as np >>> from sklearn.metrics import precision_recall_curve >>> y_true = np.array([0, 0, 1, 1]) >>> y_scores = np.array([0.1, 0.4, 0.35, 0.8]) >>> precision, recall, thresholds = precision_recall_curve( ... y_true, y_scores) >>> precision # doctest: +ELLIPSIS array([ 0.66..., 0.5 , 1. , 1. ]) >>> recall array([ 1. , 0.5, 0.5, 0. ]) >>> thresholds array([ 0.35, 0.4 , 0.8 ]) """ fps, tps, thresholds = _binary_clf_curve(y_true, probas_pred, pos_label=pos_label, sample_weight=sample_weight) precision = tps / (tps + fps) recall = tps / tps[-1] # stop when full recall attained # and reverse the outputs so recall is decreasing last_ind = tps.searchsorted(tps[-1]) sl = slice(last_ind, None, -1) return np.r_[precision[sl], 1], np.r_[recall[sl], 0], thresholds[sl] def roc_curve(y_true, y_score, pos_label=None, sample_weight=None): """Compute Receiver operating characteristic (ROC) Note: this implementation is restricted to the binary classification task. Read more in the :ref:`User Guide <roc_metrics>`. Parameters ---------- y_true : array, shape = [n_samples] True binary labels in range {0, 1} or {-1, 1}. If labels are not binary, pos_label should be explicitly given. y_score : array, shape = [n_samples] Target scores, can either be probability estimates of the positive class or confidence values. pos_label : int Label considered as positive and others are considered negative. sample_weight : array-like of shape = [n_samples], optional Sample weights. Returns ------- fpr : array, shape = [>2] Increasing false positive rates such that element i is the false positive rate of predictions with score >= thresholds[i]. tpr : array, shape = [>2] Increasing true positive rates such that element i is the true positive rate of predictions with score >= thresholds[i]. thresholds : array, shape = [n_thresholds] Decreasing thresholds on the decision function used to compute fpr and tpr. `thresholds[0]` represents no instances being predicted and is arbitrarily set to `max(y_score) + 1`. See also -------- roc_auc_score : Compute Area Under the Curve (AUC) from prediction scores Notes ----- Since the thresholds are sorted from low to high values, they are reversed upon returning them to ensure they correspond to both ``fpr`` and ``tpr``, which are sorted in reversed order during their calculation. 
    References
    ----------
    .. [1] `Wikipedia entry for the Receiver operating characteristic
            <http://en.wikipedia.org/wiki/Receiver_operating_characteristic>`_

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn import metrics
    >>> y = np.array([1, 1, 2, 2])
    >>> scores = np.array([0.1, 0.4, 0.35, 0.8])
    >>> fpr, tpr, thresholds = metrics.roc_curve(y, scores, pos_label=2)
    >>> fpr
    array([ 0. ,  0.5,  0.5,  1. ])
    >>> tpr
    array([ 0.5,  0.5,  1. ,  1. ])
    >>> thresholds
    array([ 0.8 ,  0.4 ,  0.35,  0.1 ])

    """
    fps, tps, thresholds = _binary_clf_curve(
        y_true, y_score, pos_label=pos_label, sample_weight=sample_weight)

    if tps.size == 0 or fps[0] != 0:
        # Add an extra threshold position if necessary
        tps = np.r_[0, tps]
        fps = np.r_[0, fps]
        thresholds = np.r_[thresholds[0] + 1, thresholds]

    if fps[-1] <= 0:
        warnings.warn("No negative samples in y_true, "
                      "false positive value should be meaningless",
                      UndefinedMetricWarning)
        fpr = np.repeat(np.nan, fps.shape)
    else:
        fpr = fps / fps[-1]

    if tps[-1] <= 0:
        warnings.warn("No positive samples in y_true, "
                      "true positive value should be meaningless",
                      UndefinedMetricWarning)
        tpr = np.repeat(np.nan, tps.shape)
    else:
        tpr = tps / tps[-1]

    return fpr, tpr, thresholds


def label_ranking_average_precision_score(y_true, y_score):
    """Compute ranking-based average precision

    Label ranking average precision (LRAP) is the average over each ground
    truth label assigned to each sample, of the ratio of true vs. total
    labels with lower score. This metric is used in multilabel ranking
    problems, where the goal is to give a better rank to the labels
    associated with each sample.

    The obtained score is always strictly greater than 0 and the best value
    is 1.

    Read more in the :ref:`User Guide <label_ranking_average_precision>`.

    Parameters
    ----------
    y_true : array or sparse matrix, shape = [n_samples, n_labels]
        True binary labels in binary indicator format.

    y_score : array, shape = [n_samples, n_labels]
        Target scores, can either be probability estimates of the positive
        class, confidence values, or binary decisions.

    Returns
    -------
    score : float

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.metrics import label_ranking_average_precision_score
    >>> y_true = np.array([[1, 0, 0], [0, 0, 1]])
    >>> y_score = np.array([[0.75, 0.5, 1], [1, 0.2, 0.1]])
    >>> label_ranking_average_precision_score(y_true, y_score) \
        # doctest: +ELLIPSIS
    0.416...

    """
    check_consistent_length(y_true, y_score)
    y_true = check_array(y_true, ensure_2d=False)
    y_score = check_array(y_score, ensure_2d=False)

    if y_true.shape != y_score.shape:
        raise ValueError("y_true and y_score have different shape")

    # Handle badly formatted array and the degenerate case with one label
    y_type = type_of_target(y_true)
    if (y_type != "multilabel-indicator" and
            not (y_type == "binary" and y_true.ndim == 2)):
        raise ValueError("{0} format is not supported".format(y_type))

    y_true = csr_matrix(y_true)
    y_score = -y_score

    n_samples, n_labels = y_true.shape

    out = 0.
    for i, (start, stop) in enumerate(zip(y_true.indptr, y_true.indptr[1:])):
        relevant = y_true.indices[start:stop]

        if (relevant.size == 0 or relevant.size == n_labels):
            # If all labels are relevant or irrelevant, the score is also
            # equal to 1. The label ranking has no meaning.
            out += 1.
continue scores_i = y_score[i] rank = rankdata(scores_i, 'max')[relevant] L = rankdata(scores_i[relevant], 'max') out += (L / rank).mean() return out / n_samples def coverage_error(y_true, y_score, sample_weight=None): """Coverage error measure Compute how far we need to go through the ranked scores to cover all true labels. The best value is equal to the average number of labels in ``y_true`` per sample. Ties in ``y_scores`` are broken by giving maximal rank that would have been assigned to all tied values. Read more in the :ref:`User Guide <coverage_error>`. Parameters ---------- y_true : array, shape = [n_samples, n_labels] True binary labels in binary indicator format. y_score : array, shape = [n_samples, n_labels] Target scores, can either be probability estimates of the positive class, confidence values, or binary decisions. sample_weight : array-like of shape = [n_samples], optional Sample weights. Returns ------- coverage_error : float References ---------- .. [1] Tsoumakas, G., Katakis, I., & Vlahavas, I. (2010). Mining multi-label data. In Data mining and knowledge discovery handbook (pp. 667-685). Springer US. """ y_true = check_array(y_true, ensure_2d=False) y_score = check_array(y_score, ensure_2d=False) check_consistent_length(y_true, y_score, sample_weight) y_type = type_of_target(y_true) if y_type != "multilabel-indicator": raise ValueError("{0} format is not supported".format(y_type)) if y_true.shape != y_score.shape: raise ValueError("y_true and y_score have different shape") y_score_mask = np.ma.masked_array(y_score, mask=np.logical_not(y_true)) y_min_relevant = y_score_mask.min(axis=1).reshape((-1, 1)) coverage = (y_score >= y_min_relevant).sum(axis=1) coverage = coverage.filled(0) return np.average(coverage, weights=sample_weight) def label_ranking_loss(y_true, y_score, sample_weight=None): """Compute Ranking loss measure Compute the average number of label pairs that are incorrectly ordered given y_score weighted by the size of the label set and the number of labels not in the label set. This is similar to the error set size, but weighted by the number of relevant and irrelevant labels. The best performance is achieved with a ranking loss of zero. Read more in the :ref:`User Guide <label_ranking_loss>`. Parameters ---------- y_true : array or sparse matrix, shape = [n_samples, n_labels] True binary labels in binary indicator format. y_score : array, shape = [n_samples, n_labels] Target scores, can either be probability estimates of the positive class, confidence values, or binary decisions. sample_weight : array-like of shape = [n_samples], optional Sample weights. Returns ------- loss : float References ---------- .. [1] Tsoumakas, G., Katakis, I., & Vlahavas, I. (2010). Mining multi-label data. In Data mining and knowledge discovery handbook (pp. 667-685). Springer US. 
""" y_true = check_array(y_true, ensure_2d=False, accept_sparse='csr') y_score = check_array(y_score, ensure_2d=False) check_consistent_length(y_true, y_score, sample_weight) y_type = type_of_target(y_true) if y_type not in ("multilabel-indicator",): raise ValueError("{0} format is not supported".format(y_type)) if y_true.shape != y_score.shape: raise ValueError("y_true and y_score have different shape") n_samples, n_labels = y_true.shape y_true = csr_matrix(y_true) loss = np.zeros(n_samples) for i, (start, stop) in enumerate(zip(y_true.indptr, y_true.indptr[1:])): # Sort and bin the label scores unique_scores, unique_inverse = np.unique(y_score[i], return_inverse=True) true_at_reversed_rank = bincount( unique_inverse[y_true.indices[start:stop]], minlength=len(unique_scores)) all_at_reversed_rank = bincount(unique_inverse, minlength=len(unique_scores)) false_at_reversed_rank = all_at_reversed_rank - true_at_reversed_rank # if the scores are ordered, it's possible to count the number of # incorrectly ordered paires in linear time by cumulatively counting # how many false labels of a given score have a score higher than the # accumulated true labels with lower score. loss[i] = np.dot(true_at_reversed_rank.cumsum(), false_at_reversed_rank) n_positives = count_nonzero(y_true, axis=1) with np.errstate(divide="ignore", invalid="ignore"): loss /= ((n_labels - n_positives) * n_positives) # When there is no positive or no negative labels, those values should # be consider as correct, i.e. the ranking doesn't matter. loss[np.logical_or(n_positives == 0, n_positives == n_labels)] = 0. return np.average(loss, weights=sample_weight)
bsd-3-clause
GunnerJnr/_CodeInstitute
Stream-3/Full-Stack-Development/19.Djangos-Testing-Framework/4.How-To-Test-Models/we_are_social/accounts/forms.py
8
1889
from django import forms
from django.contrib.auth.forms import UserCreationForm
from django.core.exceptions import ValidationError

from accounts.models import User


class UserRegistrationForm(UserCreationForm):
    MONTH_ABBREVIATIONS = [
        'Jan', 'Feb', 'Mar', 'Apr', 'May', 'June', 'July',
        'Aug', 'Sept', 'Oct', 'Nov', 'Dec'
    ]
    MONTH_CHOICES = list(enumerate(MONTH_ABBREVIATIONS, 1))
    YEAR_CHOICES = [(i, i) for i in range(2017, 2038)]

    credit_card_number = forms.CharField(label='Credit Card Number')
    cvv = forms.CharField(label='Security Code (CVV)')
    expiry_month = forms.ChoiceField(label="Month", choices=MONTH_CHOICES)
    expiry_year = forms.ChoiceField(label="Year", choices=YEAR_CHOICES)
    stripe_id = forms.CharField(widget=forms.HiddenInput)
    password1 = forms.CharField(
        label='Password',
        widget=forms.PasswordInput
    )
    password2 = forms.CharField(
        label='Password Confirmation',
        widget=forms.PasswordInput
    )

    class Meta:
        model = User
        fields = ['email', 'password1', 'password2', 'stripe_id']
        exclude = ['username']

    def clean_password2(self):
        password1 = self.cleaned_data.get('password1')
        password2 = self.cleaned_data.get('password2')

        if password1 and password2 and password1 != password2:
            message = "Passwords do not match"
            raise ValidationError(message)
        return password2

    def save(self, commit=True):
        instance = super(UserRegistrationForm, self).save(commit=False)
        # automatically set the username to the email address so each user
        # has a unique identifier
        instance.username = instance.email
        if commit:
            instance.save()
        return instance


class UserLoginForm(forms.Form):
    email = forms.EmailField()
    password = forms.CharField(widget=forms.PasswordInput)
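A quick sketch of the clean_password2 failure path (run inside a configured Django project or test case; all field values below are placeholders):

form = UserRegistrationForm(data={
    'email': 'user@example.com',
    'password1': 'secret-one',
    'password2': 'secret-two',   # mismatch triggers the ValidationError
    'credit_card_number': '4242424242424242',
    'cvv': '123',
    'expiry_month': '1',
    'expiry_year': '2017',
    'stripe_id': 'tok_placeholder',
})
assert not form.is_valid()
assert 'password2' in form.errors  # "Passwords do not match"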
mit
atlassian/boto
tests/integration/ec2/elb/test_connection.py
100
12554
# Copyright (c) 2010 Hunter Blanks http://artifex.org/~hblanks/ # All rights reserved. # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, dis- # tribute, sublicense, and/or sell copies of the Software, and to permit # persons to whom the Software is furnished to do so, subject to the fol- # lowing conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT # SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. """ Initial, and very limited, unit tests for ELBConnection. """ import boto import time from tests.compat import unittest from boto.ec2.elb import ELBConnection import boto.ec2.elb class ELBConnectionTest(unittest.TestCase): ec2 = True def setUp(self): """Creates a named load balancer that can be safely deleted at the end of each test""" self.conn = ELBConnection() self.name = 'elb-boto-unit-test' self.availability_zones = ['us-east-1a'] self.listeners = [(80, 8000, 'HTTP')] self.balancer = self.conn.create_load_balancer( self.name, self.availability_zones, self.listeners) # S3 bucket for log tests self.s3 = boto.connect_s3() self.timestamp = str(int(time.time())) self.bucket_name = 'boto-elb-%s' % self.timestamp self.bucket = self.s3.create_bucket(self.bucket_name) self.bucket.set_canned_acl('public-read-write') self.addCleanup(self.cleanup_bucket, self.bucket) def cleanup_bucket(self, bucket): for key in bucket.get_all_keys(): key.delete() bucket.delete() def tearDown(self): """ Deletes the test load balancer after every test. It does not delete EVERY load balancer in your account""" self.balancer.delete() def test_build_list_params(self): params = {} self.conn.build_list_params( params, ['thing1', 'thing2', 'thing3'], 'ThingName%d') expected_params = { 'ThingName1': 'thing1', 'ThingName2': 'thing2', 'ThingName3': 'thing3' } self.assertEqual(params, expected_params) # TODO: for these next tests, consider sleeping until our load # balancer comes up, then testing for connectivity to # balancer.dns_name, along the lines of the existing EC2 unit tests. 
def test_create_load_balancer(self): self.assertEqual(self.balancer.name, self.name) self.assertEqual(self.balancer.availability_zones, self.availability_zones) self.assertEqual(self.balancer.listeners, self.listeners) balancers = self.conn.get_all_load_balancers() self.assertEqual([lb.name for lb in balancers], [self.name]) def test_create_load_balancer_listeners(self): more_listeners = [(443, 8001, 'HTTP')] self.conn.create_load_balancer_listeners(self.name, more_listeners) balancers = self.conn.get_all_load_balancers() self.assertEqual([lb.name for lb in balancers], [self.name]) self.assertEqual( sorted(l.get_tuple() for l in balancers[0].listeners), sorted(self.listeners + more_listeners) ) def test_delete_load_balancer_listeners(self): mod_listeners = [(80, 8000, 'HTTP'), (443, 8001, 'HTTP')] mod_name = self.name + "-mod" self.mod_balancer = self.conn.create_load_balancer( mod_name, self.availability_zones, mod_listeners) mod_balancers = self.conn.get_all_load_balancers( load_balancer_names=[mod_name]) self.assertEqual([lb.name for lb in mod_balancers], [mod_name]) self.assertEqual( sorted([l.get_tuple() for l in mod_balancers[0].listeners]), sorted(mod_listeners)) self.conn.delete_load_balancer_listeners(self.mod_balancer.name, [443]) mod_balancers = self.conn.get_all_load_balancers( load_balancer_names=[mod_name]) self.assertEqual([lb.name for lb in mod_balancers], [mod_name]) self.assertEqual([l.get_tuple() for l in mod_balancers[0].listeners], mod_listeners[:1]) self.mod_balancer.delete() def test_create_load_balancer_listeners_with_policies(self): more_listeners = [(443, 8001, 'HTTP')] self.conn.create_load_balancer_listeners(self.name, more_listeners) lb_policy_name = 'lb-policy' self.conn.create_lb_cookie_stickiness_policy( 1000, self.name, lb_policy_name) self.conn.set_lb_policies_of_listener( self.name, self.listeners[0][0], lb_policy_name) app_policy_name = 'app-policy' self.conn.create_app_cookie_stickiness_policy( 'appcookie', self.name, app_policy_name) self.conn.set_lb_policies_of_listener( self.name, more_listeners[0][0], app_policy_name) balancers = self.conn.get_all_load_balancers( load_balancer_names=[self.name]) self.assertEqual([lb.name for lb in balancers], [self.name]) self.assertEqual( sorted(l.get_tuple() for l in balancers[0].listeners), sorted(self.listeners + more_listeners) ) # Policy names should be checked here once they are supported # in the Listener object. 
def test_create_load_balancer_backend_with_policies(self): other_policy_name = 'enable-proxy-protocol' backend_port = 8081 self.conn.create_lb_policy( self.name, other_policy_name, 'ProxyProtocolPolicyType', {'ProxyProtocol': True}) self.conn.set_lb_policies_of_backend_server( self.name, backend_port, [other_policy_name]) balancers = self.conn.get_all_load_balancers( load_balancer_names=[self.name]) self.assertEqual([lb.name for lb in balancers], [self.name]) self.assertEqual(len(balancers[0].policies.other_policies), 1) self.assertEqual(balancers[0].policies.other_policies[0].policy_name, other_policy_name) self.assertEqual(len(balancers[0].backends), 1) self.assertEqual(balancers[0].backends[0].instance_port, backend_port) self.assertEqual(balancers[0].backends[0].policies[0].policy_name, other_policy_name) self.conn.set_lb_policies_of_backend_server(self.name, backend_port, []) balancers = self.conn.get_all_load_balancers( load_balancer_names=[self.name]) self.assertEqual([lb.name for lb in balancers], [self.name]) self.assertEqual(len(balancers[0].policies.other_policies), 1) self.assertEqual(len(balancers[0].backends), 0) def test_create_load_balancer_complex_listeners(self): complex_listeners = [ (8080, 80, 'HTTP', 'HTTP'), (2525, 25, 'TCP', 'TCP'), ] self.conn.create_load_balancer_listeners( self.name, complex_listeners=complex_listeners ) balancers = self.conn.get_all_load_balancers( load_balancer_names=[self.name] ) self.assertEqual([lb.name for lb in balancers], [self.name]) self.assertEqual( sorted(l.get_complex_tuple() for l in balancers[0].listeners), # We need an extra 'HTTP' here over what ``self.listeners`` uses. sorted([(80, 8000, 'HTTP', 'HTTP')] + complex_listeners) ) def test_load_balancer_access_log(self): attributes = self.balancer.get_attributes() self.assertEqual(False, attributes.access_log.enabled) attributes.access_log.enabled = True attributes.access_log.s3_bucket_name = self.bucket_name attributes.access_log.s3_bucket_prefix = 'access-logs' attributes.access_log.emit_interval = 5 self.conn.modify_lb_attribute(self.balancer.name, 'accessLog', attributes.access_log) new_attributes = self.balancer.get_attributes() self.assertEqual(True, new_attributes.access_log.enabled) self.assertEqual(self.bucket_name, new_attributes.access_log.s3_bucket_name) self.assertEqual('access-logs', new_attributes.access_log.s3_bucket_prefix) self.assertEqual(5, new_attributes.access_log.emit_interval) def test_load_balancer_get_attributes(self): attributes = self.balancer.get_attributes() connection_draining = self.conn.get_lb_attribute(self.balancer.name, 'ConnectionDraining') self.assertEqual(connection_draining.enabled, attributes.connection_draining.enabled) self.assertEqual(connection_draining.timeout, attributes.connection_draining.timeout) access_log = self.conn.get_lb_attribute(self.balancer.name, 'AccessLog') self.assertEqual(access_log.enabled, attributes.access_log.enabled) self.assertEqual(access_log.s3_bucket_name, attributes.access_log.s3_bucket_name) self.assertEqual(access_log.s3_bucket_prefix, attributes.access_log.s3_bucket_prefix) self.assertEqual(access_log.emit_interval, attributes.access_log.emit_interval) cross_zone_load_balancing = self.conn.get_lb_attribute( self.balancer.name, 'CrossZoneLoadBalancing') self.assertEqual(cross_zone_load_balancing, attributes.cross_zone_load_balancing.enabled) def change_and_verify_load_balancer_connection_draining( self, enabled, timeout=None): attributes = self.balancer.get_attributes() attributes.connection_draining.enabled = 
enabled if timeout is not None: attributes.connection_draining.timeout = timeout self.conn.modify_lb_attribute( self.balancer.name, 'ConnectionDraining', attributes.connection_draining) attributes = self.balancer.get_attributes() self.assertEqual(enabled, attributes.connection_draining.enabled) if timeout is not None: self.assertEqual(timeout, attributes.connection_draining.timeout) def test_load_balancer_connection_draining_config(self): self.change_and_verify_load_balancer_connection_draining(True, 128) self.change_and_verify_load_balancer_connection_draining(True, 256) self.change_and_verify_load_balancer_connection_draining(False) self.change_and_verify_load_balancer_connection_draining(True, 64) def test_set_load_balancer_policies_of_listeners(self): more_listeners = [(443, 8001, 'HTTP')] self.conn.create_load_balancer_listeners(self.name, more_listeners) lb_policy_name = 'lb-policy' self.conn.create_lb_cookie_stickiness_policy( 1000, self.name, lb_policy_name ) self.conn.set_lb_policies_of_listener( self.name, self.listeners[0][0], lb_policy_name ) # Try to remove the policy by passing empty list. # http://docs.aws.amazon.com/ElasticLoadBalancing/latest/APIReference/API_SetLoadBalancerPoliciesOfListener.html # documents this as the way to remove policies. self.conn.set_lb_policies_of_listener( self.name, self.listeners[0][0], [] ) def test_can_make_sigv4_call(self): connection = boto.ec2.elb.connect_to_region('eu-central-1') lbs = connection.get_all_load_balancers() self.assertTrue(isinstance(lbs, list)) if __name__ == '__main__': unittest.main()
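A minimal sketch of the create/use/delete lifecycle that setUp and tearDown implement above (names and region are placeholders, and the calls hit the real AWS API):

import boto.ec2.elb

conn = boto.ec2.elb.connect_to_region('us-east-1')
lb = conn.create_load_balancer('example-lb', ['us-east-1a'], [(80, 8000, 'HTTP')])
try:
    balancers = conn.get_all_load_balancers(load_balancer_names=['example-lb'])
    print([l.get_tuple() for l in balancers[0].listeners])
finally:
    lb.delete()  # always clean up, as tearDown does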
mit
suiyuan2009/tensorflow
tensorflow/python/kernel_tests/sparse_tensors_map_ops_test.py
134
9952
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for SparseTensorsMap.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.python.client import session from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import sparse_tensor as sparse_tensor_lib from tensorflow.python.ops import array_ops from tensorflow.python.ops import sparse_ops from tensorflow.python.ops import variables from tensorflow.python.platform import test # pylint: disable=protected-access add_sparse_to_tensors_map = sparse_ops._add_sparse_to_tensors_map add_many_sparse_to_tensors_map = sparse_ops._add_many_sparse_to_tensors_map take_many_sparse_from_tensors_map = ( sparse_ops._take_many_sparse_from_tensors_map) # pylint: enable=protected-access class SparseTensorsMapTest(test.TestCase): def _SparseTensorPlaceholder(self, dtype=None): if dtype is None: dtype = dtypes.int32 return sparse_tensor_lib.SparseTensor( array_ops.placeholder(dtypes.int64), array_ops.placeholder(dtype), array_ops.placeholder(dtypes.int64)) def _SparseTensorValue_5x6(self, permutation): ind = np.array([[0, 0], [1, 0], [1, 3], [1, 4], [3, 2], [3, 3]]).astype(np.int64) val = np.array([0, 10, 13, 14, 32, 33]).astype(np.int32) ind = ind[permutation] val = val[permutation] shape = np.array([5, 6]).astype(np.int64) return sparse_tensor_lib.SparseTensorValue(ind, val, shape) def _SparseTensorValue_3x4(self, permutation): ind = np.array([[0, 0], [1, 0], [1, 2], [1, 3], [2, 2], [2, 3]]).astype(np.int64) val = np.array([0, 10, 13, 14, 32, 33]).astype(np.int32) ind = ind[permutation] val = val[permutation] shape = np.array([3, 4]).astype(np.int64) return sparse_tensor_lib.SparseTensorValue(ind, val, shape) def _SparseTensorValue_1x1x1(self): ind = np.array([[0, 0, 0]]).astype(np.int64) val = np.array([0]).astype(np.int32) shape = np.array([3, 4, 5]).astype(np.int64) return sparse_tensor_lib.SparseTensorValue(ind, val, shape) def testAddTakeMany(self): with self.test_session(graph=ops.Graph(), use_gpu=False) as sess: sp_input0 = self._SparseTensorValue_5x6(np.arange(6)) sp_input1 = self._SparseTensorValue_3x4(np.arange(6)) handle0 = add_sparse_to_tensors_map(sp_input0, shared_name="a") handle1 = add_sparse_to_tensors_map(sp_input1, shared_name="a") self.assertEqual(handle0.get_shape(), ()) handles_concat = array_ops.stack([handle0, handle1]) sp_out = take_many_sparse_from_tensors_map( sparse_map_op=handle0.op, sparse_handles=handles_concat) combined_indices, combined_values, combined_shape = sess.run(sp_out) self.assertAllEqual(combined_indices[:6, 0], [0] * 6) # minibatch 0 self.assertAllEqual(combined_indices[:6, 1:], sp_input0[0]) self.assertAllEqual(combined_indices[6:, 0], [1] * 6) # minibatch 1 self.assertAllEqual(combined_indices[6:, 1:], sp_input1[0]) 
self.assertAllEqual(combined_values[:6], sp_input0[1]) self.assertAllEqual(combined_values[6:], sp_input1[1]) self.assertAllEqual(combined_shape, [2, 5, 6]) def testFeedAddTakeMany(self): with self.test_session(use_gpu=False) as sess: sp_input = self._SparseTensorPlaceholder() input0_val = self._SparseTensorValue_5x6(np.arange(6)) input1_val = self._SparseTensorValue_3x4(np.arange(6)) handle = add_sparse_to_tensors_map(sp_input) handle0_value = sess.run(handle, feed_dict={sp_input: input0_val}) handle1_value = sess.run(handle, feed_dict={sp_input: input1_val}) sparse_handles = ops.convert_to_tensor( [handle0_value, handle1_value], dtype=dtypes.int64) sp_roundtrip = take_many_sparse_from_tensors_map( sparse_map_op=handle.op, sparse_handles=sparse_handles) combined_indices, combined_values, combined_shape = sess.run(sp_roundtrip) self.assertAllEqual(combined_indices[:6, 0], [0] * 6) # minibatch 0 self.assertAllEqual(combined_indices[:6, 1:], input0_val[0]) self.assertAllEqual(combined_indices[6:, 0], [1] * 6) # minibatch 1 self.assertAllEqual(combined_indices[6:, 1:], input1_val[0]) self.assertAllEqual(combined_values[:6], input0_val[1]) self.assertAllEqual(combined_values[6:], input1_val[1]) self.assertAllEqual(combined_shape, [2, 5, 6]) def testAddManyTakeManyRoundTrip(self): with self.test_session(use_gpu=False) as sess: # N == 4 because shape_value == [4, 5] indices_value = np.array([[0, 0], [0, 1], [2, 0]], dtype=np.int64) values_value = np.array([b"a", b"b", b"c"]) shape_value = np.array([4, 5], dtype=np.int64) sparse_tensor = self._SparseTensorPlaceholder(dtype=dtypes.string) handles = add_many_sparse_to_tensors_map(sparse_tensor) roundtrip = take_many_sparse_from_tensors_map( sparse_map_op=handles.op, sparse_handles=handles) handles_value, roundtrip_value = sess.run( [handles, roundtrip], feed_dict={ sparse_tensor.indices: indices_value, sparse_tensor.values: values_value, sparse_tensor.dense_shape: shape_value }) self.assertEqual(handles_value.shape, (4,)) self.assertAllEqual(roundtrip_value.indices, indices_value) self.assertAllEqual(roundtrip_value.values, values_value) self.assertAllEqual(roundtrip_value.dense_shape, shape_value) def testDeserializeFailsInconsistentRank(self): with self.test_session(use_gpu=False) as sess: sp_input = self._SparseTensorPlaceholder() input0_val = self._SparseTensorValue_5x6(np.arange(6)) input1_val = self._SparseTensorValue_1x1x1() handle = add_sparse_to_tensors_map(sp_input) handle0_value = sess.run(handle, feed_dict={sp_input: input0_val}) handle1_value = sess.run(handle, feed_dict={sp_input: input1_val}) handle_concat = ops.convert_to_tensor( [handle0_value, handle1_value], dtype=dtypes.int64) sp_roundtrip = take_many_sparse_from_tensors_map( sparse_map_op=handle.op, sparse_handles=handle_concat) with self.assertRaisesOpError( r"Inconsistent rank across SparseTensors: rank prior to " r"SparseTensor\[1\] was: 3 but rank of SparseTensor\[1\] is: 4"): sess.run(sp_roundtrip) def testTakeManyFailsWrongInputOp(self): with self.test_session(use_gpu=False) as sess: input_val = self._SparseTensorValue_5x6(np.arange(6)) handle = add_sparse_to_tensors_map(input_val) handle_value = sess.run(handle) bad_handle = handle_value + 10 sp_roundtrip = take_many_sparse_from_tensors_map( sparse_map_op=handle.op, sparse_handles=[handle_value, bad_handle]) with self.assertRaisesOpError(r"Unable to find SparseTensor: 10"): sess.run(sp_roundtrip) class BenchmarkSparseTensorsMapVsSerialization(test.Benchmark): def benchmarkVeryLarge2DFloatSparseTensor(self): 
np.random.seed(127) num_elements = 10000 batch_size = 64 indices_batch = np.random.randint( batch_size, size=num_elements, dtype=np.int64) indices_value = np.arange(num_elements, dtype=np.int64) indices = np.asarray( sorted(zip(indices_batch, indices_value)), dtype=np.int64) values = ["feature_value_for_embedding_lookup"] * num_elements shape = np.asarray([batch_size, num_elements], dtype=np.int64) with session.Session() as sess: with ops.device("/cpu:0"): indices = variables.Variable(indices) values = variables.Variable(values) shape = variables.Variable(shape) st = sparse_tensor_lib.SparseTensor(indices, values, shape) st_handles = add_many_sparse_to_tensors_map(st) st_roundtrip = take_many_sparse_from_tensors_map( sparse_map_op=st_handles.op, sparse_handles=st_handles) st_roundtrip_op = st_roundtrip.values.op st_serialized = sparse_ops.serialize_many_sparse(st) st_deserialized = sparse_ops.deserialize_many_sparse( st_serialized, dtype=values.dtype) st_deserialized_op = st_deserialized.values.op variables.global_variables_initializer().run() st_roundtrip_values = sess.run(st_roundtrip) st_deserialized_values = sess.run(st_deserialized) np.testing.assert_equal(st_roundtrip_values.values, st_deserialized_values.values) np.testing.assert_equal(st_roundtrip_values.indices, st_deserialized_values.indices) np.testing.assert_equal(st_roundtrip_values.dense_shape, st_deserialized_values.dense_shape) self.run_op_benchmark( sess, st_roundtrip_op, min_iters=2000, name="benchmark_very_large_2d_float_st_tensor_maps") self.run_op_benchmark( sess, st_deserialized_op, min_iters=2000, name="benchmark_very_large_2d_float_st_serialization") if __name__ == "__main__": test.main()
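
# A standalone sketch (not part of the test file) of the add/take round trip
# the tests above exercise; it relies on the same private TF 1.x ops, so this
# is unsupported API, and the tensor contents and shared_name are illustrative.
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.framework import sparse_tensor as sparse_tensor_lib
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import sparse_ops


def _demo_round_trip():
  with session.Session() as sess:
    st = sparse_tensor_lib.SparseTensor(
        indices=np.array([[0, 0], [1, 2]], dtype=np.int64),
        values=np.array([7, 8], dtype=np.int32),
        dense_shape=np.array([3, 4], dtype=np.int64))
    # Store the SparseTensor in a process-local map; the result is a scalar
    # int64 handle naming the stored tensor.
    handle = sparse_ops._add_sparse_to_tensors_map(st, shared_name="demo")
    # A rank-1 vector of handles comes back as one batched SparseTensor whose
    # indices gain a leading minibatch dimension.
    roundtrip = sparse_ops._take_many_sparse_from_tensors_map(
        sparse_map_op=handle.op, sparse_handles=array_ops.stack([handle]))
    return sess.run(roundtrip)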
apache-2.0
wrouesnel/ansible
lib/ansible/plugins/netconf/__init__.py
4
10724
#
# (c) 2017 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

from abc import ABCMeta, abstractmethod
from functools import wraps

from ansible.errors import AnsibleError
from ansible.module_utils.six import with_metaclass
from ansible.module_utils._text import to_bytes

try:
    from ncclient.operations import RPCError
    from ncclient.xml_ import to_xml, to_ele
except ImportError:
    raise AnsibleError("ncclient is not installed")


def ensure_connected(func):
    @wraps(func)
    def wrapped(self, *args, **kwargs):
        if not self._connection._connected:
            self._connection._connect()
        return func(self, *args, **kwargs)
    return wrapped


class NetconfBase(with_metaclass(ABCMeta, object)):
    """
    A base class for implementing Netconf connections

    .. note:: Unlike most of Ansible, nearly all strings in
        :class:`NetconfBase` plugins are byte strings. This is because of
        how close to the underlying platform these plugins operate. Remember
        to mark literal strings as byte string (``b"string"``) and to use
        :func:`~ansible.module_utils._text.to_bytes` and
        :func:`~ansible.module_utils._text.to_text` to avoid unexpected
        problems.

    List of supported RPCs:
        :get: Retrieves running configuration and device state information
        :get_config: Retrieves the specified configuration from the device
        :edit_config: Loads the specified commands into the remote device
        :commit: Loads configuration from candidate to running
        :discard_changes: Discards changes to the candidate datastore
        :validate: Validates the contents of the specified configuration
        :lock: Allows the client to lock the configuration system of a device
        :unlock: Releases a configuration lock, previously obtained with the lock operation
        :copy_config: Creates or replaces an entire configuration datastore with the contents of another complete configuration datastore
        :get-schema: Retrieves the required schema from the device
        :get_capabilities: Retrieves device information and supported rpc methods

        For JUNOS:
        :execute_rpc: RPC to be executed on the remote device
        :load_configuration: Loads the given configuration on the device

    Note: RPC support depends on the capabilities of the remote device.

    :returns: Returns output received from remote device as byte string
    Note: the 'result' or 'error' from the response should be converted to an
          ElementTree object using 'fromstring' to parse the output as an XML
          document

          'get_capabilities()' returns 'result' as a json string.
    Usage:
        from ansible.module_utils.connection import Connection

        conn = Connection()
        data = conn.execute_rpc(rpc)
        reply = fromstring(data)

        data = conn.get_capabilities()
        json.loads(data)

        conn.load_configuration(config=['set system ntp server 1.1.1.1'], action='set', format='text')
    """

    def __init__(self, connection):
        self._connection = connection
        self.m = self._connection._manager

    @ensure_connected
    def rpc(self, name):
        """RPC to be executed on the remote device
           :name: Name of rpc in string format"""
        try:
            obj = to_ele(to_bytes(name, errors='surrogate_or_strict'))
            resp = self.m.rpc(obj)
            return resp.data_xml if hasattr(resp, 'data_xml') else resp.xml
        except RPCError as exc:
            msg = exc.data_xml if hasattr(exc, 'data_xml') else exc.xml
            raise Exception(to_xml(msg))

    @ensure_connected
    def get_config(self, *args, **kwargs):
        """Retrieve all or part of a specified configuration.
           :source: name of the configuration datastore being queried
           :filter: specifies the portion of the configuration to retrieve
           (by default entire configuration is retrieved)"""
        resp = self.m.get_config(*args, **kwargs)
        return resp.data_xml if hasattr(resp, 'data_xml') else resp.xml

    @ensure_connected
    def get(self, *args, **kwargs):
        """Retrieve running configuration and device state information.
        *filter* specifies the portion of the configuration to retrieve
        (by default entire configuration is retrieved)
        """
        resp = self.m.get(*args, **kwargs)
        return resp.data_xml if hasattr(resp, 'data_xml') else resp.xml

    @ensure_connected
    def edit_config(self, *args, **kwargs):
        """Loads all or part of the specified *config* to the *target* configuration datastore.

        :target: is the name of the configuration datastore being edited
        :config: is the configuration, which must be rooted in the `config` element.
                 It can be specified either as a string or an :class:`~xml.etree.ElementTree.Element`.
        :default_operation: if specified must be one of { `"merge"`, `"replace"`, or `"none"` }
        :test_option: if specified must be one of { `"test_then_set"`, `"set"` }
        :error_option: if specified must be one of { `"stop-on-error"`, `"continue-on-error"`, `"rollback-on-error"` }
        The `"rollback-on-error"` *error_option* depends on the `:rollback-on-error` capability.
        """
        resp = self.m.edit_config(*args, **kwargs)
        return resp.data_xml if hasattr(resp, 'data_xml') else resp.xml

    @ensure_connected
    def validate(self, *args, **kwargs):
        """Validate the contents of the specified configuration.
        :source: is the name of the configuration datastore being validated or `config`
        element containing the configuration subtree to be validated
        """
        resp = self.m.validate(*args, **kwargs)
        return resp.data_xml if hasattr(resp, 'data_xml') else resp.xml

    @ensure_connected
    def copy_config(self, *args, **kwargs):
        """Create or replace an entire configuration datastore with the contents of another complete
        configuration datastore.
        :source: is the name of the configuration datastore to use as the source of the
                 copy operation or `config` element containing the configuration subtree to copy
        :target: is the name of the configuration datastore to use as the destination of the copy operation"""
        resp = self.m.copy_config(*args, **kwargs)
        return resp.data_xml if hasattr(resp, 'data_xml') else resp.xml

    @ensure_connected
    def lock(self, *args, **kwargs):
        """Allows the client to lock the configuration system of a device.
        *target* is the name of the configuration datastore to lock
        """
        resp = self.m.lock(*args, **kwargs)
        return resp.data_xml if hasattr(resp, 'data_xml') else resp.xml

    @ensure_connected
    def unlock(self, *args, **kwargs):
        """Release a configuration lock, previously obtained with the lock operation.
        :target: is the name of the configuration datastore to unlock
        """
        resp = self.m.unlock(*args, **kwargs)
        return resp.data_xml if hasattr(resp, 'data_xml') else resp.xml

    @ensure_connected
    def discard_changes(self, *args, **kwargs):
        """Revert the candidate configuration to the currently running configuration.
        Any uncommitted changes are discarded."""
        resp = self.m.discard_changes(*args, **kwargs)
        return resp.data_xml if hasattr(resp, 'data_xml') else resp.xml

    @ensure_connected
    def commit(self, *args, **kwargs):
        """Commit the candidate configuration as the device's new current configuration.
           Depends on the `:candidate` capability.
           A confirmed commit (i.e. if *confirmed* is `True`) is reverted if there is no
           followup commit within the *timeout* interval. If no timeout is specified the
           confirm timeout defaults to 600 seconds (10 minutes).
           A confirming commit may have the *confirmed* parameter but this is not required.
           Depends on the `:confirmed-commit` capability.
        :confirmed: whether this is a confirmed commit
        :timeout: specifies the confirm timeout in seconds
        """
        resp = self.m.commit(*args, **kwargs)
        return resp.data_xml if hasattr(resp, 'data_xml') else resp.xml

    @ensure_connected
    def get_schema(self, *args, **kwargs):
        """Retrieves the required schema from the device
        """
        resp = self.m.get_schema(*args, **kwargs)
        return resp.data_xml if hasattr(resp, 'data_xml') else resp.xml

    @ensure_connected
    def locked(self, *args, **kwargs):
        resp = self.m.locked(*args, **kwargs)
        return resp.data_xml if hasattr(resp, 'data_xml') else resp.xml

    @abstractmethod
    def get_capabilities(self):
        """Retrieves device information and supported
        rpc methods by device platform and return result
        as a string
        """
        pass

    @staticmethod
    def guess_network_os(obj):
        """Identifies the operating system of network device.
        """
        pass

    def get_base_rpc(self):
        """Returns list of base rpc method supported by remote device"""
        return ['get_config', 'edit_config', 'get_capabilities', 'get']

    def put_file(self, source, destination):
        """Copies file over scp to remote device"""
        pass

    def fetch_file(self, source, destination):
        """Fetch file over scp from remote device"""
        pass

# TODO Restore .xml, when ncclient supports it for all platforms
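
# A minimal sketch of a platform plugin built on this base class; the class
# name and the capability payload below are illustrative, not part of Ansible.
import json


class DemoNetconf(NetconfBase):

    def get_capabilities(self):
        # Report the base RPCs plus whatever the ncclient manager learned
        # from the server during session setup, as a JSON string (matching
        # the contract described in the base-class docstring).
        result = {
            'network_api': 'netconf',
            'rpc': self.get_base_rpc(),
            'server_capabilities': list(self.m.server_capabilities),
        }
        return json.dumps(result)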
gpl-3.0
BtbN/xbmc
lib/gtest/test/gtest_output_test.py
1733
12005
#!/usr/bin/env python # # Copyright 2008, Google Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """Tests the text output of Google C++ Testing Framework. SYNOPSIS gtest_output_test.py --build_dir=BUILD/DIR --gengolden # where BUILD/DIR contains the built gtest_output_test_ file. gtest_output_test.py --gengolden gtest_output_test.py """ __author__ = '[email protected] (Zhanyong Wan)' import os import re import sys import gtest_test_utils # The flag for generating the golden file GENGOLDEN_FLAG = '--gengolden' CATCH_EXCEPTIONS_ENV_VAR_NAME = 'GTEST_CATCH_EXCEPTIONS' IS_WINDOWS = os.name == 'nt' # TODO([email protected]): remove the _lin suffix. GOLDEN_NAME = 'gtest_output_test_golden_lin.txt' PROGRAM_PATH = gtest_test_utils.GetTestExecutablePath('gtest_output_test_') # At least one command we exercise must not have the # --gtest_internal_skip_environment_and_ad_hoc_tests flag. COMMAND_LIST_TESTS = ({}, [PROGRAM_PATH, '--gtest_list_tests']) COMMAND_WITH_COLOR = ({}, [PROGRAM_PATH, '--gtest_color=yes']) COMMAND_WITH_TIME = ({}, [PROGRAM_PATH, '--gtest_print_time', '--gtest_internal_skip_environment_and_ad_hoc_tests', '--gtest_filter=FatalFailureTest.*:LoggingTest.*']) COMMAND_WITH_DISABLED = ( {}, [PROGRAM_PATH, '--gtest_also_run_disabled_tests', '--gtest_internal_skip_environment_and_ad_hoc_tests', '--gtest_filter=*DISABLED_*']) COMMAND_WITH_SHARDING = ( {'GTEST_SHARD_INDEX': '1', 'GTEST_TOTAL_SHARDS': '2'}, [PROGRAM_PATH, '--gtest_internal_skip_environment_and_ad_hoc_tests', '--gtest_filter=PassingTest.*']) GOLDEN_PATH = os.path.join(gtest_test_utils.GetSourceDir(), GOLDEN_NAME) def ToUnixLineEnding(s): """Changes all Windows/Mac line endings in s to UNIX line endings.""" return s.replace('\r\n', '\n').replace('\r', '\n') def RemoveLocations(test_output): """Removes all file location info from a Google Test program's output. Args: test_output: the output of a Google Test program. Returns: output with all file location info (in the form of 'DIRECTORY/FILE_NAME:LINE_NUMBER: 'or 'DIRECTORY\\FILE_NAME(LINE_NUMBER): ') replaced by 'FILE_NAME:#: '. 
""" return re.sub(r'.*[/\\](.+)(\:\d+|\(\d+\))\: ', r'\1:#: ', test_output) def RemoveStackTraceDetails(output): """Removes all stack traces from a Google Test program's output.""" # *? means "find the shortest string that matches". return re.sub(r'Stack trace:(.|\n)*?\n\n', 'Stack trace: (omitted)\n\n', output) def RemoveStackTraces(output): """Removes all traces of stack traces from a Google Test program's output.""" # *? means "find the shortest string that matches". return re.sub(r'Stack trace:(.|\n)*?\n\n', '', output) def RemoveTime(output): """Removes all time information from a Google Test program's output.""" return re.sub(r'\(\d+ ms', '(? ms', output) def RemoveTypeInfoDetails(test_output): """Removes compiler-specific type info from Google Test program's output. Args: test_output: the output of a Google Test program. Returns: output with type information normalized to canonical form. """ # some compilers output the name of type 'unsigned int' as 'unsigned' return re.sub(r'unsigned int', 'unsigned', test_output) def NormalizeToCurrentPlatform(test_output): """Normalizes platform specific output details for easier comparison.""" if IS_WINDOWS: # Removes the color information that is not present on Windows. test_output = re.sub('\x1b\\[(0;3\d)?m', '', test_output) # Changes failure message headers into the Windows format. test_output = re.sub(r': Failure\n', r': error: ', test_output) # Changes file(line_number) to file:line_number. test_output = re.sub(r'((\w|\.)+)\((\d+)\):', r'\1:\3:', test_output) return test_output def RemoveTestCounts(output): """Removes test counts from a Google Test program's output.""" output = re.sub(r'\d+ tests?, listed below', '? tests, listed below', output) output = re.sub(r'\d+ FAILED TESTS', '? FAILED TESTS', output) output = re.sub(r'\d+ tests? from \d+ test cases?', '? tests from ? test cases', output) output = re.sub(r'\d+ tests? from ([a-zA-Z_])', r'? tests from \1', output) return re.sub(r'\d+ tests?\.', '? tests.', output) def RemoveMatchingTests(test_output, pattern): """Removes output of specified tests from a Google Test program's output. This function strips not only the beginning and the end of a test but also all output in between. Args: test_output: A string containing the test output. pattern: A regex string that matches names of test cases or tests to remove. Returns: Contents of test_output with tests whose names match pattern removed. """ test_output = re.sub( r'.*\[ RUN \] .*%s(.|\n)*?\[( FAILED | OK )\] .*%s.*\n' % ( pattern, pattern), '', test_output) return re.sub(r'.*%s.*\n' % pattern, '', test_output) def NormalizeOutput(output): """Normalizes output (the output of gtest_output_test_.exe).""" output = ToUnixLineEnding(output) output = RemoveLocations(output) output = RemoveStackTraceDetails(output) output = RemoveTime(output) return output def GetShellCommandOutput(env_cmd): """Runs a command in a sub-process, and returns its output in a string. Args: env_cmd: The shell command. A 2-tuple where element 0 is a dict of extra environment variables to set, and element 1 is a string with the command and any flags. Returns: A string with the command's combined standard and diagnostic output. """ # Spawns cmd in a sub-process, and gets its standard I/O file objects. # Set and save the environment properly. 
environ = os.environ.copy() environ.update(env_cmd[0]) p = gtest_test_utils.Subprocess(env_cmd[1], env=environ) return p.output def GetCommandOutput(env_cmd): """Runs a command and returns its output with all file location info stripped off. Args: env_cmd: The shell command. A 2-tuple where element 0 is a dict of extra environment variables to set, and element 1 is a string with the command and any flags. """ # Disables exception pop-ups on Windows. environ, cmdline = env_cmd environ = dict(environ) # Ensures we are modifying a copy. environ[CATCH_EXCEPTIONS_ENV_VAR_NAME] = '1' return NormalizeOutput(GetShellCommandOutput((environ, cmdline))) def GetOutputOfAllCommands(): """Returns concatenated output from several representative commands.""" return (GetCommandOutput(COMMAND_WITH_COLOR) + GetCommandOutput(COMMAND_WITH_TIME) + GetCommandOutput(COMMAND_WITH_DISABLED) + GetCommandOutput(COMMAND_WITH_SHARDING)) test_list = GetShellCommandOutput(COMMAND_LIST_TESTS) SUPPORTS_DEATH_TESTS = 'DeathTest' in test_list SUPPORTS_TYPED_TESTS = 'TypedTest' in test_list SUPPORTS_THREADS = 'ExpectFailureWithThreadsTest' in test_list SUPPORTS_STACK_TRACES = False CAN_GENERATE_GOLDEN_FILE = (SUPPORTS_DEATH_TESTS and SUPPORTS_TYPED_TESTS and SUPPORTS_THREADS) class GTestOutputTest(gtest_test_utils.TestCase): def RemoveUnsupportedTests(self, test_output): if not SUPPORTS_DEATH_TESTS: test_output = RemoveMatchingTests(test_output, 'DeathTest') if not SUPPORTS_TYPED_TESTS: test_output = RemoveMatchingTests(test_output, 'TypedTest') test_output = RemoveMatchingTests(test_output, 'TypedDeathTest') test_output = RemoveMatchingTests(test_output, 'TypeParamDeathTest') if not SUPPORTS_THREADS: test_output = RemoveMatchingTests(test_output, 'ExpectFailureWithThreadsTest') test_output = RemoveMatchingTests(test_output, 'ScopedFakeTestPartResultReporterTest') test_output = RemoveMatchingTests(test_output, 'WorksConcurrently') if not SUPPORTS_STACK_TRACES: test_output = RemoveStackTraces(test_output) return test_output def testOutput(self): output = GetOutputOfAllCommands() golden_file = open(GOLDEN_PATH, 'rb') # A mis-configured source control system can cause \r appear in EOL # sequences when we read the golden file irrespective of an operating # system used. Therefore, we need to strip those \r's from newlines # unconditionally. golden = ToUnixLineEnding(golden_file.read()) golden_file.close() # We want the test to pass regardless of certain features being # supported or not. # We still have to remove type name specifics in all cases. 
normalized_actual = RemoveTypeInfoDetails(output) normalized_golden = RemoveTypeInfoDetails(golden) if CAN_GENERATE_GOLDEN_FILE: self.assertEqual(normalized_golden, normalized_actual) else: normalized_actual = NormalizeToCurrentPlatform( RemoveTestCounts(normalized_actual)) normalized_golden = NormalizeToCurrentPlatform( RemoveTestCounts(self.RemoveUnsupportedTests(normalized_golden))) # This code is very handy when debugging golden file differences: if os.getenv('DEBUG_GTEST_OUTPUT_TEST'): open(os.path.join( gtest_test_utils.GetSourceDir(), '_gtest_output_test_normalized_actual.txt'), 'wb').write( normalized_actual) open(os.path.join( gtest_test_utils.GetSourceDir(), '_gtest_output_test_normalized_golden.txt'), 'wb').write( normalized_golden) self.assertEqual(normalized_golden, normalized_actual) if __name__ == '__main__': if sys.argv[1:] == [GENGOLDEN_FLAG]: if CAN_GENERATE_GOLDEN_FILE: output = GetOutputOfAllCommands() golden_file = open(GOLDEN_PATH, 'wb') golden_file.write(output) golden_file.close() else: message = ( """Unable to write a golden file when compiled in an environment that does not support all the required features (death tests, typed tests, and multiple threads). Please generate the golden file using a binary built with those features enabled.""") sys.stderr.write(message) sys.exit(1) else: gtest_test_utils.Main()
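
# A quick standalone illustration (not part of the test) of what the
# normalization helpers above do to a fabricated fragment of output:
def _demo_normalization():
  """Illustrative only: the input string below is made up."""
  raw = ('[ RUN      ] FooTest.Bar\r\n'
         'src/foo_test.cc:42: Failure\r\n'
         '[  FAILED  ] FooTest.Bar (3 ms)\r\n')
  # Returns the text with line endings, file locations and timings scrubbed:
  #   [ RUN      ] FooTest.Bar
  #   foo_test.cc:#: Failure
  #   [  FAILED  ] FooTest.Bar (? ms)
  return NormalizeOutput(raw)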
gpl-2.0
skulbrane/metagoofil
hachoir_parser/program/exe_res.py
95
15292
""" Parser for resource of Microsoft Windows Portable Executable (PE). Documentation: - Wine project VS_FIXEDFILEINFO structure, file include/winver.h Author: Victor Stinner Creation date: 2007-01-19 """ from hachoir_core.field import (FieldSet, ParserError, Enum, Bit, Bits, SeekableFieldSet, UInt16, UInt32, TimestampUnix32, RawBytes, PaddingBytes, NullBytes, NullBits, CString, String) from hachoir_core.text_handler import textHandler, filesizeHandler, hexadecimal from hachoir_core.tools import createDict, paddingSize, alignValue, makePrintable from hachoir_core.error import HACHOIR_ERRORS from hachoir_parser.common.win32 import BitmapInfoHeader MAX_DEPTH = 5 MAX_INDEX_PER_HEADER = 300 MAX_NAME_PER_HEADER = MAX_INDEX_PER_HEADER class Version(FieldSet): static_size = 32 def createFields(self): yield textHandler(UInt16(self, "minor", "Minor version number"), hexadecimal) yield textHandler(UInt16(self, "major", "Major version number"), hexadecimal) def createValue(self): return self["major"].value + float(self["minor"].value) / 10000 MAJOR_OS_NAME = { 1: "DOS", 2: "OS/2 16-bit", 3: "OS/2 32-bit", 4: "Windows NT", } MINOR_OS_BASE = 0 MINOR_OS_NAME = { 0: "Base", 1: "Windows 16-bit", 2: "Presentation Manager 16-bit", 3: "Presentation Manager 32-bit", 4: "Windows 32-bit", } FILETYPE_DRIVER = 3 FILETYPE_FONT = 4 FILETYPE_NAME = { 1: "Application", 2: "DLL", 3: "Driver", 4: "Font", 5: "VXD", 7: "Static library", } DRIVER_SUBTYPE_NAME = { 1: "Printer", 2: "Keyboard", 3: "Language", 4: "Display", 5: "Mouse", 6: "Network", 7: "System", 8: "Installable", 9: "Sound", 10: "Communications", } FONT_SUBTYPE_NAME = { 1: "Raster", 2: "Vector", 3: "TrueType", } class VersionInfoBinary(FieldSet): def createFields(self): yield textHandler(UInt32(self, "magic", "File information magic (0xFEEF04BD)"), hexadecimal) if self["magic"].value != 0xFEEF04BD: raise ParserError("EXE resource: invalid file info magic") yield Version(self, "struct_ver", "Structure version (1.0)") yield Version(self, "file_ver_ms", "File version MS") yield Version(self, "file_ver_ls", "File version LS") yield Version(self, "product_ver_ms", "Product version MS") yield Version(self, "product_ver_ls", "Product version LS") yield textHandler(UInt32(self, "file_flags_mask"), hexadecimal) yield Bit(self, "debug") yield Bit(self, "prerelease") yield Bit(self, "patched") yield Bit(self, "private_build") yield Bit(self, "info_inferred") yield Bit(self, "special_build") yield NullBits(self, "reserved", 26) yield Enum(textHandler(UInt16(self, "file_os_major"), hexadecimal), MAJOR_OS_NAME) yield Enum(textHandler(UInt16(self, "file_os_minor"), hexadecimal), MINOR_OS_NAME) yield Enum(textHandler(UInt32(self, "file_type"), hexadecimal), FILETYPE_NAME) field = textHandler(UInt32(self, "file_subfile"), hexadecimal) if field.value == FILETYPE_DRIVER: field = Enum(field, DRIVER_SUBTYPE_NAME) elif field.value == FILETYPE_FONT: field = Enum(field, FONT_SUBTYPE_NAME) yield field yield TimestampUnix32(self, "date_ms") yield TimestampUnix32(self, "date_ls") class VersionInfoNode(FieldSet): TYPE_STRING = 1 TYPE_NAME = { 0: "binary", 1: "string", } def __init__(self, parent, name, is_32bit=True): FieldSet.__init__(self, parent, name) self._size = alignValue(self["size"].value, 4) * 8 self.is_32bit = is_32bit def createFields(self): yield UInt16(self, "size", "Node size (in bytes)") yield UInt16(self, "data_size") yield Enum(UInt16(self, "type"), self.TYPE_NAME) yield CString(self, "name", charset="UTF-16-LE") size = paddingSize(self.current_size//8, 4) if size: 
yield NullBytes(self, "padding[]", size) size = self["data_size"].value if size: if self["type"].value == self.TYPE_STRING: if self.is_32bit: size *= 2 yield String(self, "value", size, charset="UTF-16-LE", truncate="\0") elif self["name"].value == "VS_VERSION_INFO": yield VersionInfoBinary(self, "value", size=size*8) if self["value/file_flags_mask"].value == 0: self.is_32bit = False else: yield RawBytes(self, "value", size) while 12 <= (self.size - self.current_size) // 8: yield VersionInfoNode(self, "node[]", self.is_32bit) size = (self.size - self.current_size) // 8 if size: yield NullBytes(self, "padding[]", size) def createDescription(self): text = "Version info node: %s" % self["name"].value if self["type"].value == self.TYPE_STRING and "value" in self: text += "=%s" % self["value"].value return text def parseVersionInfo(parent): yield VersionInfoNode(parent, "node[]") def parseIcon(parent): yield BitmapInfoHeader(parent, "bmp_header") size = (parent.size - parent.current_size) // 8 if size: yield RawBytes(parent, "raw", size) class WindowsString(FieldSet): def createFields(self): yield UInt16(self, "length", "Number of 16-bit characters") size = self["length"].value * 2 if size: yield String(self, "text", size, charset="UTF-16-LE") def createValue(self): if "text" in self: return self["text"].value else: return u"" def createDisplay(self): return makePrintable(self.value, "UTF-8", to_unicode=True, quote='"') def parseStringTable(parent): while not parent.eof: yield WindowsString(parent, "string[]") RESOURCE_TYPE = { 1: ("cursor[]", "Cursor", None), 2: ("bitmap[]", "Bitmap", None), 3: ("icon[]", "Icon", parseIcon), 4: ("menu[]", "Menu", None), 5: ("dialog[]", "Dialog", None), 6: ("string_table[]", "String table", parseStringTable), 7: ("font_dir[]", "Font directory", None), 8: ("font[]", "Font", None), 9: ("accelerators[]", "Accelerators", None), 10: ("raw_res[]", "Unformatted resource data", None), 11: ("message_table[]", "Message table", None), 12: ("group_cursor[]", "Group cursor", None), 14: ("group_icon[]", "Group icon", None), 16: ("version_info", "Version information", parseVersionInfo), } class Entry(FieldSet): static_size = 16*8 def __init__(self, parent, name, inode=None): FieldSet.__init__(self, parent, name) self.inode = inode def createFields(self): yield textHandler(UInt32(self, "rva"), hexadecimal) yield filesizeHandler(UInt32(self, "size")) yield UInt32(self, "codepage") yield NullBytes(self, "reserved", 4) def createDescription(self): return "Entry #%u: offset=%s size=%s" % ( self.inode["offset"].value, self["rva"].display, self["size"].display) class NameOffset(FieldSet): def createFields(self): yield UInt32(self, "name") yield Bits(self, "offset", 31) yield Bit(self, "is_name") class IndexOffset(FieldSet): TYPE_DESC = createDict(RESOURCE_TYPE, 1) def __init__(self, parent, name, res_type=None): FieldSet.__init__(self, parent, name) self.res_type = res_type def createFields(self): yield Enum(UInt32(self, "type"), self.TYPE_DESC) yield Bits(self, "offset", 31) yield Bit(self, "is_subdir") def createDescription(self): if self["is_subdir"].value: return "Sub-directory: %s at %s" % (self["type"].display, self["offset"].value) else: return "Index: ID %s at %s" % (self["type"].display, self["offset"].value) class ResourceContent(FieldSet): def __init__(self, parent, name, entry, size=None): FieldSet.__init__(self, parent, name, size=entry["size"].value*8) self.entry = entry res_type = self.getResType() if res_type in RESOURCE_TYPE: self._name, description, self._parser = 
RESOURCE_TYPE[res_type] else: self._parser = None def getResID(self): return self.entry.inode["offset"].value def getResType(self): return self.entry.inode.res_type def createFields(self): if self._parser: for field in self._parser(self): yield field else: yield RawBytes(self, "content", self.size//8) def createDescription(self): return "Resource #%u content: type=%s" % ( self.getResID(), self.getResType()) class Header(FieldSet): static_size = 16*8 def createFields(self): yield NullBytes(self, "options", 4) yield TimestampUnix32(self, "creation_date") yield UInt16(self, "maj_ver", "Major version") yield UInt16(self, "min_ver", "Minor version") yield UInt16(self, "nb_name", "Number of named entries") yield UInt16(self, "nb_index", "Number of indexed entries") def createDescription(self): text = "Resource header" info = [] if self["nb_name"].value: info.append("%u name" % self["nb_name"].value) if self["nb_index"].value: info.append("%u index" % self["nb_index"].value) if self["creation_date"].value: info.append(self["creation_date"].display) if info: return "%s: %s" % (text, ", ".join(info)) else: return text class Name(FieldSet): def createFields(self): yield UInt16(self, "length") size = min(self["length"].value, 255) if size: yield String(self, "name", size, charset="UTF-16LE") class Directory(FieldSet): def __init__(self, parent, name, res_type=None): FieldSet.__init__(self, parent, name) nb_entries = self["header/nb_name"].value + self["header/nb_index"].value self._size = Header.static_size + nb_entries * 64 self.res_type = res_type def createFields(self): yield Header(self, "header") if MAX_NAME_PER_HEADER < self["header/nb_name"].value: raise ParserError("EXE resource: invalid number of name (%s)" % self["header/nb_name"].value) if MAX_INDEX_PER_HEADER < self["header/nb_index"].value: raise ParserError("EXE resource: invalid number of index (%s)" % self["header/nb_index"].value) hdr = self["header"] for index in xrange(hdr["nb_name"].value): yield NameOffset(self, "name[]") for index in xrange(hdr["nb_index"].value): yield IndexOffset(self, "index[]", self.res_type) def createDescription(self): return self["header"].description class PE_Resource(SeekableFieldSet): def __init__(self, parent, name, section, size): SeekableFieldSet.__init__(self, parent, name, size=size) self.section = section def parseSub(self, directory, name, depth): indexes = [] for index in directory.array("index"): if index["is_subdir"].value: indexes.append(index) #indexes.sort(key=lambda index: index["offset"].value) for index in indexes: self.seekByte(index["offset"].value) if depth == 1: res_type = index["type"].value else: res_type = directory.res_type yield Directory(self, name, res_type) def createFields(self): # Parse directories depth = 0 subdir = Directory(self, "root") yield subdir subdirs = [subdir] alldirs = [subdir] while subdirs: depth += 1 if MAX_DEPTH < depth: self.error("EXE resource: depth too high (%s), stop parsing directories" % depth) break newsubdirs = [] for index, subdir in enumerate(subdirs): name = "directory[%u][%u][]" % (depth, index) try: for field in self.parseSub(subdir, name, depth): if field.__class__ == Directory: newsubdirs.append(field) yield field except HACHOIR_ERRORS, err: self.error("Unable to create directory %s: %s" % (name, err)) subdirs = newsubdirs alldirs.extend(subdirs) # Create resource list resources = [] for directory in alldirs: for index in directory.array("index"): if not index["is_subdir"].value: resources.append(index) # Parse entries entries = [] for 
resource in resources: offset = resource["offset"].value if offset is None: continue self.seekByte(offset) entry = Entry(self, "entry[]", inode=resource) yield entry entries.append(entry) entries.sort(key=lambda entry: entry["rva"].value) # Parse resource content for entry in entries: try: offset = self.section.rva2file(entry["rva"].value) padding = self.seekByte(offset, relative=False) if padding: yield padding yield ResourceContent(self, "content[]", entry) except HACHOIR_ERRORS, err: self.warning("Error when parsing entry %s: %s" % (entry.path, err)) size = (self.size - self.current_size) // 8 if size: yield PaddingBytes(self, "padding_end", size) class NE_VersionInfoNode(FieldSet): TYPE_STRING = 1 TYPE_NAME = { 0: "binary", 1: "string", } def __init__(self, parent, name): FieldSet.__init__(self, parent, name) self._size = alignValue(self["size"].value, 4) * 8 def createFields(self): yield UInt16(self, "size", "Node size (in bytes)") yield UInt16(self, "data_size") yield CString(self, "name", charset="ISO-8859-1") size = paddingSize(self.current_size//8, 4) if size: yield NullBytes(self, "padding[]", size) size = self["data_size"].value if size: if self["name"].value == "VS_VERSION_INFO": yield VersionInfoBinary(self, "value", size=size*8) else: yield String(self, "value", size, charset="ISO-8859-1") while 12 <= (self.size - self.current_size) // 8: yield NE_VersionInfoNode(self, "node[]") size = (self.size - self.current_size) // 8 if size: yield NullBytes(self, "padding[]", size) def createDescription(self): text = "Version info node: %s" % self["name"].value # if self["type"].value == self.TYPE_STRING and "value" in self: # text += "=%s" % self["value"].value return text
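
# A sketch of reaching this parser through hachoir's generic entry point; the
# input path is hypothetical and the available field paths depend on the
# actual binary being parsed.
def demoWalkResources():
    from hachoir_core.cmd_line import unicodeFilename
    from hachoir_parser import createParser
    parser = createParser(unicodeFilename("example.exe"))
    if parser:
        # PE resources, when present, show up as fields built by PE_Resource;
        # walking the top-level field tree prints their structure.
        for field in parser:
            print "%s: %s" % (field.path, field.description)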
gpl-2.0
chdecultot/erpnext
erpnext/hr/doctype/employee_attendance_tool/employee_attendance_tool.py
7
1956
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt

from __future__ import unicode_literals
import frappe
import json

from frappe.model.document import Document

class EmployeeAttendanceTool(Document):
	pass

@frappe.whitelist()
def get_employees(date, department = None, branch = None, company = None):
	attendance_not_marked = []
	attendance_marked = []
	filters = {"status": "Active", "date_of_joining": ["<=", date]}
	if department != "All":
		filters["department"] = department
	if branch != "All":
		filters["branch"] = branch
	if company != "All":
		filters["company"] = company

	employee_list = frappe.get_list("Employee", fields=["employee", "employee_name"], filters=filters, order_by="employee_name")
	marked_employee = {}
	for emp in frappe.get_list("Attendance", fields=["employee", "status"],
							   filters={"attendance_date": date}):
		marked_employee[emp['employee']] = emp['status']

	for employee in employee_list:
		employee['status'] = marked_employee.get(employee['employee'])
		if employee['employee'] not in marked_employee:
			attendance_not_marked.append(employee)
		else:
			attendance_marked.append(employee)
	return {
		"marked": attendance_marked,
		"unmarked": attendance_not_marked
	}

@frappe.whitelist()
def mark_employee_attendance(employee_list, status, date, leave_type=None, company=None):
	employee_list = json.loads(employee_list)
	for employee in employee_list:
		attendance = frappe.new_doc("Attendance")
		attendance.employee = employee['employee']
		attendance.employee_name = employee['employee_name']
		attendance.attendance_date = date
		attendance.status = status
		if status == "On Leave" and leave_type:
			attendance.leave_type = leave_type
		if company:
			attendance.company = company
		else:
			attendance.company = frappe.db.get_value("Employee", employee['employee'], "company")
		attendance.submit()
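
# A short sketch (illustrative, not part of the original file) of calling the
# whitelisted methods above from server-side code; the date is made up and a
# working Frappe site context is assumed.
def _demo_mark_all_present():
	employees = get_employees("2018-01-01", department="All", branch="All", company="All")
	# mark_employee_attendance expects the employee list as a JSON string,
	# exactly as the web client sends it.
	mark_employee_attendance(json.dumps(employees["unmarked"]),
		status="Present", date="2018-01-01")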
gpl-3.0
BT-ojossen/odoo
addons/crm_helpdesk/__openerp__.py
260
1956
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## { 'name': 'Helpdesk', 'category': 'Customer Relationship Management', 'version': '1.0', 'description': """ Helpdesk Management. ==================== Like records and processing of claims, Helpdesk and Support are good tools to trace your interventions. This menu is more adapted to oral communication, which is not necessarily related to a claim. Select a customer, add notes and categorize your interventions with a channel and a priority level. """, 'author': 'OpenERP SA', 'website': 'https://www.odoo.com', 'depends': ['crm'], 'data': [ 'crm_helpdesk_view.xml', 'crm_helpdesk_menu.xml', 'security/ir.model.access.csv', 'report/crm_helpdesk_report_view.xml', 'crm_helpdesk_data.xml', ], 'demo': ['crm_helpdesk_demo.xml'], 'test': ['test/process/help-desk.yml'], 'installable': True, 'auto_install': False, } # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
jaimahajan1997/sympy
sympy/physics/quantum/density.py
82
9834
from __future__ import print_function, division from itertools import product from sympy import Tuple, Add, Mul, Matrix, log, expand, Rational from sympy.core.trace import Tr from sympy.printing.pretty.stringpict import prettyForm from sympy.physics.quantum.dagger import Dagger from sympy.physics.quantum.operator import HermitianOperator from sympy.physics.quantum.represent import represent from sympy.physics.quantum.matrixutils import numpy_ndarray, scipy_sparse_matrix, to_numpy from sympy.physics.quantum.tensorproduct import TensorProduct, tensor_product_simp class Density(HermitianOperator): """Density operator for representing mixed states. TODO: Density operator support for Qubits Parameters ========== values : tuples/lists Each tuple/list should be of form (state, prob) or [state,prob] Examples ======== Create a density operator with 2 states represented by Kets. >>> from sympy.physics.quantum.state import Ket >>> from sympy.physics.quantum.density import Density >>> d = Density([Ket(0), 0.5], [Ket(1),0.5]) >>> d 'Density'((|0>, 0.5),(|1>, 0.5)) """ @classmethod def _eval_args(cls, args): # call this to qsympify the args args = super(Density, cls)._eval_args(args) for arg in args: # Check if arg is a tuple if not (isinstance(arg, Tuple) and len(arg) == 2): raise ValueError("Each argument should be of form [state,prob]" " or ( state, prob )") return args def states(self): """Return list of all states. Examples ======== >>> from sympy.physics.quantum.state import Ket >>> from sympy.physics.quantum.density import Density >>> d = Density([Ket(0), 0.5], [Ket(1),0.5]) >>> d.states() (|0>, |1>) """ return Tuple(*[arg[0] for arg in self.args]) def probs(self): """Return list of all probabilities. Examples ======== >>> from sympy.physics.quantum.state import Ket >>> from sympy.physics.quantum.density import Density >>> d = Density([Ket(0), 0.5], [Ket(1),0.5]) >>> d.probs() (0.5, 0.5) """ return Tuple(*[arg[1] for arg in self.args]) def get_state(self, index): """Return specfic state by index. Parameters ========== index : index of state to be returned Examples ======== >>> from sympy.physics.quantum.state import Ket >>> from sympy.physics.quantum.density import Density >>> d = Density([Ket(0), 0.5], [Ket(1),0.5]) >>> d.states()[1] |1> """ state = self.args[index][0] return state def get_prob(self, index): """Return probability of specific state by index. Parameters =========== index : index of states whose probability is returned. Examples ======== >>> from sympy.physics.quantum.state import Ket >>> from sympy.physics.quantum.density import Density >>> d = Density([Ket(0), 0.5], [Ket(1),0.5]) >>> d.probs()[1] 0.500000000000000 """ prob = self.args[index][1] return prob def apply_op(self, op): """op will operate on each individual state. Parameters ========== op : Operator Examples ======== >>> from sympy.physics.quantum.state import Ket >>> from sympy.physics.quantum.density import Density >>> from sympy.physics.quantum.operator import Operator >>> A = Operator('A') >>> d = Density([Ket(0), 0.5], [Ket(1),0.5]) >>> d.apply_op(A) 'Density'((A*|0>, 0.5),(A*|1>, 0.5)) """ new_args = [(op*state, prob) for (state, prob) in self.args] return Density(*new_args) def doit(self, **hints): """Expand the density operator into an outer product format. 
        Examples
        ========

        >>> from sympy.physics.quantum.state import Ket
        >>> from sympy.physics.quantum.density import Density
        >>> from sympy.physics.quantum.operator import Operator
        >>> A = Operator('A')
        >>> d = Density([Ket(0), 0.5], [Ket(1),0.5])
        >>> d.doit()
        0.5*|0><0| + 0.5*|1><1|

        """
        terms = []
        for (state, prob) in self.args:
            state = state.expand()  # needed to break up (a+b)*c
            if (isinstance(state, Add)):
                for arg in product(state.args, repeat=2):
                    terms.append(prob *
                                 self._generate_outer_prod(arg[0], arg[1]))
            else:
                terms.append(prob *
                             self._generate_outer_prod(state, state))

        return Add(*terms)

    def _generate_outer_prod(self, arg1, arg2):
        c_part1, nc_part1 = arg1.args_cnc()
        c_part2, nc_part2 = arg2.args_cnc()

        if (len(nc_part1) == 0 or len(nc_part2) == 0):
            raise ValueError('At least one pair of non-commutative instances '
                             'is required for the outer product.')

        # Muls of Tensor Products should be expanded
        # before this function is called
        if (isinstance(nc_part1[0], TensorProduct) and
                len(nc_part1) == 1 and len(nc_part2) == 1):
            op = tensor_product_simp(nc_part1[0] * Dagger(nc_part2[0]))
        else:
            op = Mul(*nc_part1) * Dagger(Mul(*nc_part2))

        return Mul(*c_part1)*Mul(*c_part2)*op

    def _represent(self, **options):
        return represent(self.doit(), **options)

    def _print_operator_name_latex(self, printer, *args):
        return printer._print(r'\rho', *args)

    def _print_operator_name_pretty(self, printer, *args):
        return prettyForm(u'\N{GREEK SMALL LETTER RHO}')

    def _eval_trace(self, **kwargs):
        indices = kwargs.get('indices', [])
        return Tr(self.doit(), indices).doit()

    def entropy(self):
        """ Compute the entropy of a density matrix.

        Refer to density.entropy() method for examples.
        """
        return entropy(self)


def entropy(density):
    """Compute the entropy of a matrix/density object.

    This computes -Tr(density*ln(density)) using the eigenvalue decomposition
    of density, which is given as either a Density instance or a matrix
    (numpy.ndarray, sympy.Matrix or scipy.sparse).

    Parameters
    ==========

    density : density matrix of type Density, sympy matrix,
              scipy.sparse or numpy.ndarray

    Examples
    ========

    >>> from sympy.physics.quantum.density import Density, entropy
    >>> from sympy.physics.quantum.represent import represent
    >>> from sympy.physics.quantum.matrixutils import scipy_sparse_matrix
    >>> from sympy.physics.quantum.spin import JzKet, Jz
    >>> from sympy import S, log
    >>> up = JzKet(S(1)/2,S(1)/2)
    >>> down = JzKet(S(1)/2,-S(1)/2)
    >>> d = Density((up,0.5),(down,0.5))
    >>> entropy(d)
    log(2)/2

    """
    if isinstance(density, Density):
        density = represent(density)  # represent in Matrix

    if isinstance(density, scipy_sparse_matrix):
        density = to_numpy(density)

    if isinstance(density, Matrix):
        eigvals = density.eigenvals().keys()
        return expand(-sum(e*log(e) for e in eigvals))
    elif isinstance(density, numpy_ndarray):
        import numpy as np
        eigvals = np.linalg.eigvals(density)
        return -np.sum(eigvals*np.log(eigvals))
    else:
        raise ValueError(
            "numpy.ndarray, scipy.sparse or sympy matrix expected")


def fidelity(state1, state2):
    """ Computes the fidelity [1]_ between two quantum states

    The arguments provided to this function should be a square matrix or a
    Density object. If it is a square matrix, it is assumed to be
    diagonalizable.
    Parameters
    ==========

    state1, state2 : a density matrix or Matrix

    Examples
    ========

    >>> from sympy import S, sqrt
    >>> from sympy.physics.quantum.dagger import Dagger
    >>> from sympy.physics.quantum.spin import JzKet
    >>> from sympy.physics.quantum.density import Density, fidelity
    >>> from sympy.physics.quantum.represent import represent
    >>>
    >>> up = JzKet(S(1)/2,S(1)/2)
    >>> down = JzKet(S(1)/2,-S(1)/2)
    >>> amp = 1/sqrt(2)
    >>> updown = (amp * up) + (amp * down)
    >>>
    >>> # represent turns Kets into matrices
    >>> up_dm = represent(up * Dagger(up))
    >>> down_dm = represent(down * Dagger(down))
    >>> updown_dm = represent(updown * Dagger(updown))
    >>>
    >>> fidelity(up_dm, up_dm)
    1
    >>> fidelity(up_dm, down_dm) #orthogonal states
    0
    >>> fidelity(up_dm, updown_dm).evalf().round(3)
    0.707

    References
    ==========

    .. [1] http://en.wikipedia.org/wiki/Fidelity_of_quantum_states

    """
    state1 = represent(state1) if isinstance(state1, Density) else state1
    state2 = represent(state2) if isinstance(state2, Density) else state2

    if (not isinstance(state1, Matrix) or
            not isinstance(state2, Matrix)):
        raise ValueError("state1 and state2 must be of type Density or Matrix "
                         "received type=%s for state1 and type=%s for state2" %
                         (type(state1), type(state2)))

    if (state1.shape != state2.shape or not state1.is_square):
        raise ValueError("The dimensions of both args should be equal and the "
                         "matrix obtained should be a square matrix")

    sqrt_state1 = state1**Rational(1, 2)
    return Tr((sqrt_state1 * state2 * sqrt_state1)**Rational(1, 2)).doit()
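
# A short worked example (illustrative, not part of the module) tying the
# helpers above together; the expected outputs are quoted from the docstrings.
if __name__ == '__main__':
    from sympy import S
    from sympy.physics.quantum.spin import JzKet
    from sympy.physics.quantum.represent import represent

    up = JzKet(S(1)/2, S(1)/2)
    down = JzKet(S(1)/2, -S(1)/2)

    d = Density((up, 0.5), (down, 0.5))
    print(entropy(d))                     # log(2)/2, as documented above

    up_dm = represent(up * Dagger(up))    # represent turns Kets into matrices
    down_dm = represent(down * Dagger(down))
    print(fidelity(up_dm, down_dm))       # 0 for orthogonal states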
bsd-3-clause
KarlTDebiec/westpa
lib/examples/wca-dimer_openmm/bruteforce/wcadimer.py
12
10747
#!/usr/local/bin/env python #============================================================================================= # MODULE DOCSTRING #============================================================================================= """ WCA fluid and WCA dimer systems. DESCRIPTION COPYRIGHT @author John D. Chodera <[email protected]> All code in this repository is released under the GNU General Public License. This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. TODO """ #============================================================================================= # GLOBAL IMPORTS #============================================================================================= import numpy import simtk import simtk.unit as units import simtk.openmm as openmm #============================================================================================= # CONSTANTS #============================================================================================= kB = units.BOLTZMANN_CONSTANT_kB * units.AVOGADRO_CONSTANT_NA #============================================================================================= # DEFAULT PARAMETERS #============================================================================================= natoms = 216 # number of particles # WCA fluid parameters (argon). mass = 39.9 * units.amu # reference mass sigma = 3.4 * units.angstrom # reference lengthscale epsilon = 120.0 * units.kelvin * kB # reference energy r_WCA = 2.**(1./6.) * sigma # interaction truncation range tau = units.sqrt(sigma**2 * mass / epsilon) # characteristic timescale # Simulation conditions. temperature = 0.824 / (kB / epsilon) # temperature kT = kB * temperature # thermal energy beta = 1.0 / kT # inverse temperature density = 0.96 / sigma**3 # default density stable_timestep = 0.001 * tau # stable timestep collision_rate = 1 / tau # collision rate for Langevin interator # Dimer potential parameters. h = 5.0 * kT # barrier height r0 = r_WCA # compact state distance w = 0.5 * r_WCA # extended state distance is r0 + 2*w #============================================================================================= # WCA Fluid #============================================================================================= def WCAFluid(N=natoms, density=density, mm=None, mass=mass, epsilon=epsilon, sigma=sigma): """ Create a Weeks-Chandler-Andersen system. OPTIONAL ARGUMENTS N (int) - total number of atoms (default: 150) density (float) - N sigma^3 / V (default: 0.96) sigma epsilon """ # Choose OpenMM package. if mm is None: mm = openmm # Create system system = mm.System() # Compute total system volume. volume = N / density # Make system cubic in dimension. length = volume**(1.0/3.0) # TODO: Can we change this to use tuples or 3x3 array? 
a = units.Quantity(numpy.array([1.0, 0.0, 0.0], numpy.float32), units.nanometer) * length/units.nanometer b = units.Quantity(numpy.array([0.0, 1.0, 0.0], numpy.float32), units.nanometer) * length/units.nanometer c = units.Quantity(numpy.array([0.0, 0.0, 1.0], numpy.float32), units.nanometer) * length/units.nanometer print "box edge length = %s" % str(length) system.setDefaultPeriodicBoxVectors(a, b, c) # Add particles to system. for n in range(N): system.addParticle(mass) # Create nonbonded force term implementing Kob-Andersen two-component Lennard-Jones interaction. energy_expression = '4.0*epsilon*((sigma/r)^12 - (sigma/r)^6) + epsilon' # Create force. force = mm.CustomNonbondedForce(energy_expression) # Set epsilon and sigma global parameters. force.addGlobalParameter('epsilon', epsilon) force.addGlobalParameter('sigma', sigma) # Add particles for n in range(N): force.addParticle([]) # Set periodic boundary conditions with cutoff. force.setNonbondedMethod(mm.CustomNonbondedForce.CutoffNonPeriodic) print "setting cutoff distance to %s" % str(r_WCA) force.setCutoffDistance(r_WCA) # Add nonbonded force term to the system. system.addForce(force) # Create initial coordinates using random positions. coordinates = units.Quantity(numpy.random.rand(N,3), units.nanometer) * (length / units.nanometer) # Return system and coordinates. return [system, coordinates] #============================================================================================= # WCA dimer plus fluid #============================================================================================= def WCADimer(N=natoms, density=density, mm=None, mass=mass, epsilon=epsilon, sigma=sigma, h=h, r0=r0, w=w): """ Create a bistable bonded pair of particles (indices 0 and 1) optionally surrounded by a Weeks-Chandler-Andersen fluid. The bistable potential has form U(r) = h*(1-((r-r0-w)/w)^2)^2 where r0 is the compact state separation, r0+2w is the extended state separation, and h is the barrier height. The WCA potential has form U(r) = 4 epsilon [ (sigma/r)^12 - (sigma/r)^6 ] + epsilon (r < r*) = 0 (r >= r*) where r* = 2^(1/6) sigma. OPTIONAL ARGUMENTS N (int) - total number of atoms (default: 2) density (float) - number density of particles (default: 0.96 / sigma**3) mass (simtk.unit.Quantity of mass) - particle mass (default: 39.948 amu) sigma (simtk.unit.Quantity of length) - Lennard-Jones sigma parameter (default: 0.3405 nm) epsilon (simtk.unit.Quantity of energy) - Lennard-Jones well depth (default: (119.8 Kelvin)*kB) h (simtk.unit.Quantity of energy) - bistable potential barrier height (default: ???) r0 (simtk.unit.Quantity of length) - bistable potential compact state separation (default: ???) w (simtk.unit.Quantity of length) - bistable potential extended state separation is r0+2*w (default: ???) """ # Choose OpenMM package. if mm is None: mm = openmm # Compute cutoff for WCA fluid. r_WCA = 2.**(1./6.) * sigma # cutoff at minimum of potential # Create system system = mm.System() # Compute total system volume. volume = N / density # Make system cubic in dimension. length = volume**(1.0/3.0) a = units.Quantity(numpy.array([1.0, 0.0, 0.0], numpy.float32), units.nanometer) * length/units.nanometer b = units.Quantity(numpy.array([0.0, 1.0, 0.0], numpy.float32), units.nanometer) * length/units.nanometer c = units.Quantity(numpy.array([0.0, 0.0, 1.0], numpy.float32), units.nanometer) * length/units.nanometer print "box edge length = %s" % str(length) system.setDefaultPeriodicBoxVectors(a, b, c) # Add particles to system. 
for n in range(N): system.addParticle(mass) # WCA: Lennard-Jones truncated at minim and shifted so potential is zero at cutoff. energy_expression = '4.0*epsilon*((sigma/r)^12 - (sigma/r)^6) + epsilon' # Create force. force = mm.CustomNonbondedForce(energy_expression) # Set epsilon and sigma global parameters. force.addGlobalParameter('epsilon', epsilon) force.addGlobalParameter('sigma', sigma) # Add particles for n in range(N): force.addParticle([]) # Add exclusion between bonded particles. force.addExclusion(0,1) # Set periodic boundary conditions with cutoff. if (N > 2): force.setNonbondedMethod(mm.CustomNonbondedForce.CutoffPeriodic) else: force.setNonbondedMethod(mm.CustomNonbondedForce.CutoffNonPeriodic) print "setting cutoff distance to %s" % str(r_WCA) force.setCutoffDistance(r_WCA) # Add nonbonded force term to the system. system.addForce(force) # Add dimer potential to first two particles. dimer_force = openmm.CustomBondForce('h*(1-((r-r0-w)/w)^2)^2;') dimer_force.addGlobalParameter('h', h) # barrier height dimer_force.addGlobalParameter('r0', r0) # compact state separation dimer_force.addGlobalParameter('w', w) # second minimum is at r0 + 2*w dimer_force.addBond(0, 1, []) system.addForce(dimer_force) # Create initial coordinates using random positions. coordinates = units.Quantity(numpy.random.rand(N,3), units.nanometer) * (length / units.nanometer) # Reposition dimer particles at compact minimum. coordinates[0,:] *= 0.0 coordinates[1,:] *= 0.0 coordinates[1,0] = r0 # Return system and coordinates. return [system, coordinates] #============================================================================================= # WCA dimer in vacuum #============================================================================================= def WCADimerVacuum(mm=None, mass=mass, epsilon=epsilon, sigma=sigma, h=h, r0=r0, w=w): """ Create a bistable dimer. OPTIONAL ARGUMENTS """ # Choose OpenMM package. if mm is None: mm = openmm # Create system system = mm.System() # Add particles to system. for n in range(2): system.addParticle(mass) # Add dimer potential to first two particles. dimer_force = openmm.CustomBondForce('h*(1-((r-r0-w)/w)^2)^2;') dimer_force.addGlobalParameter('h', h) # barrier height dimer_force.addGlobalParameter('r0', r0) # compact state separation dimer_force.addGlobalParameter('w', w) # second minimum is at r0 + 2*w dimer_force.addBond(0, 1, []) system.addForce(dimer_force) # Create initial coordinates using random positions. coordinates = units.Quantity(numpy.zeros([2,3], numpy.float64), units.nanometer) # Reposition dimer particles at compact minimum. coordinates[0,:] *= 0.0 coordinates[1,:] *= 0.0 coordinates[1,0] = r0 # Return system and coordinates. return [system, coordinates]
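
# A brief sketch of driving one of these constructors with OpenMM's Langevin
# integrator, reusing the module-level simulation parameters above; the step
# count is arbitrary and no reporters or output files are configured.
def _demo_langevin_dynamics():
    [system, coordinates] = WCADimer()
    integrator = openmm.LangevinIntegrator(temperature, collision_rate, stable_timestep)
    context = openmm.Context(system, integrator)
    context.setPositions(coordinates)
    integrator.step(100)
    state = context.getState(getEnergy=True)
    print "potential energy = %s" % str(state.getPotentialEnergy())
    return context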
gpl-3.0
wolverineav/neutron
neutron/core_extensions/base.py
32
1533
# Copyright (c) 2015 Red Hat Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc import six NETWORK = 'network' PORT = 'port' CORE_RESOURCES = [NETWORK, PORT] @six.add_metaclass(abc.ABCMeta) class CoreResourceExtension(object): @abc.abstractmethod def process_fields(self, context, resource_type, requested_resource, actual_resource): """Process extension fields. :param context: neutron api request context :param resource_type: core resource type (one of CORE_RESOURCES) :param requested_resource: resource dict that contains extension fields :param actual_resource: actual resource dict known to plugin """ @abc.abstractmethod def extract_fields(self, resource_type, resource): """Extract extension fields. :param resource_type: core resource type (one of CORE_RESOURCES) :param resource: resource dict that contains extension fields """
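
# A minimal sketch of a concrete extension built on this base; the
# 'bandwidth' field is purely illustrative, not a real Neutron extension.
class DummyResourceExtension(CoreResourceExtension):

    def process_fields(self, context, resource_type,
                       requested_resource, actual_resource):
        # Copy our single custom field from the request onto the resource
        # dict known to the plugin.
        if 'bandwidth' in requested_resource:
            actual_resource['bandwidth'] = requested_resource['bandwidth']

    def extract_fields(self, resource_type, resource):
        return {'bandwidth': resource.get('bandwidth')}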
apache-2.0
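A minimal sketch of a concrete subclass of the abstract extension point above; the class name and the 'color' field are hypothetical, chosen only to show the two required methods:

class ColorCoreResourceExtension(CoreResourceExtension):
    """Hypothetical extension that round-trips a 'color' field on ports."""

    def process_fields(self, context, resource_type,
                       requested_resource, actual_resource):
        # Copy the extension field from the request onto the plugin's resource.
        if resource_type == PORT and 'color' in requested_resource:
            actual_resource['color'] = requested_resource['color']

    def extract_fields(self, resource_type, resource):
        # Pull only the extension fields back out of a resource dict.
        if resource_type != PORT:
            return {}
        return {'color': resource.get('color')}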
40423105/2016fallcadp_hw
plugin/liquid_tags/graphviz.py
245
3198
""" GraphViz Tag --------- This implements a Liquid-style graphviz tag for Pelican. You can use different Graphviz programs like dot, neato, twopi etc. [1] [1] http://www.graphviz.org/ Syntax ------ {% graphviz <program> { <DOT code> } %} Examples -------- {% graphviz dot { digraph graphname { a -> b -> c; b -> d; } } %} {% graphviz twopi { <code goes here> } %} {% graphviz neato { <code goes here> } %} ... Output ------ <div class="graphviz" style="text-align: center;"><img src="data:image/png;base64,_BASE64_IMAGE DATA_/></div> """ import base64 import re from .mdx_liquid_tags import LiquidTags SYNTAX = '{% dot graphviz [program] [dot code] %}' DOT_BLOCK_RE = re.compile(r'^\s*(?P<program>\w+)\s*\{\s*(?P<code>.*\})\s*\}$', re.MULTILINE | re.DOTALL) def run_graphviz(program, code, options=[], format='png'): """ Runs graphviz programs and returns image data Copied from https://github.com/tkf/ipython-hierarchymagic/blob/master/hierarchymagic.py """ import os from subprocess import Popen, PIPE dot_args = [program] + options + ['-T', format] if os.name == 'nt': # Avoid opening shell window. # * https://github.com/tkf/ipython-hierarchymagic/issues/1 # * http://stackoverflow.com/a/2935727/727827 p = Popen(dot_args, stdout=PIPE, stdin=PIPE, stderr=PIPE, creationflags=0x08000000) else: p = Popen(dot_args, stdout=PIPE, stdin=PIPE, stderr=PIPE) wentwrong = False try: # Graphviz may close standard input when an error occurs, # resulting in a broken pipe on communicate() stdout, stderr = p.communicate(code.encode('utf-8')) except (OSError, IOError) as err: if err.errno != EPIPE: raise wentwrong = True except IOError as err: if err.errno != EINVAL: raise wentwrong = True if wentwrong: # in this case, read the standard output and standard error streams # directly, to get the error message(s) stdout, stderr = p.stdout.read(), p.stderr.read() p.wait() if p.returncode != 0: raise RuntimeError('dot exited with error:\n[stderr]\n{0}'.format(stderr.decode('utf-8'))) return stdout @LiquidTags.register('graphviz') def graphviz_parser(preprocessor, tag, markup): """ Simple Graphviz parser """ # Parse the markup string m = DOT_BLOCK_RE.search(markup) if m: # Get program and DOT code code = m.group('code') program = m.group('program').strip() # Run specified program with our markup output = run_graphviz(program, code) # Return Base64 encoded image return '<div class="graphviz" style="text-align: center;"><img src="data:image/png;base64,%s"></div>' % base64.b64encode(output) else: raise ValueError('Error processing input. ' 'Expected syntax: {0}'.format(SYNTAX)) #---------------------------------------------------------------------- # This import allows image tag to be a Pelican plugin from .liquid_tags import register
agpl-3.0
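run_graphviz() from the plugin above is usable on its own; a small sketch, assuming Graphviz's dot binary is installed and on PATH (the filename is illustrative):

dot_source = """digraph G {
    a -> b;
    b -> c;
}"""

# Render the DOT source to PNG bytes and save them to disk.
png_bytes = run_graphviz('dot', dot_source)
with open('graph.png', 'wb') as f:
    f.write(png_bytes)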
kbc-developers/android_kernel_samsung_jfdcm
tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/SchedGui.py
12980
5411
# SchedGui.py - Python extension for perf script, basic GUI code for
# traces drawing and overview.
#
# Copyright (C) 2010 by Frederic Weisbecker <[email protected]>
#
# This software is distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.

try:
    import wx
except ImportError:
    raise ImportError, "You need to install the wxpython lib for this script"


class RootFrame(wx.Frame):
    Y_OFFSET = 100
    RECT_HEIGHT = 100
    RECT_SPACE = 50
    EVENT_MARKING_WIDTH = 5

    def __init__(self, sched_tracer, title, parent = None, id = -1):
        wx.Frame.__init__(self, parent, id, title)

        (self.screen_width, self.screen_height) = wx.GetDisplaySize()
        self.screen_width -= 10
        self.screen_height -= 10
        self.zoom = 0.5
        self.scroll_scale = 20
        self.sched_tracer = sched_tracer
        self.sched_tracer.set_root_win(self)
        (self.ts_start, self.ts_end) = sched_tracer.interval()
        self.update_width_virtual()
        self.nr_rects = sched_tracer.nr_rectangles() + 1
        self.height_virtual = RootFrame.Y_OFFSET + (self.nr_rects * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))

        # whole window panel
        self.panel = wx.Panel(self, size=(self.screen_width, self.screen_height))

        # scrollable container
        self.scroll = wx.ScrolledWindow(self.panel)
        self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale)
        self.scroll.EnableScrolling(True, True)
        self.scroll.SetFocus()

        # scrollable drawing area
        self.scroll_panel = wx.Panel(self.scroll, size=(self.screen_width - 15, self.screen_height / 2))
        self.scroll_panel.Bind(wx.EVT_PAINT, self.on_paint)
        self.scroll_panel.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
        self.scroll_panel.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)
        self.scroll.Bind(wx.EVT_PAINT, self.on_paint)
        self.scroll.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
        self.scroll.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)

        self.scroll.Fit()
        self.Fit()

        self.scroll_panel.SetDimensions(-1, -1, self.width_virtual, self.height_virtual, wx.SIZE_USE_EXISTING)

        self.txt = None

        self.Show(True)

    def us_to_px(self, val):
        return val / (10 ** 3) * self.zoom

    def px_to_us(self, val):
        return (val / self.zoom) * (10 ** 3)

    def scroll_start(self):
        (x, y) = self.scroll.GetViewStart()
        return (x * self.scroll_scale, y * self.scroll_scale)

    def scroll_start_us(self):
        (x, y) = self.scroll_start()
        return self.px_to_us(x)

    def paint_rectangle_zone(self, nr, color, top_color, start, end):
        offset_px = self.us_to_px(start - self.ts_start)
        width_px = self.us_to_px(end - self.ts_start)

        offset_py = RootFrame.Y_OFFSET + (nr * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))
        width_py = RootFrame.RECT_HEIGHT

        dc = self.dc

        if top_color is not None:
            (r, g, b) = top_color
            top_color = wx.Colour(r, g, b)
            brush = wx.Brush(top_color, wx.SOLID)
            dc.SetBrush(brush)
            dc.DrawRectangle(offset_px, offset_py, width_px, RootFrame.EVENT_MARKING_WIDTH)
            width_py -= RootFrame.EVENT_MARKING_WIDTH
            offset_py += RootFrame.EVENT_MARKING_WIDTH

        (r, g, b) = color
        color = wx.Colour(r, g, b)
        brush = wx.Brush(color, wx.SOLID)
        dc.SetBrush(brush)
        dc.DrawRectangle(offset_px, offset_py, width_px, width_py)

    def update_rectangles(self, dc, start, end):
        start += self.ts_start
        end += self.ts_start
        self.sched_tracer.fill_zone(start, end)

    def on_paint(self, event):
        dc = wx.PaintDC(self.scroll_panel)
        self.dc = dc

        width = min(self.width_virtual, self.screen_width)
        (x, y) = self.scroll_start()
        start = self.px_to_us(x)
        end = self.px_to_us(x + width)
        self.update_rectangles(dc, start, end)

    def rect_from_ypixel(self, y):
        y -= RootFrame.Y_OFFSET
        rect = y / (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)
        height = y % (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)

        if rect < 0 or rect > self.nr_rects - 1 or height > RootFrame.RECT_HEIGHT:
            return -1

        return rect

    def update_summary(self, txt):
        if self.txt:
            self.txt.Destroy()
        self.txt = wx.StaticText(self.panel, -1, txt, (0, (self.screen_height / 2) + 50))

    def on_mouse_down(self, event):
        (x, y) = event.GetPositionTuple()
        rect = self.rect_from_ypixel(y)
        if rect == -1:
            return

        t = self.px_to_us(x) + self.ts_start

        self.sched_tracer.mouse_down(rect, t)

    def update_width_virtual(self):
        self.width_virtual = self.us_to_px(self.ts_end - self.ts_start)

    def __zoom(self, x):
        self.update_width_virtual()
        (xpos, ypos) = self.scroll.GetViewStart()
        xpos = self.us_to_px(x) / self.scroll_scale
        self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale, xpos, ypos)
        self.Refresh()

    def zoom_in(self):
        x = self.scroll_start_us()
        self.zoom *= 2
        self.__zoom(x)

    def zoom_out(self):
        x = self.scroll_start_us()
        self.zoom /= 2
        self.__zoom(x)

    def on_key_press(self, event):
        key = event.GetRawKeyCode()
        if key == ord("+"):
            self.zoom_in()
            return
        if key == ord("-"):
            self.zoom_out()
            return

        key = event.GetKeyCode()
        (x, y) = self.scroll.GetViewStart()
        if key == wx.WXK_RIGHT:
            self.scroll.Scroll(x + 1, y)
        elif key == wx.WXK_LEFT:
            self.scroll.Scroll(x - 1, y)
        elif key == wx.WXK_DOWN:
            self.scroll.Scroll(x, y + 1)
        elif key == wx.WXK_UP:
            self.scroll.Scroll(x, y - 1)
gpl-2.0
Khilo84/PyQt4
pyuic/uic/Loader/loader.py
8
3049
#############################################################################
##
## Copyright (C) 2011 Riverbank Computing Limited.
## Copyright (C) 2006 Thorsten Marek.
## All right reserved.
##
## This file is part of PyQt.
##
## You may use this file under the terms of the GPL v2 or the revised BSD
## license as follows:
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are
## met:
##   * Redistributions of source code must retain the above copyright
##     notice, this list of conditions and the following disclaimer.
##   * Redistributions in binary form must reproduce the above copyright
##     notice, this list of conditions and the following disclaimer in
##     the documentation and/or other materials provided with the
##     distribution.
##   * Neither the name of the Riverbank Computing Limited nor the names
##     of its contributors may be used to endorse or promote products
##     derived from this software without specific prior written
##     permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
## "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
## LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
## A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
## OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
## SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
## LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
## DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
## THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
## (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
## OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
#############################################################################

import os.path

from PyQt4 import QtGui, QtCore

from PyQt4.uic.uiparser import UIParser
from PyQt4.uic.Loader.qobjectcreator import LoaderCreatorPolicy


class DynamicUILoader(UIParser):
    def __init__(self):
        UIParser.__init__(self, QtCore, QtGui, LoaderCreatorPolicy())

    def createToplevelWidget(self, classname, widgetname):
        if self.toplevelInst is not None:
            if not isinstance(self.toplevelInst, self.factory.findQObjectType(classname)):
                raise TypeError(("Wrong base class of toplevel widget",
                                 (type(self.toplevelInst), classname)))
            return self.toplevelInst
        else:
            return self.factory.createQObject(classname, widgetname, ())

    def loadUi(self, filename, toplevelInst=None):
        self.toplevelInst = toplevelInst

        if hasattr(filename, 'read'):
            basedir = ''
        else:
            # Allow the filename to be a QString.
            filename = str(filename)
            basedir = os.path.dirname(filename)

        return self.parse(filename, basedir)
gpl-2.0
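A sketch of driving DynamicUILoader directly; the 'dialog.ui' filename is hypothetical, and in practice most callers go through PyQt4.uic.loadUi, which wraps this class. Whether the parsed toplevel widget is returned follows the uiparser implementation, so treat this as illustrative:

import sys
from PyQt4 import QtGui

app = QtGui.QApplication(sys.argv)
loader = DynamicUILoader()
widget = loader.loadUi('dialog.ui')  # hypothetical Designer .ui file
widget.show()
app.exec_()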
c86j224s/snippet
Python_Pygments/lib/python3.6/site-packages/pygments/lexers/ooc.py
31
2999
# -*- coding: utf-8 -*-
"""
    pygments.lexers.ooc
    ~~~~~~~~~~~~~~~~~~~

    Lexers for the Ooc language.

    :copyright: Copyright 2006-2017 by the Pygments team, see AUTHORS.
    :license: BSD, see LICENSE for details.
"""

from pygments.lexer import RegexLexer, bygroups, words
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
    Number, Punctuation

__all__ = ['OocLexer']


class OocLexer(RegexLexer):
    """
    For `Ooc <http://ooc-lang.org/>`_ source code

    .. versionadded:: 1.2
    """
    name = 'Ooc'
    aliases = ['ooc']
    filenames = ['*.ooc']
    mimetypes = ['text/x-ooc']

    tokens = {
        'root': [
            (words((
                'class', 'interface', 'implement', 'abstract', 'extends', 'from',
                'this', 'super', 'new', 'const', 'final', 'static', 'import',
                'use', 'extern', 'inline', 'proto', 'break', 'continue',
                'fallthrough', 'operator', 'if', 'else', 'for', 'while', 'do',
                'switch', 'case', 'as', 'in', 'version', 'return', 'true',
                'false', 'null'), prefix=r'\b', suffix=r'\b'),
             Keyword),
            (r'include\b', Keyword, 'include'),
            (r'(cover)([ \t]+)(from)([ \t]+)(\w+[*@]?)',
             bygroups(Keyword, Text, Keyword, Text, Name.Class)),
            (r'(func)((?:[ \t]|\\\n)+)(~[a-z_]\w*)',
             bygroups(Keyword, Text, Name.Function)),
            (r'\bfunc\b', Keyword),
            # Note: %= and ^= not listed on http://ooc-lang.org/syntax
            (r'//.*', Comment),
            (r'(?s)/\*.*?\*/', Comment.Multiline),
            (r'(==?|\+=?|-[=>]?|\*=?|/=?|:=|!=?|%=?|\?|>{1,3}=?|<{1,3}=?|\.\.|'
             r'&&?|\|\|?|\^=?)', Operator),
            (r'(\.)([ \t]*)([a-z]\w*)', bygroups(Operator, Text,
                                                 Name.Function)),
            (r'[A-Z][A-Z0-9_]+', Name.Constant),
            (r'[A-Z]\w*([@*]|\[[ \t]*\])?', Name.Class),

            (r'([a-z]\w*(?:~[a-z]\w*)?)((?:[ \t]|\\\n)*)(?=\()',
             bygroups(Name.Function, Text)),
            (r'[a-z]\w*', Name.Variable),

            # : introduces types
            (r'[:(){}\[\];,]', Punctuation),

            (r'0x[0-9a-fA-F]+', Number.Hex),
            (r'0c[0-9]+', Number.Oct),
            (r'0b[01]+', Number.Bin),
            (r'[0-9_]\.[0-9_]*(?!\.)', Number.Float),
            (r'[0-9_]+', Number.Decimal),

            (r'"(?:\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\"])*"',
             String.Double),
            (r"'(?:\\.|\\[0-9]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\'\n])'",
             String.Char),
            (r'@', Punctuation),  # pointer dereference
            (r'\.', Punctuation),  # imports or chain operator

            (r'\\[ \t\n]', Text),
            (r'[ \t]+', Text),
        ],
        'include': [
            (r'[\w/]+', Name),
            (r',', Punctuation),
            (r'[ \t]', Text),
            (r'[;\n]', Text, '#pop'),
        ],
    }
apache-2.0
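The lexer above plugs into the standard Pygments pipeline; a short sketch (the Ooc snippet is illustrative):

from pygments import highlight
from pygments.formatters import HtmlFormatter
from pygments.lexers.ooc import OocLexer

code = 'main: func {\n    "Hello, ooc!" println()\n}\n'
# Tokenize with OocLexer and emit syntax-highlighted HTML.
print(highlight(code, OocLexer(), HtmlFormatter()))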
mytliulei/DCNRobotInstallPackages
windows/win32/pygal-1.7.0/pygal/test/test_pie.py
4
2015
# -*- coding: utf-8 -*-
# This file is part of pygal
#
# A python svg graph plotting library
# Copyright © 2012-2014 Kozea
#
# This library is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# This library is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with pygal. If not, see <http://www.gnu.org/licenses/>.
import os
import uuid

from pygal import Pie


def test_donut():
    chart = Pie(inner_radius=.3, pretty_print=True)
    chart.title = 'Browser usage in February 2012 (in %)'
    chart.add('IE', 19.5)
    chart.add('Firefox', 36.6)
    chart.add('Chrome', 36.3)
    chart.add('Safari', 4.5)
    chart.add('Opera', 2.3)

    assert chart.render()


def test_multiseries_donut():
    # this just demos that the multiseries pie does not respect
    # the inner_radius
    chart = Pie(inner_radius=.3, pretty_print=True)
    chart.title = 'Browser usage by version in February 2012 (in %)'
    chart.add('IE', [5.7, 10.2, 2.6, 1])
    chart.add('Firefox', [.6, 16.8, 7.4, 2.2, 1.2, 1, 1, 1.1, 4.3, 1])
    chart.add('Chrome', [.3, .9, 17.1, 15.3, .6, .5, 1.6])
    chart.add('Safari', [4.4, .1])
    chart.add('Opera', [.1, 1.6, .1, .5])

    assert chart.render()


def test_half_pie():
    pie = Pie()
    pie.add('IE', 19.5)
    pie.add('Firefox', 36.6)
    pie.add('Chrome', 36.3)
    pie.add('Safari', 4.5)
    pie.add('Opera', 2.3)

    half = Pie(half_pie=True)
    half.add('IE', 19.5)
    half.add('Firefox', 36.6)
    half.add('Chrome', 36.3)
    half.add('Safari', 4.5)
    half.add('Opera', 2.3)
    assert pie.render() != half.render()
apache-2.0
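Outside the test suite, the same charts render straight to a file; a sketch with illustrative numbers:

from pygal import Pie

chart = Pie(inner_radius=.3)  # inner_radius > 0 gives a donut
chart.title = 'Browser usage (illustrative numbers)'
chart.add('Firefox', 36.6)
chart.add('Chrome', 36.3)
chart.render_to_file('donut.svg')  # writes the SVG next to the script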
pbarton666/buzz_bot
djangoproj/djangoapp/csc/webapi/urls.py
1
2618
from django.conf.urls.defaults import *
from piston.resource import Resource
from csc.webapi.docs import documentation_view
from csc.webapi.handlers import *

# This gives a way to accept "query.foo" on the end of the URL to set the
# format to 'foo'. "?format=foo" works as well.
Q = r'(query\.(?P<emitter_format>.+))?$'

urlpatterns = patterns('',
    url(r'^(?P<lang>.+)/concept/(?P<concept>[^/]*)/'+Q,
        Resource(ConceptHandler), name='concept_handler'),
    url(r'^(?P<lang>.+)/concept/(?P<concept>[^/]*)/assertions/'+Q,
        Resource(ConceptAssertionHandler), name='concept_assertion_handler_default'),
    url(r'^(?P<lang>.+)/concept/(?P<concept>[^/]*)/assertions/limit:(?P<limit>[0-9]+)/'+Q,
        Resource(ConceptAssertionHandler), name='concept_assertion_handler'),
    url(r'^(?P<lang>.+)/concept/(?P<concept>[^/]*)/surfaceforms/'+Q,
        Resource(ConceptSurfaceHandler), name='concept_surface_handler_default'),
    url(r'^(?P<lang>.+)/concept/(?P<concept>[^/]*)/surfaceforms/limit:(?P<limit>[0-9]+)/'+Q,
        Resource(ConceptSurfaceHandler), name='concept_surface_handler'),
    url(r'^(?P<lang>.+)/(?P<dir>left|right)feature/(?P<relation>[^/]+)/(?P<concept>[^/]+)/'+Q,
        Resource(FeatureQueryHandler), name='feature_query_handler_default'),
    url(r'^(?P<lang>.+)/(?P<dir>left|right)feature/(?P<relation>[^/]+)/(?P<concept>[^/]+)/limit:(?P<limit>[0-9]+)/'+Q,
        Resource(FeatureQueryHandler), name='feature_query_handler'),
    url(r'^(?P<lang>.+)/(?P<type>.+)/(?P<id>[0-9]+)/votes/'+Q,
        Resource(RatedObjectHandler), name='rated_object_handler'),
    url(r'^(?P<lang>.+)/surface/(?P<text>.+)/'+Q,
        Resource(SurfaceFormHandler), name='surface_form_handler'),
    url(r'^(?P<lang>.+)/frame/(?P<id>[0-9]+)/'+Q,
        Resource(FrameHandler), name='frame_handler'),
    url(r'^(?P<lang>.+)/frame/(?P<id>[0-9]+)/statements/'+Q,
        Resource(RawAssertionByFrameHandler), name='raw_assertion_by_frame_handler_default'),
    url(r'^(?P<lang>.+)/frame/(?P<id>[0-9]+)/statements/limit:(?P<limit>[0-9]+)/'+Q,
        Resource(RawAssertionByFrameHandler), name='raw_assertion_by_frame_handler'),
    url(r'^(?P<lang>.+)/assertion/(?P<id>[0-9]+)/'+Q,
        Resource(AssertionHandler), name='assertion_handler'),
    url(r'^(?P<lang>.+)/raw_assertion/(?P<id>[0-9]+)/'+Q,
        Resource(RawAssertionHandler), name='raw_assertion_handler'),
    url(r'docs.txt$', documentation_view, name='documentation_view')
)

# :vim:tw=0:
mit
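The Q suffix in the URLconf above is plain regex; a quick sketch of what it matches, with illustrative values:

import re

Q = r'(query\.(?P<emitter_format>.+))?$'
pattern = re.compile(r'^(?P<lang>.+)/concept/(?P<concept>[^/]*)/' + Q)

# The optional 'query.<format>' tail sets the emitter format when present.
m = pattern.match('en/concept/dog/query.json')
print(m.groupdict())  # {'lang': 'en', 'concept': 'dog', 'emitter_format': 'json'}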
s0930342674/pyload
module/plugins/container/RSDF.py
15
1699
# -*- coding: utf-8 -*-

from __future__ import with_statement

import binascii
import re

from Crypto.Cipher import AES

from module.plugins.internal.Container import Container
from module.utils import fs_encode


class RSDF(Container):
    __name__    = "RSDF"
    __type__    = "container"
    __version__ = "0.31"
    __status__  = "testing"

    __pattern__ = r'.+\.rsdf$'

    __description__ = """RSDF container decrypter plugin"""
    __license__     = "GPLv3"
    __authors__     = [("RaNaN", "[email protected]"),
                       ("spoob", "[email protected]"),
                       ("Walter Purcaro", "[email protected]")]


    KEY = "8C35192D964DC3182C6F84F3252239EB4A320D2500000000"
    IV  = "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF"


    def decrypt(self, pyfile):
        KEY = binascii.unhexlify(self.KEY)
        IV  = binascii.unhexlify(self.IV)

        iv = AES.new(KEY, AES.MODE_ECB).encrypt(IV)
        cipher = AES.new(KEY, AES.MODE_CFB, iv)

        try:
            fs_filename = fs_encode(pyfile.url.strip())
            with open(fs_filename, 'r') as rsdf:
                data = rsdf.read()

        except IOError, e:
            self.fail(e)

        if re.search(r"<title>404 - Not Found</title>", data):
            pyfile.setStatus("offline")

        else:
            try:
                raw_links = binascii.unhexlify(''.join(data.split())).splitlines()

            except TypeError:
                self.fail(_("Container is corrupted"))

            for link in raw_links:
                if not link:
                    continue
                link = cipher.decrypt(link.decode('base64')).replace('CCF: ', '')
                self.urls.append(link)
gpl-3.0
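The container format handled above is a hex-encoded blob whose lines are base64 of AES-CFB encrypted links. A standalone sketch of the same decryption scheme (PyCrypto, key and IV as in the plugin; Python 2 string APIs to match the file):

import binascii
from Crypto.Cipher import AES

KEY = binascii.unhexlify("8C35192D964DC3182C6F84F3252239EB4A320D2500000000")
IV = binascii.unhexlify("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF")

def decrypt_rsdf_line(line):
    # The working IV is the static IV encrypted once with AES-ECB,
    # then each base64-decoded line is decrypted with AES-CFB.
    iv = AES.new(KEY, AES.MODE_ECB).encrypt(IV)
    cipher = AES.new(KEY, AES.MODE_CFB, iv)
    return cipher.decrypt(line.decode('base64')).replace('CCF: ', '')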
takis/django
django/contrib/gis/geoip2/base.py
335
9054
import os
import socket

import geoip2.database

from django.conf import settings
from django.core.validators import ipv4_re
from django.utils import six
from django.utils.ipv6 import is_valid_ipv6_address

from .resources import City, Country

# Creating the settings dictionary with any settings, if needed.
GEOIP_SETTINGS = {
    'GEOIP_PATH': getattr(settings, 'GEOIP_PATH', None),
    'GEOIP_CITY': getattr(settings, 'GEOIP_CITY', 'GeoLite2-City.mmdb'),
    'GEOIP_COUNTRY': getattr(settings, 'GEOIP_COUNTRY', 'GeoLite2-Country.mmdb'),
}


class GeoIP2Exception(Exception):
    pass


class GeoIP2(object):
    # The flags for GeoIP memory caching.
    # Try MODE_MMAP_EXT, MODE_MMAP, MODE_FILE in that order.
    MODE_AUTO = 0
    # Use the C extension with memory map.
    MODE_MMAP_EXT = 1
    # Read from memory map. Pure Python.
    MODE_MMAP = 2
    # Read database as standard file. Pure Python.
    MODE_FILE = 4
    # Load database into memory. Pure Python.
    MODE_MEMORY = 8
    cache_options = {opt: None for opt in (0, 1, 2, 4, 8)}

    # Paths to the city & country binary databases.
    _city_file = ''
    _country_file = ''

    # Initially, pointers to GeoIP file references are NULL.
    _city = None
    _country = None

    def __init__(self, path=None, cache=0, country=None, city=None):
        """
        Initialize the GeoIP object. No parameters are required to use default
        settings. Keyword arguments may be passed in to customize the locations
        of the GeoIP datasets.

        * path: Base directory to where GeoIP data is located or the full path
            to where the city or country data files (*.mmdb) are located.
            Assumes that both the city and country data sets are located in
            this directory; overrides the GEOIP_PATH setting.

        * cache: The cache settings when opening up the GeoIP datasets. May be
            an integer in (0, 1, 2, 4, 8) corresponding to the MODE_AUTO,
            MODE_MMAP_EXT, MODE_MMAP, MODE_FILE, and MODE_MEMORY,
            `GeoIPOptions` C API settings, respectively. Defaults to 0,
            meaning MODE_AUTO.

        * country: The name of the GeoIP country data file. Defaults to
            'GeoLite2-Country.mmdb'; overrides the GEOIP_COUNTRY setting.

        * city: The name of the GeoIP city data file. Defaults to
            'GeoLite2-City.mmdb'; overrides the GEOIP_CITY setting.
        """
        # Checking the given cache option.
        if cache in self.cache_options:
            self._cache = cache
        else:
            raise GeoIP2Exception('Invalid GeoIP caching option: %s' % cache)

        # Getting the GeoIP data path.
        if not path:
            path = GEOIP_SETTINGS['GEOIP_PATH']
            if not path:
                raise GeoIP2Exception('GeoIP path must be provided via parameter or the GEOIP_PATH setting.')
        if not isinstance(path, six.string_types):
            raise TypeError('Invalid path type: %s' % type(path).__name__)

        if os.path.isdir(path):
            # Constructing the GeoIP database filenames using the settings
            # dictionary. If the database files for the GeoLite country
            # and/or city datasets exist, then try to open them.
            country_db = os.path.join(path, country or GEOIP_SETTINGS['GEOIP_COUNTRY'])
            if os.path.isfile(country_db):
                self._country = geoip2.database.Reader(country_db, mode=cache)
                self._country_file = country_db

            city_db = os.path.join(path, city or GEOIP_SETTINGS['GEOIP_CITY'])
            if os.path.isfile(city_db):
                self._city = geoip2.database.Reader(city_db, mode=cache)
                self._city_file = city_db
        elif os.path.isfile(path):
            # Otherwise, some detective work will be needed to figure out
            # whether the given database path is for the GeoIP country or city
            # databases.
            reader = geoip2.database.Reader(path, mode=cache)
            db_type = reader.metadata().database_type

            if db_type.endswith('City'):
                # GeoLite City database detected.
                self._city = reader
                self._city_file = path
            elif db_type.endswith('Country'):
                # GeoIP Country database detected.
                self._country = reader
                self._country_file = path
            else:
                raise GeoIP2Exception('Unable to recognize database edition: %s' % db_type)
        else:
            raise GeoIP2Exception('GeoIP path must be a valid file or directory.')

    @property
    def _reader(self):
        if self._country:
            return self._country
        else:
            return self._city

    @property
    def _country_or_city(self):
        if self._country:
            return self._country.country
        else:
            return self._city.city

    def __del__(self):
        # Cleanup any GeoIP file handles lying around.
        if self._reader:
            self._reader.close()

    def __repr__(self):
        meta = self._reader.metadata()
        version = '[v%s.%s]' % (meta.binary_format_major_version, meta.binary_format_minor_version)
        return '<%(cls)s %(version)s _country_file="%(country)s", _city_file="%(city)s">' % {
            'cls': self.__class__.__name__,
            'version': version,
            'country': self._country_file,
            'city': self._city_file,
        }

    def _check_query(self, query, country=False, city=False, city_or_country=False):
        "Helper routine for checking the query and database availability."
        # Making sure a string was passed in for the query.
        if not isinstance(query, six.string_types):
            raise TypeError('GeoIP query must be a string, not type %s' % type(query).__name__)

        # Extra checks for the existence of country and city databases.
        if city_or_country and not (self._country or self._city):
            raise GeoIP2Exception('Invalid GeoIP country and city data files.')
        elif country and not self._country:
            raise GeoIP2Exception('Invalid GeoIP country data file: %s' % self._country_file)
        elif city and not self._city:
            raise GeoIP2Exception('Invalid GeoIP city data file: %s' % self._city_file)

        # Return the query string back to the caller. GeoIP2 only takes IP addresses.
        if not (ipv4_re.match(query) or is_valid_ipv6_address(query)):
            query = socket.gethostbyname(query)

        return query

    def city(self, query):
        """
        Return a dictionary of city information for the given IP address or
        Fully Qualified Domain Name (FQDN). Some information in the dictionary
        may be undefined (None).
        """
        enc_query = self._check_query(query, city=True)
        return City(self._city.city(enc_query))

    def country_code(self, query):
        "Return the country code for the given IP Address or FQDN."
        enc_query = self._check_query(query, city_or_country=True)
        return self.country(enc_query)['country_code']

    def country_name(self, query):
        "Return the country name for the given IP Address or FQDN."
        enc_query = self._check_query(query, city_or_country=True)
        return self.country(enc_query)['country_name']

    def country(self, query):
        """
        Return a dictionary with the country code and name when given an
        IP address or a Fully Qualified Domain Name (FQDN). For example, both
        '24.124.1.80' and 'djangoproject.com' are valid parameters.
        """
        # Returning the country code and name
        enc_query = self._check_query(query, city_or_country=True)
        return Country(self._country_or_city(enc_query))

    # #### Coordinate retrieval routines ####
    def coords(self, query, ordering=('longitude', 'latitude')):
        cdict = self.city(query)
        if cdict is None:
            return None
        else:
            return tuple(cdict[o] for o in ordering)

    def lon_lat(self, query):
        "Return a tuple of the (longitude, latitude) for the given query."
        return self.coords(query)

    def lat_lon(self, query):
        "Return a tuple of the (latitude, longitude) for the given query."
        return self.coords(query, ('latitude', 'longitude'))

    def geos(self, query):
        "Return a GEOS Point object for the given query."
        ll = self.lon_lat(query)
        if ll:
            from django.contrib.gis.geos import Point
            return Point(ll, srid=4326)
        else:
            return None

    # #### GeoIP Database Information Routines ####
    @property
    def info(self):
        "Return information about the GeoIP library and databases in use."
        meta = self._reader.metadata()
        return 'GeoIP Library:\n\t%s.%s\n' % (meta.binary_format_major_version, meta.binary_format_minor_version)

    @classmethod
    def open(cls, full_path, cache):
        return GeoIP2(full_path, cache)
bsd-3-clause
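Typical lookups against the wrapper above, assuming the geoip2 package is installed and GEOIP_PATH points at the GeoLite2 .mmdb files (the queried values are illustrative):

from django.contrib.gis.geoip2 import GeoIP2

g = GeoIP2()                    # uses GEOIP_PATH from settings
g.country('google.com')         # {'country_code': 'US', 'country_name': 'United States'}
g.city('72.14.207.99')          # full city record for an IP
g.lat_lon('72.14.207.99')       # (latitude, longitude) tuple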
vbarrielle/cppChan
3rdparty/gtest-1.6.0/test/gtest_break_on_failure_unittest.py
1050
7214
#!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#     * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

"""Unit test for Google Test's break-on-failure mode.

A user can ask Google Test to seg-fault when an assertion fails, using
either the GTEST_BREAK_ON_FAILURE environment variable or the
--gtest_break_on_failure flag.  This script tests such functionality
by invoking gtest_break_on_failure_unittest_ (a program written with
Google Test) with different environments and command line flags.
"""

__author__ = '[email protected] (Zhanyong Wan)'

import gtest_test_utils
import os
import sys


# Constants.

IS_WINDOWS = os.name == 'nt'

# The environment variable for enabling/disabling the break-on-failure mode.
BREAK_ON_FAILURE_ENV_VAR = 'GTEST_BREAK_ON_FAILURE'

# The command line flag for enabling/disabling the break-on-failure mode.
BREAK_ON_FAILURE_FLAG = 'gtest_break_on_failure'

# The environment variable for enabling/disabling the throw-on-failure mode.
THROW_ON_FAILURE_ENV_VAR = 'GTEST_THROW_ON_FAILURE'

# The environment variable for enabling/disabling the catch-exceptions mode.
CATCH_EXCEPTIONS_ENV_VAR = 'GTEST_CATCH_EXCEPTIONS'

# Path to the gtest_break_on_failure_unittest_ program.
EXE_PATH = gtest_test_utils.GetTestExecutablePath(
    'gtest_break_on_failure_unittest_')


# Utilities.

environ = os.environ.copy()


def SetEnvVar(env_var, value):
  """Sets an environment variable to a given value; unsets it when the
  given value is None.
  """

  if value is not None:
    environ[env_var] = value
  elif env_var in environ:
    del environ[env_var]


def Run(command):
  """Runs a command; returns 1 if it was killed by a signal, or 0 otherwise."""

  p = gtest_test_utils.Subprocess(command, env=environ)
  if p.terminated_by_signal:
    return 1
  else:
    return 0


# The tests.

class GTestBreakOnFailureUnitTest(gtest_test_utils.TestCase):
  """Tests using the GTEST_BREAK_ON_FAILURE environment variable or
  the --gtest_break_on_failure flag to turn assertion failures into
  segmentation faults.
  """

  def RunAndVerify(self, env_var_value, flag_value, expect_seg_fault):
    """Runs gtest_break_on_failure_unittest_ and verifies that it does
    (or does not) have a seg-fault.

    Args:
      env_var_value:    value of the GTEST_BREAK_ON_FAILURE environment
                        variable; None if the variable should be unset.
      flag_value:       value of the --gtest_break_on_failure flag;
                        None if the flag should not be present.
      expect_seg_fault: 1 if the program is expected to generate a seg-fault;
                        0 otherwise.
    """

    SetEnvVar(BREAK_ON_FAILURE_ENV_VAR, env_var_value)

    if env_var_value is None:
      env_var_value_msg = ' is not set'
    else:
      env_var_value_msg = '=' + env_var_value

    if flag_value is None:
      flag = ''
    elif flag_value == '0':
      flag = '--%s=0' % BREAK_ON_FAILURE_FLAG
    else:
      flag = '--%s' % BREAK_ON_FAILURE_FLAG

    command = [EXE_PATH]
    if flag:
      command.append(flag)

    if expect_seg_fault:
      should_or_not = 'should'
    else:
      should_or_not = 'should not'

    has_seg_fault = Run(command)

    SetEnvVar(BREAK_ON_FAILURE_ENV_VAR, None)

    msg = ('when %s%s, an assertion failure in "%s" %s cause a seg-fault.' %
           (BREAK_ON_FAILURE_ENV_VAR, env_var_value_msg, ' '.join(command),
            should_or_not))
    self.assert_(has_seg_fault == expect_seg_fault, msg)

  def testDefaultBehavior(self):
    """Tests the behavior of the default mode."""

    self.RunAndVerify(env_var_value=None, flag_value=None, expect_seg_fault=0)

  def testEnvVar(self):
    """Tests using the GTEST_BREAK_ON_FAILURE environment variable."""

    self.RunAndVerify(env_var_value='0',
                      flag_value=None,
                      expect_seg_fault=0)
    self.RunAndVerify(env_var_value='1',
                      flag_value=None,
                      expect_seg_fault=1)

  def testFlag(self):
    """Tests using the --gtest_break_on_failure flag."""

    self.RunAndVerify(env_var_value=None,
                      flag_value='0',
                      expect_seg_fault=0)
    self.RunAndVerify(env_var_value=None,
                      flag_value='1',
                      expect_seg_fault=1)

  def testFlagOverridesEnvVar(self):
    """Tests that the flag overrides the environment variable."""

    self.RunAndVerify(env_var_value='0',
                      flag_value='0',
                      expect_seg_fault=0)
    self.RunAndVerify(env_var_value='0',
                      flag_value='1',
                      expect_seg_fault=1)
    self.RunAndVerify(env_var_value='1',
                      flag_value='0',
                      expect_seg_fault=0)
    self.RunAndVerify(env_var_value='1',
                      flag_value='1',
                      expect_seg_fault=1)

  def testBreakOnFailureOverridesThrowOnFailure(self):
    """Tests that gtest_break_on_failure overrides gtest_throw_on_failure."""

    SetEnvVar(THROW_ON_FAILURE_ENV_VAR, '1')
    try:
      self.RunAndVerify(env_var_value=None,
                        flag_value='1',
                        expect_seg_fault=1)
    finally:
      SetEnvVar(THROW_ON_FAILURE_ENV_VAR, None)

  if IS_WINDOWS:
    def testCatchExceptionsDoesNotInterfere(self):
      """Tests that gtest_catch_exceptions doesn't interfere."""

      SetEnvVar(CATCH_EXCEPTIONS_ENV_VAR, '1')
      try:
        self.RunAndVerify(env_var_value='1',
                          flag_value='1',
                          expect_seg_fault=1)
      finally:
        SetEnvVar(CATCH_EXCEPTIONS_ENV_VAR, None)


if __name__ == '__main__':
  gtest_test_utils.Main()
mit
yawnosnorous/python-for-android
python-modules/twisted/twisted/web/test/test_error.py
52
5433
# Copyright (c) 2010 Twisted Matrix Laboratories.
# See LICENSE for details.

"""
HTTP errors.
"""

from twisted.trial import unittest
from twisted.web import error


class ErrorTestCase(unittest.TestCase):
    """
    Tests for how L{Error} attributes are initialized.
    """
    def test_noMessageValidStatus(self):
        """
        If no C{message} argument is passed to the L{Error} constructor and the
        C{code} argument is a valid HTTP status code, C{code} is mapped to a
        descriptive string to which C{message} is assigned.
        """
        e = error.Error("200")
        self.assertEquals(e.message, "OK")

    def test_noMessageInvalidStatus(self):
        """
        If no C{message} argument is passed to the L{Error} constructor and
        C{code} isn't a valid HTTP status code, C{message} stays C{None}.
        """
        e = error.Error("InvalidCode")
        self.assertEquals(e.message, None)

    def test_messageExists(self):
        """
        If a C{message} argument is passed to the L{Error} constructor, the
        C{message} isn't affected by the value of C{status}.
        """
        e = error.Error("200", "My own message")
        self.assertEquals(e.message, "My own message")


class PageRedirectTestCase(unittest.TestCase):
    """
    Tests for how L{PageRedirect} attributes are initialized.
    """
    def test_noMessageValidStatus(self):
        """
        If no C{message} argument is passed to the L{PageRedirect} constructor
        and the C{code} argument is a valid HTTP status code, C{code} is
        mapped to a descriptive string to which C{message} is assigned.
        """
        e = error.PageRedirect("200", location="/foo")
        self.assertEquals(e.message, "OK to /foo")

    def test_noMessageValidStatusNoLocation(self):
        """
        If no C{message} argument is passed to the L{PageRedirect} constructor
        and C{location} is also empty and the C{code} argument is a valid HTTP
        status code, C{code} is mapped to a descriptive string to which
        C{message} is assigned without trying to include an empty location.
        """
        e = error.PageRedirect("200")
        self.assertEquals(e.message, "OK")

    def test_noMessageInvalidStatusLocationExists(self):
        """
        If no C{message} argument is passed to the L{PageRedirect} constructor
        and C{code} isn't a valid HTTP status code, C{message} stays C{None}.
        """
        e = error.PageRedirect("InvalidCode", location="/foo")
        self.assertEquals(e.message, None)

    def test_messageExistsLocationExists(self):
        """
        If a C{message} argument is passed to the L{PageRedirect} constructor,
        the C{message} isn't affected by the value of C{status}.
        """
        e = error.PageRedirect("200", "My own message", location="/foo")
        self.assertEquals(e.message, "My own message to /foo")

    def test_messageExistsNoLocation(self):
        """
        If a C{message} argument is passed to the L{PageRedirect} constructor
        and no location is provided, C{message} doesn't try to include the
        empty location.
        """
        e = error.PageRedirect("200", "My own message")
        self.assertEquals(e.message, "My own message")


class InfiniteRedirectionTestCase(unittest.TestCase):
    """
    Tests for how L{InfiniteRedirection} attributes are initialized.
    """
    def test_noMessageValidStatus(self):
        """
        If no C{message} argument is passed to the L{InfiniteRedirection}
        constructor and the C{code} argument is a valid HTTP status code,
        C{code} is mapped to a descriptive string to which C{message} is
        assigned.
        """
        e = error.InfiniteRedirection("200", location="/foo")
        self.assertEquals(e.message, "OK to /foo")

    def test_noMessageValidStatusNoLocation(self):
        """
        If no C{message} argument is passed to the L{InfiniteRedirection}
        constructor and C{location} is also empty and the C{code} argument is
        a valid HTTP status code, C{code} is mapped to a descriptive string to
        which C{message} is assigned without trying to include an empty
        location.
        """
        e = error.InfiniteRedirection("200")
        self.assertEquals(e.message, "OK")

    def test_noMessageInvalidStatusLocationExists(self):
        """
        If no C{message} argument is passed to the L{InfiniteRedirection}
        constructor and C{code} isn't a valid HTTP status code, C{message}
        stays C{None}.
        """
        e = error.InfiniteRedirection("InvalidCode", location="/foo")
        self.assertEquals(e.message, None)

    def test_messageExistsLocationExists(self):
        """
        If a C{message} argument is passed to the L{InfiniteRedirection}
        constructor, the C{message} isn't affected by the value of C{status}.
        """
        e = error.InfiniteRedirection("200", "My own message", location="/foo")
        self.assertEquals(e.message, "My own message to /foo")

    def test_messageExistsNoLocation(self):
        """
        If a C{message} argument is passed to the L{InfiniteRedirection}
        constructor and no location is provided, C{message} doesn't try to
        include the empty location.
        """
        e = error.InfiniteRedirection("200", "My own message")
        self.assertEquals(e.message, "My own message")
apache-2.0
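The behavior asserted above can be seen directly in a shell; a sketch for this vintage of Twisted (status strings come from twisted.web.http.RESPONSES):

from twisted.web import error

e = error.Error("404")
print(e.message)  # 'Not Found' - the code is mapped to its descriptive string

e = error.PageRedirect("301", location="/new")
print(e.message)  # 'Moved Permanently to /new'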
ralphtheninja/cjdns
node_build/dependencies/libuv/build/gyp/test/win/gyptest-link-pgo.py
239
2993
#!/usr/bin/env python

# Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""
Make sure PGO is working properly.
"""

import TestGyp

import os
import sys

if sys.platform == 'win32':
  test = TestGyp.TestGyp(formats=['msvs', 'ninja'])

  CHDIR = 'linker-flags'
  test.run_gyp('pgo.gyp', chdir=CHDIR)

  def IsPGOAvailable():
    """Returns true if the Visual Studio available here supports PGO."""
    test.build('pgo.gyp', 'gen_linker_option', chdir=CHDIR)
    tmpfile = test.read(test.built_file_path('linker_options.txt',
                                             chdir=CHDIR))
    # str.find() returns -1 (truthy) on a miss, so test substring membership
    # per line instead of relying on find()'s return value.
    return any('PGOPTIMIZE' in line for line in tmpfile.splitlines())

  # Test generated build files look fine.
  if test.format == 'ninja':
    ninja = test.built_file_path('obj/test_pgo_instrument.ninja', chdir=CHDIR)
    test.must_contain(ninja, '/LTCG:PGINSTRUMENT')
    test.must_contain(ninja, 'test_pgo.pgd')
    ninja = test.built_file_path('obj/test_pgo_optimize.ninja', chdir=CHDIR)
    test.must_contain(ninja, '/LTCG:PGOPTIMIZE')
    test.must_contain(ninja, 'test_pgo.pgd')
    ninja = test.built_file_path('obj/test_pgo_update.ninja', chdir=CHDIR)
    test.must_contain(ninja, '/LTCG:PGUPDATE')
    test.must_contain(ninja, 'test_pgo.pgd')
  elif test.format == 'msvs':
    LTCG_FORMAT = '<LinkTimeCodeGeneration>%s</LinkTimeCodeGeneration>'
    vcproj = test.workpath('linker-flags/test_pgo_instrument.vcxproj')
    test.must_contain(vcproj, LTCG_FORMAT % 'PGInstrument')
    test.must_contain(vcproj, 'test_pgo.pgd')
    vcproj = test.workpath('linker-flags/test_pgo_optimize.vcxproj')
    test.must_contain(vcproj, LTCG_FORMAT % 'PGOptimization')
    test.must_contain(vcproj, 'test_pgo.pgd')
    vcproj = test.workpath('linker-flags/test_pgo_update.vcxproj')
    test.must_contain(vcproj, LTCG_FORMAT % 'PGUpdate')
    test.must_contain(vcproj, 'test_pgo.pgd')

  # When PGO is available, try building binaries with PGO.
  if IsPGOAvailable():
    pgd_path = test.built_file_path('test_pgo.pgd', chdir=CHDIR)

    # Test if 'PGInstrument' generates PGD (Profile-Guided Database) file.
    if os.path.exists(pgd_path):
      test.unlink(pgd_path)
    test.must_not_exist(pgd_path)
    test.build('pgo.gyp', 'test_pgo_instrument', chdir=CHDIR)
    test.must_exist(pgd_path)

    # Test if 'PGOptimize' works well
    test.build('pgo.gyp', 'test_pgo_optimize', chdir=CHDIR)
    test.must_contain_any_line(test.stdout(), ['profiled functions'])

    # Test if 'PGUpdate' works well
    test.build('pgo.gyp', 'test_pgo_update', chdir=CHDIR)
    # With 'PGUpdate', linker should not complain that sources are changed
    # after the previous training run.
    test.touch(test.workpath('linker-flags/inline_test_main.cc'))
    test.unlink(test.built_file_path('test_pgo_update.exe', chdir=CHDIR))
    test.build('pgo.gyp', 'test_pgo_update', chdir=CHDIR)
    test.must_contain_any_line(test.stdout(), ['profiled functions'])

  test.pass_test()
gpl-3.0
Plexxi/st2
st2common/tests/unit/services/test_workflow.py
3
18346
# Copyright 2020 The StackStorm Authors.
# Copyright 2019 Extreme Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import absolute_import

import copy
import mock

from orquesta import exceptions as orquesta_exc
from orquesta.specs import loader as specs_loader
from orquesta import statuses as wf_statuses

import st2tests

import st2tests.config as tests_config

tests_config.parse_args()

from st2common.bootstrap import actionsregistrar
from st2common.bootstrap import runnersregistrar
from st2common.exceptions import action as action_exc
from st2common.models.db import liveaction as lv_db_models
from st2common.models.db import execution as ex_db_models
from st2common.models.db import pack as pk_db_models
from st2common.persistence import execution as ex_db_access
from st2common.persistence import pack as pk_db_access
from st2common.persistence import workflow as wf_db_access
from st2common.services import action as action_service
from st2common.services import workflows as workflow_service
from st2common.transport import liveaction as lv_ac_xport
from st2common.transport import publishers
from st2tests.mocks import liveaction as mock_lv_ac_xport


TEST_PACK = "orquesta_tests"
TEST_PACK_PATH = (
    st2tests.fixturesloader.get_fixtures_packs_base_path() + "/" + TEST_PACK
)

PACK_7 = "dummy_pack_7"
PACK_7_PATH = st2tests.fixturesloader.get_fixtures_packs_base_path() + "/" + PACK_7

PACKS = [
    TEST_PACK_PATH,
    PACK_7_PATH,
    st2tests.fixturesloader.get_fixtures_packs_base_path() + "/core",
]


@mock.patch.object(
    publishers.CUDPublisher, "publish_update", mock.MagicMock(return_value=None)
)
@mock.patch.object(
    publishers.CUDPublisher,
    "publish_create",
    mock.MagicMock(side_effect=mock_lv_ac_xport.MockLiveActionPublisher.publish_create),
)
@mock.patch.object(
    lv_ac_xport.LiveActionPublisher,
    "publish_state",
    mock.MagicMock(side_effect=mock_lv_ac_xport.MockLiveActionPublisher.publish_state),
)
class WorkflowExecutionServiceTest(st2tests.WorkflowTestCase):
    @classmethod
    def setUpClass(cls):
        super(WorkflowExecutionServiceTest, cls).setUpClass()

        # Register runners.
        runnersregistrar.register_runners()

        # Register test pack(s).
        actions_registrar = actionsregistrar.ActionsRegistrar(
            use_pack_cache=False, fail_on_failure=True
        )

        for pack in PACKS:
            actions_registrar.register_from_pack(pack)

    def test_request(self):
        wf_meta = self.get_wf_fixture_meta_data(TEST_PACK_PATH, "sequential.yaml")

        # Manually create the liveaction and action execution objects without publishing.
        lv_ac_db = lv_db_models.LiveActionDB(action=wf_meta["name"])
        lv_ac_db, ac_ex_db = action_service.create_request(lv_ac_db)

        # Request the workflow execution.
        wf_def = self.get_wf_def(TEST_PACK_PATH, wf_meta)
        st2_ctx = self.mock_st2_context(ac_ex_db)
        wf_ex_db = workflow_service.request(wf_def, ac_ex_db, st2_ctx)

        # Check workflow execution is saved to the database.
        wf_ex_dbs = wf_db_access.WorkflowExecution.query(
            action_execution=str(ac_ex_db.id)
        )
        self.assertEqual(len(wf_ex_dbs), 1)

        # Check required attributes.
        wf_ex_db = wf_ex_dbs[0]
        self.assertIsNotNone(wf_ex_db.id)
        self.assertGreater(wf_ex_db.rev, 0)
        self.assertEqual(wf_ex_db.action_execution, str(ac_ex_db.id))
        self.assertEqual(wf_ex_db.status, wf_statuses.REQUESTED)

    def test_request_with_input(self):
        wf_meta = self.get_wf_fixture_meta_data(TEST_PACK_PATH, "sequential.yaml")

        # Manually create the liveaction and action execution objects without publishing.
        lv_ac_db = lv_db_models.LiveActionDB(
            action=wf_meta["name"], parameters={"who": "stan"}
        )
        lv_ac_db, ac_ex_db = action_service.create_request(lv_ac_db)

        # Request the workflow execution.
        wf_def = self.get_wf_def(TEST_PACK_PATH, wf_meta)
        st2_ctx = self.mock_st2_context(ac_ex_db)
        wf_ex_db = workflow_service.request(wf_def, ac_ex_db, st2_ctx)

        # Check workflow execution is saved to the database.
        wf_ex_dbs = wf_db_access.WorkflowExecution.query(
            action_execution=str(ac_ex_db.id)
        )
        self.assertEqual(len(wf_ex_dbs), 1)

        # Check required attributes.
        wf_ex_db = wf_ex_dbs[0]
        self.assertIsNotNone(wf_ex_db.id)
        self.assertGreater(wf_ex_db.rev, 0)
        self.assertEqual(wf_ex_db.action_execution, str(ac_ex_db.id))
        self.assertEqual(wf_ex_db.status, wf_statuses.REQUESTED)

        # Check input and context.
        expected_input = {"who": "stan"}

        self.assertDictEqual(wf_ex_db.input, expected_input)

    def test_request_bad_action(self):
        wf_meta = self.get_wf_fixture_meta_data(TEST_PACK_PATH, "sequential.yaml")

        # Manually create the action execution object with the bad action.
        ac_ex_db = ex_db_models.ActionExecutionDB(
            action={"ref": "mock.foobar"}, runner={"name": "foobar"}
        )

        # Request the workflow execution.
        self.assertRaises(
            action_exc.InvalidActionReferencedException,
            workflow_service.request,
            self.get_wf_def(TEST_PACK_PATH, wf_meta),
            ac_ex_db,
            self.mock_st2_context(ac_ex_db),
        )

    def test_request_wf_def_with_bad_action_ref(self):
        wf_meta = self.get_wf_fixture_meta_data(
            TEST_PACK_PATH, "fail-inspection-action-ref.yaml"
        )

        # Manually create the liveaction and action execution objects without publishing.
        lv_ac_db = lv_db_models.LiveActionDB(action=wf_meta["name"])
        lv_ac_db, ac_ex_db = action_service.create_request(lv_ac_db)

        # Exception is expected on request of workflow execution.
        self.assertRaises(
            orquesta_exc.WorkflowInspectionError,
            workflow_service.request,
            self.get_wf_def(TEST_PACK_PATH, wf_meta),
            ac_ex_db,
            self.mock_st2_context(ac_ex_db),
        )

    def test_request_wf_def_with_unregistered_action(self):
        wf_meta = self.get_wf_fixture_meta_data(
            TEST_PACK_PATH, "fail-inspection-action-db.yaml"
        )

        # Manually create the liveaction and action execution objects without publishing.
        lv_ac_db = lv_db_models.LiveActionDB(action=wf_meta["name"])
        lv_ac_db, ac_ex_db = action_service.create_request(lv_ac_db)

        # Exception is expected on request of workflow execution.
        self.assertRaises(
            orquesta_exc.WorkflowInspectionError,
            workflow_service.request,
            self.get_wf_def(TEST_PACK_PATH, wf_meta),
            ac_ex_db,
            self.mock_st2_context(ac_ex_db),
        )

    def test_request_wf_def_with_missing_required_action_param(self):
        wf_name = "fail-inspection-missing-required-action-param"
        wf_meta = self.get_wf_fixture_meta_data(TEST_PACK_PATH, wf_name + ".yaml")

        # Manually create the liveaction and action execution objects without publishing.
        lv_ac_db = lv_db_models.LiveActionDB(action=wf_meta["name"])
        lv_ac_db, ac_ex_db = action_service.create_request(lv_ac_db)

        # Exception is expected on request of workflow execution.
        self.assertRaises(
            orquesta_exc.WorkflowInspectionError,
            workflow_service.request,
            self.get_wf_def(TEST_PACK_PATH, wf_meta),
            ac_ex_db,
            self.mock_st2_context(ac_ex_db),
        )

    def test_request_wf_def_with_unexpected_action_param(self):
        wf_name = "fail-inspection-unexpected-action-param"
        wf_meta = self.get_wf_fixture_meta_data(TEST_PACK_PATH, wf_name + ".yaml")

        # Manually create the liveaction and action execution objects without publishing.
        lv_ac_db = lv_db_models.LiveActionDB(action=wf_meta["name"])
        lv_ac_db, ac_ex_db = action_service.create_request(lv_ac_db)

        # Exception is expected on request of workflow execution.
        self.assertRaises(
            orquesta_exc.WorkflowInspectionError,
            workflow_service.request,
            self.get_wf_def(TEST_PACK_PATH, wf_meta),
            ac_ex_db,
            self.mock_st2_context(ac_ex_db),
        )

    def test_request_task_execution(self):
        wf_meta = self.get_wf_fixture_meta_data(TEST_PACK_PATH, "sequential.yaml")

        # Manually create the liveaction and action execution objects without publishing.
        lv_ac_db = lv_db_models.LiveActionDB(action=wf_meta["name"])
        lv_ac_db, ac_ex_db = action_service.create_request(lv_ac_db)

        # Request the workflow execution.
        wf_def = self.get_wf_def(TEST_PACK_PATH, wf_meta)
        st2_ctx = self.mock_st2_context(ac_ex_db)
        wf_ex_db = workflow_service.request(wf_def, ac_ex_db, st2_ctx)
        spec_module = specs_loader.get_spec_module(wf_ex_db.spec["catalog"])
        wf_spec = spec_module.WorkflowSpec.deserialize(wf_ex_db.spec)

        # Manually request task execution.
        task_route = 0
        task_id = "task1"
        task_spec = wf_spec.tasks.get_task(task_id)
        task_ctx = {"foo": "bar"}
        st2_ctx = {"execution_id": wf_ex_db.action_execution}

        task_ex_req = {
            "id": task_id,
            "route": task_route,
            "spec": task_spec,
            "ctx": task_ctx,
            "actions": [
                {"action": "core.echo", "input": {"message": "Veni, vidi, vici."}}
            ],
        }

        workflow_service.request_task_execution(wf_ex_db, st2_ctx, task_ex_req)

        # Check task execution is saved to the database.
        task_ex_dbs = wf_db_access.TaskExecution.query(
            workflow_execution=str(wf_ex_db.id)
        )
        self.assertEqual(len(task_ex_dbs), 1)

        # Check required attributes.
        task_ex_db = task_ex_dbs[0]
        self.assertIsNotNone(task_ex_db.id)
        self.assertGreater(task_ex_db.rev, 0)
        self.assertEqual(task_ex_db.workflow_execution, str(wf_ex_db.id))
        self.assertEqual(task_ex_db.status, wf_statuses.RUNNING)

        # Check action execution for the task query with task execution ID.
        ac_ex_dbs = ex_db_access.ActionExecution.query(
            task_execution=str(task_ex_db.id)
        )
        self.assertEqual(len(ac_ex_dbs), 1)

        # Check action execution for the task query with workflow execution ID.
        ac_ex_dbs = ex_db_access.ActionExecution.query(
            workflow_execution=str(wf_ex_db.id)
        )
        self.assertEqual(len(ac_ex_dbs), 1)

    def test_request_task_execution_bad_action(self):
        wf_meta = self.get_wf_fixture_meta_data(TEST_PACK_PATH, "sequential.yaml")

        # Manually create the liveaction and action execution objects without publishing.
        lv_ac_db = lv_db_models.LiveActionDB(action=wf_meta["name"])
        lv_ac_db, ac_ex_db = action_service.create_request(lv_ac_db)

        # Request the workflow execution.
        wf_def = self.get_wf_def(TEST_PACK_PATH, wf_meta)
        st2_ctx = self.mock_st2_context(ac_ex_db)
        wf_ex_db = workflow_service.request(wf_def, ac_ex_db, st2_ctx)
        spec_module = specs_loader.get_spec_module(wf_ex_db.spec["catalog"])
        wf_spec = spec_module.WorkflowSpec.deserialize(wf_ex_db.spec)

        # Manually request task execution.
        task_route = 0
        task_id = "task1"
        task_spec = wf_spec.tasks.get_task(task_id)
        task_ctx = {"foo": "bar"}
        st2_ctx = {"execution_id": wf_ex_db.action_execution}

        task_ex_req = {
            "id": task_id,
            "route": task_route,
            "spec": task_spec,
            "ctx": task_ctx,
            "actions": [
                {"action": "mock.echo", "input": {"message": "Veni, vidi, vici."}}
            ],
        }

        self.assertRaises(
            action_exc.InvalidActionReferencedException,
            workflow_service.request_task_execution,
            wf_ex_db,
            st2_ctx,
            task_ex_req,
        )

    def test_handle_action_execution_completion(self):
        wf_meta = self.get_wf_fixture_meta_data(TEST_PACK_PATH, "sequential.yaml")

        # Manually create the liveaction and action execution objects without publishing.
        lv_ac_db = lv_db_models.LiveActionDB(action=wf_meta["name"])
        lv_ac_db, ac_ex_db = action_service.create_request(lv_ac_db)

        # Request and pre-process the workflow execution.
        wf_def = self.get_wf_def(TEST_PACK_PATH, wf_meta)
        st2_ctx = self.mock_st2_context(ac_ex_db)
        wf_ex_db = workflow_service.request(wf_def, ac_ex_db, st2_ctx)
        wf_ex_db = self.prep_wf_ex(wf_ex_db)

        # Manually request task execution.
        self.run_workflow_step(wf_ex_db, "task1", 0, ctx={"foo": "bar"})

        # Check that a new task is executed.
        self.assert_task_running("task2", 0)

    def test_evaluate_action_execution_delay(self):
        base_task_ex_req = {"task_id": "task1", "task_name": "task1", "route": 0}

        # No task delay.
        task_ex_req = copy.deepcopy(base_task_ex_req)
        ac_ex_req = {"action": "core.noop", "input": None}
        actual_delay = workflow_service.eval_action_execution_delay(
            task_ex_req, ac_ex_req
        )
        self.assertIsNone(actual_delay)

        # Simple task delay.
        task_ex_req = copy.deepcopy(base_task_ex_req)
        task_ex_req["delay"] = 180
        ac_ex_req = {"action": "core.noop", "input": None}
        actual_delay = workflow_service.eval_action_execution_delay(
            task_ex_req, ac_ex_req
        )
        self.assertEqual(actual_delay, 180)

        # Task delay for with items task and with no concurrency.
        task_ex_req = copy.deepcopy(base_task_ex_req)
        task_ex_req["delay"] = 180
        task_ex_req["concurrency"] = None
        ac_ex_req = {"action": "core.noop", "input": None, "items_id": 0}
        actual_delay = workflow_service.eval_action_execution_delay(
            task_ex_req, ac_ex_req, True
        )
        self.assertEqual(actual_delay, 180)

        # Task delay for with items task, with concurrency, and evaluate first item.
        task_ex_req = copy.deepcopy(base_task_ex_req)
        task_ex_req["delay"] = 180
        task_ex_req["concurrency"] = 1
        ac_ex_req = {"action": "core.noop", "input": None, "item_id": 0}
        actual_delay = workflow_service.eval_action_execution_delay(
            task_ex_req, ac_ex_req, True
        )
        self.assertEqual(actual_delay, 180)

        # Task delay for with items task, with concurrency, and evaluate later items.
        task_ex_req = copy.deepcopy(base_task_ex_req)
        task_ex_req["delay"] = 180
        task_ex_req["concurrency"] = 1
        ac_ex_req = {"action": "core.noop", "input": None, "item_id": 1}
        actual_delay = workflow_service.eval_action_execution_delay(
            task_ex_req, ac_ex_req, True
        )
        self.assertIsNone(actual_delay)

    def test_request_action_execution_render(self):
        # Manually create ConfigDB
        output = "Testing"
        value = {"config_item_one": output}
        config_db = pk_db_models.ConfigDB(pack=PACK_7, values=value)
        config = pk_db_access.Config.add_or_update(config_db)
        self.assertEqual(len(config), 3)

        wf_meta = self.get_wf_fixture_meta_data(
            TEST_PACK_PATH, "render_config_context.yaml"
        )

        # Manually create the liveaction and action execution objects without publishing.
        lv_ac_db = lv_db_models.LiveActionDB(action=wf_meta["name"])
        lv_ac_db, ac_ex_db = action_service.create_request(lv_ac_db)

        # Request the workflow execution.
        wf_def = self.get_wf_def(TEST_PACK_PATH, wf_meta)
        st2_ctx = self.mock_st2_context(ac_ex_db)
        wf_ex_db = workflow_service.request(wf_def, ac_ex_db, st2_ctx)
        spec_module = specs_loader.get_spec_module(wf_ex_db.spec["catalog"])
        wf_spec = spec_module.WorkflowSpec.deserialize(wf_ex_db.spec)

        # Pass down appropriate st2 context to the task and action execution(s).
        root_st2_ctx = wf_ex_db.context.get("st2", {})
        st2_ctx = {
            "execution_id": wf_ex_db.action_execution,
            "user": root_st2_ctx.get("user"),
            "pack": root_st2_ctx.get("pack"),
        }

        # Manually request task execution.
        task_route = 0
        task_id = "task1"
        task_spec = wf_spec.tasks.get_task(task_id)
        task_ctx = {"foo": "bar"}

        task_ex_req = {
            "id": task_id,
            "route": task_route,
            "spec": task_spec,
            "ctx": task_ctx,
            "actions": [
                {"action": "dummy_pack_7.render_config_context", "input": None}
            ],
        }

        workflow_service.request_task_execution(wf_ex_db, st2_ctx, task_ex_req)

        # Check task execution is saved to the database.
        task_ex_dbs = wf_db_access.TaskExecution.query(
            workflow_execution=str(wf_ex_db.id)
        )
        self.assertEqual(len(task_ex_dbs), 1)
        workflow_service.request_task_execution(wf_ex_db, st2_ctx, task_ex_req)

        # Manually request action execution
        task_ex_db = task_ex_dbs[0]
        action_ex_db = workflow_service.request_action_execution(
            wf_ex_db, task_ex_db, st2_ctx, task_ex_req["actions"][0]
        )

        # Check required attributes.
        self.assertIsNotNone(str(action_ex_db.id))
        self.assertEqual(task_ex_db.workflow_execution, str(wf_ex_db.id))
        expected_parameters = {"value1": output}
        self.assertEqual(expected_parameters, action_ex_db.parameters)
apache-2.0
draugiskisprendimai/odoo
openerp/addons/base/tests/test_ir_attachment.py
433
3536
import hashlib
import os

import openerp
import openerp.tests.common

HASH_SPLIT = 2      # FIXME: testing implementations detail is not a good idea


class test_ir_attachment(openerp.tests.common.TransactionCase):
    def setUp(self):
        super(test_ir_attachment, self).setUp()
        registry, cr, uid = self.registry, self.cr, self.uid
        self.ira = registry('ir.attachment')
        self.filestore = self.ira._filestore(cr, uid)

        # Blob1
        self.blob1 = 'blob1'
        self.blob1_b64 = self.blob1.encode('base64')
        blob1_hash = hashlib.sha1(self.blob1).hexdigest()
        self.blob1_fname = blob1_hash[:HASH_SPLIT] + '/' + blob1_hash

        # Blob2
        blob2 = 'blob2'
        self.blob2_b64 = blob2.encode('base64')

    def test_01_store_in_db(self):
        registry, cr, uid = self.registry, self.cr, self.uid

        # force storing in database
        registry('ir.config_parameter').set_param(cr, uid, 'ir_attachment.location', 'db')

        # 'ir_attachment.location' is undefined test database storage
        a1 = self.ira.create(cr, uid, {'name': 'a1', 'datas': self.blob1_b64})
        a1_read = self.ira.read(cr, uid, [a1], ['datas'])
        self.assertEqual(a1_read[0]['datas'], self.blob1_b64)

        a1_db_datas = self.ira.browse(cr, uid, a1).db_datas
        self.assertEqual(a1_db_datas, self.blob1_b64)

    def test_02_store_on_disk(self):
        registry, cr, uid = self.registry, self.cr, self.uid

        a2 = self.ira.create(cr, uid, {'name': 'a2', 'datas': self.blob1_b64})
        a2_store_fname = self.ira.browse(cr, uid, a2).store_fname

        self.assertEqual(a2_store_fname, self.blob1_fname)
        self.assertTrue(os.path.isfile(os.path.join(self.filestore, a2_store_fname)))

    def test_03_no_duplication(self):
        registry, cr, uid = self.registry, self.cr, self.uid

        a2 = self.ira.create(cr, uid, {'name': 'a2', 'datas': self.blob1_b64})
        a2_store_fname = self.ira.browse(cr, uid, a2).store_fname

        a3 = self.ira.create(cr, uid, {'name': 'a3', 'datas': self.blob1_b64})
        a3_store_fname = self.ira.browse(cr, uid, a3).store_fname

        self.assertEqual(a3_store_fname, a2_store_fname)

    def test_04_keep_file(self):
        registry, cr, uid = self.registry, self.cr, self.uid

        a2 = self.ira.create(cr, uid, {'name': 'a2', 'datas': self.blob1_b64})
        a3 = self.ira.create(cr, uid, {'name': 'a3', 'datas': self.blob1_b64})

        a2_store_fname = self.ira.browse(cr, uid, a2).store_fname
        a2_fn = os.path.join(self.filestore, a2_store_fname)

        self.ira.unlink(cr, uid, [a3])
        self.assertTrue(os.path.isfile(a2_fn))

        # delete a2 it is unlinked
        self.ira.unlink(cr, uid, [a2])
        self.assertFalse(os.path.isfile(a2_fn))

    def test_05_change_data_change_file(self):
        registry, cr, uid = self.registry, self.cr, self.uid

        a2 = self.ira.create(cr, uid, {'name': 'a2', 'datas': self.blob1_b64})
        a2_store_fname = self.ira.browse(cr, uid, a2).store_fname
        a2_fn = os.path.join(self.filestore, a2_store_fname)

        self.assertTrue(os.path.isfile(a2_fn))

        self.ira.write(cr, uid, [a2], {'datas': self.blob2_b64})
        self.assertFalse(os.path.isfile(a2_fn))

        new_a2_store_fname = self.ira.browse(cr, uid, a2).store_fname
        self.assertNotEqual(a2_store_fname, new_a2_store_fname)

        new_a2_fn = os.path.join(self.filestore, new_a2_store_fname)
        self.assertTrue(os.path.isfile(new_a2_fn))
agpl-3.0
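The filestore layout exercised by the tests above splits the SHA1 of the payload into a two-character directory prefix plus the full hash. A minimal standalone sketch of that naming scheme, using plain hashlib and the same HASH_SPLIT constant as the test (no Odoo imports needed):

import hashlib

HASH_SPLIT = 2  # same constant the test above uses

def filestore_name(payload):
    # sha1 of the raw bytes -> '<2-char prefix>/<full 40-char hash>'
    digest = hashlib.sha1(payload).hexdigest()
    return digest[:HASH_SPLIT] + '/' + digest

# Identical content always maps to the same path, which is what makes
# test_03_no_duplication work.
assert filestore_name(b'blob1') == filestore_name(b'blob1')
print(filestore_name(b'blob1'))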
zoidbergwill/lint-review
lintreview/config.py
2
5210
import os
import logging.config
from flask.config import Config
from ConfigParser import ConfigParser
from StringIO import StringIO


def load_config():
    """
    Loads the config files merging the defaults
    with the file defined in environ.LINTREVIEW_SETTINGS if it exists.
    """
    config = Config(os.getcwd())

    if 'LINTREVIEW_SETTINGS' in os.environ:
        config.from_envvar('LINTREVIEW_SETTINGS')
    elif os.path.exists(os.path.join(os.getcwd(), 'settings.py')):
        config.from_pyfile('settings.py')
    else:
        msg = ("Unable to load configuration file. Please "
               "either create ./settings.py or set LINTREVIEW_SETTINGS "
               "in your environment before running.")
        raise ImportError(msg)
    if config.get('LOGGING_CONFIG'):
        logging.config.fileConfig(
            config.get('LOGGING_CONFIG'),
            disable_existing_loggers=False)

    if config.get('SSL_CA_BUNDLE'):
        os.environ['REQUESTS_CA_BUNDLE'] = config.get('SSL_CA_BUNDLE')

    return config


def get_lintrc_defaults(config):
    """
    Load the default lintrc, if it exists
    """
    if config.get('LINTRC_DEFAULTS'):
        with open(config.get('LINTRC_DEFAULTS')) as f:
            return f.read()


def build_review_config(ini_config, app_config=None):
    """
    Build a new ReviewConfig object using the ini config file
    and the defaults if they exist in the app_config
    """
    config = ReviewConfig()
    if app_config:
        defaults = get_lintrc_defaults(app_config)
        if defaults:
            config.load_ini(defaults)
    config.load_ini(ini_config)
    return config


def comma_value(values):
    return map(lambda x: x.strip(), values.split(','))


def newline_value(values):
    return map(lambda x: x.strip(), values.split('\n'))


class ReviewConfig(object):
    """
    Provides a domain level API to a repository's .lintrc file.
    Allows reading tool names and tool configuration.
    """

    def __init__(self, data=None):
        self._data = {}
        if data:
            self._data = data

    def update(self, data):
        """
        Does a shallow merge of configuration settings.
        This allows repos to control entire tool config by only
        defining the keys they want. If we did a recursive merge, the
        user config file would have to 'undo' our default file changes.

        The one exception is that if the new data has
        empty config, and the current data has non-empty config, the
        non-empty config will be retained.
        """
        for key, value in data.iteritems():
            if key == 'linters' and 'linters' in self._data:
                self._update_linter_config(value)
            else:
                self._data[key] = value

    def _update_linter_config(self, linter_config):
        """
        Update linter config.

        Because linter config is a nested structure, it needs to be
        updated in a somewhat recursive way.
        """
        for linter, tool_config in linter_config.iteritems():
            if self._config_update(linter, tool_config):
                self._data['linters'][linter] = tool_config

    def _config_update(self, linter, tool_config):
        if linter not in self.linters():
            return True
        existing = self.linter_config(linter)
        if tool_config == {} and existing != {}:
            return False
        return True

    def linters(self):
        try:
            return self._data['linters'].keys()
        except KeyError:
            return []

    def linter_config(self, tool):
        try:
            return self._data['linters'][tool]
        except KeyError:
            return {}

    def ignore_patterns(self):
        try:
            return self._data['files']['ignore']
        except KeyError:
            return []

    def ignore_branches(self):
        try:
            return self._data['branches']['ignore']
        except KeyError:
            return []

    def load_ini(self, ini_config):
        """
        Read the provided ini contents and merge
        the data in the ini config into the config object.

        ini_config is assumed to be a string of the ini file contents.
        """
        parser = ConfigParser()
        parser.readfp(StringIO(ini_config))
        data = {
            'linters': {},
            'files': {},
            'branches': {},
        }
        if parser.has_section('files'):
            ignore = parser.get('files', 'ignore')
            data['files']['ignore'] = newline_value(ignore)
        if parser.has_section('branches'):
            ignore = parser.get('branches', 'ignore')
            data['branches']['ignore'] = comma_value(ignore)
        linters = []
        if parser.has_section('tools'):
            linters = comma_value(parser.get('tools', 'linters'))
        # Setup empty config sections
        for linter in linters:
            data['linters'][linter] = {}
        for section in parser.sections():
            if not section.startswith('tool_'):
                continue
            # Strip off the 'tool_' prefix
            linter = section[5:]
            data['linters'][linter] = dict(parser.items(section))
        self.update(data)
mit
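To see how load_ini() feeds update(), here is a hypothetical .lintrc being turned into a ReviewConfig with the module's own build_review_config helper; the section and option names below are illustrative, not taken from the source (Python 2, matching the module):

from lintreview.config import build_review_config

SAMPLE_LINTRC = """
[files]
ignore = vendor/*
    build/*

[tools]
linters = pep8, jshint

[tool_pep8]
max_line_length = 100
"""

config = build_review_config(SAMPLE_LINTRC)
print(config.linters())              # ['pep8', 'jshint'] (order may vary)
print(config.linter_config('pep8'))  # {'max_line_length': '100'}
print(config.ignore_patterns())      # ['vendor/*', 'build/*']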
robfig/rietveld
third_party/oauth2client/multistore_file.py
52
12045
# Copyright 2011 Google Inc. All Rights Reserved.

"""Multi-credential file store with lock support.

This module implements a JSON credential store where multiple
credentials can be stored in one file. That file supports locking
both in a single process and across processes.

The credentials themselves are keyed off of:

* client_id
* user_agent
* scope

The format of the stored data is like so:

{
  'file_version': 1,
  'data': [
      {
          'key': {
              'clientId': '<client id>',
              'userAgent': '<user agent>',
              'scope': '<scope>'
          },
          'credential': {
              # JSON serialized Credentials.
          }
      }
  ]
}
"""

__author__ = '[email protected] (Joe Beda)'

import base64
import errno
import logging
import os
import threading

from anyjson import simplejson
from oauth2client.client import Storage as BaseStorage
from oauth2client.client import Credentials
from oauth2client import util
from locked_file import LockedFile

logger = logging.getLogger(__name__)

# A dict from 'filename'->_MultiStore instances
_multistores = {}
_multistores_lock = threading.Lock()


class Error(Exception):
  """Base error for this module."""
  pass


class NewerCredentialStoreError(Error):
  """The credential store is a newer version than is supported."""
  pass


@util.positional(4)
def get_credential_storage(filename, client_id, user_agent, scope,
                           warn_on_readonly=True):
  """Get a Storage instance for a credential.

  Args:
    filename: The JSON file storing a set of credentials
    client_id: The client_id for the credential
    user_agent: The user agent for the credential
    scope: string or iterable of strings, Scope(s) being requested
    warn_on_readonly: if True, log a warning if the store is readonly

  Returns:
    An object derived from client.Storage for getting/setting the
    credential.
  """
  # Recreate the legacy key with these specific parameters
  key = {'clientId': client_id, 'userAgent': user_agent,
         'scope': util.scopes_to_string(scope)}
  return get_credential_storage_custom_key(
      filename, key, warn_on_readonly=warn_on_readonly)


@util.positional(2)
def get_credential_storage_custom_string_key(
    filename, key_string, warn_on_readonly=True):
  """Get a Storage instance for a credential using a single string as a key.

  Allows you to provide a string as a custom key that will be used for
  credential storage and retrieval.

  Args:
    filename: The JSON file storing a set of credentials
    key_string: A string to use as the key for storing this credential.
    warn_on_readonly: if True, log a warning if the store is readonly

  Returns:
    An object derived from client.Storage for getting/setting the
    credential.
  """
  # Create a key dictionary that can be used
  key_dict = {'key': key_string}
  return get_credential_storage_custom_key(
      filename, key_dict, warn_on_readonly=warn_on_readonly)


@util.positional(2)
def get_credential_storage_custom_key(
    filename, key_dict, warn_on_readonly=True):
  """Get a Storage instance for a credential using a dictionary as a key.

  Allows you to provide a dictionary as a custom key that will be used for
  credential storage and retrieval.

  Args:
    filename: The JSON file storing a set of credentials
    key_dict: A dictionary to use as the key for storing this credential.
      There is no ordering of the keys in the dictionary. Logically
      equivalent dictionaries will produce equivalent storage keys.
    warn_on_readonly: if True, log a warning if the store is readonly

  Returns:
    An object derived from client.Storage for getting/setting the
    credential.
""" filename = os.path.expanduser(filename) _multistores_lock.acquire() try: multistore = _multistores.setdefault( filename, _MultiStore(filename, warn_on_readonly=warn_on_readonly)) finally: _multistores_lock.release() key = util.dict_to_tuple_key(key_dict) return multistore._get_storage(key) class _MultiStore(object): """A file backed store for multiple credentials.""" @util.positional(2) def __init__(self, filename, warn_on_readonly=True): """Initialize the class. This will create the file if necessary. """ self._file = LockedFile(filename, 'r+b', 'rb') self._thread_lock = threading.Lock() self._read_only = False self._warn_on_readonly = warn_on_readonly self._create_file_if_needed() # Cache of deserialized store. This is only valid after the # _MultiStore is locked or _refresh_data_cache is called. This is # of the form of: # # ((key, value), (key, value)...) -> OAuth2Credential # # If this is None, then the store hasn't been read yet. self._data = None class _Storage(BaseStorage): """A Storage object that knows how to read/write a single credential.""" def __init__(self, multistore, key): self._multistore = multistore self._key = key def acquire_lock(self): """Acquires any lock necessary to access this Storage. This lock is not reentrant. """ self._multistore._lock() def release_lock(self): """Release the Storage lock. Trying to release a lock that isn't held will result in a RuntimeError. """ self._multistore._unlock() def locked_get(self): """Retrieve credential. The Storage lock must be held when this is called. Returns: oauth2client.client.Credentials """ credential = self._multistore._get_credential(self._key) if credential: credential.set_store(self) return credential def locked_put(self, credentials): """Write a credential. The Storage lock must be held when this is called. Args: credentials: Credentials, the credentials to store. """ self._multistore._update_credential(self._key, credentials) def locked_delete(self): """Delete a credential. The Storage lock must be held when this is called. Args: credentials: Credentials, the credentials to store. """ self._multistore._delete_credential(self._key) def _create_file_if_needed(self): """Create an empty file if necessary. This method will not initialize the file. Instead it implements a simple version of "touch" to ensure the file has been created. """ if not os.path.exists(self._file.filename()): old_umask = os.umask(0177) try: open(self._file.filename(), 'a+b').close() finally: os.umask(old_umask) def _lock(self): """Lock the entire multistore.""" self._thread_lock.acquire() self._file.open_and_lock() if not self._file.is_locked(): self._read_only = True if self._warn_on_readonly: logger.warn('The credentials file (%s) is not writable. Opening in ' 'read-only mode. Any refreshed credentials will only be ' 'valid for this run.' % self._file.filename()) if os.path.getsize(self._file.filename()) == 0: logger.debug('Initializing empty multistore file') # The multistore is empty so write out an empty file. self._data = {} self._write() elif not self._read_only or self._data is None: # Only refresh the data if we are read/write or we haven't # cached the data yet. If we are readonly, we assume is isn't # changing out from under us and that we only have to read it # once. This prevents us from whacking any new access keys that # we have cached in memory but were unable to write out. 
      self._refresh_data_cache()

  def _unlock(self):
    """Release the lock on the multistore."""
    self._file.unlock_and_close()
    self._thread_lock.release()

  def _locked_json_read(self):
    """Get the raw content of the multistore file.

    The multistore must be locked when this is called.

    Returns:
      The contents of the multistore decoded as JSON.
    """
    assert self._thread_lock.locked()
    self._file.file_handle().seek(0)
    return simplejson.load(self._file.file_handle())

  def _locked_json_write(self, data):
    """Write a JSON serializable data structure to the multistore.

    The multistore must be locked when this is called.

    Args:
      data: The data to be serialized and written.
    """
    assert self._thread_lock.locked()
    if self._read_only:
      return
    self._file.file_handle().seek(0)
    simplejson.dump(data, self._file.file_handle(), sort_keys=True, indent=2)
    self._file.file_handle().truncate()

  def _refresh_data_cache(self):
    """Refresh the contents of the multistore.

    The multistore must be locked when this is called.

    Raises:
      NewerCredentialStoreError: Raised when a newer client has written the
        store.
    """
    self._data = {}
    try:
      raw_data = self._locked_json_read()
    except Exception:
      logger.warn('Credential data store could not be loaded. '
                  'Will ignore and overwrite.')
      return

    version = 0
    try:
      version = raw_data['file_version']
    except Exception:
      logger.warn('Missing version for credential data store. It may be '
                  'corrupt or an old version. Overwriting.')
    if version > 1:
      raise NewerCredentialStoreError(
          'Credential file has file_version of %d. '
          'Only file_version of 1 is supported.' % version)

    credentials = []
    try:
      credentials = raw_data['data']
    except (TypeError, KeyError):
      pass

    for cred_entry in credentials:
      try:
        (key, credential) = self._decode_credential_from_json(cred_entry)
        self._data[key] = credential
      except:
        # If something goes wrong loading a credential, just ignore it
        logger.info('Error decoding credential, skipping', exc_info=True)

  def _decode_credential_from_json(self, cred_entry):
    """Load a credential from our JSON serialization.

    Args:
      cred_entry: A dict entry from the data member of our format

    Returns:
      (key, cred) where the key is the key tuple and the cred is the
        OAuth2Credential object.
    """
    raw_key = cred_entry['key']
    key = util.dict_to_tuple_key(raw_key)
    credential = None
    credential = Credentials.new_from_json(
        simplejson.dumps(cred_entry['credential']))
    return (key, credential)

  def _write(self):
    """Write the cached data back out.

    The multistore must be locked.
    """
    raw_data = {'file_version': 1}
    raw_creds = []
    raw_data['data'] = raw_creds
    for (cred_key, cred) in self._data.items():
      raw_key = dict(cred_key)
      raw_cred = simplejson.loads(cred.to_json())
      raw_creds.append({'key': raw_key, 'credential': raw_cred})
    self._locked_json_write(raw_data)

  def _get_credential(self, key):
    """Get a credential from the multistore.

    The multistore must be locked.

    Args:
      key: The key used to retrieve the credential

    Returns:
      The credential specified or None if not present
    """
    return self._data.get(key, None)

  def _update_credential(self, key, cred):
    """Update a credential and write the multistore.

    This must be called when the multistore is locked.

    Args:
      key: The key used to retrieve the credential
      cred: The OAuth2Credential to update/set
    """
    self._data[key] = cred
    self._write()

  def _delete_credential(self, key):
    """Delete a credential and write the multistore.

    This must be called when the multistore is locked.

    Args:
      key: The key used to retrieve the credential
    """
    try:
      del self._data[key]
    except KeyError:
      pass
    self._write()

  def _get_storage(self, key):
    """Get a Storage object to get/set a credential.

    This Storage is a 'view' into the multistore.

    Args:
      key: The key used to retrieve the credential

    Returns:
      A Storage object that can be used to get/set this cred
    """
    return self._Storage(self, key)
apache-2.0
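A short sketch of how a caller would use the custom-string-key entry point defined above; the filename and key string are illustrative, and the get()/put() methods come from the oauth2client Storage base class the returned object derives from:

from oauth2client import multistore_file

# One file can hold many credentials; each caller picks its own key.
storage = multistore_file.get_credential_storage_custom_string_key(
    'creds.json', 'my-tool/default-user')

credential = storage.get()   # None on first run
if credential is not None and not credential.invalid:
    pass  # e.g. credential.authorize(...) on an httplib2.Http instance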
hzruandd/AutobahnPython
examples/twisted/wamp/pubsub/unsubscribe/backend.py
8
2223
###############################################################################
#
# The MIT License (MIT)
#
# Copyright (c) Tavendo GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
###############################################################################

from __future__ import print_function

from os import environ

from twisted.internet.defer import inlineCallbacks

from autobahn.twisted.util import sleep
from autobahn.twisted.wamp import ApplicationSession, ApplicationRunner


class Component(ApplicationSession):
    """
    An application component that publishes an event every second.
    """

    @inlineCallbacks
    def onJoin(self, details):
        print("session attached")

        counter = 0
        while True:
            print("publish: com.myapp.topic1", counter)
            self.publish(u'com.myapp.topic1', counter)
            counter += 1
            yield sleep(1)


if __name__ == '__main__':
    runner = ApplicationRunner(
        environ.get("AUTOBAHN_DEMO_ROUTER", u"ws://127.0.0.1:8080/ws"),
        u"crossbardemo",
        debug_wamp=False,  # optional; log many WAMP details
        debug=False,       # optional; log even more details
    )
    runner.run(Component)
mit
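The backend above only publishes; since this example lives in the "unsubscribe" directory, a matching frontend sketch that subscribes to com.myapp.topic1 and keeps a handle it can later unsubscribe might look like this (the class name and counting logic are illustrative; subscribe() and Subscription.unsubscribe() are standard Autobahn API):

from __future__ import print_function

from twisted.internet.defer import inlineCallbacks

from autobahn.twisted.wamp import ApplicationSession


class SubscribingComponent(ApplicationSession):
    """Receives events from com.myapp.topic1 and can stop on demand."""

    @inlineCallbacks
    def onJoin(self, details):
        def on_event(counter):
            print("got event:", counter)

        # subscribe() returns a Subscription once the router confirms it
        subscription = yield self.subscribe(on_event, u'com.myapp.topic1')

        # ... later, stop receiving events without leaving the session:
        # yield subscription.unsubscribe()

It can be run with the same ApplicationRunner invocation used by the backend.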
kenshay/ImageScript
ProgramData/SystemFiles/Python/Lib/site-packages/scipy/sparse/csgraph/tests/test_graph_laplacian.py
26
4388
# Author: Gael Varoquaux <[email protected]>
#         Jake Vanderplas <[email protected]>
# License: BSD
from __future__ import division, print_function, absolute_import

import numpy as np
from numpy.testing import assert_allclose, assert_array_almost_equal
from pytest import raises as assert_raises
from scipy import sparse

from scipy.sparse import csgraph


def _explicit_laplacian(x, normed=False):
    if sparse.issparse(x):
        x = x.todense()
    x = np.asarray(x)
    y = -1.0 * x
    for j in range(y.shape[0]):
        y[j,j] = x[j,j+1:].sum() + x[j,:j].sum()
    if normed:
        d = np.diag(y).copy()
        d[d == 0] = 1.0
        y /= d[:,None]**.5
        y /= d[None,:]**.5
    return y


def _check_symmetric_graph_laplacian(mat, normed):
    if not hasattr(mat, 'shape'):
        mat = eval(mat, dict(np=np, sparse=sparse))

    if sparse.issparse(mat):
        sp_mat = mat
        mat = sp_mat.todense()
    else:
        sp_mat = sparse.csr_matrix(mat)

    laplacian = csgraph.laplacian(mat, normed=normed)
    n_nodes = mat.shape[0]
    if not normed:
        assert_array_almost_equal(laplacian.sum(axis=0), np.zeros(n_nodes))
    assert_array_almost_equal(laplacian.T, laplacian)
    assert_array_almost_equal(laplacian,
                              csgraph.laplacian(sp_mat, normed=normed).todense())

    assert_array_almost_equal(laplacian,
                              _explicit_laplacian(mat, normed=normed))


def test_laplacian_value_error():
    for t in int, float, complex:
        for m in ([1, 1],
                  [[[1]]],
                  [[1, 2, 3], [4, 5, 6]],
                  [[1, 2], [3, 4], [5, 5]]):
            A = np.array(m, dtype=t)
            assert_raises(ValueError, csgraph.laplacian, A)


def test_symmetric_graph_laplacian():
    symmetric_mats = ('np.arange(10) * np.arange(10)[:, np.newaxis]',
                      'np.ones((7, 7))',
                      'np.eye(19)',
                      'sparse.diags([1, 1], [-1, 1], shape=(4,4))',
                      'sparse.diags([1, 1], [-1, 1], shape=(4,4)).todense()',
                      'np.asarray(sparse.diags([1, 1], [-1, 1], shape=(4,4)).todense())',
                      'np.vander(np.arange(4)) + np.vander(np.arange(4)).T')

    for mat_str in symmetric_mats:
        for normed in True, False:
            _check_symmetric_graph_laplacian(mat_str, normed)


def _assert_allclose_sparse(a, b, **kwargs):
    # helper function that can deal with sparse matrices
    if sparse.issparse(a):
        a = a.toarray()
    if sparse.issparse(b):
        b = b.toarray()  # was `b = a.toarray()`, which compared `a` to itself
    assert_allclose(a, b, **kwargs)


def _check_laplacian(A, desired_L, desired_d, normed, use_out_degree):
    for arr_type in np.array, sparse.csr_matrix, sparse.coo_matrix:
        for t in int, float, complex:
            adj = arr_type(A, dtype=t)
            L = csgraph.laplacian(adj, normed=normed, return_diag=False,
                                  use_out_degree=use_out_degree)
            _assert_allclose_sparse(L, desired_L, atol=1e-12)
            L, d = csgraph.laplacian(adj, normed=normed, return_diag=True,
                                     use_out_degree=use_out_degree)
            _assert_allclose_sparse(L, desired_L, atol=1e-12)
            _assert_allclose_sparse(d, desired_d, atol=1e-12)


def test_asymmetric_laplacian():
    # adjacency matrix
    A = [[0, 1, 0],
         [4, 2, 0],
         [0, 0, 0]]

    # Laplacian matrix using out-degree
    L = [[1, -1, 0],
         [-4, 4, 0],
         [0, 0, 0]]
    d = [1, 4, 0]
    _check_laplacian(A, L, d, normed=False, use_out_degree=True)

    # normalized Laplacian matrix using out-degree
    L = [[1, -0.5, 0],
         [-2, 1, 0],
         [0, 0, 0]]
    d = [1, 2, 1]
    _check_laplacian(A, L, d, normed=True, use_out_degree=True)

    # Laplacian matrix using in-degree
    L = [[4, -1, 0],
         [-4, 1, 0],
         [0, 0, 0]]
    d = [4, 1, 0]
    _check_laplacian(A, L, d, normed=False, use_out_degree=False)

    # normalized Laplacian matrix using in-degree
    L = [[1, -0.5, 0],
         [-2, 1, 0],
         [0, 0, 0]]
    d = [2, 1, 1]
    _check_laplacian(A, L, d, normed=True, use_out_degree=False)


def test_sparse_formats():
    for fmt in ('csr', 'csc', 'coo', 'lil', 'dok', 'dia', 'bsr'):
        mat = sparse.diags([1, 1], [-1, 1], shape=(4,4), format=fmt)
        for normed in True, False:
            _check_symmetric_graph_laplacian(mat, normed)
gpl-3.0
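As a quick illustration of the API under test, the unnormalized Laplacian of a three-node path graph is D - A, which is easy to verify by hand:

import numpy as np
from scipy.sparse import csgraph

# adjacency of the path graph 0 - 1 - 2
A = np.array([[0, 1, 0],
              [1, 0, 1],
              [0, 1, 0]])

L, d = csgraph.laplacian(A, normed=False, return_diag=True)
print(L)   # [[ 1 -1  0], [-1  2 -1], [ 0 -1  1]]
print(d)   # node degrees: [1 2 1]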
upgoingstar/datasploit
emails/email_scribd.py
2
1462
#!/usr/bin/env python

import base
import re
import sys
import requests
from termcolor import colored

# Control whether the module is enabled or not
ENABLED = True


class style:
    BOLD = '\033[1m'
    END = '\033[0m'


def banner():
    print colored(style.BOLD + '\n---> Searching Scribd Docs\n' + style.END, 'blue')


def main(email):
    req = requests.get('https://www.scribd.com/search?page=1&content_type=documents&query=%s' % (email))
    m = re.findall('(?<=https://www.scribd.com/doc/)\w+', req.text.encode('UTF-8'))
    # de-duplicate the document ids
    m = list(set(m))
    links = []
    # build a link for every id (the original `range(0, len(m) - 1)` dropped the last one)
    for doc_id in m:
        links.append("https://www.scribd.com/doc/" + doc_id)
    return links


def output(data, email=""):
    if data:
        print "Found %s associated SCRIBD documents:\n" % len(data)
        for link in data:
            print link
        print ""
        print colored(style.BOLD + 'More results might be available, please follow this link:' + style.END)
        print "https://www.scribd.com/search?page=1&content_type=documents&query=" + email
    else:
        print colored('[-] No Associated Scribd Documents found.', 'red')


if __name__ == "__main__":
    try:
        email = sys.argv[1]
        banner()
        result = main(email)
        output(result, email)
    except Exception as e:
        print e
        print "Please provide an email as argument"
gpl-3.0
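The scraping in main() hinges on a single lookbehind regex; here is a self-contained check of just that extraction step, with a made-up sample response body:

import re

# hypothetical HTML containing two document links
sample = ('<a href="https://www.scribd.com/doc/12345/foo">x</a>'
          '<a href="https://www.scribd.com/doc/67890/bar">y</a>')

doc_ids = set(re.findall('(?<=https://www.scribd.com/doc/)\w+', sample))
print(sorted(doc_ids))  # ['12345', '67890']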
tumbl3w33d/ansible
test/lib/ansible_test/_internal/manage_ci.py
17
10692
"""Access Ansible Core CI remote services.""" from __future__ import (absolute_import, division, print_function) __metaclass__ = type import os import tempfile import time from .util import ( SubprocessError, ApplicationError, cmd_quote, display, ANSIBLE_TEST_DATA_ROOT, ) from .util_common import ( intercept_command, run_command, ) from .core_ci import ( AnsibleCoreCI, ) from .ansible_util import ( ansible_environment, ) from .config import ( ShellConfig, ) from .payload import ( create_payload, ) class ManageWindowsCI: """Manage access to a Windows instance provided by Ansible Core CI.""" def __init__(self, core_ci): """ :type core_ci: AnsibleCoreCI """ self.core_ci = core_ci self.ssh_args = ['-i', self.core_ci.ssh_key.key] ssh_options = dict( BatchMode='yes', StrictHostKeyChecking='no', UserKnownHostsFile='/dev/null', ServerAliveInterval=15, ServerAliveCountMax=4, ) for ssh_option in sorted(ssh_options): self.ssh_args += ['-o', '%s=%s' % (ssh_option, ssh_options[ssh_option])] def setup(self, python_version): """Used in delegate_remote to setup the host, no action is required for Windows. :type python_version: str """ def wait(self): """Wait for instance to respond to ansible ping.""" extra_vars = [ 'ansible_connection=winrm', 'ansible_host=%s' % self.core_ci.connection.hostname, 'ansible_user=%s' % self.core_ci.connection.username, 'ansible_password=%s' % self.core_ci.connection.password, 'ansible_port=%s' % self.core_ci.connection.port, 'ansible_winrm_server_cert_validation=ignore', ] name = 'windows_%s' % self.core_ci.version env = ansible_environment(self.core_ci.args) cmd = ['ansible', '-m', 'win_ping', '-i', '%s,' % name, name, '-e', ' '.join(extra_vars)] for dummy in range(1, 120): try: intercept_command(self.core_ci.args, cmd, 'ping', env=env, disable_coverage=True) return except SubprocessError: time.sleep(10) raise ApplicationError('Timeout waiting for %s/%s instance %s.' 
% (self.core_ci.platform, self.core_ci.version, self.core_ci.instance_id)) def download(self, remote, local): """ :type remote: str :type local: str """ self.scp('%s@%s:%s' % (self.core_ci.connection.username, self.core_ci.connection.hostname, remote), local) def upload(self, local, remote): """ :type local: str :type remote: str """ self.scp(local, '%s@%s:%s' % (self.core_ci.connection.username, self.core_ci.connection.hostname, remote)) def ssh(self, command, options=None, force_pty=True): """ :type command: str | list[str] :type options: list[str] | None :type force_pty: bool """ if not options: options = [] if force_pty: options.append('-tt') if isinstance(command, list): command = ' '.join(cmd_quote(c) for c in command) run_command(self.core_ci.args, ['ssh', '-q'] + self.ssh_args + options + ['-p', '22', '%s@%s' % (self.core_ci.connection.username, self.core_ci.connection.hostname)] + [command]) def scp(self, src, dst): """ :type src: str :type dst: str """ for dummy in range(1, 10): try: run_command(self.core_ci.args, ['scp'] + self.ssh_args + ['-P', '22', '-q', '-r', src, dst]) return except SubprocessError: time.sleep(10) raise ApplicationError('Failed transfer: %s -> %s' % (src, dst)) class ManageNetworkCI: """Manage access to a network instance provided by Ansible Core CI.""" def __init__(self, core_ci): """ :type core_ci: AnsibleCoreCI """ self.core_ci = core_ci def wait(self): """Wait for instance to respond to ansible ping.""" extra_vars = [ 'ansible_host=%s' % self.core_ci.connection.hostname, 'ansible_port=%s' % self.core_ci.connection.port, 'ansible_connection=local', 'ansible_ssh_private_key_file=%s' % self.core_ci.ssh_key.key, ] name = '%s-%s' % (self.core_ci.platform, self.core_ci.version.replace('.', '-')) env = ansible_environment(self.core_ci.args) cmd = [ 'ansible', '-m', '%s_command' % self.core_ci.platform, '-a', 'commands=?', '-u', self.core_ci.connection.username, '-i', '%s,' % name, '-e', ' '.join(extra_vars), name, ] for dummy in range(1, 90): try: intercept_command(self.core_ci.args, cmd, 'ping', env=env, disable_coverage=True) return except SubprocessError: time.sleep(10) raise ApplicationError('Timeout waiting for %s/%s instance %s.' % (self.core_ci.platform, self.core_ci.version, self.core_ci.instance_id)) class ManagePosixCI: """Manage access to a POSIX instance provided by Ansible Core CI.""" def __init__(self, core_ci): """ :type core_ci: AnsibleCoreCI """ self.core_ci = core_ci self.ssh_args = ['-i', self.core_ci.ssh_key.key] ssh_options = dict( BatchMode='yes', StrictHostKeyChecking='no', UserKnownHostsFile='/dev/null', ServerAliveInterval=15, ServerAliveCountMax=4, ) for ssh_option in sorted(ssh_options): self.ssh_args += ['-o', '%s=%s' % (ssh_option, ssh_options[ssh_option])] if self.core_ci.platform == 'freebsd': if self.core_ci.provider == 'aws': self.become = ['su', '-l', 'root', '-c'] elif self.core_ci.provider == 'azure': self.become = ['sudo', '-in', 'sh', '-c'] else: raise NotImplementedError('provider %s has not been implemented' % self.core_ci.provider) elif self.core_ci.platform == 'osx': self.become = ['sudo', '-in', 'PATH=/usr/local/bin:$PATH'] elif self.core_ci.platform == 'rhel': self.become = ['sudo', '-in', 'bash', '-c'] def setup(self, python_version): """Start instance and wait for it to become ready and respond to an ansible ping. 
:type python_version: str :rtype: str """ pwd = self.wait() display.info('Remote working directory: %s' % pwd, verbosity=1) if isinstance(self.core_ci.args, ShellConfig): if self.core_ci.args.raw: return pwd self.configure(python_version) self.upload_source() return pwd def wait(self): # type: () -> str """Wait for instance to respond to SSH.""" for dummy in range(1, 90): try: stdout = self.ssh('pwd', capture=True)[0] if self.core_ci.args.explain: return '/pwd' pwd = stdout.strip().splitlines()[-1] if not pwd.startswith('/'): raise Exception('Unexpected current working directory "%s" from "pwd" command output:\n%s' % (pwd, stdout)) return pwd except SubprocessError: time.sleep(10) raise ApplicationError('Timeout waiting for %s/%s instance %s.' % (self.core_ci.platform, self.core_ci.version, self.core_ci.instance_id)) def configure(self, python_version): """Configure remote host for testing. :type python_version: str """ self.upload(os.path.join(ANSIBLE_TEST_DATA_ROOT, 'setup', 'remote.sh'), '/tmp') self.ssh('chmod +x /tmp/remote.sh && /tmp/remote.sh %s %s' % (self.core_ci.platform, python_version)) def upload_source(self): """Upload and extract source.""" with tempfile.NamedTemporaryFile(prefix='ansible-source-', suffix='.tgz') as local_source_fd: remote_source_dir = '/tmp' remote_source_path = os.path.join(remote_source_dir, os.path.basename(local_source_fd.name)) create_payload(self.core_ci.args, local_source_fd.name) self.upload(local_source_fd.name, remote_source_dir) self.ssh('rm -rf ~/ansible && mkdir ~/ansible && cd ~/ansible && tar oxzf %s' % remote_source_path) def download(self, remote, local): """ :type remote: str :type local: str """ self.scp('%s@%s:%s' % (self.core_ci.connection.username, self.core_ci.connection.hostname, remote), local) def upload(self, local, remote): """ :type local: str :type remote: str """ self.scp(local, '%s@%s:%s' % (self.core_ci.connection.username, self.core_ci.connection.hostname, remote)) def ssh(self, command, options=None, capture=False): """ :type command: str | list[str] :type options: list[str] | None :type capture: bool :rtype: str | None, str | None """ if not options: options = [] if isinstance(command, list): command = ' '.join(cmd_quote(c) for c in command) return run_command(self.core_ci.args, ['ssh', '-tt', '-q'] + self.ssh_args + options + ['-p', str(self.core_ci.connection.port), '%s@%s' % (self.core_ci.connection.username, self.core_ci.connection.hostname)] + self.become + [cmd_quote(command)], capture=capture) def scp(self, src, dst): """ :type src: str :type dst: str """ for dummy in range(1, 10): try: run_command(self.core_ci.args, ['scp'] + self.ssh_args + ['-P', str(self.core_ci.connection.port), '-q', '-r', src, dst]) return except SubprocessError: time.sleep(10) raise ApplicationError('Failed transfer: %s -> %s' % (src, dst))
gpl-3.0
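The wait() and scp() methods above all share one retry shape: attempt the operation, sleep ten seconds on failure, and give up after a fixed number of rounds. A generic, dependency-free sketch of that pattern (names and defaults are illustrative, not from the module):

import time

def retry(func, attempts=10, delay=10, exceptions=(Exception,)):
    """Call func() until it succeeds or `attempts` tries are exhausted."""
    for _ in range(attempts):
        try:
            return func()
        except exceptions:
            time.sleep(delay)
    raise RuntimeError('gave up after %d attempts' % attempts)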
mnahm5/django-estore
Lib/site-packages/cryptography/hazmat/primitives/asymmetric/rsa.py
17
10226
# This file is dual licensed under the terms of the Apache License, Version # 2.0, and the BSD License. See the LICENSE file in the root of this repository # for complete details. from __future__ import absolute_import, division, print_function import abc from fractions import gcd import six from cryptography import utils from cryptography.exceptions import UnsupportedAlgorithm, _Reasons from cryptography.hazmat.backends.interfaces import RSABackend @six.add_metaclass(abc.ABCMeta) class RSAPrivateKey(object): @abc.abstractmethod def signer(self, padding, algorithm): """ Returns an AsymmetricSignatureContext used for signing data. """ @abc.abstractmethod def decrypt(self, ciphertext, padding): """ Decrypts the provided ciphertext. """ @abc.abstractproperty def key_size(self): """ The bit length of the public modulus. """ @abc.abstractmethod def public_key(self): """ The RSAPublicKey associated with this private key. """ @abc.abstractmethod def sign(self, data, padding, algorithm): """ Signs the data. """ @six.add_metaclass(abc.ABCMeta) class RSAPrivateKeyWithSerialization(RSAPrivateKey): @abc.abstractmethod def private_numbers(self): """ Returns an RSAPrivateNumbers. """ @abc.abstractmethod def private_bytes(self, encoding, format, encryption_algorithm): """ Returns the key serialized as bytes. """ @six.add_metaclass(abc.ABCMeta) class RSAPublicKey(object): @abc.abstractmethod def verifier(self, signature, padding, algorithm): """ Returns an AsymmetricVerificationContext used for verifying signatures. """ @abc.abstractmethod def encrypt(self, plaintext, padding): """ Encrypts the given plaintext. """ @abc.abstractproperty def key_size(self): """ The bit length of the public modulus. """ @abc.abstractmethod def public_numbers(self): """ Returns an RSAPublicNumbers """ @abc.abstractmethod def public_bytes(self, encoding, format): """ Returns the key serialized as bytes. """ @abc.abstractmethod def verify(self, signature, data, padding, algorithm): """ Verifies the signature of the data. 
""" RSAPublicKeyWithSerialization = RSAPublicKey def generate_private_key(public_exponent, key_size, backend): if not isinstance(backend, RSABackend): raise UnsupportedAlgorithm( "Backend object does not implement RSABackend.", _Reasons.BACKEND_MISSING_INTERFACE ) _verify_rsa_parameters(public_exponent, key_size) return backend.generate_rsa_private_key(public_exponent, key_size) def _verify_rsa_parameters(public_exponent, key_size): if public_exponent < 3: raise ValueError("public_exponent must be >= 3.") if public_exponent & 1 == 0: raise ValueError("public_exponent must be odd.") if key_size < 512: raise ValueError("key_size must be at least 512-bits.") def _check_private_key_components(p, q, private_exponent, dmp1, dmq1, iqmp, public_exponent, modulus): if modulus < 3: raise ValueError("modulus must be >= 3.") if p >= modulus: raise ValueError("p must be < modulus.") if q >= modulus: raise ValueError("q must be < modulus.") if dmp1 >= modulus: raise ValueError("dmp1 must be < modulus.") if dmq1 >= modulus: raise ValueError("dmq1 must be < modulus.") if iqmp >= modulus: raise ValueError("iqmp must be < modulus.") if private_exponent >= modulus: raise ValueError("private_exponent must be < modulus.") if public_exponent < 3 or public_exponent >= modulus: raise ValueError("public_exponent must be >= 3 and < modulus.") if public_exponent & 1 == 0: raise ValueError("public_exponent must be odd.") if dmp1 & 1 == 0: raise ValueError("dmp1 must be odd.") if dmq1 & 1 == 0: raise ValueError("dmq1 must be odd.") if p * q != modulus: raise ValueError("p*q must equal modulus.") def _check_public_key_components(e, n): if n < 3: raise ValueError("n must be >= 3.") if e < 3 or e >= n: raise ValueError("e must be >= 3 and < n.") if e & 1 == 0: raise ValueError("e must be odd.") def _modinv(e, m): """ Modular Multiplicative Inverse. Returns x such that: (x*e) mod m == 1 """ x1, y1, x2, y2 = 1, 0, 0, 1 a, b = e, m while b > 0: q, r = divmod(a, b) xn, yn = x1 - q * x2, y1 - q * y2 a, b, x1, y1, x2, y2 = b, r, x2, y2, xn, yn return x1 % m def rsa_crt_iqmp(p, q): """ Compute the CRT (q ** -1) % p value from RSA primes p and q. """ return _modinv(q, p) def rsa_crt_dmp1(private_exponent, p): """ Compute the CRT private_exponent % (p - 1) value from the RSA private_exponent (d) and p. """ return private_exponent % (p - 1) def rsa_crt_dmq1(private_exponent, q): """ Compute the CRT private_exponent % (q - 1) value from the RSA private_exponent (d) and q. """ return private_exponent % (q - 1) # Controls the number of iterations rsa_recover_prime_factors will perform # to obtain the prime factors. Each iteration increments by 2 so the actual # maximum attempts is half this number. _MAX_RECOVERY_ATTEMPTS = 1000 def rsa_recover_prime_factors(n, e, d): """ Compute factors p and q from the private exponent d. We assume that n has no more than two factors. This function is adapted from code in PyCrypto. """ # See 8.2.2(i) in Handbook of Applied Cryptography. ktot = d * e - 1 # The quantity d*e-1 is a multiple of phi(n), even, # and can be represented as t*2^s. t = ktot while t % 2 == 0: t = t // 2 # Cycle through all multiplicative inverses in Zn. # The algorithm is non-deterministic, but there is a 50% chance # any candidate a leads to successful factoring. # See "Digitalized Signatures and Public Key Functions as Intractable # as Factorization", M. 
Rabin, 1979 spotted = False a = 2 while not spotted and a < _MAX_RECOVERY_ATTEMPTS: k = t # Cycle through all values a^{t*2^i}=a^k while k < ktot: cand = pow(a, k, n) # Check if a^k is a non-trivial root of unity (mod n) if cand != 1 and cand != (n - 1) and pow(cand, 2, n) == 1: # We have found a number such that (cand-1)(cand+1)=0 (mod n). # Either of the terms divides n. p = gcd(cand + 1, n) spotted = True break k *= 2 # This value was not any good... let's try another! a += 2 if not spotted: raise ValueError("Unable to compute factors p and q from exponent d.") # Found ! q, r = divmod(n, p) assert r == 0 p, q = sorted((p, q), reverse=True) return (p, q) class RSAPrivateNumbers(object): def __init__(self, p, q, d, dmp1, dmq1, iqmp, public_numbers): if ( not isinstance(p, six.integer_types) or not isinstance(q, six.integer_types) or not isinstance(d, six.integer_types) or not isinstance(dmp1, six.integer_types) or not isinstance(dmq1, six.integer_types) or not isinstance(iqmp, six.integer_types) ): raise TypeError( "RSAPrivateNumbers p, q, d, dmp1, dmq1, iqmp arguments must" " all be an integers." ) if not isinstance(public_numbers, RSAPublicNumbers): raise TypeError( "RSAPrivateNumbers public_numbers must be an RSAPublicNumbers" " instance." ) self._p = p self._q = q self._d = d self._dmp1 = dmp1 self._dmq1 = dmq1 self._iqmp = iqmp self._public_numbers = public_numbers p = utils.read_only_property("_p") q = utils.read_only_property("_q") d = utils.read_only_property("_d") dmp1 = utils.read_only_property("_dmp1") dmq1 = utils.read_only_property("_dmq1") iqmp = utils.read_only_property("_iqmp") public_numbers = utils.read_only_property("_public_numbers") def private_key(self, backend): return backend.load_rsa_private_numbers(self) def __eq__(self, other): if not isinstance(other, RSAPrivateNumbers): return NotImplemented return ( self.p == other.p and self.q == other.q and self.d == other.d and self.dmp1 == other.dmp1 and self.dmq1 == other.dmq1 and self.iqmp == other.iqmp and self.public_numbers == other.public_numbers ) def __ne__(self, other): return not self == other def __hash__(self): return hash(( self.p, self.q, self.d, self.dmp1, self.dmq1, self.iqmp, self.public_numbers, )) class RSAPublicNumbers(object): def __init__(self, e, n): if ( not isinstance(e, six.integer_types) or not isinstance(n, six.integer_types) ): raise TypeError("RSAPublicNumbers arguments must be integers.") self._e = e self._n = n e = utils.read_only_property("_e") n = utils.read_only_property("_n") def public_key(self, backend): return backend.load_rsa_public_numbers(self) def __repr__(self): return "<RSAPublicNumbers(e={0.e}, n={0.n})>".format(self) def __eq__(self, other): if not isinstance(other, RSAPublicNumbers): return NotImplemented return self.e == other.e and self.n == other.n def __ne__(self, other): return not self == other def __hash__(self): return hash((self.e, self.n))
mit
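The CRT helpers defined above are plain modular arithmetic, so they can be sanity-checked with a textbook toy key (p=61, q=53, e=17; far too small for real use). This sketch calls the module's own helpers, including the private _modinv, purely for illustration:

from cryptography.hazmat.primitives.asymmetric.rsa import (
    _modinv, rsa_crt_dmp1, rsa_crt_dmq1, rsa_crt_iqmp)

p, q, e = 61, 53, 17
n = p * q                  # 3233
phi = (p - 1) * (q - 1)    # 3120
d = _modinv(e, phi)        # 2753, since 17 * 2753 % 3120 == 1

assert rsa_crt_dmp1(d, p) == d % (p - 1)
assert rsa_crt_dmq1(d, q) == d % (q - 1)
assert (rsa_crt_iqmp(p, q) * q) % p == 1   # q * (q^-1 mod p) == 1 (mod p)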
defance/edx-platform
lms/djangoapps/ccx/tests/test_field_override_performance.py
27
9659
# coding=UTF-8 """ Performance tests for field overrides. """ import ddt import itertools import mock from nose.plugins.skip import SkipTest from courseware.views import progress from courseware.field_overrides import OverrideFieldData from datetime import datetime from django.conf import settings from django.core.cache import caches from django.test.client import RequestFactory from django.test.utils import override_settings from edxmako.middleware import MakoMiddleware from nose.plugins.attrib import attr from pytz import UTC from request_cache.middleware import RequestCache from student.models import CourseEnrollment from student.tests.factories import UserFactory from xblock.core import XBlock from xmodule.modulestore.django import modulestore from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase, \ TEST_DATA_SPLIT_MODULESTORE, TEST_DATA_MONGO_MODULESTORE from xmodule.modulestore.tests.factories import check_mongo_calls_range, CourseFactory, check_sum_of_calls from xmodule.modulestore.tests.utils import ProceduralCourseTestMixin from ccx_keys.locator import CCXLocator from lms.djangoapps.ccx.tests.factories import CcxFactory @attr('shard_1') @mock.patch.dict( 'django.conf.settings.FEATURES', { 'ENABLE_XBLOCK_VIEW_ENDPOINT': True, 'ENABLE_MAX_SCORE_CACHE': False, } ) @ddt.ddt class FieldOverridePerformanceTestCase(ProceduralCourseTestMixin, ModuleStoreTestCase): """ Base class for instrumenting SQL queries and Mongo reads for field override providers. """ __test__ = False # TEST_DATA must be overridden by subclasses TEST_DATA = None def setUp(self): """ Create a test client, course, and user. """ super(FieldOverridePerformanceTestCase, self).setUp() self.request_factory = RequestFactory() self.student = UserFactory.create() self.request = self.request_factory.get("foo") self.request.user = self.student self.course = None self.ccx = None MakoMiddleware().process_request(self.request) def setup_course(self, size, enable_ccx, view_as_ccx): """ Build a gradable course where each node has `size` children. """ grading_policy = { "GRADER": [ { "drop_count": 2, "min_count": 12, "short_label": "HW", "type": "Homework", "weight": 0.15 }, { "drop_count": 2, "min_count": 12, "type": "Lab", "weight": 0.15 }, { "drop_count": 0, "min_count": 1, "short_label": "Midterm", "type": "Midterm Exam", "weight": 0.3 }, { "drop_count": 0, "min_count": 1, "short_label": "Final", "type": "Final Exam", "weight": 0.4 } ], "GRADE_CUTOFFS": { "Pass": 0.5 } } self.course = CourseFactory.create( graded=True, start=datetime.now(UTC), grading_policy=grading_policy, enable_ccx=enable_ccx, ) self.populate_course(size) course_key = self.course.id if enable_ccx: self.ccx = CcxFactory.create(course_id=self.course.id) if view_as_ccx: course_key = CCXLocator.from_course_locator(self.course.id, self.ccx.id) CourseEnrollment.enroll( self.student, course_key ) def grade_course(self, course, view_as_ccx): """ Renders the progress page for the given course. """ course_key = course.id if view_as_ccx: course_key = CCXLocator.from_course_locator(course_key, self.ccx.id) return progress( self.request, course_id=unicode(course_key), student_id=self.student.id ) def assertMongoCallCount(self, calls): """ Assert that mongodb is queried ``calls`` times in the surrounded context. """ return check_mongo_calls_range(max_finds=calls) def assertXBlockInstantiations(self, instantiations): """ Assert that exactly ``instantiations`` XBlocks are instantiated in the surrounded context. 
""" return check_sum_of_calls(XBlock, ['__init__'], instantiations, instantiations, include_arguments=False) def instrument_course_progress_render(self, course_width, enable_ccx, view_as_ccx, queries, reads, xblocks): """ Renders the progress page, instrumenting Mongo reads and SQL queries. """ self.setup_course(course_width, enable_ccx, view_as_ccx) # Switch to published-only mode to simulate the LMS with self.settings(MODULESTORE_BRANCH='published-only'): # Clear all caches before measuring for cache in settings.CACHES: caches[cache].clear() # Refill the metadata inheritance cache modulestore().get_course(self.course.id, depth=None) # We clear the request cache to simulate a new request in the LMS. RequestCache.clear_request_cache() # Reset the list of provider classes, so that our django settings changes # can actually take affect. OverrideFieldData.provider_classes = None with self.assertNumQueries(queries): with self.assertMongoCallCount(reads): with self.assertXBlockInstantiations(xblocks): self.grade_course(self.course, view_as_ccx) @ddt.data(*itertools.product(('no_overrides', 'ccx'), range(1, 4), (True, False), (True, False))) @ddt.unpack @override_settings( FIELD_OVERRIDE_PROVIDERS=(), ) def test_field_overrides(self, overrides, course_width, enable_ccx, view_as_ccx): """ Test without any field overrides. """ providers = { 'no_overrides': (), 'ccx': ('ccx.overrides.CustomCoursesForEdxOverrideProvider',) } if overrides == 'no_overrides' and view_as_ccx: raise SkipTest("Can't view a ccx course if field overrides are disabled.") if not enable_ccx and view_as_ccx: raise SkipTest("Can't view a ccx course if ccx is disabled on the course") if self.MODULESTORE == TEST_DATA_MONGO_MODULESTORE and view_as_ccx: raise SkipTest("Can't use a MongoModulestore test as a CCX course") with self.settings(FIELD_OVERRIDE_PROVIDERS=providers[overrides]): queries, reads, xblocks = self.TEST_DATA[(overrides, course_width, enable_ccx, view_as_ccx)] self.instrument_course_progress_render(course_width, enable_ccx, view_as_ccx, queries, reads, xblocks) class TestFieldOverrideMongoPerformance(FieldOverridePerformanceTestCase): """ Test cases for instrumenting field overrides against the Mongo modulestore. """ MODULESTORE = TEST_DATA_MONGO_MODULESTORE __test__ = True TEST_DATA = { # (providers, course_width, enable_ccx, view_as_ccx): # of sql queries, # of mongo queries, # of xblocks ('no_overrides', 1, True, False): (48, 6, 13), ('no_overrides', 2, True, False): (135, 6, 84), ('no_overrides', 3, True, False): (480, 6, 335), ('ccx', 1, True, False): (48, 6, 13), ('ccx', 2, True, False): (135, 6, 84), ('ccx', 3, True, False): (480, 6, 335), ('ccx', 1, True, True): (48, 6, 13), ('ccx', 2, True, True): (135, 6, 84), ('ccx', 3, True, True): (480, 6, 335), ('no_overrides', 1, False, False): (48, 6, 13), ('no_overrides', 2, False, False): (135, 6, 84), ('no_overrides', 3, False, False): (480, 6, 335), ('ccx', 1, False, False): (48, 6, 13), ('ccx', 2, False, False): (135, 6, 84), ('ccx', 3, False, False): (480, 6, 335), ('ccx', 1, False, True): (48, 6, 13), ('ccx', 2, False, True): (135, 6, 84), ('ccx', 3, False, True): (480, 6, 335), } class TestFieldOverrideSplitPerformance(FieldOverridePerformanceTestCase): """ Test cases for instrumenting field overrides against the Split modulestore. 
""" MODULESTORE = TEST_DATA_SPLIT_MODULESTORE __test__ = True TEST_DATA = { ('no_overrides', 1, True, False): (48, 4, 9), ('no_overrides', 2, True, False): (135, 19, 54), ('no_overrides', 3, True, False): (480, 84, 215), ('ccx', 1, True, False): (48, 4, 9), ('ccx', 2, True, False): (135, 19, 54), ('ccx', 3, True, False): (480, 84, 215), ('ccx', 1, True, True): (50, 4, 13), ('ccx', 2, True, True): (137, 19, 84), ('ccx', 3, True, True): (482, 84, 335), ('no_overrides', 1, False, False): (48, 4, 9), ('no_overrides', 2, False, False): (135, 19, 54), ('no_overrides', 3, False, False): (480, 84, 215), ('ccx', 1, False, False): (48, 4, 9), ('ccx', 2, False, False): (135, 19, 54), ('ccx', 3, False, False): (480, 84, 215), ('ccx', 1, False, True): (48, 4, 9), ('ccx', 2, False, True): (135, 19, 54), ('ccx', 3, False, True): (480, 84, 215), }
agpl-3.0
zaccoz/odoo
addons/web_diagram/controllers/main.py
268
4321
import openerp from openerp.tools.safe_eval import safe_eval as eval class DiagramView(openerp.http.Controller): @openerp.http.route('/web_diagram/diagram/get_diagram_info', type='json', auth='user') def get_diagram_info(self, req, id, model, node, connector, src_node, des_node, label, **kw): visible_node_fields = kw.get('visible_node_fields',[]) invisible_node_fields = kw.get('invisible_node_fields',[]) node_fields_string = kw.get('node_fields_string',[]) connector_fields = kw.get('connector_fields',[]) connector_fields_string = kw.get('connector_fields_string',[]) bgcolors = {} shapes = {} bgcolor = kw.get('bgcolor','') shape = kw.get('shape','') if bgcolor: for color_spec in bgcolor.split(';'): if color_spec: colour, color_state = color_spec.split(':') bgcolors[colour] = color_state if shape: for shape_spec in shape.split(';'): if shape_spec: shape_colour, shape_color_state = shape_spec.split(':') shapes[shape_colour] = shape_color_state ir_view = req.session.model('ir.ui.view') graphs = ir_view.graph_get( int(id), model, node, connector, src_node, des_node, label, (140, 180), req.session.context) nodes = graphs['nodes'] transitions = graphs['transitions'] isolate_nodes = {} for blnk_node in graphs['blank_nodes']: isolate_nodes[blnk_node['id']] = blnk_node else: y = map(lambda t: t['y'],filter(lambda x: x['y'] if x['x']==20 else None, nodes.values())) y_max = (y and max(y)) or 120 connectors = {} list_tr = [] for tr in transitions: list_tr.append(tr) connectors.setdefault(tr, { 'id': int(tr), 's_id': transitions[tr][0], 'd_id': transitions[tr][1] }) connector_tr = req.session.model(connector) connector_ids = connector_tr.search([('id', 'in', list_tr)], 0, 0, 0, req.session.context) data_connectors =connector_tr.read(connector_ids, connector_fields, req.session.context) for tr in data_connectors: transition_id = str(tr['id']) _sourceid, label = graphs['label'][transition_id] t = connectors[transition_id] t.update( source=tr[src_node][1], destination=tr[des_node][1], options={}, signal=label ) for i, fld in enumerate(connector_fields): t['options'][connector_fields_string[i]] = tr[fld] fields = req.session.model('ir.model.fields') field_ids = fields.search([('model', '=', model), ('relation', '=', node)], 0, 0, 0, req.session.context) field_data = fields.read(field_ids, ['relation_field'], req.session.context) node_act = req.session.model(node) search_acts = node_act.search([(field_data[0]['relation_field'], '=', id)], 0, 0, 0, req.session.context) data_acts = node_act.read(search_acts, invisible_node_fields + visible_node_fields, req.session.context) for act in data_acts: n = nodes.get(str(act['id'])) if not n: n = isolate_nodes.get(act['id'], {}) y_max += 140 n.update(x=20, y=y_max) nodes[act['id']] = n n.update( id=act['id'], color='white', options={} ) for color, expr in bgcolors.items(): if eval(expr, act): n['color'] = color for shape, expr in shapes.items(): if eval(expr, act): n['shape'] = shape for i, fld in enumerate(visible_node_fields): n['options'][node_fields_string[i]] = act[fld] _id, name = req.session.model(model).name_get([id], req.session.context)[0] return dict(nodes=nodes, conn=connectors, name=name, parent_field=graphs['node_parent_field'])
agpl-3.0
mkennedy04/knodj
env/Lib/site-packages/django/utils/crypto.py
82
6844
""" Django's standard crypto functions and utilities. """ from __future__ import unicode_literals import binascii import hashlib import hmac import random import struct import time from django.conf import settings from django.utils import six from django.utils.encoding import force_bytes from django.utils.six.moves import range try: random = random.SystemRandom() using_sysrandom = True except NotImplementedError: import warnings warnings.warn('A secure pseudo-random number generator is not available ' 'on your system. Falling back to Mersenne Twister.') using_sysrandom = False def salted_hmac(key_salt, value, secret=None): """ Returns the HMAC-SHA1 of 'value', using a key generated from key_salt and a secret (which defaults to settings.SECRET_KEY). A different key_salt should be passed in for every application of HMAC. """ if secret is None: secret = settings.SECRET_KEY key_salt = force_bytes(key_salt) secret = force_bytes(secret) # We need to generate a derived key from our base key. We can do this by # passing the key_salt and our base key through a pseudo-random function and # SHA1 works nicely. key = hashlib.sha1(key_salt + secret).digest() # If len(key_salt + secret) > sha_constructor().block_size, the above # line is redundant and could be replaced by key = key_salt + secret, since # the hmac module does the same thing for keys longer than the block size. # However, we need to ensure that we *always* do this. return hmac.new(key, msg=force_bytes(value), digestmod=hashlib.sha1) def get_random_string(length=12, allowed_chars='abcdefghijklmnopqrstuvwxyz' 'ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'): """ Returns a securely generated random string. The default length of 12 with the a-z, A-Z, 0-9 character set returns a 71-bit value. log_2((26+26+10)^12) =~ 71 bits """ if not using_sysrandom: # This is ugly, and a hack, but it makes things better than # the alternative of predictability. This re-seeds the PRNG # using a value that is hard for an attacker to predict, every # time a random string is required. This may change the # properties of the chosen random sequence slightly, but this # is better than absolute predictability. random.seed( hashlib.sha256( ("%s%s%s" % ( random.getstate(), time.time(), settings.SECRET_KEY)).encode('utf-8') ).digest()) return ''.join(random.choice(allowed_chars) for i in range(length)) if hasattr(hmac, "compare_digest"): # Prefer the stdlib implementation, when available. def constant_time_compare(val1, val2): return hmac.compare_digest(force_bytes(val1), force_bytes(val2)) else: def constant_time_compare(val1, val2): """ Returns True if the two strings are equal, False otherwise. The time taken is independent of the number of characters that match. For the sake of simplicity, this function executes in constant time only when the two strings have the same length. It short-circuits when they have different lengths. Since Django only uses it to compare hashes of known expected length, this is acceptable. """ if len(val1) != len(val2): return False result = 0 if six.PY3 and isinstance(val1, bytes) and isinstance(val2, bytes): for x, y in zip(val1, val2): result |= x ^ y else: for x, y in zip(val1, val2): result |= ord(x) ^ ord(y) return result == 0 def _bin_to_long(x): """ Convert a binary string into a long integer This is a clever optimization for fast xor vector math """ return int(binascii.hexlify(x), 16) def _long_to_bin(x, hex_format_string): """ Convert a long integer into a binary string. hex_format_string is like "%020x" for padding 10 characters. 
""" return binascii.unhexlify((hex_format_string % x).encode('ascii')) if hasattr(hashlib, "pbkdf2_hmac"): def pbkdf2(password, salt, iterations, dklen=0, digest=None): """ Implements PBKDF2 with the same API as Django's existing implementation, using the stdlib. This is used in Python 2.7.8+ and 3.4+. """ if digest is None: digest = hashlib.sha256 if not dklen: dklen = None password = force_bytes(password) salt = force_bytes(salt) return hashlib.pbkdf2_hmac( digest().name, password, salt, iterations, dklen) else: def pbkdf2(password, salt, iterations, dklen=0, digest=None): """ Implements PBKDF2 as defined in RFC 2898, section 5.2 HMAC+SHA256 is used as the default pseudo random function. As of 2014, 100,000 iterations was the recommended default which took 100ms on a 2.7Ghz Intel i7 with an optimized implementation. This is probably the bare minimum for security given 1000 iterations was recommended in 2001. This code is very well optimized for CPython and is about five times slower than OpenSSL's implementation. Look in django.contrib.auth.hashers for the present default, it is lower than the recommended 100,000 because of the performance difference between this and an optimized implementation. """ assert iterations > 0 if not digest: digest = hashlib.sha256 password = force_bytes(password) salt = force_bytes(salt) hlen = digest().digest_size if not dklen: dklen = hlen if dklen > (2 ** 32 - 1) * hlen: raise OverflowError('dklen too big') l = -(-dklen // hlen) r = dklen - (l - 1) * hlen hex_format_string = "%%0%ix" % (hlen * 2) inner, outer = digest(), digest() if len(password) > inner.block_size: password = digest(password).digest() password += b'\x00' * (inner.block_size - len(password)) inner.update(password.translate(hmac.trans_36)) outer.update(password.translate(hmac.trans_5C)) def F(i): u = salt + struct.pack(b'>I', i) result = 0 for j in range(int(iterations)): dig1, dig2 = inner.copy(), outer.copy() dig1.update(u) dig2.update(dig1.digest()) u = dig2.digest() result ^= _bin_to_long(u) return _long_to_bin(result, hex_format_string) T = [F(x) for x in range(1, l)] return b''.join(T) + F(l)[:r]
mit
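A small standalone check of the PBKDF2 path: when hashlib.pbkdf2_hmac is available, the first pbkdf2() branch above delegates straight to the stdlib primitive shown here (the password, salt, and iteration count are illustrative):

import binascii
import hashlib

# The stdlib call that the hashlib-backed pbkdf2() branch wraps.
derived = hashlib.pbkdf2_hmac('sha256', b'password', b'salt', 100000, 32)
print(len(derived))              # 32 bytes, i.e. one SHA-256 block of output
print(binascii.hexlify(derived))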
gunan/tensorflow
tensorflow/python/platform/sysconfig.py
3
2718
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""System configuration library."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os.path as _os_path
import platform as _platform

from tensorflow.python.framework.versions import CXX11_ABI_FLAG as _CXX11_ABI_FLAG
from tensorflow.python.framework.versions import MONOLITHIC_BUILD as _MONOLITHIC_BUILD
from tensorflow.python.framework.versions import VERSION as _VERSION
from tensorflow.python.util.tf_export import tf_export


# pylint: disable=g-import-not-at-top
@tf_export('sysconfig.get_include')
def get_include():
  """Get the directory containing the TensorFlow C++ header files.

  Returns:
    The directory as string.
  """
  # Import inside the function.
  # sysconfig is imported from the tensorflow module, so having this
  # import at the top would cause a circular import, resulting in
  # the tensorflow module missing symbols that come after sysconfig.
  import tensorflow as tf
  return _os_path.join(_os_path.dirname(tf.__file__), 'include')


@tf_export('sysconfig.get_lib')
def get_lib():
  """Get the directory containing the TensorFlow framework library.

  Returns:
    The directory as string.
  """
  import tensorflow as tf
  return _os_path.join(_os_path.dirname(tf.__file__))


@tf_export('sysconfig.get_compile_flags')
def get_compile_flags():
  """Get the compilation flags for custom operators.

  Returns:
    The compilation flags.
  """
  flags = []
  flags.append('-I%s' % get_include())
  flags.append('-D_GLIBCXX_USE_CXX11_ABI=%d' % _CXX11_ABI_FLAG)
  return flags


@tf_export('sysconfig.get_link_flags')
def get_link_flags():
  """Get the link flags for custom operators.

  Returns:
    The link flags.
  """
  is_mac = _platform.system() == 'Darwin'
  ver = _VERSION.split('.')[0]
  flags = []
  if not _MONOLITHIC_BUILD:
    flags.append('-L%s' % get_lib())
    if is_mac:
      flags.append('-ltensorflow_framework.%s' % ver)
    else:
      flags.append('-l:libtensorflow_framework.so.%s' % ver)
  return flags
apache-2.0
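The two flag helpers above are what out-of-tree custom ops compile against. A minimal sketch of how they might be combined into a build command (assumes an installed TensorFlow exposing tf.sysconfig; zero_out.cc is a hypothetical op source file and the g++ invocation is illustrative):

import subprocess
import tensorflow as tf

cflags = tf.sysconfig.get_compile_flags()  # include dir + CXX11 ABI define
lflags = tf.sysconfig.get_link_flags()     # framework lib dir + -l flag

# Assemble and run the compiler command with the flags defined above.
cmd = (['g++', '-std=c++11', '-shared', 'zero_out.cc', '-o', 'zero_out.so',
        '-fPIC'] + cflags + lflags + ['-O2'])
subprocess.check_call(cmd)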
1013553207/django
django/utils/text.py
308
14923
from __future__ import unicode_literals import re import unicodedata from gzip import GzipFile from io import BytesIO from django.utils import six from django.utils.encoding import force_text from django.utils.functional import SimpleLazyObject, allow_lazy from django.utils.safestring import SafeText, mark_safe from django.utils.six.moves import html_entities from django.utils.translation import pgettext, ugettext as _, ugettext_lazy if six.PY2: # Import force_unicode even though this module doesn't use it, because some # people rely on it being here. from django.utils.encoding import force_unicode # NOQA # Capitalizes the first letter of a string. capfirst = lambda x: x and force_text(x)[0].upper() + force_text(x)[1:] capfirst = allow_lazy(capfirst, six.text_type) # Set up regular expressions re_words = re.compile(r'<.*?>|((?:\w[-\w]*|&.*?;)+)', re.U | re.S) re_chars = re.compile(r'<.*?>|(.)', re.U | re.S) re_tag = re.compile(r'<(/)?([^ ]+?)(?:(\s*/)| .*?)?>', re.S) re_newlines = re.compile(r'\r\n|\r') # Used in normalize_newlines re_camel_case = re.compile(r'(((?<=[a-z])[A-Z])|([A-Z](?![A-Z]|$)))') def wrap(text, width): """ A word-wrap function that preserves existing line breaks. Expects that existing line breaks are posix newlines. All white space is preserved except added line breaks consume the space on which they break the line. Long words are not wrapped, so the output text may have lines longer than ``width``. """ text = force_text(text) def _generator(): for line in text.splitlines(True): # True keeps trailing linebreaks max_width = min((line.endswith('\n') and width + 1 or width), width) while len(line) > max_width: space = line[:max_width + 1].rfind(' ') + 1 if space == 0: space = line.find(' ') + 1 if space == 0: yield line line = '' break yield '%s\n' % line[:space - 1] line = line[space:] max_width = min((line.endswith('\n') and width + 1 or width), width) if line: yield line return ''.join(_generator()) wrap = allow_lazy(wrap, six.text_type) class Truncator(SimpleLazyObject): """ An object used to truncate text, either by characters or words. """ def __init__(self, text): super(Truncator, self).__init__(lambda: force_text(text)) def add_truncation_text(self, text, truncate=None): if truncate is None: truncate = pgettext( 'String to return when truncating text', '%(truncated_text)s...') truncate = force_text(truncate) if '%(truncated_text)s' in truncate: return truncate % {'truncated_text': text} # The truncation text didn't contain the %(truncated_text)s string # replacement argument so just append it to the text. if text.endswith(truncate): # But don't append the truncation text if the current text already # ends in this. return text return '%s%s' % (text, truncate) def chars(self, num, truncate=None, html=False): """ Returns the text truncated to be no longer than the specified number of characters. Takes an optional argument of what should be used to notify that the string has been truncated, defaulting to a translatable string of an ellipsis (...). 
""" length = int(num) text = unicodedata.normalize('NFC', self._wrapped) # Calculate the length to truncate to (max length - end_text length) truncate_len = length for char in self.add_truncation_text('', truncate): if not unicodedata.combining(char): truncate_len -= 1 if truncate_len == 0: break if html: return self._truncate_html(length, truncate, text, truncate_len, False) return self._text_chars(length, truncate, text, truncate_len) chars = allow_lazy(chars) def _text_chars(self, length, truncate, text, truncate_len): """ Truncates a string after a certain number of chars. """ s_len = 0 end_index = None for i, char in enumerate(text): if unicodedata.combining(char): # Don't consider combining characters # as adding to the string length continue s_len += 1 if end_index is None and s_len > truncate_len: end_index = i if s_len > length: # Return the truncated string return self.add_truncation_text(text[:end_index or 0], truncate) # Return the original string since no truncation was necessary return text def words(self, num, truncate=None, html=False): """ Truncates a string after a certain number of words. Takes an optional argument of what should be used to notify that the string has been truncated, defaulting to ellipsis (...). """ length = int(num) if html: return self._truncate_html(length, truncate, self._wrapped, length, True) return self._text_words(length, truncate) words = allow_lazy(words) def _text_words(self, length, truncate): """ Truncates a string after a certain number of words. Newlines in the string will be stripped. """ words = self._wrapped.split() if len(words) > length: words = words[:length] return self.add_truncation_text(' '.join(words), truncate) return ' '.join(words) def _truncate_html(self, length, truncate, text, truncate_len, words): """ Truncates HTML to a certain number of chars (not counting tags and comments), or, if words is True, then to a certain number of words. Closes opened tags if they were correctly closed in the given HTML. Newlines in the HTML are preserved. 
""" if words and length <= 0: return '' html4_singlets = ( 'br', 'col', 'link', 'base', 'img', 'param', 'area', 'hr', 'input' ) # Count non-HTML chars/words and keep note of open tags pos = 0 end_text_pos = 0 current_len = 0 open_tags = [] regex = re_words if words else re_chars while current_len <= length: m = regex.search(text, pos) if not m: # Checked through whole string break pos = m.end(0) if m.group(1): # It's an actual non-HTML word or char current_len += 1 if current_len == truncate_len: end_text_pos = pos continue # Check for tag tag = re_tag.match(m.group(0)) if not tag or current_len >= truncate_len: # Don't worry about non tags or tags after our truncate point continue closing_tag, tagname, self_closing = tag.groups() # Element names are always case-insensitive tagname = tagname.lower() if self_closing or tagname in html4_singlets: pass elif closing_tag: # Check for match in open tags list try: i = open_tags.index(tagname) except ValueError: pass else: # SGML: An end tag closes, back to the matching start tag, # all unclosed intervening start tags with omitted end tags open_tags = open_tags[i + 1:] else: # Add it to the start of the open tags list open_tags.insert(0, tagname) if current_len <= length: return text out = text[:end_text_pos] truncate_text = self.add_truncation_text('', truncate) if truncate_text: out += truncate_text # Close any tags still open for tag in open_tags: out += '</%s>' % tag # Return string return out def get_valid_filename(s): """ Returns the given string converted to a string that can be used for a clean filename. Specifically, leading and trailing spaces are removed; other spaces are converted to underscores; and anything that is not a unicode alphanumeric, dash, underscore, or dot, is removed. >>> get_valid_filename("john's portrait in 2004.jpg") 'johns_portrait_in_2004.jpg' """ s = force_text(s).strip().replace(' ', '_') return re.sub(r'(?u)[^-\w.]', '', s) get_valid_filename = allow_lazy(get_valid_filename, six.text_type) def get_text_list(list_, last_word=ugettext_lazy('or')): """ >>> get_text_list(['a', 'b', 'c', 'd']) 'a, b, c or d' >>> get_text_list(['a', 'b', 'c'], 'and') 'a, b and c' >>> get_text_list(['a', 'b'], 'and') 'a and b' >>> get_text_list(['a']) 'a' >>> get_text_list([]) '' """ if len(list_) == 0: return '' if len(list_) == 1: return force_text(list_[0]) return '%s %s %s' % ( # Translators: This string is used as a separator between list elements _(', ').join(force_text(i) for i in list_[:-1]), force_text(last_word), force_text(list_[-1])) get_text_list = allow_lazy(get_text_list, six.text_type) def normalize_newlines(text): """Normalizes CRLF and CR newlines to just LF.""" text = force_text(text) return re_newlines.sub('\n', text) normalize_newlines = allow_lazy(normalize_newlines, six.text_type) def phone2numeric(phone): """Converts a phone number with letters into its numeric equivalent.""" char2number = {'a': '2', 'b': '2', 'c': '2', 'd': '3', 'e': '3', 'f': '3', 'g': '4', 'h': '4', 'i': '4', 'j': '5', 'k': '5', 'l': '5', 'm': '6', 'n': '6', 'o': '6', 'p': '7', 'q': '7', 'r': '7', 's': '7', 't': '8', 'u': '8', 'v': '8', 'w': '9', 'x': '9', 'y': '9', 'z': '9'} return ''.join(char2number.get(c, c) for c in phone.lower()) phone2numeric = allow_lazy(phone2numeric) # From http://www.xhaus.com/alan/python/httpcomp.html#gzip # Used with permission. 
def compress_string(s): zbuf = BytesIO() zfile = GzipFile(mode='wb', compresslevel=6, fileobj=zbuf) zfile.write(s) zfile.close() return zbuf.getvalue() class StreamingBuffer(object): def __init__(self): self.vals = [] def write(self, val): self.vals.append(val) def read(self): if not self.vals: return b'' ret = b''.join(self.vals) self.vals = [] return ret def flush(self): return def close(self): return # Like compress_string, but for iterators of strings. def compress_sequence(sequence): buf = StreamingBuffer() zfile = GzipFile(mode='wb', compresslevel=6, fileobj=buf) # Output headers... yield buf.read() for item in sequence: zfile.write(item) data = buf.read() if data: yield data zfile.close() yield buf.read() # Expression to match some_token and some_token="with spaces" (and similarly # for single-quoted strings). smart_split_re = re.compile(r""" ((?: [^\s'"]* (?: (?:"(?:[^"\\]|\\.)*" | '(?:[^'\\]|\\.)*') [^\s'"]* )+ ) | \S+) """, re.VERBOSE) def smart_split(text): r""" Generator that splits a string by spaces, leaving quoted phrases together. Supports both single and double quotes, and supports escaping quotes with backslashes. In the output, strings will keep their initial and trailing quote marks and escaped quotes will remain escaped (the results can then be further processed with unescape_string_literal()). >>> list(smart_split(r'This is "a person\'s" test.')) ['This', 'is', '"a person\\\'s"', 'test.'] >>> list(smart_split(r"Another 'person\'s' test.")) ['Another', "'person\\'s'", 'test.'] >>> list(smart_split(r'A "\"funky\" style" test.')) ['A', '"\\"funky\\" style"', 'test.'] """ text = force_text(text) for bit in smart_split_re.finditer(text): yield bit.group(0) def _replace_entity(match): text = match.group(1) if text[0] == '#': text = text[1:] try: if text[0] in 'xX': c = int(text[1:], 16) else: c = int(text) return six.unichr(c) except ValueError: return match.group(0) else: try: return six.unichr(html_entities.name2codepoint[text]) except (ValueError, KeyError): return match.group(0) _entity_re = re.compile(r"&(#?[xX]?(?:[0-9a-fA-F]+|\w{1,8}));") def unescape_entities(text): return _entity_re.sub(_replace_entity, text) unescape_entities = allow_lazy(unescape_entities, six.text_type) def unescape_string_literal(s): r""" Convert quoted string literals to unquoted strings with escaped quotes and backslashes unquoted:: >>> unescape_string_literal('"abc"') 'abc' >>> unescape_string_literal("'abc'") 'abc' >>> unescape_string_literal('"a \"bc\""') 'a "bc"' >>> unescape_string_literal("'\'ab\' c'") "'ab' c" """ if s[0] not in "\"'" or s[-1] != s[0]: raise ValueError("Not a string literal: %r" % s) quote = s[0] return s[1:-1].replace(r'\%s' % quote, quote).replace(r'\\', '\\') unescape_string_literal = allow_lazy(unescape_string_literal) def slugify(value, allow_unicode=False): """ Convert to ASCII if 'allow_unicode' is False. Convert spaces to hyphens. Remove characters that aren't alphanumerics, underscores, or hyphens. Convert to lowercase. Also strip leading and trailing whitespace. 
""" value = force_text(value) if allow_unicode: value = unicodedata.normalize('NFKC', value) value = re.sub('[^\w\s-]', '', value, flags=re.U).strip().lower() return mark_safe(re.sub('[-\s]+', '-', value, flags=re.U)) value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore').decode('ascii') value = re.sub('[^\w\s-]', '', value).strip().lower() return mark_safe(re.sub('[-\s]+', '-', value)) slugify = allow_lazy(slugify, six.text_type, SafeText) def camel_case_to_spaces(value): """ Splits CamelCase and converts to lower case. Also strips leading and trailing whitespace. """ return re_camel_case.sub(r' \1', value).strip().lower()
bsd-3-clause
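A few of the helpers defined in the module above, exercised in a minimal sketch (assumes Django is importable; expected outputs are shown in comments):

from django.utils.text import Truncator, phone2numeric, slugify, smart_split

print(Truncator('The quick brown fox jumped').words(3))  # The quick brown...
print(slugify('Jack & Jill like numbers 1,2,3'))         # jack-jill-like-numbers-123
print(list(smart_split('a "b c" d')))                    # ['a', '"b c"', 'd']
print(phone2numeric('1-800-FLOWERS'))                    # 1-800-3569377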
forge33/CouchPotatoServer
libs/subliminal/cache.py
107
4619
# -*- coding: utf-8 -*-
# Copyright 2012 Nicolas Wack <[email protected]>
#
# This file is part of subliminal.
#
# subliminal is free software; you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# subliminal is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with subliminal.  If not, see <http://www.gnu.org/licenses/>.
from collections import defaultdict
from functools import wraps
import logging
import os.path
import threading
try:
    import cPickle as pickle
except ImportError:
    import pickle


__all__ = ['Cache', 'cachedmethod']
logger = logging.getLogger(__name__)


class Cache(object):
    """A Cache object contains cached values for methods. It can have
    separate internal caches, one for each service
    """
    def __init__(self, cache_dir):
        self.cache_dir = cache_dir
        self.cache = defaultdict(dict)
        self.lock = threading.RLock()

    def __del__(self):
        for service_name in self.cache:
            self.save(service_name)

    def cache_location(self, service_name):
        return os.path.join(self.cache_dir, 'subliminal_%s.cache' % service_name)

    def load(self, service_name):
        with self.lock:
            if service_name in self.cache:
                # already loaded
                return

            self.cache[service_name] = defaultdict(dict)
            filename = self.cache_location(service_name)
            logger.debug(u'Cache: loading cache from %s' % filename)
            try:
                self.cache[service_name] = pickle.load(open(filename, 'rb'))
            except IOError:
                logger.info('Cache: Cache file "%s" doesn\'t exist, creating it' % filename)
            except EOFError:
                logger.error('Cache: cache file "%s" is corrupted... Removing it.' % filename)
                os.remove(filename)

    def save(self, service_name):
        filename = self.cache_location(service_name)
        logger.debug(u'Cache: saving cache to %s' % filename)
        with self.lock:
            pickle.dump(self.cache[service_name], open(filename, 'wb'))

    def clear(self, service_name):
        try:
            os.remove(self.cache_location(service_name))
        except OSError:
            pass
        self.cache[service_name] = defaultdict(dict)

    def cached_func_key(self, func, cls=None):
        try:
            cls = func.im_class
        except:
            pass
        return ('%s.%s' % (cls.__module__, cls.__name__), func.__name__)

    def function_cache(self, service_name, func):
        func_key = self.cached_func_key(func)
        return self.cache[service_name][func_key]

    def cache_for(self, service_name, func, args, result):
        # no need to lock here, dict ops are atomic
        self.function_cache(service_name, func)[args] = result

    def cached_value(self, service_name, func, args):
        """Raises KeyError if not found"""
        # no need to lock here, dict ops are atomic
        return self.function_cache(service_name, func)[args]


def cachedmethod(function):
    """Decorator to make a method use the cache.

    .. note::
        This can NOT be used with static functions, it has to be used on
        methods of some class
    """
    @wraps(function)
    def cached(*args):
        c = args[0].config.cache
        service_name = args[0].__class__.__name__
        func_key = c.cached_func_key(function, cls=args[0].__class__)
        func_cache = c.cache[service_name][func_key]

        # we need to remove the first element of args for the key, as it is the
        # instance pointer and we don't want the cache to know which instance
        # called it, it is shared among all instances of the same class
        key = args[1:]

        if key in func_cache:
            result = func_cache[key]
            logger.debug(u'Using cached value for %s(%s), returns: %s' % (func_key, key, result))
            return result

        result = function(*args)

        # note: another thread could have already cached a value in the
        # meantime, but that's ok as we prefer to keep the latest value in
        # the cache
        func_cache[key] = result
        return result

    return cached
gpl-3.0
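A minimal sketch of the Cache API above (service name, directory, and method are illustrative stand-ins; a bound method is used because cached_func_key relies on im_class, which exists only on Python 2 bound methods):

from subliminal.cache import Cache

class ExampleService(object):
    def query(self, term):
        return term.upper()

svc = ExampleService()
cache = Cache('/tmp')                  # cache files are written here
cache.load('ExampleService')           # creates the file on first use
cache.cache_for('ExampleService', svc.query, ('hello',), svc.query('hello'))
print(cache.cached_value('ExampleService', svc.query, ('hello',)))  # HELLO
cache.save('ExampleService')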
maxamillion/ansible
lib/ansible/module_utils/compat/_selectors2.py
79
23517
# This file is from the selectors2.py package. It backports the PSF Licensed # selectors module from the Python-3.5 stdlib to older versions of Python. # The author, Seth Michael Larson, dual licenses his modifications under the # PSF License and MIT License: # https://github.com/SethMichaelLarson/selectors2#license # # Copyright (c) 2016 Seth Michael Larson # # PSF License (see licenses/PSF-license.txt or https://opensource.org/licenses/Python-2.0) # MIT License (see licenses/MIT-license.txt or https://opensource.org/licenses/MIT) # # Backport of selectors.py from Python 3.5+ to support Python < 3.4 # Also has the behavior specified in PEP 475 which is to retry syscalls # in the case of an EINTR error. This module is required because selectors34 # does not follow this behavior and instead returns that no file descriptor # events have occurred rather than retry the syscall. The decision to drop # support for select.devpoll is made to maintain 100% test coverage. import errno import math import select import socket import sys import time from collections import namedtuple from ansible.module_utils.common._collections_compat import Mapping try: monotonic = time.monotonic except (AttributeError, ImportError): # Python 3.3< monotonic = time.time __author__ = 'Seth Michael Larson' __email__ = '[email protected]' __version__ = '1.1.1' __license__ = 'MIT' __all__ = [ 'EVENT_READ', 'EVENT_WRITE', 'SelectorError', 'SelectorKey', 'DefaultSelector' ] EVENT_READ = (1 << 0) EVENT_WRITE = (1 << 1) HAS_SELECT = True # Variable that shows whether the platform has a selector. _SYSCALL_SENTINEL = object() # Sentinel in case a system call returns None. class SelectorError(Exception): def __init__(self, errcode): super(SelectorError, self).__init__() self.errno = errcode def __repr__(self): return "<SelectorError errno={0}>".format(self.errno) def __str__(self): return self.__repr__() def _fileobj_to_fd(fileobj): """ Return a file descriptor from a file object. If given an integer will simply return that integer back. """ if isinstance(fileobj, int): fd = fileobj else: try: fd = int(fileobj.fileno()) except (AttributeError, TypeError, ValueError): raise ValueError("Invalid file object: {0!r}".format(fileobj)) if fd < 0: raise ValueError("Invalid file descriptor: {0}".format(fd)) return fd # Python 3.5 uses a more direct route to wrap system calls to increase speed. if sys.version_info >= (3, 5): def _syscall_wrapper(func, _, *args, **kwargs): """ This is the short-circuit version of the below logic because in Python 3.5+ all selectors restart system calls. """ try: return func(*args, **kwargs) except (OSError, IOError, select.error) as e: errcode = None if hasattr(e, "errno"): errcode = e.errno elif hasattr(e, "args"): errcode = e.args[0] raise SelectorError(errcode) else: def _syscall_wrapper(func, recalc_timeout, *args, **kwargs): """ Wrapper function for syscalls that could fail due to EINTR. All functions should be retried if there is time left in the timeout in accordance with PEP 475. """ timeout = kwargs.get("timeout", None) if timeout is None: expires = None recalc_timeout = False else: timeout = float(timeout) if timeout < 0.0: # Timeout less than 0 treated as no timeout. 
expires = None else: expires = monotonic() + timeout args = list(args) if recalc_timeout and "timeout" not in kwargs: raise ValueError( "Timeout must be in args or kwargs to be recalculated") result = _SYSCALL_SENTINEL while result is _SYSCALL_SENTINEL: try: result = func(*args, **kwargs) # OSError is thrown by select.select # IOError is thrown by select.epoll.poll # select.error is thrown by select.poll.poll # Aren't we thankful for Python 3.x rework for exceptions? except (OSError, IOError, select.error) as e: # select.error wasn't a subclass of OSError in the past. errcode = None if hasattr(e, "errno"): errcode = e.errno elif hasattr(e, "args"): errcode = e.args[0] # Also test for the Windows equivalent of EINTR. is_interrupt = (errcode == errno.EINTR or (hasattr(errno, "WSAEINTR") and errcode == errno.WSAEINTR)) if is_interrupt: if expires is not None: current_time = monotonic() if current_time > expires: raise OSError(errno.ETIMEDOUT) if recalc_timeout: if "timeout" in kwargs: kwargs["timeout"] = expires - current_time continue if errcode: raise SelectorError(errcode) else: raise return result SelectorKey = namedtuple('SelectorKey', ['fileobj', 'fd', 'events', 'data']) class _SelectorMapping(Mapping): """ Mapping of file objects to selector keys """ def __init__(self, selector): self._selector = selector def __len__(self): return len(self._selector._fd_to_key) def __getitem__(self, fileobj): try: fd = self._selector._fileobj_lookup(fileobj) return self._selector._fd_to_key[fd] except KeyError: raise KeyError("{0!r} is not registered.".format(fileobj)) def __iter__(self): return iter(self._selector._fd_to_key) class BaseSelector(object): """ Abstract Selector class A selector supports registering file objects to be monitored for specific I/O events. A file object is a file descriptor or any object with a `fileno()` method. An arbitrary object can be attached to the file object which can be used for example to store context info, a callback, etc. A selector can use various implementations (select(), poll(), epoll(), and kqueue()) depending on the platform. The 'DefaultSelector' class uses the most efficient implementation for the current platform. """ def __init__(self): # Maps file descriptors to keys. self._fd_to_key = {} # Read-only mapping returned by get_map() self._map = _SelectorMapping(self) def _fileobj_lookup(self, fileobj): """ Return a file descriptor from a file object. This wraps _fileobj_to_fd() to do an exhaustive search in case the object is invalid but we still have it in our map. Used by unregister() so we can unregister an object that was previously registered even if it is closed. It is also used by _SelectorMapping """ try: return _fileobj_to_fd(fileobj) except ValueError: # Search through all our mapped keys. for key in self._fd_to_key.values(): if key.fileobj is fileobj: return key.fd # Raise ValueError after all. raise def register(self, fileobj, events, data=None): """ Register a file object for a set of events to monitor. """ if (not events) or (events & ~(EVENT_READ | EVENT_WRITE)): raise ValueError("Invalid events: {0!r}".format(events)) key = SelectorKey(fileobj, self._fileobj_lookup(fileobj), events, data) if key.fd in self._fd_to_key: raise KeyError("{0!r} (FD {1}) is already registered" .format(fileobj, key.fd)) self._fd_to_key[key.fd] = key return key def unregister(self, fileobj): """ Unregister a file object from being monitored. 
""" try: key = self._fd_to_key.pop(self._fileobj_lookup(fileobj)) except KeyError: raise KeyError("{0!r} is not registered".format(fileobj)) # Getting the fileno of a closed socket on Windows errors with EBADF. except socket.error as err: if err.errno != errno.EBADF: raise else: for key in self._fd_to_key.values(): if key.fileobj is fileobj: self._fd_to_key.pop(key.fd) break else: raise KeyError("{0!r} is not registered".format(fileobj)) return key def modify(self, fileobj, events, data=None): """ Change a registered file object monitored events and data. """ # NOTE: Some subclasses optimize this operation even further. try: key = self._fd_to_key[self._fileobj_lookup(fileobj)] except KeyError: raise KeyError("{0!r} is not registered".format(fileobj)) if events != key.events: self.unregister(fileobj) key = self.register(fileobj, events, data) elif data != key.data: # Use a shortcut to update the data. key = key._replace(data=data) self._fd_to_key[key.fd] = key return key def select(self, timeout=None): """ Perform the actual selection until some monitored file objects are ready or the timeout expires. """ raise NotImplementedError() def close(self): """ Close the selector. This must be called to ensure that all underlying resources are freed. """ self._fd_to_key.clear() self._map = None def get_key(self, fileobj): """ Return the key associated with a registered file object. """ mapping = self.get_map() if mapping is None: raise RuntimeError("Selector is closed") try: return mapping[fileobj] except KeyError: raise KeyError("{0!r} is not registered".format(fileobj)) def get_map(self): """ Return a mapping of file objects to selector keys """ return self._map def _key_from_fd(self, fd): """ Return the key associated to a given file descriptor Return None if it is not found. """ try: return self._fd_to_key[fd] except KeyError: return None def __enter__(self): return self def __exit__(self, *args): self.close() # Almost all platforms have select.select() if hasattr(select, "select"): class SelectSelector(BaseSelector): """ Select-based selector. """ def __init__(self): super(SelectSelector, self).__init__() self._readers = set() self._writers = set() def register(self, fileobj, events, data=None): key = super(SelectSelector, self).register(fileobj, events, data) if events & EVENT_READ: self._readers.add(key.fd) if events & EVENT_WRITE: self._writers.add(key.fd) return key def unregister(self, fileobj): key = super(SelectSelector, self).unregister(fileobj) self._readers.discard(key.fd) self._writers.discard(key.fd) return key def _select(self, r, w, timeout=None): """ Wrapper for select.select because timeout is a positional arg """ return select.select(r, w, [], timeout) def select(self, timeout=None): # Selecting on empty lists on Windows errors out. 
if not len(self._readers) and not len(self._writers): return [] timeout = None if timeout is None else max(timeout, 0.0) ready = [] r, w, _ = _syscall_wrapper(self._select, True, self._readers, self._writers, timeout=timeout) r = set(r) w = set(w) for fd in r | w: events = 0 if fd in r: events |= EVENT_READ if fd in w: events |= EVENT_WRITE key = self._key_from_fd(fd) if key: ready.append((key, events & key.events)) return ready __all__.append('SelectSelector') if hasattr(select, "poll"): class PollSelector(BaseSelector): """ Poll-based selector """ def __init__(self): super(PollSelector, self).__init__() self._poll = select.poll() def register(self, fileobj, events, data=None): key = super(PollSelector, self).register(fileobj, events, data) event_mask = 0 if events & EVENT_READ: event_mask |= select.POLLIN if events & EVENT_WRITE: event_mask |= select.POLLOUT self._poll.register(key.fd, event_mask) return key def unregister(self, fileobj): key = super(PollSelector, self).unregister(fileobj) self._poll.unregister(key.fd) return key def _wrap_poll(self, timeout=None): """ Wrapper function for select.poll.poll() so that _syscall_wrapper can work with only seconds. """ if timeout is not None: if timeout <= 0: timeout = 0 else: # select.poll.poll() has a resolution of 1 millisecond, # round away from zero to wait *at least* timeout seconds. timeout = math.ceil(timeout * 1e3) result = self._poll.poll(timeout) return result def select(self, timeout=None): ready = [] fd_events = _syscall_wrapper(self._wrap_poll, True, timeout=timeout) for fd, event_mask in fd_events: events = 0 if event_mask & ~select.POLLIN: events |= EVENT_WRITE if event_mask & ~select.POLLOUT: events |= EVENT_READ key = self._key_from_fd(fd) if key: ready.append((key, events & key.events)) return ready __all__.append('PollSelector') if hasattr(select, "epoll"): class EpollSelector(BaseSelector): """ Epoll-based selector """ def __init__(self): super(EpollSelector, self).__init__() self._epoll = select.epoll() def fileno(self): return self._epoll.fileno() def register(self, fileobj, events, data=None): key = super(EpollSelector, self).register(fileobj, events, data) events_mask = 0 if events & EVENT_READ: events_mask |= select.EPOLLIN if events & EVENT_WRITE: events_mask |= select.EPOLLOUT _syscall_wrapper(self._epoll.register, False, key.fd, events_mask) return key def unregister(self, fileobj): key = super(EpollSelector, self).unregister(fileobj) try: _syscall_wrapper(self._epoll.unregister, False, key.fd) except SelectorError: # This can occur when the fd was closed since registry. pass return key def select(self, timeout=None): if timeout is not None: if timeout <= 0: timeout = 0.0 else: # select.epoll.poll() has a resolution of 1 millisecond # but luckily takes seconds so we don't need a wrapper # like PollSelector. Just for better rounding. timeout = math.ceil(timeout * 1e3) * 1e-3 timeout = float(timeout) else: timeout = -1.0 # epoll.poll() must have a float. # We always want at least 1 to ensure that select can be called # with no file descriptors registered. Otherwise will fail. 
max_events = max(len(self._fd_to_key), 1) ready = [] fd_events = _syscall_wrapper(self._epoll.poll, True, timeout=timeout, maxevents=max_events) for fd, event_mask in fd_events: events = 0 if event_mask & ~select.EPOLLIN: events |= EVENT_WRITE if event_mask & ~select.EPOLLOUT: events |= EVENT_READ key = self._key_from_fd(fd) if key: ready.append((key, events & key.events)) return ready def close(self): self._epoll.close() super(EpollSelector, self).close() __all__.append('EpollSelector') if hasattr(select, "devpoll"): class DevpollSelector(BaseSelector): """Solaris /dev/poll selector.""" def __init__(self): super(DevpollSelector, self).__init__() self._devpoll = select.devpoll() def fileno(self): return self._devpoll.fileno() def register(self, fileobj, events, data=None): key = super(DevpollSelector, self).register(fileobj, events, data) poll_events = 0 if events & EVENT_READ: poll_events |= select.POLLIN if events & EVENT_WRITE: poll_events |= select.POLLOUT self._devpoll.register(key.fd, poll_events) return key def unregister(self, fileobj): key = super(DevpollSelector, self).unregister(fileobj) self._devpoll.unregister(key.fd) return key def _wrap_poll(self, timeout=None): """ Wrapper function for select.poll.poll() so that _syscall_wrapper can work with only seconds. """ if timeout is not None: if timeout <= 0: timeout = 0 else: # select.devpoll.poll() has a resolution of 1 millisecond, # round away from zero to wait *at least* timeout seconds. timeout = math.ceil(timeout * 1e3) result = self._devpoll.poll(timeout) return result def select(self, timeout=None): ready = [] fd_events = _syscall_wrapper(self._wrap_poll, True, timeout=timeout) for fd, event_mask in fd_events: events = 0 if event_mask & ~select.POLLIN: events |= EVENT_WRITE if event_mask & ~select.POLLOUT: events |= EVENT_READ key = self._key_from_fd(fd) if key: ready.append((key, events & key.events)) return ready def close(self): self._devpoll.close() super(DevpollSelector, self).close() __all__.append('DevpollSelector') if hasattr(select, "kqueue"): class KqueueSelector(BaseSelector): """ Kqueue / Kevent-based selector """ def __init__(self): super(KqueueSelector, self).__init__() self._kqueue = select.kqueue() def fileno(self): return self._kqueue.fileno() def register(self, fileobj, events, data=None): key = super(KqueueSelector, self).register(fileobj, events, data) if events & EVENT_READ: kevent = select.kevent(key.fd, select.KQ_FILTER_READ, select.KQ_EV_ADD) _syscall_wrapper(self._wrap_control, False, [kevent], 0, 0) if events & EVENT_WRITE: kevent = select.kevent(key.fd, select.KQ_FILTER_WRITE, select.KQ_EV_ADD) _syscall_wrapper(self._wrap_control, False, [kevent], 0, 0) return key def unregister(self, fileobj): key = super(KqueueSelector, self).unregister(fileobj) if key.events & EVENT_READ: kevent = select.kevent(key.fd, select.KQ_FILTER_READ, select.KQ_EV_DELETE) try: _syscall_wrapper(self._wrap_control, False, [kevent], 0, 0) except SelectorError: pass if key.events & EVENT_WRITE: kevent = select.kevent(key.fd, select.KQ_FILTER_WRITE, select.KQ_EV_DELETE) try: _syscall_wrapper(self._wrap_control, False, [kevent], 0, 0) except SelectorError: pass return key def select(self, timeout=None): if timeout is not None: timeout = max(timeout, 0) max_events = len(self._fd_to_key) * 2 ready_fds = {} kevent_list = _syscall_wrapper(self._wrap_control, True, None, max_events, timeout=timeout) for kevent in kevent_list: fd = kevent.ident event_mask = kevent.filter events = 0 if event_mask == select.KQ_FILTER_READ: events |= 
EVENT_READ if event_mask == select.KQ_FILTER_WRITE: events |= EVENT_WRITE key = self._key_from_fd(fd) if key: if key.fd not in ready_fds: ready_fds[key.fd] = (key, events & key.events) else: old_events = ready_fds[key.fd][1] ready_fds[key.fd] = (key, (events | old_events) & key.events) return list(ready_fds.values()) def close(self): self._kqueue.close() super(KqueueSelector, self).close() def _wrap_control(self, changelist, max_events, timeout): return self._kqueue.control(changelist, max_events, timeout) __all__.append('KqueueSelector') # Choose the best implementation, roughly: # kqueue == epoll == devpoll > poll > select. # select() also can't accept a FD > FD_SETSIZE (usually around 1024) if 'KqueueSelector' in globals(): # Platform-specific: Mac OS and BSD DefaultSelector = KqueueSelector elif 'DevpollSelector' in globals(): DefaultSelector = DevpollSelector elif 'EpollSelector' in globals(): # Platform-specific: Linux DefaultSelector = EpollSelector elif 'PollSelector' in globals(): # Platform-specific: Linux DefaultSelector = PollSelector elif 'SelectSelector' in globals(): # Platform-specific: Windows DefaultSelector = SelectSelector else: # Platform-specific: AppEngine def no_selector(_): raise ValueError("Platform does not have a selector") DefaultSelector = no_selector HAS_SELECT = False
gpl-3.0
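A minimal usage sketch for the backported selector API above (assumes a platform with socket.socketpair; the payload and data tag are illustrative):

import socket
from ansible.module_utils.compat._selectors2 import DefaultSelector, EVENT_READ

left, right = socket.socketpair()
sel = DefaultSelector()
sel.register(left, EVENT_READ, data='left end')

right.sendall(b'ping')                 # makes the left end readable
for key, events in sel.select(timeout=1.0):
    print(key.data)                    # left end
    print(key.fileobj.recv(4))         # the b'ping' payload
sel.close()
left.close()
right.close()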
IONISx/edx-platform
cms/djangoapps/contentstore/management/commands/import.py
64
2065
""" Script for importing courseware from XML format """ from django.core.management.base import BaseCommand, CommandError, make_option from django_comment_common.utils import (seed_permissions_roles, are_permissions_roles_seeded) from xmodule.modulestore.xml_importer import import_course_from_xml from xmodule.modulestore import ModuleStoreEnum from xmodule.modulestore.django import modulestore from xmodule.contentstore.django import contentstore class Command(BaseCommand): """ Import the specified data directory into the default ModuleStore """ help = 'Import the specified data directory into the default ModuleStore' option_list = BaseCommand.option_list + ( make_option('--nostatic', action='store_true', help='Skip import of static content'), ) def handle(self, *args, **options): "Execute the command" if len(args) == 0: raise CommandError("import requires at least one argument: <data directory> [--nostatic] [<course dir>...]") data_dir = args[0] do_import_static = not options.get('nostatic', False) if len(args) > 1: source_dirs = args[1:] else: source_dirs = None self.stdout.write("Importing. Data_dir={data}, source_dirs={courses}\n".format( data=data_dir, courses=source_dirs, )) mstore = modulestore() course_items = import_course_from_xml( mstore, ModuleStoreEnum.UserID.mgmt_command, data_dir, source_dirs, load_error_modules=False, static_content_store=contentstore(), verbose=True, do_import_static=do_import_static, create_if_not_present=True, ) for course in course_items: course_id = course.id if not are_permissions_roles_seeded(course_id): self.stdout.write('Seeding forum roles for course {0}\n'.format(course_id)) seed_permissions_roles(course_id)
agpl-3.0
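A sketch of invoking the management command above programmatically (assumes a configured CMS Django environment; the data directory and course directory are placeholders):

from django.core.management import call_command

# Equivalent to: python manage.py import /path/to/data course_dir --nostatic
call_command('import', '/path/to/data', 'course_dir', nostatic=True)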
msingh172/youtube-dl
youtube_dl/extractor/movieclips.py
126
2668
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..compat import (
    compat_str,
)
from ..utils import (
    ExtractorError,
    clean_html,
)


class MovieClipsIE(InfoExtractor):
    _VALID_URL = r'https?://movieclips\.com/(?P<id>[\da-zA-Z]+)(?:-(?P<display_id>[\da-z-]+))?'
    _TEST = {
        'url': 'http://movieclips.com/Wy7ZU-my-week-with-marilyn-movie-do-you-love-me/',
        'info_dict': {
            'id': 'Wy7ZU',
            'display_id': 'my-week-with-marilyn-movie-do-you-love-me',
            'ext': 'mp4',
            'title': 'My Week with Marilyn - Do You Love Me?',
            'description': 'md5:e86795bd332fe3cff461e7c8dc542acb',
            'thumbnail': 're:^https?://.*\.jpg$',
        },
        'params': {
            # rtmp download
            'skip_download': True,
        }
    }

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')
        display_id = mobj.group('display_id')
        show_id = display_id or video_id

        config = self._download_xml(
            'http://config.movieclips.com/player/config/%s' % video_id,
            show_id, 'Downloading player config')

        if config.find('./country-region').text == 'false':
            raise ExtractorError(
                '%s said: %s' % (self.IE_NAME, config.find('./region_alert').text),
                expected=True)

        properties = config.find('./video/properties')
        smil_file = properties.attrib['smil_file']

        smil = self._download_xml(smil_file, show_id, 'Downloading SMIL')
        base_url = smil.find('./head/meta').attrib['base']

        formats = []
        for video in smil.findall('./body/switch/video'):
            vbr = int(video.attrib['system-bitrate']) / 1000
            src = video.attrib['src']
            formats.append({
                'url': base_url,
                'play_path': src,
                'ext': src.split(':')[0],
                'vbr': vbr,
                'format_id': '%dk' % vbr,
            })
        self._sort_formats(formats)

        title = '%s - %s' % (properties.attrib['clip_movie_title'],
                             properties.attrib['clip_title'])
        description = clean_html(compat_str(properties.attrib['clip_description']))
        thumbnail = properties.attrib['image']
        categories = properties.attrib['clip_categories'].split(',')

        return {
            'id': video_id,
            'display_id': display_id,
            'title': title,
            'description': description,
            'thumbnail': thumbnail,
            'categories': categories,
            'formats': formats,
        }
unlicense
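A quick, illustrative check that the _VALID_URL pattern above captures both named groups for the extractor's own test URL:

import re

pattern = (r'https?://movieclips\.com/(?P<id>[\da-zA-Z]+)'
           r'(?:-(?P<display_id>[\da-z-]+))?')
m = re.match(pattern, 'http://movieclips.com/Wy7ZU-my-week-with-marilyn-movie-do-you-love-me/')
print(m.group('id'))          # Wy7ZU
print(m.group('display_id'))  # my-week-with-marilyn-movie-do-you-love-me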
402231466/40223146
static/Brython3.1.0-20150301-090019/Lib/unittest/test/test_break.py
785
8138
import gc import io import os import sys import signal import weakref import unittest @unittest.skipUnless(hasattr(os, 'kill'), "Test requires os.kill") @unittest.skipIf(sys.platform =="win32", "Test cannot run on Windows") @unittest.skipIf(sys.platform == 'freebsd6', "Test kills regrtest on freebsd6 " "if threads have been used") class TestBreak(unittest.TestCase): def setUp(self): self._default_handler = signal.getsignal(signal.SIGINT) def tearDown(self): signal.signal(signal.SIGINT, self._default_handler) unittest.signals._results = weakref.WeakKeyDictionary() unittest.signals._interrupt_handler = None def testInstallHandler(self): default_handler = signal.getsignal(signal.SIGINT) unittest.installHandler() self.assertNotEqual(signal.getsignal(signal.SIGINT), default_handler) try: pid = os.getpid() os.kill(pid, signal.SIGINT) except KeyboardInterrupt: self.fail("KeyboardInterrupt not handled") self.assertTrue(unittest.signals._interrupt_handler.called) def testRegisterResult(self): result = unittest.TestResult() unittest.registerResult(result) for ref in unittest.signals._results: if ref is result: break elif ref is not result: self.fail("odd object in result set") else: self.fail("result not found") def testInterruptCaught(self): default_handler = signal.getsignal(signal.SIGINT) result = unittest.TestResult() unittest.installHandler() unittest.registerResult(result) self.assertNotEqual(signal.getsignal(signal.SIGINT), default_handler) def test(result): pid = os.getpid() os.kill(pid, signal.SIGINT) result.breakCaught = True self.assertTrue(result.shouldStop) try: test(result) except KeyboardInterrupt: self.fail("KeyboardInterrupt not handled") self.assertTrue(result.breakCaught) def testSecondInterrupt(self): result = unittest.TestResult() unittest.installHandler() unittest.registerResult(result) def test(result): pid = os.getpid() os.kill(pid, signal.SIGINT) result.breakCaught = True self.assertTrue(result.shouldStop) os.kill(pid, signal.SIGINT) self.fail("Second KeyboardInterrupt not raised") try: test(result) except KeyboardInterrupt: pass else: self.fail("Second KeyboardInterrupt not raised") self.assertTrue(result.breakCaught) def testTwoResults(self): unittest.installHandler() result = unittest.TestResult() unittest.registerResult(result) new_handler = signal.getsignal(signal.SIGINT) result2 = unittest.TestResult() unittest.registerResult(result2) self.assertEqual(signal.getsignal(signal.SIGINT), new_handler) result3 = unittest.TestResult() def test(result): pid = os.getpid() os.kill(pid, signal.SIGINT) try: test(result) except KeyboardInterrupt: self.fail("KeyboardInterrupt not handled") self.assertTrue(result.shouldStop) self.assertTrue(result2.shouldStop) self.assertFalse(result3.shouldStop) def testHandlerReplacedButCalled(self): # If our handler has been replaced (is no longer installed) but is # called by the *new* handler, then it isn't safe to delay the # SIGINT and we should immediately delegate to the default handler unittest.installHandler() handler = signal.getsignal(signal.SIGINT) def new_handler(frame, signum): handler(frame, signum) signal.signal(signal.SIGINT, new_handler) try: pid = os.getpid() os.kill(pid, signal.SIGINT) except KeyboardInterrupt: pass else: self.fail("replaced but delegated handler doesn't raise interrupt") def testRunner(self): # Creating a TextTestRunner with the appropriate argument should # register the TextTestResult it creates runner = unittest.TextTestRunner(stream=io.StringIO()) result = runner.run(unittest.TestSuite()) 
self.assertIn(result, unittest.signals._results) def testWeakReferences(self): # Calling registerResult on a result should not keep it alive result = unittest.TestResult() unittest.registerResult(result) ref = weakref.ref(result) del result # For non-reference counting implementations gc.collect();gc.collect() self.assertIsNone(ref()) def testRemoveResult(self): result = unittest.TestResult() unittest.registerResult(result) unittest.installHandler() self.assertTrue(unittest.removeResult(result)) # Should this raise an error instead? self.assertFalse(unittest.removeResult(unittest.TestResult())) try: pid = os.getpid() os.kill(pid, signal.SIGINT) except KeyboardInterrupt: pass self.assertFalse(result.shouldStop) def testMainInstallsHandler(self): failfast = object() test = object() verbosity = object() result = object() default_handler = signal.getsignal(signal.SIGINT) class FakeRunner(object): initArgs = [] runArgs = [] def __init__(self, *args, **kwargs): self.initArgs.append((args, kwargs)) def run(self, test): self.runArgs.append(test) return result class Program(unittest.TestProgram): def __init__(self, catchbreak): self.exit = False self.verbosity = verbosity self.failfast = failfast self.catchbreak = catchbreak self.testRunner = FakeRunner self.test = test self.result = None p = Program(False) p.runTests() self.assertEqual(FakeRunner.initArgs, [((), {'buffer': None, 'verbosity': verbosity, 'failfast': failfast, 'warnings': None})]) self.assertEqual(FakeRunner.runArgs, [test]) self.assertEqual(p.result, result) self.assertEqual(signal.getsignal(signal.SIGINT), default_handler) FakeRunner.initArgs = [] FakeRunner.runArgs = [] p = Program(True) p.runTests() self.assertEqual(FakeRunner.initArgs, [((), {'buffer': None, 'verbosity': verbosity, 'failfast': failfast, 'warnings': None})]) self.assertEqual(FakeRunner.runArgs, [test]) self.assertEqual(p.result, result) self.assertNotEqual(signal.getsignal(signal.SIGINT), default_handler) def testRemoveHandler(self): default_handler = signal.getsignal(signal.SIGINT) unittest.installHandler() unittest.removeHandler() self.assertEqual(signal.getsignal(signal.SIGINT), default_handler) # check that calling removeHandler multiple times has no ill-effect unittest.removeHandler() self.assertEqual(signal.getsignal(signal.SIGINT), default_handler) def testRemoveHandlerAsDecorator(self): default_handler = signal.getsignal(signal.SIGINT) unittest.installHandler() @unittest.removeHandler def test(): self.assertEqual(signal.getsignal(signal.SIGINT), default_handler) test() self.assertNotEqual(signal.getsignal(signal.SIGINT), default_handler)
gpl-3.0
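A minimal sketch of the break-handling API these tests exercise: installHandler() replaces the SIGINT handler so a run can finish the current test before stopping, and removeHandler() restores the default (the test case itself is a trivial stand-in):

import io
import unittest

class Example(unittest.TestCase):
    def test_noop(self):
        self.assertTrue(True)

unittest.installHandler()                  # intercept SIGINT
runner = unittest.TextTestRunner(stream=io.StringIO())
result = runner.run(unittest.TestSuite([Example('test_noop')]))
print(result.wasSuccessful())              # True
unittest.removeHandler()                   # restore the default handler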
zaqwes8811/matlab_ext
code-miners/projects/mkt-processors/data_str_to_json/coursera_title_parser.py
1
4890
#-*- coding: utf-8 -*-
import sys
sys.path.append('D:/github-release')

import unittest
import json

# Other
import uasio.os_io.io_wrapper as iow

# App
import _http_requester as http_request


def _split_url(url):
    """ http://www.dessci.com/en/products/mathplayer/
        to
        www.dessci.com /en/products/mathplayer/ - for httplib
    """
    url_copy = url.replace('https://', '')
    url_copy = url_copy.replace('http://', '')
    head = url_copy.split('/')[0]
    path = url_copy[len(head):]
    return head + ' ' + path


def _purge_key(key_value):
    result_key = ''
    if key_value:
        if key_value[0] == ' ':
            result_key = key_value[1:]
    return result_key


def _fill_tree_dict(lst, levels, level_pos, result_dict, head):
    if level_pos == len(levels) - 1:
        return

    foldered_list = ('@@@'.join(lst)).split(levels[level_pos])

    if head:
        if head[0] == ' ':
            head = head[1:]

    result_dict[head] = {}
    for it in foldered_list:
        splitted = it.split('@@@')
        # Current heading
        current_head = splitted[0]

        # Clean up the key
        if current_head:
            if current_head[0] == ' ':
                current_head = current_head[1:]

        if current_head:
            result_dict[head][current_head] = {}
            if level_pos != len(levels) - 2:
                result_dict[head][current_head] = {}
            else:
                # Terminal leaf
                result_dict[head][current_head] = _fill_last_leaf(splitted[1:])

            _fill_tree_dict(
                splitted[1:],
                levels,
                level_pos + 1,
                result_dict[head],
                current_head)

    # Move on to the next level
    level_pos += 1


def _fill_last_leaf(in_list):
    result_list = []
    for at in in_list:
        if at:
            result_list.append(at)
    return result_list


def _tree_walker(tree):
    if 'list' in str(type(tree)):
        for at in tree:
            print '\t\t', at
        return

    # Print out the headings
    for key in tree:
        print
        print key
        _tree_walker(tree[key])


def main():
    #fname = 'lessions_names.txt'
    fname = 'lessions_html_code.txt'
    sets = iow.get_utf8_template()
    sets['name'] = fname

    i = 0
    result_list = []
    readed_list = iow.file2list(sets)
    for at in readed_list:
        # Preliminary filtering
        at = at.replace('</a>', '')

        # Links with content
        if 'pdf' in at or '&format=srt' in at:
            at_copy = at.replace(' <a target="_new" href=', '')
            #result_list.append('link_to_get '+_split_url(at_copy))
            result_list.append(_split_url(at_copy))

        # Topics
        if 'min)' in at and 'div' not in at:
            result_list.append('name_content ' + at)

        # Week-level parts
        if '(Week' in at:
            at_copy_list = at.split('&nbsp;')[1].split('</h3>')[0]
            result_list.append('folder ' + at_copy_list)
        i += 1

    # Now pack everything into a dictionary
    levels = ['folder', 'name_content', 'link_to_get']
    result = {}
    for at in result_list:
        print at

    _fill_tree_dict(result_list, levels, 0, result, 'root')
    #_tree_walker(result)

    # Save the results to a file
    to_file = [json.dumps(result, sort_keys=True, indent=2)]
    settings = {
        'name': 'extracted_from_html.json',
        'howOpen': 'w',
        'coding': 'cp866'
    }
    iow.list2file(settings, to_file)

    settings = {
        'name': 'result.list',
        'howOpen': 'w',
        'coding': 'cp866'
    }
    iow.list2file(settings, result_list)


class TestSequenceFunctions(unittest.TestCase):
    """ Checks splitting of text by markers """
    def test_shuffle(self):
        sets = iow.get_utf8_template()
        sets['name'] = 'result_test.list'
        readed_list = iow.file2list(sets)

        # Now pack everything into a dictionary
        levels = ['LEVEL0', 'LEVEL1', 'nothing']
        result = {}
        _fill_tree_dict(readed_list, levels, 0, result, 'root')
        json_result = json.dumps(result['root'], sort_keys=True, indent=2)
        print
        print json_result


if __name__ == '__main__':
    #main()
    suite = unittest.TestLoader().loadTestsFromTestCase(TestSequenceFunctions)
    unittest.TextTestRunner(verbosity=2).run(suite)
    print 'Done'
apache-2.0
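Illustrative behavior of _split_url above, assuming the function is in scope: it strips the scheme and separates host from path with a space, the format the docstring says httplib-style requests expect.

print(_split_url('http://www.dessci.com/en/products/mathplayer/'))
# -> www.dessci.com /en/products/mathplayer/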
bob-the-hamster/commandergenius
project/jni/python/src/Lib/pydoc_topics.py
48
410336
# Autogenerated by Sphinx on Tue Apr 14 09:12:28 2009 topics = {'assert': u'\nThe ``assert`` statement\n************************\n\nAssert statements are a convenient way to insert debugging assertions\ninto a program:\n\n assert_stmt ::= "assert" expression ["," expression]\n\nThe simple form, ``assert expression``, is equivalent to\n\n if __debug__:\n if not expression: raise AssertionError\n\nThe extended form, ``assert expression1, expression2``, is equivalent\nto\n\n if __debug__:\n if not expression1: raise AssertionError, expression2\n\nThese equivalences assume that ``__debug__`` and ``AssertionError``\nrefer to the built-in variables with those names. In the current\nimplementation, the built-in variable ``__debug__`` is ``True`` under\nnormal circumstances, ``False`` when optimization is requested\n(command line option -O). The current code generator emits no code\nfor an assert statement when optimization is requested at compile\ntime. Note that it is unnecessary to include the source code for the\nexpression that failed in the error message; it will be displayed as\npart of the stack trace.\n\nAssignments to ``__debug__`` are illegal. The value for the built-in\nvariable is determined when the interpreter starts.\n', 'assignment': u'\nAssignment statements\n*********************\n\nAssignment statements are used to (re)bind names to values and to\nmodify attributes or items of mutable objects:\n\n assignment_stmt ::= (target_list "=")+ (expression_list | yield_expression)\n target_list ::= target ("," target)* [","]\n target ::= identifier\n | "(" target_list ")"\n | "[" target_list "]"\n | attributeref\n | subscription\n | slicing\n\n(See section *Primaries* for the syntax definitions for the last three\nsymbols.)\n\nAn assignment statement evaluates the expression list (remember that\nthis can be a single expression or a comma-separated list, the latter\nyielding a tuple) and assigns the single resulting object to each of\nthe target lists, from left to right.\n\nAssignment is defined recursively depending on the form of the target\n(list). When a target is part of a mutable object (an attribute\nreference, subscription or slicing), the mutable object must\nultimately perform the assignment and decide about its validity, and\nmay raise an exception if the assignment is unacceptable. The rules\nobserved by various types and the exceptions raised are given with the\ndefinition of the object types (see section *The standard type\nhierarchy*).\n\nAssignment of an object to a target list is recursively defined as\nfollows.\n\n* If the target list is a single target: The object is assigned to\n that target.\n\n* If the target list is a comma-separated list of targets: The object\n must be an iterable with the same number of items as there are\n targets in the target list, and the items are assigned, from left to\n right, to the corresponding targets. (This rule is relaxed as of\n Python 1.5; in earlier versions, the object had to be a tuple.\n Since strings are sequences, an assignment like ``a, b = "xy"`` is\n now legal as long as the string has the right length.)\n\nAssignment of an object to a single target is recursively defined as\nfollows.\n\n* If the target is an identifier (name):\n\n * If the name does not occur in a ``global`` statement in the\n current code block: the name is bound to the object in the current\n local namespace.\n\n * Otherwise: the name is bound to the object in the current global\n namespace.\n\n The name is rebound if it was already bound. 
This may cause the\n reference count for the object previously bound to the name to reach\n zero, causing the object to be deallocated and its destructor (if it\n has one) to be called.\n\n* If the target is a target list enclosed in parentheses or in square\n brackets: The object must be an iterable with the same number of\n items as there are targets in the target list, and its items are\n assigned, from left to right, to the corresponding targets.\n\n* If the target is an attribute reference: The primary expression in\n the reference is evaluated. It should yield an object with\n assignable attributes; if this is not the case, ``TypeError`` is\n raised. That object is then asked to assign the assigned object to\n the given attribute; if it cannot perform the assignment, it raises\n an exception (usually but not necessarily ``AttributeError``).\n\n* If the target is a subscription: The primary expression in the\n reference is evaluated. It should yield either a mutable sequence\n object (such as a list) or a mapping object (such as a dictionary).\n Next, the subscript expression is evaluated.\n\n If the primary is a mutable sequence object (such as a list), the\n subscript must yield a plain integer. If it is negative, the\n sequence\'s length is added to it. The resulting value must be a\n nonnegative integer less than the sequence\'s length, and the\n sequence is asked to assign the assigned object to its item with\n that index. If the index is out of range, ``IndexError`` is raised\n (assignment to a subscripted sequence cannot add new items to a\n list).\n\n If the primary is a mapping object (such as a dictionary), the\n subscript must have a type compatible with the mapping\'s key type,\n and the mapping is then asked to create a key/datum pair which maps\n the subscript to the assigned object. This can either replace an\n existing key/value pair with the same key value, or insert a new\n key/value pair (if no key with the same value existed).\n\n* If the target is a slicing: The primary expression in the reference\n is evaluated. It should yield a mutable sequence object (such as a\n list). The assigned object should be a sequence object of the same\n type. Next, the lower and upper bound expressions are evaluated,\n insofar they are present; defaults are zero and the sequence\'s\n length. The bounds should evaluate to (small) integers. If either\n bound is negative, the sequence\'s length is added to it. The\n resulting bounds are clipped to lie between zero and the sequence\'s\n length, inclusive. Finally, the sequence object is asked to replace\n the slice with the items of the assigned sequence. The length of\n the slice may be different from the length of the assigned sequence,\n thus changing the length of the target sequence, if the object\n allows it.\n\n(In the current implementation, the syntax for targets is taken to be\nthe same as for expressions, and invalid syntax is rejected during the\ncode generation phase, causing less detailed error messages.)\n\nWARNING: Although the definition of assignment implies that overlaps\nbetween the left-hand side and the right-hand side are \'safe\' (for\nexample ``a, b = b, a`` swaps two variables), overlaps *within* the\ncollection of assigned-to variables are not safe! 
For instance, the\nfollowing program prints ``[0, 2]``:\n\n x = [0, 1]\n i = 0\n i, x[i] = 1, 2\n print x\n\n\nAugmented assignment statements\n===============================\n\nAugmented assignment is the combination, in a single statement, of a\nbinary operation and an assignment statement:\n\n augmented_assignment_stmt ::= augtarget augop (expression_list | yield_expression)\n augtarget ::= identifier | attributeref | subscription | slicing\n augop ::= "+=" | "-=" | "*=" | "/=" | "//=" | "%=" | "**="\n | ">>=" | "<<=" | "&=" | "^=" | "|="\n\n(See section *Primaries* for the syntax definitions for the last three\nsymbols.)\n\nAn augmented assignment evaluates the target (which, unlike normal\nassignment statements, cannot be an unpacking) and the expression\nlist, performs the binary operation specific to the type of assignment\non the two operands, and assigns the result to the original target.\nThe target is only evaluated once.\n\nAn augmented assignment expression like ``x += 1`` can be rewritten as\n``x = x + 1`` to achieve a similar, but not exactly equal effect. In\nthe augmented version, ``x`` is only evaluated once. Also, when\npossible, the actual operation is performed *in-place*, meaning that\nrather than creating a new object and assigning that to the target,\nthe old object is modified instead.\n\nWith the exception of assigning to tuples and multiple targets in a\nsingle statement, the assignment done by augmented assignment\nstatements is handled the same way as normal assignments. Similarly,\nwith the exception of the possible *in-place* behavior, the binary\noperation performed by augmented assignment is the same as the normal\nbinary operations.\n\nFor targets which are attribute references, the initial value is\nretrieved with a ``getattr()`` and the result is assigned with a\n``setattr()``. Notice that the two methods do not necessarily refer\nto the same variable. When ``getattr()`` refers to a class variable,\n``setattr()`` still writes to an instance variable. For example:\n\n class A:\n x = 3 # class variable\n a = A()\n a.x += 1 # writes a.x as 4 leaving A.x as 3\n', 'atom-identifiers': u'\nIdentifiers (Names)\n*******************\n\nAn identifier occurring as an atom is a name. See section\n*Identifiers and keywords* for lexical definition and section *Naming\nand binding* for documentation of naming and binding.\n\nWhen the name is bound to an object, evaluation of the atom yields\nthat object. When a name is not bound, an attempt to evaluate it\nraises a ``NameError`` exception.\n\n**Private name mangling:** When an identifier that textually occurs in\na class definition begins with two or more underscore characters and\ndoes not end in two or more underscores, it is considered a *private\nname* of that class. Private names are transformed to a longer form\nbefore code is generated for them. The transformation inserts the\nclass name in front of the name, with leading underscores removed, and\na single underscore inserted in front of the class name. For example,\nthe identifier ``__spam`` occurring in a class named ``Ham`` will be\ntransformed to ``_Ham__spam``. This transformation is independent of\nthe syntactical context in which the identifier is used. If the\ntransformed name is extremely long (longer than 255 characters),\nimplementation defined truncation may happen. 
If the class name\nconsists only of underscores, no transformation is done.\n', 'atom-literals': u"\nLiterals\n********\n\nPython supports string literals and various numeric literals:\n\n literal ::= stringliteral | integer | longinteger\n | floatnumber | imagnumber\n\nEvaluation of a literal yields an object of the given type (string,\ninteger, long integer, floating point number, complex number) with the\ngiven value. The value may be approximated in the case of floating\npoint and imaginary (complex) literals. See section *Literals* for\ndetails.\n\nAll literals correspond to immutable data types, and hence the\nobject's identity is less important than its value. Multiple\nevaluations of literals with the same value (either the same\noccurrence in the program text or a different occurrence) may obtain\nthe same object or a different object with the same value.\n", 'attribute-access': u'\nCustomizing attribute access\n****************************\n\nThe following methods can be defined to customize the meaning of\nattribute access (use of, assignment to, or deletion of ``x.name``)\nfor class instances.\n\nobject.__getattr__(self, name)\n\n Called when an attribute lookup has not found the attribute in the\n usual places (i.e. it is not an instance attribute nor is it found\n in the class tree for ``self``). ``name`` is the attribute name.\n This method should return the (computed) attribute value or raise\n an ``AttributeError`` exception.\n\n Note that if the attribute is found through the normal mechanism,\n ``__getattr__()`` is not called. (This is an intentional asymmetry\n between ``__getattr__()`` and ``__setattr__()``.) This is done both\n for efficiency reasons and because otherwise ``__getattr__()``\n would have no way to access other attributes of the instance. Note\n that at least for instance variables, you can fake total control by\n not inserting any values in the instance attribute dictionary (but\n instead inserting them in another object). See the\n ``__getattribute__()`` method below for a way to actually get total\n control in new-style classes.\n\nobject.__setattr__(self, name, value)\n\n Called when an attribute assignment is attempted. This is called\n instead of the normal mechanism (i.e. store the value in the\n instance dictionary). *name* is the attribute name, *value* is the\n value to be assigned to it.\n\n If ``__setattr__()`` wants to assign to an instance attribute, it\n should not simply execute ``self.name = value`` --- this would\n cause a recursive call to itself. Instead, it should insert the\n value in the dictionary of instance attributes, e.g.,\n ``self.__dict__[name] = value``. For new-style classes, rather\n than accessing the instance dictionary, it should call the base\n class method with the same name, for example,\n ``object.__setattr__(self, name, value)``.\n\nobject.__delattr__(self, name)\n\n Like ``__setattr__()`` but for attribute deletion instead of\n assignment. This should only be implemented if ``del obj.name`` is\n meaningful for the object.\n\n\nMore attribute access for new-style classes\n===========================================\n\nThe following methods only apply to new-style classes.\n\nobject.__getattribute__(self, name)\n\n Called unconditionally to implement attribute accesses for\n instances of the class. If the class also defines\n ``__getattr__()``, the latter will not be called unless\n ``__getattribute__()`` either calls it explicitly or raises an\n ``AttributeError``. 
This method should return the (computed)\n attribute value or raise an ``AttributeError`` exception. In order\n to avoid infinite recursion in this method, its implementation\n should always call the base class method with the same name to\n access any attributes it needs, for example,\n ``object.__getattribute__(self, name)``.\n\n Note: This method may still be bypassed when looking up special methods\n as the result of implicit invocation via language syntax or\n builtin functions. See *Special method lookup for new-style\n classes*.\n\n\nImplementing Descriptors\n========================\n\nThe following methods only apply when an instance of the class\ncontaining the method (a so-called *descriptor* class) appears in the\nclass dictionary of another new-style class, known as the *owner*\nclass. In the examples below, "the attribute" refers to the attribute\nwhose name is the key of the property in the owner class\'\n``__dict__``. Descriptors can only be implemented as new-style\nclasses themselves.\n\nobject.__get__(self, instance, owner)\n\n Called to get the attribute of the owner class (class attribute\n access) or of an instance of that class (instance attribute\n access). *owner* is always the owner class, while *instance* is the\n instance that the attribute was accessed through, or ``None`` when\n the attribute is accessed through the *owner*. This method should\n return the (computed) attribute value or raise an\n ``AttributeError`` exception.\n\nobject.__set__(self, instance, value)\n\n Called to set the attribute on an instance *instance* of the owner\n class to a new value, *value*.\n\nobject.__delete__(self, instance)\n\n Called to delete the attribute on an instance *instance* of the\n owner class.\n\n\nInvoking Descriptors\n====================\n\nIn general, a descriptor is an object attribute with "binding\nbehavior", one whose attribute access has been overridden by methods\nin the descriptor protocol: ``__get__()``, ``__set__()``, and\n``__delete__()``. If any of those methods are defined for an object,\nit is said to be a descriptor.\n\nThe default behavior for attribute access is to get, set, or delete\nthe attribute from an object\'s dictionary. For instance, ``a.x`` has a\nlookup chain starting with ``a.__dict__[\'x\']``, then\n``type(a).__dict__[\'x\']``, and continuing through the base classes of\n``type(a)`` excluding metaclasses.\n\nHowever, if the looked-up value is an object defining one of the\ndescriptor methods, then Python may override the default behavior and\ninvoke the descriptor method instead. Where this occurs in the\nprecedence chain depends on which descriptor methods were defined and\nhow they were called. 
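\n\nA minimal data descriptor, shown here as an editor\'s sketch (the\nnames ``Constant`` and ``C`` are invented for the illustration):\n\n   class Constant(object):\n       def __init__(self, value):\n           self.value = value\n       def __get__(self, instance, owner):\n           return self.value            # same value for class or instance access\n       def __set__(self, instance, value):\n           raise AttributeError("read-only attribute")\n\n   class C(object):\n       x = Constant(42)\n\n   print C().x                          # prints 42, via Constant.__get__()\n\n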
Note that descriptors are only invoked for new-\nstyle objects or classes (ones that subclass ``object()`` or\n``type()``).\n\nThe starting point for descriptor invocation is a binding, ``a.x``.\nHow the arguments are assembled depends on ``a``:\n\nDirect Call\n   The simplest and least common call is when user code directly\n   invokes a descriptor method: ``x.__get__(a)``.\n\nInstance Binding\n   If binding to a new-style object instance, ``a.x`` is transformed\n   into the call: ``type(a).__dict__[\'x\'].__get__(a, type(a))``.\n\nClass Binding\n   If binding to a new-style class, ``A.x`` is transformed into the\n   call: ``A.__dict__[\'x\'].__get__(None, A)``.\n\nSuper Binding\n   If ``a`` is an instance of ``super``, then the binding ``super(B,\n   obj).m()`` searches ``obj.__class__.__mro__`` for the base class\n   ``A`` immediately preceding ``B`` and then invokes the descriptor\n   with the call: ``A.__dict__[\'m\'].__get__(obj, A)``.\n\nFor instance bindings, the precedence of descriptor invocation depends\non which descriptor methods are defined. Normally, data\ndescriptors define both ``__get__()`` and ``__set__()``, while non-\ndata descriptors have just the ``__get__()`` method. Data descriptors\nalways override a redefinition in an instance dictionary. In\ncontrast, non-data descriptors can be overridden by instances. [2]\n\nPython methods (including ``staticmethod()`` and ``classmethod()``)\nare implemented as non-data descriptors. Accordingly, instances can\nredefine and override methods. This allows individual instances to\nacquire behaviors that differ from other instances of the same class.\n\nThe ``property()`` function is implemented as a data descriptor.\nAccordingly, instances cannot override the behavior of a property.\n\n\n__slots__\n=========\n\nBy default, instances of both old and new-style classes have a\ndictionary for attribute storage. This wastes space for objects\nhaving very few instance variables. The space consumption can become\nacute when creating large numbers of instances.\n\nThe default can be overridden by defining *__slots__* in a new-style\nclass definition. The *__slots__* declaration takes a sequence of\ninstance variables and reserves just enough space in each instance to\nhold a value for each variable. Space is saved because *__dict__* is\nnot created for each instance.\n\n__slots__\n\n   This class variable can be assigned a string, iterable, or sequence\n   of strings with variable names used by instances. If defined in a\n   new-style class, *__slots__* reserves space for the declared\n   variables and prevents the automatic creation of *__dict__* and\n   *__weakref__* for each instance.\n\n   New in version 2.2.\n\nNotes on using *__slots__*\n\n* When inheriting from a class without *__slots__*, the *__dict__*\n  attribute of that class will always be accessible, so a *__slots__*\n  definition in the subclass is meaningless.\n\n* Without a *__dict__* variable, instances cannot be assigned new\n  variables not listed in the *__slots__* definition. Attempts to\n  assign to an unlisted variable name raise ``AttributeError``. 
If\n  dynamic assignment of new variables is desired, then add\n  ``\'__dict__\'`` to the sequence of strings in the *__slots__*\n  declaration.\n\n  Changed in version 2.3: Previously, adding ``\'__dict__\'`` to the\n  *__slots__* declaration would not enable the assignment of new\n  attributes not specifically listed in the sequence of instance\n  variable names.\n\n* Without a *__weakref__* variable for each instance, classes defining\n  *__slots__* do not support weak references to their instances. If\n  weak reference support is needed, then add ``\'__weakref__\'`` to the\n  sequence of strings in the *__slots__* declaration.\n\n  Changed in version 2.3: Previously, adding ``\'__weakref__\'`` to the\n  *__slots__* declaration would not enable support for weak\n  references.\n\n* *__slots__* are implemented at the class level by creating\n  descriptors (*Implementing Descriptors*) for each variable name. As\n  a result, class attributes cannot be used to set default values for\n  instance variables defined by *__slots__*; otherwise, the class\n  attribute would overwrite the descriptor assignment.\n\n* If a class defines a slot also defined in a base class, the instance\n  variable defined by the base class slot is inaccessible (except by\n  retrieving its descriptor directly from the base class). This\n  renders the meaning of the program undefined. In the future, a\n  check may be added to prevent this.\n\n* The action of a *__slots__* declaration is limited to the class\n  where it is defined. As a result, subclasses will have a *__dict__*\n  unless they also define *__slots__*.\n\n* Nonempty *__slots__* does not work for classes derived from\n  "variable-length" built-in types such as ``long``, ``str`` and\n  ``tuple``.\n\n* Any non-string iterable may be assigned to *__slots__*. Mappings may\n  also be used; however, in the future, special meaning may be\n  assigned to the values corresponding to each key.\n\n* *__class__* assignment works only if both classes have the same\n  *__slots__*.\n\n  Changed in version 2.6: Previously, *__class__* assignment raised an\n  error if either new or old class had *__slots__*.\n', 'attribute-references': u'\nAttribute references\n********************\n\nAn attribute reference is a primary followed by a period and a name:\n\n   attributeref ::= primary "." identifier\n\nThe primary must evaluate to an object of a type that supports\nattribute references, e.g., a module, list, or an instance. This\nobject is then asked to produce the attribute whose name is the\nidentifier. If this attribute is not available, the exception\n``AttributeError`` is raised. Otherwise, the type and value of the\nobject produced are determined by the object. 
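\n\nFor example (an editor\'s sketch; any module attribute would do):\n\n   import sys\n   print sys.maxint                     # attribute reference on a module\n   try:\n       sys.no_such_name                 # hypothetical missing attribute\n   except AttributeError:\n       print \'AttributeError raised\'\n\n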
Multiple evaluations of\nthe same attribute reference may yield different objects.\n', 'augassign': u'\nAugmented assignment statements\n*******************************\n\nAugmented assignment is the combination, in a single statement, of a\nbinary operation and an assignment statement:\n\n augmented_assignment_stmt ::= augtarget augop (expression_list | yield_expression)\n augtarget ::= identifier | attributeref | subscription | slicing\n augop ::= "+=" | "-=" | "*=" | "/=" | "//=" | "%=" | "**="\n | ">>=" | "<<=" | "&=" | "^=" | "|="\n\n(See section *Primaries* for the syntax definitions for the last three\nsymbols.)\n\nAn augmented assignment evaluates the target (which, unlike normal\nassignment statements, cannot be an unpacking) and the expression\nlist, performs the binary operation specific to the type of assignment\non the two operands, and assigns the result to the original target.\nThe target is only evaluated once.\n\nAn augmented assignment expression like ``x += 1`` can be rewritten as\n``x = x + 1`` to achieve a similar, but not exactly equal effect. In\nthe augmented version, ``x`` is only evaluated once. Also, when\npossible, the actual operation is performed *in-place*, meaning that\nrather than creating a new object and assigning that to the target,\nthe old object is modified instead.\n\nWith the exception of assigning to tuples and multiple targets in a\nsingle statement, the assignment done by augmented assignment\nstatements is handled the same way as normal assignments. Similarly,\nwith the exception of the possible *in-place* behavior, the binary\noperation performed by augmented assignment is the same as the normal\nbinary operations.\n\nFor targets which are attribute references, the initial value is\nretrieved with a ``getattr()`` and the result is assigned with a\n``setattr()``. Notice that the two methods do not necessarily refer\nto the same variable. When ``getattr()`` refers to a class variable,\n``setattr()`` still writes to an instance variable. For example:\n\n class A:\n x = 3 # class variable\n a = A()\n a.x += 1 # writes a.x as 4 leaving A.x as 3\n', 'binary': u'\nBinary arithmetic operations\n****************************\n\nThe binary arithmetic operations have the conventional priority\nlevels. Note that some of these operations also apply to certain non-\nnumeric types. Apart from the power operator, there are only two\nlevels, one for multiplicative operators and one for additive\noperators:\n\n m_expr ::= u_expr | m_expr "*" u_expr | m_expr "//" u_expr | m_expr "/" u_expr\n | m_expr "%" u_expr\n a_expr ::= m_expr | a_expr "+" m_expr | a_expr "-" m_expr\n\nThe ``*`` (multiplication) operator yields the product of its\narguments. The arguments must either both be numbers, or one argument\nmust be an integer (plain or long) and the other must be a sequence.\nIn the former case, the numbers are converted to a common type and\nthen multiplied together. In the latter case, sequence repetition is\nperformed; a negative repetition factor yields an empty sequence.\n\nThe ``/`` (division) and ``//`` (floor division) operators yield the\nquotient of their arguments. The numeric arguments are first\nconverted to a common type. Plain or long integer division yields an\ninteger of the same type; the result is that of mathematical division\nwith the \'floor\' function applied to the result. Division by zero\nraises the ``ZeroDivisionError`` exception.\n\nThe ``%`` (modulo) operator yields the remainder from the division of\nthe first argument by the second. 
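\n\nA few concrete cases (editor\'s illustration, assuming Python 2\ninteger semantics without ``from __future__ import division``):\n\n   print 7 / 2      # prints 3  (integer division floors)\n   print -7 / 2     # prints -4 (floored, not truncated toward zero)\n   print 7 // 2     # prints 3\n   print 7 % 2      # prints 1\n   print -7 % 2     # prints 1  (sign follows the second operand)\n\n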
The numeric arguments are first\nconverted to a common type. A zero right argument raises the\n``ZeroDivisionError`` exception. The arguments may be floating point\nnumbers, e.g., ``3.14%0.7`` equals ``0.34`` (since ``3.14`` equals\n``4*0.7 + 0.34``.) The modulo operator always yields a result with\nthe same sign as its second operand (or zero); the absolute value of\nthe result is strictly smaller than the absolute value of the second\noperand [2].\n\nThe integer division and modulo operators are connected by the\nfollowing identity: ``x == (x/y)*y + (x%y)``. Integer division and\nmodulo are also connected with the built-in function ``divmod()``:\n``divmod(x, y) == (x/y, x%y)``. These identities don\'t hold for\nfloating point numbers; there similar identities hold approximately\nwhere ``x/y`` is replaced by ``floor(x/y)`` or ``floor(x/y) - 1`` [3].\n\nIn addition to performing the modulo operation on numbers, the ``%``\noperator is also overloaded by string and unicode objects to perform\nstring formatting (also known as interpolation). The syntax for string\nformatting is described in the Python Library Reference, section\n*String Formatting Operations*.\n\nDeprecated since version 2.3: The floor division operator, the modulo\noperator, and the ``divmod()`` function are no longer defined for\ncomplex numbers. Instead, convert to a floating point number using\nthe ``abs()`` function if appropriate.\n\nThe ``+`` (addition) operator yields the sum of its arguments. The\narguments must either both be numbers or both sequences of the same\ntype. In the former case, the numbers are converted to a common type\nand then added together. In the latter case, the sequences are\nconcatenated.\n\nThe ``-`` (subtraction) operator yields the difference of its\narguments. The numeric arguments are first converted to a common\ntype.\n', 'bitwise': u'\nBinary bitwise operations\n*************************\n\nEach of the three bitwise operations has a different priority level:\n\n and_expr ::= shift_expr | and_expr "&" shift_expr\n xor_expr ::= and_expr | xor_expr "^" and_expr\n or_expr ::= xor_expr | or_expr "|" xor_expr\n\nThe ``&`` operator yields the bitwise AND of its arguments, which must\nbe plain or long integers. The arguments are converted to a common\ntype.\n\nThe ``^`` operator yields the bitwise XOR (exclusive OR) of its\narguments, which must be plain or long integers. The arguments are\nconverted to a common type.\n\nThe ``|`` operator yields the bitwise (inclusive) OR of its arguments,\nwhich must be plain or long integers. The arguments are converted to\na common type.\n', 'bltin-code-objects': u'\nCode Objects\n************\n\nCode objects are used by the implementation to represent "pseudo-\ncompiled" executable Python code such as a function body. They differ\nfrom function objects because they don\'t contain a reference to their\nglobal execution environment. Code objects are returned by the built-\nin ``compile()`` function and can be extracted from function objects\nthrough their ``func_code`` attribute. See also the ``code`` module.\n\nA code object can be executed or evaluated by passing it (instead of a\nsource string) to the ``exec`` statement or the built-in ``eval()``\nfunction.\n\nSee *The standard type hierarchy* for more information.\n', 'bltin-ellipsis-object': u'\nThe Ellipsis Object\n*******************\n\nThis object is used by extended slice notation (see *Slicings*). It\nsupports no special operations. 
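\n\nA tiny sketch of where the object turns up (editor\'s illustration;\nthe class ``Grid`` is invented):\n\n   class Grid(object):\n       def __getitem__(self, key):\n           return key\n\n   g = Grid()\n   print g[..., 0]                      # prints (Ellipsis, 0)\n   print g[...] is Ellipsis             # prints True\n\n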
There is exactly one ellipsis object,\nnamed ``Ellipsis`` (a built-in name).\n\nIt is written as ``Ellipsis``.\n', 'bltin-file-objects': u'\nFile Objects\n************\n\nFile objects are implemented using C\'s ``stdio`` package and can be\ncreated with the built-in ``open()`` function. File objects are also\nreturned by some other built-in functions and methods, such as\n``os.popen()`` and ``os.fdopen()`` and the ``makefile()`` method of\nsocket objects. Temporary files can be created using the ``tempfile``\nmodule, and high-level file operations such as copying, moving, and\ndeleting files and directories can be achieved with the ``shutil``\nmodule.\n\nWhen a file operation fails for an I/O-related reason, the exception\n``IOError`` is raised. This includes situations where the operation\nis not defined for some reason, like ``seek()`` on a tty device or\nwriting a file opened for reading.\n\nFiles have the following methods:\n\nfile.close()\n\n Close the file. A closed file cannot be read or written any more.\n Any operation which requires that the file be open will raise a\n ``ValueError`` after the file has been closed. Calling ``close()``\n more than once is allowed.\n\n As of Python 2.5, you can avoid having to call this method\n explicitly if you use the ``with`` statement. For example, the\n following code will automatically close *f* when the ``with`` block\n is exited:\n\n from __future__ import with_statement # This isn\'t required in Python 2.6\n\n with open("hello.txt") as f:\n for line in f:\n print line\n\n In older versions of Python, you would have needed to do this to\n get the same effect:\n\n f = open("hello.txt")\n try:\n for line in f:\n print line\n finally:\n f.close()\n\n Note: Not all "file-like" types in Python support use as a context\n manager for the ``with`` statement. If your code is intended to\n work with any file-like object, you can use the function\n ``contextlib.closing()`` instead of using the object directly.\n\nfile.flush()\n\n Flush the internal buffer, like ``stdio``\'s ``fflush``. This may\n be a no-op on some file-like objects.\n\nfile.fileno()\n\n Return the integer "file descriptor" that is used by the underlying\n implementation to request I/O operations from the operating system.\n This can be useful for other, lower level interfaces that use file\n descriptors, such as the ``fcntl`` module or ``os.read()`` and\n friends.\n\n Note: File-like objects which do not have a real file descriptor should\n *not* provide this method!\n\nfile.isatty()\n\n Return ``True`` if the file is connected to a tty(-like) device,\n else ``False``.\n\n Note: If a file-like object is not associated with a real file, this\n method should *not* be implemented.\n\nfile.next()\n\n A file object is its own iterator, for example ``iter(f)`` returns\n *f* (unless *f* is closed). When a file is used as an iterator,\n typically in a ``for`` loop (for example, ``for line in f: print\n line``), the ``next()`` method is called repeatedly. This method\n returns the next input line, or raises ``StopIteration`` when EOF\n is hit when the file is open for reading (behavior is undefined\n when the file is open for writing). In order to make a ``for``\n loop the most efficient way of looping over the lines of a file (a\n very common operation), the ``next()`` method uses a hidden read-\n ahead buffer. As a consequence of using a read-ahead buffer,\n combining ``next()`` with other file methods (like ``readline()``)\n does not work right. 
However, using ``seek()`` to reposition the\n file to an absolute position will flush the read-ahead buffer.\n\n New in version 2.3.\n\nfile.read([size])\n\n Read at most *size* bytes from the file (less if the read hits EOF\n before obtaining *size* bytes). If the *size* argument is negative\n or omitted, read all data until EOF is reached. The bytes are\n returned as a string object. An empty string is returned when EOF\n is encountered immediately. (For certain files, like ttys, it\n makes sense to continue reading after an EOF is hit.) Note that\n this method may call the underlying C function ``fread`` more than\n once in an effort to acquire as close to *size* bytes as possible.\n Also note that when in non-blocking mode, less data than was\n requested may be returned, even if no *size* parameter was given.\n\n Note: This function is simply a wrapper for the underlying ``fread`` C\n function, and will behave the same in corner cases, such as\n whether the EOF value is cached.\n\nfile.readline([size])\n\n Read one entire line from the file. A trailing newline character\n is kept in the string (but may be absent when a file ends with an\n incomplete line). [6] If the *size* argument is present and non-\n negative, it is a maximum byte count (including the trailing\n newline) and an incomplete line may be returned. An empty string is\n returned *only* when EOF is encountered immediately.\n\n Note: Unlike ``stdio``\'s ``fgets``, the returned string contains null\n characters (``\'\\0\'``) if they occurred in the input.\n\nfile.readlines([sizehint])\n\n Read until EOF using ``readline()`` and return a list containing\n the lines thus read. If the optional *sizehint* argument is\n present, instead of reading up to EOF, whole lines totalling\n approximately *sizehint* bytes (possibly after rounding up to an\n internal buffer size) are read. Objects implementing a file-like\n interface may choose to ignore *sizehint* if it cannot be\n implemented, or cannot be implemented efficiently.\n\nfile.xreadlines()\n\n This method returns the same thing as ``iter(f)``.\n\n New in version 2.1.\n\n Deprecated since version 2.3: Use ``for line in file`` instead.\n\nfile.seek(offset[, whence])\n\n Set the file\'s current position, like ``stdio``\'s ``fseek``. The\n *whence* argument is optional and defaults to ``os.SEEK_SET`` or\n ``0`` (absolute file positioning); other values are ``os.SEEK_CUR``\n or ``1`` (seek relative to the current position) and\n ``os.SEEK_END`` or ``2`` (seek relative to the file\'s end). There\n is no return value.\n\n For example, ``f.seek(2, os.SEEK_CUR)`` advances the position by\n two and ``f.seek(-3, os.SEEK_END)`` sets the position to the third\n to last.\n\n Note that if the file is opened for appending (mode ``\'a\'`` or\n ``\'a+\'``), any ``seek()`` operations will be undone at the next\n write. If the file is only opened for writing in append mode (mode\n ``\'a\'``), this method is essentially a no-op, but it remains useful\n for files opened in append mode with reading enabled (mode\n ``\'a+\'``). If the file is opened in text mode (without ``\'b\'``),\n only offsets returned by ``tell()`` are legal. 
Use of other\n offsets causes undefined behavior.\n\n Note that not all file objects are seekable.\n\n Changed in version 2.6: Passing float values as offset has been\n deprecated.\n\nfile.tell()\n\n Return the file\'s current position, like ``stdio``\'s ``ftell``.\n\n Note: On Windows, ``tell()`` can return illegal values (after an\n ``fgets``) when reading files with Unix-style line-endings. Use\n binary mode (``\'rb\'``) to circumvent this problem.\n\nfile.truncate([size])\n\n Truncate the file\'s size. If the optional *size* argument is\n present, the file is truncated to (at most) that size. The size\n defaults to the current position. The current file position is not\n changed. Note that if a specified size exceeds the file\'s current\n size, the result is platform-dependent: possibilities include that\n the file may remain unchanged, increase to the specified size as if\n zero-filled, or increase to the specified size with undefined new\n content. Availability: Windows, many Unix variants.\n\nfile.write(str)\n\n Write a string to the file. There is no return value. Due to\n buffering, the string may not actually show up in the file until\n the ``flush()`` or ``close()`` method is called.\n\nfile.writelines(sequence)\n\n Write a sequence of strings to the file. The sequence can be any\n iterable object producing strings, typically a list of strings.\n There is no return value. (The name is intended to match\n ``readlines()``; ``writelines()`` does not add line separators.)\n\nFiles support the iterator protocol. Each iteration returns the same\nresult as ``file.readline()``, and iteration ends when the\n``readline()`` method returns an empty string.\n\nFile objects also offer a number of other interesting attributes.\nThese are not required for file-like objects, but should be\nimplemented if they make sense for the particular object.\n\nfile.closed\n\n bool indicating the current state of the file object. This is a\n read-only attribute; the ``close()`` method changes the value. It\n may not be available on all file-like objects.\n\nfile.encoding\n\n The encoding that this file uses. When Unicode strings are written\n to a file, they will be converted to byte strings using this\n encoding. In addition, when the file is connected to a terminal,\n the attribute gives the encoding that the terminal is likely to use\n (that information might be incorrect if the user has misconfigured\n the terminal). The attribute is read-only and may not be present\n on all file-like objects. It may also be ``None``, in which case\n the file uses the system default encoding for converting Unicode\n strings.\n\n New in version 2.3.\n\nfile.errors\n\n The Unicode error handler used along with the encoding.\n\n New in version 2.6.\n\nfile.mode\n\n The I/O mode for the file. If the file was created using the\n ``open()`` built-in function, this will be the value of the *mode*\n parameter. This is a read-only attribute and may not be present on\n all file-like objects.\n\nfile.name\n\n If the file object was created using ``open()``, the name of the\n file. Otherwise, some string that indicates the source of the file\n object, of the form ``<...>``. This is a read-only attribute and\n may not be present on all file-like objects.\n\nfile.newlines\n\n If Python was built with the *--with-universal-newlines* option to\n **configure** (the default) this read-only attribute exists, and\n for files opened in universal newline read mode it keeps track of\n the types of newlines encountered while reading the file. 
The\n values it can take are ``\'\\r\'``, ``\'\\n\'``, ``\'\\r\\n\'``, ``None``\n (unknown, no newlines read yet) or a tuple containing all the\n newline types seen, to indicate that multiple newline conventions\n were encountered. For files not opened in universal newline read\n mode the value of this attribute will be ``None``.\n\nfile.softspace\n\n Boolean that indicates whether a space character needs to be\n printed before another value when using the ``print`` statement.\n Classes that are trying to simulate a file object should also have\n a writable ``softspace`` attribute, which should be initialized to\n zero. This will be automatic for most classes implemented in\n Python (care may be needed for objects that override attribute\n access); types implemented in C will have to provide a writable\n ``softspace`` attribute.\n\n Note: This attribute is not used to control the ``print`` statement,\n but to allow the implementation of ``print`` to keep track of its\n internal state.\n', 'bltin-null-object': u"\nThe Null Object\n***************\n\nThis object is returned by functions that don't explicitly return a\nvalue. It supports no special operations. There is exactly one null\nobject, named ``None`` (a built-in name).\n\nIt is written as ``None``.\n", 'bltin-type-objects': u"\nType Objects\n************\n\nType objects represent the various object types. An object's type is\naccessed by the built-in function ``type()``. There are no special\noperations on types. The standard module ``types`` defines names for\nall standard built-in types.\n\nTypes are written like this: ``<type 'int'>``.\n", 'booleans': u'\nBoolean operations\n******************\n\nBoolean operations have the lowest priority of all Python operations:\n\n expression ::= conditional_expression | lambda_form\n old_expression ::= or_test | old_lambda_form\n conditional_expression ::= or_test ["if" or_test "else" expression]\n or_test ::= and_test | or_test "or" and_test\n and_test ::= not_test | and_test "and" not_test\n not_test ::= comparison | "not" not_test\n\nIn the context of Boolean operations, and also when expressions are\nused by control flow statements, the following values are interpreted\nas false: ``False``, ``None``, numeric zero of all types, and empty\nstrings and containers (including strings, tuples, lists,\ndictionaries, sets and frozensets). All other values are interpreted\nas true. (See the ``__nonzero__()`` special method for a way to\nchange this.)\n\nThe operator ``not`` yields ``True`` if its argument is false,\n``False`` otherwise.\n\nThe expression ``x if C else y`` first evaluates *C* (*not* *x*); if\n*C* is true, *x* is evaluated and its value is returned; otherwise,\n*y* is evaluated and its value is returned.\n\nNew in version 2.5.\n\nThe expression ``x and y`` first evaluates *x*; if *x* is false, its\nvalue is returned; otherwise, *y* is evaluated and the resulting value\nis returned.\n\nThe expression ``x or y`` first evaluates *x*; if *x* is true, its\nvalue is returned; otherwise, *y* is evaluated and the resulting value\nis returned.\n\n(Note that neither ``and`` nor ``or`` restrict the value and type they\nreturn to ``False`` and ``True``, but rather return the last evaluated\nargument. This is sometimes useful, e.g., if ``s`` is a string that\nshould be replaced by a default value if it is empty, the expression\n``s or \'foo\'`` yields the desired value. 
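\n\nFor instance (editor\'s illustration):\n\n   s = \'\'\n   print s or \'foo\'     # prints foo (the last evaluated argument)\n   print 0 and 5        # prints 0 (short-circuits on the false operand)\n   print 3 and 5        # prints 5\n\n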
Because ``not`` has to\ninvent a value anyway, it does not bother to return a value of the\nsame type as its argument, so e.g., ``not \'foo\'`` yields ``False``,\nnot ``\'\'``.)\n', 'break': u'\nThe ``break`` statement\n***********************\n\n break_stmt ::= "break"\n\n``break`` may only occur syntactically nested in a ``for`` or\n``while`` loop, but not nested in a function or class definition\nwithin that loop.\n\nIt terminates the nearest enclosing loop, skipping the optional\n``else`` clause if the loop has one.\n\nIf a ``for`` loop is terminated by ``break``, the loop control target\nkeeps its current value.\n\nWhen ``break`` passes control out of a ``try`` statement with a\n``finally`` clause, that ``finally`` clause is executed before really\nleaving the loop.\n', 'callable-types': u'\nEmulating callable objects\n**************************\n\nobject.__call__(self[, args...])\n\n Called when the instance is "called" as a function; if this method\n is defined, ``x(arg1, arg2, ...)`` is a shorthand for\n ``x.__call__(arg1, arg2, ...)``.\n', 'calls': u'\nCalls\n*****\n\nA call calls a callable object (e.g., a function) with a possibly\nempty series of arguments:\n\n call ::= primary "(" [argument_list [","]\n | expression genexpr_for] ")"\n argument_list ::= positional_arguments ["," keyword_arguments]\n ["," "*" expression] ["," keyword_arguments]\n ["," "**" expression]\n | keyword_arguments ["," "*" expression]\n ["," "**" expression]\n | "*" expression ["," "*" expression] ["," "**" expression]\n | "**" expression\n positional_arguments ::= expression ("," expression)*\n keyword_arguments ::= keyword_item ("," keyword_item)*\n keyword_item ::= identifier "=" expression\n\nA trailing comma may be present after the positional and keyword\narguments but does not affect the semantics.\n\nThe primary must evaluate to a callable object (user-defined\nfunctions, built-in functions, methods of built-in objects, class\nobjects, methods of class instances, and certain class instances\nthemselves are callable; extensions may define additional callable\nobject types). All argument expressions are evaluated before the call\nis attempted. Please refer to section *Function definitions* for the\nsyntax of formal parameter lists.\n\nIf keyword arguments are present, they are first converted to\npositional arguments, as follows. First, a list of unfilled slots is\ncreated for the formal parameters. If there are N positional\narguments, they are placed in the first N slots. Next, for each\nkeyword argument, the identifier is used to determine the\ncorresponding slot (if the identifier is the same as the first formal\nparameter name, the first slot is used, and so on). If the slot is\nalready filled, a ``TypeError`` exception is raised. Otherwise, the\nvalue of the argument is placed in the slot, filling it (even if the\nexpression is ``None``, it fills the slot). When all arguments have\nbeen processed, the slots that are still unfilled are filled with the\ncorresponding default value from the function definition. (Default\nvalues are calculated, once, when the function is defined; thus, a\nmutable object such as a list or dictionary used as default value will\nbe shared by all calls that don\'t specify an argument value for the\ncorresponding slot; this should usually be avoided.) If there are any\nunfilled slots for which no default value is specified, a\n``TypeError`` exception is raised. 
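\n\n(A brief editor\'s demonstration of the note above about default\nvalues being computed only once:\n\n   def append_to(item, seq=[]):         # one list, created at definition time\n       seq.append(item)\n       return seq\n\n   print append_to(1)                   # prints [1]\n   print append_to(2)                   # prints [1, 2] -- the same list again\n\nUsing a ``None`` default and creating the list inside the body avoids\nthe shared state.)\n\n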
Otherwise, the list of filled\nslots is used as the argument list for the call.\n\nNote: An implementation may provide builtin functions whose positional\n parameters do not have names, even if they are \'named\' for the\n purpose of documentation, and which therefore cannot be supplied by\n keyword. In CPython, this is the case for functions implemented in\n C that use ``PyArg_ParseTuple`` to parse their arguments.\n\nIf there are more positional arguments than there are formal parameter\nslots, a ``TypeError`` exception is raised, unless a formal parameter\nusing the syntax ``*identifier`` is present; in this case, that formal\nparameter receives a tuple containing the excess positional arguments\n(or an empty tuple if there were no excess positional arguments).\n\nIf any keyword argument does not correspond to a formal parameter\nname, a ``TypeError`` exception is raised, unless a formal parameter\nusing the syntax ``**identifier`` is present; in this case, that\nformal parameter receives a dictionary containing the excess keyword\narguments (using the keywords as keys and the argument values as\ncorresponding values), or a (new) empty dictionary if there were no\nexcess keyword arguments.\n\nIf the syntax ``*expression`` appears in the function call,\n``expression`` must evaluate to a sequence. Elements from this\nsequence are treated as if they were additional positional arguments;\nif there are positional arguments *x1*,..., *xN*, and ``expression``\nevaluates to a sequence *y1*, ..., *yM*, this is equivalent to a call\nwith M+N positional arguments *x1*, ..., *xN*, *y1*, ..., *yM*.\n\nA consequence of this is that although the ``*expression`` syntax may\nappear *after* some keyword arguments, it is processed *before* the\nkeyword arguments (and the ``**expression`` argument, if any -- see\nbelow). So:\n\n >>> def f(a, b):\n ... print a, b\n ...\n >>> f(b=1, *(2,))\n 2 1\n >>> f(a=1, *(2,))\n Traceback (most recent call last):\n File "<stdin>", line 1, in ?\n TypeError: f() got multiple values for keyword argument \'a\'\n >>> f(1, *(2,))\n 1 2\n\nIt is unusual for both keyword arguments and the ``*expression``\nsyntax to be used in the same call, so in practice this confusion does\nnot arise.\n\nIf the syntax ``**expression`` appears in the function call,\n``expression`` must evaluate to a mapping, the contents of which are\ntreated as additional keyword arguments. In the case of a keyword\nappearing in both ``expression`` and as an explicit keyword argument,\na ``TypeError`` exception is raised.\n\nFormal parameters using the syntax ``*identifier`` or ``**identifier``\ncannot be used as positional argument slots or as keyword argument\nnames. Formal parameters using the syntax ``(sublist)`` cannot be\nused as keyword argument names; the outermost sublist corresponds to a\nsingle unnamed argument slot, and the argument value is assigned to\nthe sublist using the usual tuple assignment rules after all other\nparameter processing is done.\n\nA call always returns some value, possibly ``None``, unless it raises\nan exception. How this value is computed depends on the type of the\ncallable object.\n\nIf it is---\n\na user-defined function:\n The code block for the function is executed, passing it the\n argument list. The first thing the code block will do is bind the\n formal parameters to the arguments; this is described in section\n *Function definitions*. 
When the code block executes a ``return``\n statement, this specifies the return value of the function call.\n\na built-in function or method:\n The result is up to the interpreter; see *Built-in Functions* for\n the descriptions of built-in functions and methods.\n\na class object:\n A new instance of that class is returned.\n\na class instance method:\n The corresponding user-defined function is called, with an argument\n list that is one longer than the argument list of the call: the\n instance becomes the first argument.\n\na class instance:\n The class must define a ``__call__()`` method; the effect is then\n the same as if that method was called.\n', 'class': u'\nClass definitions\n*****************\n\nA class definition defines a class object (see section *The standard\ntype hierarchy*):\n\n classdef ::= "class" classname [inheritance] ":" suite\n inheritance ::= "(" [expression_list] ")"\n classname ::= identifier\n\nA class definition is an executable statement. It first evaluates the\ninheritance list, if present. Each item in the inheritance list\nshould evaluate to a class object or class type which allows\nsubclassing. The class\'s suite is then executed in a new execution\nframe (see section *Naming and binding*), using a newly created local\nnamespace and the original global namespace. (Usually, the suite\ncontains only function definitions.) When the class\'s suite finishes\nexecution, its execution frame is discarded but its local namespace is\nsaved. [4] A class object is then created using the inheritance list\nfor the base classes and the saved local namespace for the attribute\ndictionary. The class name is bound to this class object in the\noriginal local namespace.\n\n**Programmer\'s note:** Variables defined in the class definition are\nclass variables; they are shared by all instances. To create instance\nvariables, they can be set in a method with ``self.name = value``.\nBoth class and instance variables are accessible through the notation\n"``self.name``", and an instance variable hides a class variable with\nthe same name when accessed in this way. Class variables can be used\nas defaults for instance variables, but using mutable values there can\nlead to unexpected results. For *new-style class*es, descriptors can\nbe used to create instance variables with different implementation\ndetails.\n\nClass definitions, like function definitions, may be wrapped by one or\nmore *decorator* expressions. The evaluation rules for the decorator\nexpressions are the same as for functions. The result must be a class\nobject, which is then bound to the class name.\n\n-[ Footnotes ]-\n\n[1] The exception is propagated to the invocation stack only if there\n is no ``finally`` clause that negates the exception.\n\n[2] Currently, control "flows off the end" except in the case of an\n exception or the execution of a ``return``, ``continue``, or\n ``break`` statement.\n\n[3] A string literal appearing as the first statement in the function\n body is transformed into the function\'s ``__doc__`` attribute and\n therefore the function\'s *docstring*.\n\n[4] A string literal appearing as the first statement in the class\n body is transformed into the namespace\'s ``__doc__`` item and\n therefore the class\'s *docstring*.\n', 'coercion-rules': u"\nCoercion rules\n**************\n\nThis section used to document the rules for coercion. 
As the language\nhas evolved, the coercion rules have become hard to document\nprecisely; documenting what one version of one particular\nimplementation does is undesirable. Instead, here are some informal\nguidelines regarding coercion. In Python 3.0, coercion will not be\nsupported.\n\n* If the left operand of a % operator is a string or Unicode object,\n no coercion takes place and the string formatting operation is\n invoked instead.\n\n* It is no longer recommended to define a coercion operation. Mixed-\n mode operations on types that don't define coercion pass the\n original arguments to the operation.\n\n* New-style classes (those derived from ``object``) never invoke the\n ``__coerce__()`` method in response to a binary operator; the only\n time ``__coerce__()`` is invoked is when the built-in function\n ``coerce()`` is called.\n\n* For most intents and purposes, an operator that returns\n ``NotImplemented`` is treated the same as one that is not\n implemented at all.\n\n* Below, ``__op__()`` and ``__rop__()`` are used to signify the\n generic method names corresponding to an operator; ``__iop__()`` is\n used for the corresponding in-place operator. For example, for the\n operator '``+``', ``__add__()`` and ``__radd__()`` are used for the\n left and right variant of the binary operator, and ``__iadd__()``\n for the in-place variant.\n\n* For objects *x* and *y*, first ``x.__op__(y)`` is tried. If this is\n not implemented or returns ``NotImplemented``, ``y.__rop__(x)`` is\n tried. If this is also not implemented or returns\n ``NotImplemented``, a ``TypeError`` exception is raised. But see\n the following exception:\n\n* Exception to the previous item: if the left operand is an instance\n of a built-in type or a new-style class, and the right operand is an\n instance of a proper subclass of that type or class and overrides\n the base's ``__rop__()`` method, the right operand's ``__rop__()``\n method is tried *before* the left operand's ``__op__()`` method.\n\n This is done so that a subclass can completely override binary\n operators. Otherwise, the left operand's ``__op__()`` method would\n always accept the right operand: when an instance of a given class\n is expected, an instance of a subclass of that class is always\n acceptable.\n\n* When either operand type defines a coercion, this coercion is called\n before that type's ``__op__()`` or ``__rop__()`` method is called,\n but no sooner. If the coercion returns an object of a different\n type for the operand whose coercion is invoked, part of the process\n is redone using the new object.\n\n* When an in-place operator (like '``+=``') is used, if the left\n operand implements ``__iop__()``, it is invoked without any\n coercion. When the operation falls back to ``__op__()`` and/or\n ``__rop__()``, the normal coercion rules apply.\n\n* In ``x + y``, if *x* is a sequence that implements sequence\n concatenation, sequence concatenation is invoked.\n\n* In ``x * y``, if one operator is a sequence that implements sequence\n repetition, and the other is an integer (``int`` or ``long``),\n sequence repetition is invoked.\n\n* Rich comparisons (implemented by methods ``__eq__()`` and so on)\n never use coercion. 
Three-way comparison (implemented by\n ``__cmp__()``) does use coercion under the same conditions as other\n binary operations use it.\n\n* In the current implementation, the built-in numeric types ``int``,\n ``long`` and ``float`` do not use coercion; the type ``complex``\n however does use coercion for binary operators and rich comparisons,\n despite the above rules. The difference can become apparent when\n subclassing these types. Over time, the type ``complex`` may be\n fixed to avoid coercion. All these types implement a\n ``__coerce__()`` method, for use by the built-in ``coerce()``\n function.\n", 'comparisons': u'\nComparisons\n***********\n\nUnlike C, all comparison operations in Python have the same priority,\nwhich is lower than that of any arithmetic, shifting or bitwise\noperation. Also unlike C, expressions like ``a < b < c`` have the\ninterpretation that is conventional in mathematics:\n\n comparison ::= or_expr ( comp_operator or_expr )*\n comp_operator ::= "<" | ">" | "==" | ">=" | "<=" | "<>" | "!="\n | "is" ["not"] | ["not"] "in"\n\nComparisons yield boolean values: ``True`` or ``False``.\n\nComparisons can be chained arbitrarily, e.g., ``x < y <= z`` is\nequivalent to ``x < y and y <= z``, except that ``y`` is evaluated\nonly once (but in both cases ``z`` is not evaluated at all when ``x <\ny`` is found to be false).\n\nFormally, if *a*, *b*, *c*, ..., *y*, *z* are expressions and *op1*,\n*op2*, ..., *opN* are comparison operators, then ``a op1 b op2 c ... y\nopN z`` is equivalent to ``a op1 b and b op2 c and ... y opN z``,\nexcept that each expression is evaluated at most once.\n\nNote that ``a op1 b op2 c`` doesn\'t imply any kind of comparison\nbetween *a* and *c*, so that, e.g., ``x < y > z`` is perfectly legal\n(though perhaps not pretty).\n\nThe forms ``<>`` and ``!=`` are equivalent; for consistency with C,\n``!=`` is preferred; where ``!=`` is mentioned below ``<>`` is also\naccepted. The ``<>`` spelling is considered obsolescent.\n\nThe operators ``<``, ``>``, ``==``, ``>=``, ``<=``, and ``!=`` compare\nthe values of two objects. The objects need not have the same type.\nIf both are numbers, they are converted to a common type. Otherwise,\nobjects of different types *always* compare unequal, and are ordered\nconsistently but arbitrarily. You can control comparison behavior of\nobjects of non-builtin types by defining a ``__cmp__`` method or rich\ncomparison methods like ``__gt__``, described in section *Special\nmethod names*.\n\n(This unusual definition of comparison was used to simplify the\ndefinition of operations like sorting and the ``in`` and ``not in``\noperators. In the future, the comparison rules for objects of\ndifferent types are likely to change.)\n\nComparison of objects of the same type depends on the type:\n\n* Numbers are compared arithmetically.\n\n* Strings are compared lexicographically using the numeric equivalents\n (the result of the built-in function ``ord()``) of their characters.\n Unicode and 8-bit strings are fully interoperable in this behavior.\n [4]\n\n* Tuples and lists are compared lexicographically using comparison of\n corresponding elements. This means that to compare equal, each\n element must compare equal and the two sequences must be of the same\n type and have the same length.\n\n If not equal, the sequences are ordered the same as their first\n differing elements. For example, ``cmp([1,2,x], [1,2,y])`` returns\n the same as ``cmp(x,y)``. 
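\n\n  For instance (editor\'s illustration):\n\n     x, y = 0, 5\n     print cmp([1, 2, x], [1, 2, y]) == cmp(x, y)   # prints True (both are -1)\n\n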
  If the corresponding element does not\n  exist, the shorter sequence is ordered first (for example, ``[1,2] <\n  [1,2,3]``).\n\n* Mappings (dictionaries) compare equal if and only if their sorted\n  (key, value) lists compare equal. [5] Outcomes other than equality\n  are resolved consistently, but are not otherwise defined. [6]\n\n* Most other objects of builtin types compare unequal unless they are\n  the same object; the choice whether one object is considered smaller\n  or larger than another one is made arbitrarily but consistently\n  within one execution of a program.\n\nThe operators ``in`` and ``not in`` test for collection membership.\n``x in s`` evaluates to true if *x* is a member of the collection *s*,\nand false otherwise. ``x not in s`` returns the negation of ``x in\ns``. The collection membership test has traditionally been bound to\nsequences; an object is a member of a collection if the collection is\na sequence and contains an element equal to that object. However, it\nmakes sense for many other object types to support membership tests\nwithout being a sequence. In particular, dictionaries (for keys) and\nsets support membership testing.\n\nFor the list and tuple types, ``x in y`` is true if and only if there\nexists an index *i* such that ``x == y[i]`` is true.\n\nFor the Unicode and string types, ``x in y`` is true if and only if\n*x* is a substring of *y*. An equivalent test is ``y.find(x) != -1``.\nNote that *x* and *y* need not be the same type; consequently, ``u\'ab\'\nin \'abc\'`` will return ``True``. Empty strings are always considered\nto be a substring of any other string, so ``"" in "abc"`` will return\n``True``.\n\nChanged in version 2.3: Previously, *x* was required to be a string of\nlength ``1``.\n\nFor user-defined classes which define the ``__contains__()`` method,\n``x in y`` is true if and only if ``y.__contains__(x)`` is true.\n\nFor user-defined classes which do not define ``__contains__()`` and do\ndefine ``__getitem__()``, ``x in y`` is true if and only if there is a\nnon-negative integer index *i* such that ``x == y[i]``, and all lower\ninteger indices do not raise an ``IndexError`` exception. (If any\nother exception is raised, it is as if ``in`` raised that exception.)\n\nThe operator ``not in`` is defined to have the inverse truth value of\n``in``.\n\nThe operators ``is`` and ``is not`` test for object identity: ``x is\ny`` is true if and only if *x* and *y* are the same object. ``x is\nnot y`` yields the inverse truth value. [7]\n', 'compound': u'\nCompound statements\n*******************\n\nCompound statements contain (groups of) other statements; they affect\nor control the execution of those other statements in some way. In\ngeneral, compound statements span multiple lines, although in simple\nincarnations a whole compound statement may be contained in one line.\n\nThe ``if``, ``while`` and ``for`` statements implement traditional\ncontrol flow constructs. ``try`` specifies exception handlers and/or\ncleanup code for a group of statements. Function and class\ndefinitions are also syntactically compound statements.\n\nCompound statements consist of one or more \'clauses.\' A clause\nconsists of a header and a \'suite.\' The clause headers of a\nparticular compound statement are all at the same indentation level.\nEach clause header begins with a uniquely identifying keyword and ends\nwith a colon. A suite is a group of statements controlled by a\nclause. 
A suite can be one or more semicolon-separated simple\nstatements on the same line as the header, following the header\'s\ncolon, or it can be one or more indented statements on subsequent\nlines. Only the latter form of suite can contain nested compound\nstatements; the following is illegal, mostly because it wouldn\'t be\nclear to which ``if`` clause a following ``else`` clause would belong:\n\n if test1: if test2: print x\n\nAlso note that the semicolon binds tighter than the colon in this\ncontext, so that in the following example, either all or none of the\n``print`` statements are executed:\n\n if x < y < z: print x; print y; print z\n\nSummarizing:\n\n compound_stmt ::= if_stmt\n | while_stmt\n | for_stmt\n | try_stmt\n | with_stmt\n | funcdef\n | classdef\n | decorated\n suite ::= stmt_list NEWLINE | NEWLINE INDENT statement+ DEDENT\n statement ::= stmt_list NEWLINE | compound_stmt\n stmt_list ::= simple_stmt (";" simple_stmt)* [";"]\n\nNote that statements always end in a ``NEWLINE`` possibly followed by\na ``DEDENT``. Also note that optional continuation clauses always\nbegin with a keyword that cannot start a statement, thus there are no\nambiguities (the \'dangling ``else``\' problem is solved in Python by\nrequiring nested ``if`` statements to be indented).\n\nThe formatting of the grammar rules in the following sections places\neach clause on a separate line for clarity.\n\n\nThe ``if`` statement\n====================\n\nThe ``if`` statement is used for conditional execution:\n\n if_stmt ::= "if" expression ":" suite\n ( "elif" expression ":" suite )*\n ["else" ":" suite]\n\nIt selects exactly one of the suites by evaluating the expressions one\nby one until one is found to be true (see section *Boolean operations*\nfor the definition of true and false); then that suite is executed\n(and no other part of the ``if`` statement is executed or evaluated).\nIf all expressions are false, the suite of the ``else`` clause, if\npresent, is executed.\n\n\nThe ``while`` statement\n=======================\n\nThe ``while`` statement is used for repeated execution as long as an\nexpression is true:\n\n while_stmt ::= "while" expression ":" suite\n ["else" ":" suite]\n\nThis repeatedly tests the expression and, if it is true, executes the\nfirst suite; if the expression is false (which may be the first time\nit is tested) the suite of the ``else`` clause, if present, is\nexecuted and the loop terminates.\n\nA ``break`` statement executed in the first suite terminates the loop\nwithout executing the ``else`` clause\'s suite. A ``continue``\nstatement executed in the first suite skips the rest of the suite and\ngoes back to testing the expression.\n\n\nThe ``for`` statement\n=====================\n\nThe ``for`` statement is used to iterate over the elements of a\nsequence (such as a string, tuple or list) or other iterable object:\n\n for_stmt ::= "for" target_list "in" expression_list ":" suite\n ["else" ":" suite]\n\nThe expression list is evaluated once; it should yield an iterable\nobject. An iterator is created for the result of the\n``expression_list``. The suite is then executed once for each item\nprovided by the iterator, in the order of ascending indices. Each\nitem in turn is assigned to the target list using the standard rules\nfor assignments, and then the suite is executed. 
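\n\nA classic sketch of the loop together with the ``else`` clause\ndescribed next (editor\'s illustration):\n\n   for n in range(2, 10):\n       for x in range(2, n):\n           if n % x == 0:\n               print n, \'equals\', x, \'*\', n / x\n               break\n       else:\n           print n, \'is a prime number\'\n\n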
When the items are\nexhausted (which is immediately when the sequence is empty), the suite\nin the ``else`` clause, if present, is executed, and the loop\nterminates.\n\nA ``break`` statement executed in the first suite terminates the loop\nwithout executing the ``else`` clause\'s suite. A ``continue``\nstatement executed in the first suite skips the rest of the suite and\ncontinues with the next item, or with the ``else`` clause if there was\nno next item.\n\nThe suite may assign to the variable(s) in the target list; this does\nnot affect the next item assigned to it.\n\nThe target list is not deleted when the loop is finished, but if the\nsequence is empty, it will not have been assigned to at all by the\nloop. Hint: the built-in function ``range()`` returns a sequence of\nintegers suitable to emulate the effect of Pascal\'s ``for i := a to b\ndo``; e.g., ``range(3)`` returns the list ``[0, 1, 2]``.\n\nWarning: There is a subtlety when the sequence is being modified by the loop\n (this can only occur for mutable sequences, i.e. lists). An internal\n counter is used to keep track of which item is used next, and this\n is incremented on each iteration. When this counter has reached the\n length of the sequence the loop terminates. This means that if the\n suite deletes the current (or a previous) item from the sequence,\n the next item will be skipped (since it gets the index of the\n current item which has already been treated). Likewise, if the\n suite inserts an item in the sequence before the current item, the\n current item will be treated again the next time through the loop.\n This can lead to nasty bugs that can be avoided by making a\n temporary copy using a slice of the whole sequence, e.g.,\n\n for x in a[:]:\n if x < 0: a.remove(x)\n\n\nThe ``try`` statement\n=====================\n\nThe ``try`` statement specifies exception handlers and/or cleanup code\nfor a group of statements:\n\n try_stmt ::= try1_stmt | try2_stmt\n try1_stmt ::= "try" ":" suite\n ("except" [expression [("as" | ",") target]] ":" suite)+\n ["else" ":" suite]\n ["finally" ":" suite]\n try2_stmt ::= "try" ":" suite\n "finally" ":" suite\n\nChanged in version 2.5: In previous versions of Python,\n``try``...``except``...``finally`` did not work. ``try``...``except``\nhad to be nested in ``try``...``finally``.\n\nThe ``except`` clause(s) specify one or more exception handlers. When\nno exception occurs in the ``try`` clause, no exception handler is\nexecuted. When an exception occurs in the ``try`` suite, a search for\nan exception handler is started. This search inspects the except\nclauses in turn until one is found that matches the exception. An\nexpression-less except clause, if present, must be last; it matches\nany exception. For an except clause with an expression, that\nexpression is evaluated, and the clause matches the exception if the\nresulting object is "compatible" with the exception. 
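\n\n(An editor\'s sketch of clause matching in action:\n\n   try:\n       {}[\'missing\']\n   except (IOError, OSError):\n       print \'I/O problem\'\n   except LookupError, err:\n       print \'lookup failed:\', err\n\nThe raised ``KeyError`` is not compatible with the first clause, but\nis a subclass of ``LookupError``, so the second clause is selected.)\n\n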
An object is\ncompatible with an exception if it is the class or a base class of the\nexception object, a tuple containing an item compatible with the\nexception, or, in the (deprecated) case of string exceptions, is the\nraised string itself (note that the object identities must match, i.e.\nit must be the same string object, not just a string with the same\nvalue).\n\nIf no except clause matches the exception, the search for an exception\nhandler continues in the surrounding code and on the invocation stack.\n[1]\n\nIf the evaluation of an expression in the header of an except clause\nraises an exception, the original search for a handler is canceled and\na search starts for the new exception in the surrounding code and on\nthe call stack (it is treated as if the entire ``try`` statement\nraised the exception).\n\nWhen a matching except clause is found, the exception is assigned to\nthe target specified in that except clause, if present, and the except\nclause\'s suite is executed. All except clauses must have an\nexecutable block. When the end of this block is reached, execution\ncontinues normally after the entire try statement. (This means that\nif two nested handlers exist for the same exception, and the exception\noccurs in the try clause of the inner handler, the outer handler will\nnot handle the exception.)\n\nBefore an except clause\'s suite is executed, details about the\nexception are assigned to three variables in the ``sys`` module:\n``sys.exc_type`` receives the object identifying the exception;\n``sys.exc_value`` receives the exception\'s parameter;\n``sys.exc_traceback`` receives a traceback object (see section *The\nstandard type hierarchy*) identifying the point in the program where\nthe exception occurred. These details are also available through the\n``sys.exc_info()`` function, which returns a tuple ``(exc_type,\nexc_value, exc_traceback)``. Use of the corresponding variables is\ndeprecated in favor of this function, since their use is unsafe in a\nthreaded program. As of Python 1.5, the variables are restored to\ntheir previous values (before the call) when returning from a function\nthat handled an exception.\n\nThe optional ``else`` clause is executed if and when control flows off\nthe end of the ``try`` clause. [2] Exceptions in the ``else`` clause\nare not handled by the preceding ``except`` clauses.\n\nIf ``finally`` is present, it specifies a \'cleanup\' handler. The\n``try`` clause is executed, including any ``except`` and ``else``\nclauses. If an exception occurs in any of the clauses and is not\nhandled, the exception is temporarily saved. The ``finally`` clause is\nexecuted. If there is a saved exception, it is re-raised at the end\nof the ``finally`` clause. If the ``finally`` clause raises another\nexception or executes a ``return`` or ``break`` statement, the saved\nexception is lost. The exception information is not available to the\nprogram during execution of the ``finally`` clause.\n\nWhen a ``return``, ``break`` or ``continue`` statement is executed in\nthe ``try`` suite of a ``try``...``finally`` statement, the\n``finally`` clause is also executed \'on the way out.\' A ``continue``\nstatement is illegal in the ``finally`` clause. 
(The reason is a\nproblem with the current implementation --- this restriction may be\nlifted in the future).\n\nAdditional information on exceptions can be found in section\n*Exceptions*, and information on using the ``raise`` statement to\ngenerate exceptions may be found in section *The raise statement*.\n\n\nThe ``with`` statement\n======================\n\nNew in version 2.5.\n\nThe ``with`` statement is used to wrap the execution of a block with\nmethods defined by a context manager (see section *With Statement\nContext Managers*). This allows common\n``try``...``except``...``finally`` usage patterns to be encapsulated\nfor convenient reuse.\n\n   with_stmt ::= "with" expression ["as" target] ":" suite\n\nThe execution of the ``with`` statement proceeds as follows:\n\n1. The context expression is evaluated to obtain a context manager.\n\n2. The context manager\'s ``__enter__()`` method is invoked.\n\n3. If a target was included in the ``with`` statement, the return\n   value from ``__enter__()`` is assigned to it.\n\n   Note: The ``with`` statement guarantees that if the ``__enter__()``\n     method returns without an error, then ``__exit__()`` will always\n     be called. Thus, if an error occurs during the assignment to the\n     target list, it will be treated the same as an error occurring\n     within the suite would be. See step 5 below.\n\n4. The suite is executed.\n\n5. The context manager\'s ``__exit__()`` method is invoked. If an\n   exception caused the suite to be exited, its type, value, and\n   traceback are passed as arguments to ``__exit__()``. Otherwise,\n   three ``None`` arguments are supplied.\n\n   If the suite was exited due to an exception, and the return value\n   from the ``__exit__()`` method was false, the exception is\n   reraised. If the return value was true, the exception is\n   suppressed, and execution continues with the statement following\n   the ``with`` statement.\n\n   If the suite was exited for any reason other than an exception, the\n   return value from ``__exit__()`` is ignored, and execution proceeds\n   at the normal location for the kind of exit that was taken.\n\nNote: In Python 2.5, the ``with`` statement is only allowed when the\n  ``with_statement`` feature has been enabled. It is always enabled\n  in Python 2.6.\n\nSee also:\n\n  **PEP 0343** - The "with" statement\n     The specification, background, and examples for the Python\n     ``with`` statement.\n\n\nFunction definitions\n====================\n\nA function definition defines a user-defined function object (see\nsection *The standard type hierarchy*):\n\n   decorated      ::= decorators (classdef | funcdef)\n   decorators     ::= decorator+\n   decorator      ::= "@" dotted_name ["(" [argument_list [","]] ")"] NEWLINE\n   funcdef        ::= "def" funcname "(" [parameter_list] ")" ":" suite\n   dotted_name    ::= identifier ("." identifier)*\n   parameter_list ::= (defparameter ",")*\n                      ( "*" identifier ["," "**" identifier]\n                      | "**" identifier\n                      | defparameter [","] )\n   defparameter   ::= parameter ["=" expression]\n   sublist        ::= parameter ("," parameter)* [","]\n   parameter      ::= identifier | "(" sublist ")"\n   funcname       ::= identifier\n\nA function definition is an executable statement. Its execution binds\nthe function name in the current local namespace to a function object\n(a wrapper around the executable code for the function). 
This\nfunction object contains a reference to the current global namespace\nas the global namespace to be used when the function is called.\n\nThe function definition does not execute the function body; this gets\nexecuted only when the function is called. [3]\n\nA function definition may be wrapped by one or more *decorator*\nexpressions. Decorator expressions are evaluated when the function is\ndefined, in the scope that contains the function definition. The\nresult must be a callable, which is invoked with the function object\nas the only argument. The returned value is bound to the function name\ninstead of the function object. Multiple decorators are applied in\nnested fashion. For example, the following code:\n\n   @f1(arg)\n   @f2\n   def func(): pass\n\nis equivalent to:\n\n   def func(): pass\n   func = f1(arg)(f2(func))\n\nWhen one or more top-level parameters have the form *parameter* ``=``\n*expression*, the function is said to have "default parameter values."\nFor a parameter with a default value, the corresponding argument may\nbe omitted from a call, in which case the parameter\'s default value is\nsubstituted. If a parameter has a default value, all following\nparameters must also have a default value --- this is a syntactic\nrestriction that is not expressed by the grammar.\n\n**Default parameter values are evaluated when the function definition\nis executed.** This means that the expression is evaluated once, when\nthe function is defined, and that that same "pre-computed" value is\nused for each call. This is especially important to understand when a\ndefault parameter is a mutable object, such as a list or a dictionary:\nif the function modifies the object (e.g. by appending an item to a\nlist), the default value is in effect modified. This is generally not\nwhat was intended. A way around this is to use ``None`` as the\ndefault, and explicitly test for it in the body of the function, e.g.:\n\n   def whats_on_the_telly(penguin=None):\n       if penguin is None:\n           penguin = []\n       penguin.append("property of the zoo")\n       return penguin\n\nFunction call semantics are described in more detail in section\n*Calls*. A function call always assigns values to all parameters\nmentioned in the parameter list, either from positional arguments,\nfrom keyword arguments, or from default values. If the form\n"``*identifier``" is present, it is initialized to a tuple receiving\nany excess positional parameters, defaulting to the empty tuple. If\nthe form "``**identifier``" is present, it is initialized to a new\ndictionary receiving any excess keyword arguments, defaulting to a new\nempty dictionary.\n\nIt is also possible to create anonymous functions (functions not bound\nto a name), for immediate use in expressions. This uses lambda forms,\ndescribed in section *Expression lists*. Note that the lambda form is\nmerely a shorthand for a simplified function definition; a function\ndefined in a "``def``" statement can be passed around or assigned to\nanother name just like a function defined by a lambda form. The\n"``def``" form is actually more powerful since it allows the execution\nof multiple statements.\n\n**Programmer\'s note:** Functions are first-class objects. A "``def``"\nform executed inside a function definition defines a local function\nthat can be returned or passed around. Free variables used in the\nnested function can access the local variables of the function\ncontaining the def. 
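A small sketch of this (the function names are illustrative, not part\nof the language):\n\n   def make_adder(n):\n       def adder(x):\n           return x + n    # n is a free variable of adder\n       return adder\n\n   add_two = make_adder(2)\n   print add_two(40)       # prints 42\n\n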
See section *Naming and binding* for details.\n\n\nClass definitions\n=================\n\nA class definition defines a class object (see section *The standard\ntype hierarchy*):\n\n classdef ::= "class" classname [inheritance] ":" suite\n inheritance ::= "(" [expression_list] ")"\n classname ::= identifier\n\nA class definition is an executable statement. It first evaluates the\ninheritance list, if present. Each item in the inheritance list\nshould evaluate to a class object or class type which allows\nsubclassing. The class\'s suite is then executed in a new execution\nframe (see section *Naming and binding*), using a newly created local\nnamespace and the original global namespace. (Usually, the suite\ncontains only function definitions.) When the class\'s suite finishes\nexecution, its execution frame is discarded but its local namespace is\nsaved. [4] A class object is then created using the inheritance list\nfor the base classes and the saved local namespace for the attribute\ndictionary. The class name is bound to this class object in the\noriginal local namespace.\n\n**Programmer\'s note:** Variables defined in the class definition are\nclass variables; they are shared by all instances. To create instance\nvariables, they can be set in a method with ``self.name = value``.\nBoth class and instance variables are accessible through the notation\n"``self.name``", and an instance variable hides a class variable with\nthe same name when accessed in this way. Class variables can be used\nas defaults for instance variables, but using mutable values there can\nlead to unexpected results. For *new-style class*es, descriptors can\nbe used to create instance variables with different implementation\ndetails.\n\nClass definitions, like function definitions, may be wrapped by one or\nmore *decorator* expressions. The evaluation rules for the decorator\nexpressions are the same as for functions. The result must be a class\nobject, which is then bound to the class name.\n\n-[ Footnotes ]-\n\n[1] The exception is propagated to the invocation stack only if there\n is no ``finally`` clause that negates the exception.\n\n[2] Currently, control "flows off the end" except in the case of an\n exception or the execution of a ``return``, ``continue``, or\n ``break`` statement.\n\n[3] A string literal appearing as the first statement in the function\n body is transformed into the function\'s ``__doc__`` attribute and\n therefore the function\'s *docstring*.\n\n[4] A string literal appearing as the first statement in the class\n body is transformed into the namespace\'s ``__doc__`` item and\n therefore the class\'s *docstring*.\n', 'context-managers': u'\nWith Statement Context Managers\n*******************************\n\nNew in version 2.5.\n\nA *context manager* is an object that defines the runtime context to\nbe established when executing a ``with`` statement. The context\nmanager handles the entry into, and the exit from, the desired runtime\ncontext for the execution of the block of code. Context managers are\nnormally invoked using the ``with`` statement (described in section\n*The with statement*), but can also be used by directly invoking their\nmethods.\n\nTypical uses of context managers include saving and restoring various\nkinds of global state, locking and unlocking resources, closing opened\nfiles, etc.\n\nFor more information on context managers, see *Context Manager Types*.\n\nobject.__enter__(self)\n\n Enter the runtime context related to this object. 
The ``with``\n statement will bind this method\'s return value to the target(s)\n specified in the ``as`` clause of the statement, if any.\n\nobject.__exit__(self, exc_type, exc_value, traceback)\n\n Exit the runtime context related to this object. The parameters\n describe the exception that caused the context to be exited. If the\n context was exited without an exception, all three arguments will\n be ``None``.\n\n If an exception is supplied, and the method wishes to suppress the\n exception (i.e., prevent it from being propagated), it should\n return a true value. Otherwise, the exception will be processed\n normally upon exit from this method.\n\n Note that ``__exit__()`` methods should not reraise the passed-in\n exception; this is the caller\'s responsibility.\n\nSee also:\n\n **PEP 0343** - The "with" statement\n The specification, background, and examples for the Python\n ``with`` statement.\n', 'continue': u'\nThe ``continue`` statement\n**************************\n\n continue_stmt ::= "continue"\n\n``continue`` may only occur syntactically nested in a ``for`` or\n``while`` loop, but not nested in a function or class definition or\n``finally`` clause within that loop. It continues with the next cycle\nof the nearest enclosing loop.\n\nWhen ``continue`` passes control out of a ``try`` statement with a\n``finally`` clause, that ``finally`` clause is executed before really\nstarting the next loop cycle.\n', 'conversions': u'\nArithmetic conversions\n**********************\n\nWhen a description of an arithmetic operator below uses the phrase\n"the numeric arguments are converted to a common type," the arguments\nare coerced using the coercion rules listed at *Coercion rules*. If\nboth arguments are standard numeric types, the following coercions are\napplied:\n\n* If either argument is a complex number, the other is converted to\n complex;\n\n* otherwise, if either argument is a floating point number, the other\n is converted to floating point;\n\n* otherwise, if either argument is a long integer, the other is\n converted to long integer;\n\n* otherwise, both must be plain integers and no conversion is\n necessary.\n\nSome additional rules apply for certain operators (e.g., a string left\nargument to the \'%\' operator). Extensions can define their own\ncoercions.\n', 'customization': u'\nBasic customization\n*******************\n\nobject.__new__(cls[, ...])\n\n Called to create a new instance of class *cls*. ``__new__()`` is a\n static method (special-cased so you need not declare it as such)\n that takes the class of which an instance was requested as its\n first argument. The remaining arguments are those passed to the\n object constructor expression (the call to the class). 
The return\n value of ``__new__()`` should be the new object instance (usually\n an instance of *cls*).\n\n Typical implementations create a new instance of the class by\n invoking the superclass\'s ``__new__()`` method using\n ``super(currentclass, cls).__new__(cls[, ...])`` with appropriate\n arguments and then modifying the newly-created instance as\n necessary before returning it.\n\n If ``__new__()`` returns an instance of *cls*, then the new\n instance\'s ``__init__()`` method will be invoked like\n ``__init__(self[, ...])``, where *self* is the new instance and the\n remaining arguments are the same as were passed to ``__new__()``.\n\n If ``__new__()`` does not return an instance of *cls*, then the new\n instance\'s ``__init__()`` method will not be invoked.\n\n ``__new__()`` is intended mainly to allow subclasses of immutable\n types (like int, str, or tuple) to customize instance creation. It\n is also commonly overridden in custom metaclasses in order to\n customize class creation.\n\nobject.__init__(self[, ...])\n\n Called when the instance is created. The arguments are those\n passed to the class constructor expression. If a base class has an\n ``__init__()`` method, the derived class\'s ``__init__()`` method,\n if any, must explicitly call it to ensure proper initialization of\n the base class part of the instance; for example:\n ``BaseClass.__init__(self, [args...])``. As a special constraint\n on constructors, no value may be returned; doing so will cause a\n ``TypeError`` to be raised at runtime.\n\nobject.__del__(self)\n\n Called when the instance is about to be destroyed. This is also\n called a destructor. If a base class has a ``__del__()`` method,\n the derived class\'s ``__del__()`` method, if any, must explicitly\n call it to ensure proper deletion of the base class part of the\n instance. Note that it is possible (though not recommended!) for\n the ``__del__()`` method to postpone destruction of the instance by\n creating a new reference to it. It may then be called at a later\n time when this new reference is deleted. It is not guaranteed that\n ``__del__()`` methods are called for objects that still exist when\n the interpreter exits.\n\n Note: ``del x`` doesn\'t directly call ``x.__del__()`` --- the former\n decrements the reference count for ``x`` by one, and the latter\n is only called when ``x``\'s reference count reaches zero. Some\n common situations that may prevent the reference count of an\n object from going to zero include: circular references between\n objects (e.g., a doubly-linked list or a tree data structure with\n parent and child pointers); a reference to the object on the\n stack frame of a function that caught an exception (the traceback\n stored in ``sys.exc_traceback`` keeps the stack frame alive); or\n a reference to the object on the stack frame that raised an\n unhandled exception in interactive mode (the traceback stored in\n ``sys.last_traceback`` keeps the stack frame alive). The first\n situation can only be remedied by explicitly breaking the cycles;\n the latter two situations can be resolved by storing ``None`` in\n ``sys.exc_traceback`` or ``sys.last_traceback``. 
Circular\n   references which are garbage are detected when the optional cycle\n   detector is enabled (it\'s on by default), but can only be cleaned\n   up if there are no Python-level ``__del__()`` methods involved.\n   Refer to the documentation for the ``gc`` module for more\n   information about how ``__del__()`` methods are handled by the\n   cycle detector, particularly the description of the ``garbage``\n   value.\n\n   Warning: Due to the precarious circumstances under which\n     ``__del__()`` methods are invoked, exceptions that occur during\n     their execution are ignored, and a warning is printed to\n     ``sys.stderr`` instead. Also, when ``__del__()`` is invoked in\n     response to a module being deleted (e.g., when execution of the\n     program is done), other globals referenced by the ``__del__()``\n     method may already have been deleted or be in the process of\n     being torn down (e.g. the import machinery shutting down). For\n     this reason, ``__del__()`` methods should do the absolute minimum\n     needed to maintain external invariants. Starting with version\n     1.5, Python guarantees that globals whose name begins with a\n     single underscore are deleted from their module before other\n     globals are deleted; if no other references to such globals\n     exist, this may help in assuring that imported modules are still\n     available at the time when the ``__del__()`` method is called.\n\nobject.__repr__(self)\n\n   Called by the ``repr()`` built-in function and by string\n   conversions (reverse quotes) to compute the "official" string\n   representation of an object. If at all possible, this should look\n   like a valid Python expression that could be used to recreate an\n   object with the same value (given an appropriate environment). If\n   this is not possible, a string of the form ``<...some useful\n   description...>`` should be returned. The return value must be a\n   string object. If a class defines ``__repr__()`` but not\n   ``__str__()``, then ``__repr__()`` is also used when an "informal"\n   string representation of instances of that class is required.\n\n   This is typically used for debugging, so it is important that the\n   representation is information-rich and unambiguous.\n\nobject.__str__(self)\n\n   Called by the ``str()`` built-in function and by the ``print``\n   statement to compute the "informal" string representation of an\n   object. This differs from ``__repr__()`` in that it does not have\n   to be a valid Python expression: a more convenient or concise\n   representation may be used instead. The return value must be a\n   string object.\n\nobject.__lt__(self, other)\nobject.__le__(self, other)\nobject.__eq__(self, other)\nobject.__ne__(self, other)\nobject.__gt__(self, other)\nobject.__ge__(self, other)\n\n   New in version 2.1.\n\n   These are the so-called "rich comparison" methods, and are called\n   for comparison operators in preference to ``__cmp__()`` below. The\n   correspondence between operator symbols and method names is as\n   follows: ``x<y`` calls ``x.__lt__(y)``, ``x<=y`` calls\n   ``x.__le__(y)``, ``x==y`` calls ``x.__eq__(y)``, ``x!=y`` and\n   ``x<>y`` call ``x.__ne__(y)``, ``x>y`` calls ``x.__gt__(y)``, and\n   ``x>=y`` calls ``x.__ge__(y)``.\n\n   A rich comparison method may return the singleton\n   ``NotImplemented`` if it does not implement the operation for a\n   given pair of arguments. By convention, ``False`` and ``True`` are\n   returned for a successful comparison. 
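For instance, a hedged sketch of a class defining ``__eq__()``\n   and ``__ne__()`` together (the class name is illustrative):\n\n      class Point(object):\n          def __init__(self, x, y):\n              self.x, self.y = x, y\n          def __eq__(self, other):\n              return (self.x, self.y) == (other.x, other.y)\n          def __ne__(self, other):\n              return not self.__eq__(other)\n\n   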
However, these methods can\n   return any value, so if the comparison operator is used in a\n   Boolean context (e.g., in the condition of an ``if`` statement),\n   Python will call ``bool()`` on the value to determine if the result\n   is true or false.\n\n   There are no implied relationships among the comparison operators.\n   The truth of ``x==y`` does not imply that ``x!=y`` is false.\n   Accordingly, when defining ``__eq__()``, one should also define\n   ``__ne__()`` so that the operators will behave as expected. See\n   the paragraph on ``__hash__()`` for some important notes on\n   creating *hashable* objects which support custom comparison\n   operations and are usable as dictionary keys.\n\n   There are no swapped-argument versions of these methods (to be used\n   when the left argument does not support the operation but the right\n   argument does); rather, ``__lt__()`` and ``__gt__()`` are each\n   other\'s reflection, ``__le__()`` and ``__ge__()`` are each other\'s\n   reflection, and ``__eq__()`` and ``__ne__()`` are their own\n   reflection.\n\n   Arguments to rich comparison methods are never coerced.\n\nobject.__cmp__(self, other)\n\n   Called by comparison operations if rich comparison (see above) is\n   not defined. Should return a negative integer if ``self < other``,\n   zero if ``self == other``, a positive integer if ``self > other``.\n   If no ``__cmp__()``, ``__eq__()`` or ``__ne__()`` operation is\n   defined, class instances are compared by object identity\n   ("address"). See also the description of ``__hash__()`` for some\n   important notes on creating *hashable* objects which support custom\n   comparison operations and are usable as dictionary keys. (Note: the\n   restriction that exceptions are not propagated by ``__cmp__()`` has\n   been removed since Python 1.5.)\n\nobject.__rcmp__(self, other)\n\n   Changed in version 2.1: No longer supported.\n\nobject.__hash__(self)\n\n   Called by built-in function ``hash()`` and for operations on\n   members of hashed collections including ``set``, ``frozenset``, and\n   ``dict``. ``__hash__()`` should return an integer. The only\n   required property is that objects which compare equal have the same\n   hash value; it is advised to somehow mix together (e.g. using\n   exclusive or) the hash values for the components of the object that\n   also play a part in comparison of objects.\n\n   If a class does not define a ``__cmp__()`` or ``__eq__()`` method\n   it should not define a ``__hash__()`` operation either; if it\n   defines ``__cmp__()`` or ``__eq__()`` but not ``__hash__()``, its\n   instances will not be usable in hashed collections. If a class\n   defines mutable objects and implements a ``__cmp__()`` or\n   ``__eq__()`` method, it should not implement ``__hash__()``, since\n   hashable collection implementations require that an object\'s hash\n   value is immutable (if the object\'s hash value changes, it will be\n   in the wrong hash bucket).\n\n   User-defined classes have ``__cmp__()`` and ``__hash__()`` methods\n   by default; with them, all objects compare unequal (except with\n   themselves) and ``x.__hash__()`` returns ``id(x)``.\n\n   Classes which inherit a ``__hash__()`` method from a parent class\n   but change the meaning of ``__cmp__()`` or ``__eq__()`` such that\n   the hash value returned is no longer appropriate (e.g. 
by switching\n to a value-based concept of equality instead of the default\n identity based equality) can explicitly flag themselves as being\n unhashable by setting ``__hash__ = None`` in the class definition.\n Doing so means that not only will instances of the class raise an\n appropriate ``TypeError`` when a program attempts to retrieve their\n hash value, but they will also be correctly identified as\n unhashable when checking ``isinstance(obj, collections.Hashable)``\n (unlike classes which define their own ``__hash__()`` to explicitly\n raise ``TypeError``).\n\n Changed in version 2.5: ``__hash__()`` may now also return a long\n integer object; the 32-bit integer is then derived from the hash of\n that object.\n\n Changed in version 2.6: ``__hash__`` may now be set to ``None`` to\n explicitly flag instances of a class as unhashable.\n\nobject.__nonzero__(self)\n\n Called to implement truth value testing and the built-in operation\n ``bool()``; should return ``False`` or ``True``, or their integer\n equivalents ``0`` or ``1``. When this method is not defined,\n ``__len__()`` is called, if it is defined, and the object is\n considered true if its result is nonzero. If a class defines\n neither ``__len__()`` nor ``__nonzero__()``, all its instances are\n considered true.\n\nobject.__unicode__(self)\n\n Called to implement ``unicode()`` builtin; should return a Unicode\n object. When this method is not defined, string conversion is\n attempted, and the result of string conversion is converted to\n Unicode using the system default encoding.\n', 'debugger': u'\n``pdb`` --- The Python Debugger\n*******************************\n\nThe module ``pdb`` defines an interactive source code debugger for\nPython programs. It supports setting (conditional) breakpoints and\nsingle stepping at the source line level, inspection of stack frames,\nsource code listing, and evaluation of arbitrary Python code in the\ncontext of any stack frame. It also supports post-mortem debugging\nand can be called under program control.\n\nThe debugger is extensible --- it is actually defined as the class\n``Pdb``. This is currently undocumented but easily understood by\nreading the source. The extension interface uses the modules ``bdb``\n(undocumented) and ``cmd``.\n\nThe debugger\'s prompt is ``(Pdb)``. Typical usage to run a program\nunder control of the debugger is:\n\n >>> import pdb\n >>> import mymodule\n >>> pdb.run(\'mymodule.test()\')\n > <string>(0)?()\n (Pdb) continue\n > <string>(1)?()\n (Pdb) continue\n NameError: \'spam\'\n > <string>(1)?()\n (Pdb)\n\n``pdb.py`` can also be invoked as a script to debug other scripts.\nFor example:\n\n python -m pdb myscript.py\n\nWhen invoked as a script, pdb will automatically enter post-mortem\ndebugging if the program being debugged exits abnormally. After post-\nmortem debugging (or after normal exit of the program), pdb will\nrestart the program. 
Automatic restarting preserves pdb\'s state (such\nas breakpoints) and in most cases is more useful than quitting the\ndebugger upon program\'s exit.\n\nNew in version 2.4: Restarting post-mortem behavior added.\n\nTypical usage to inspect a crashed program is:\n\n   >>> import pdb\n   >>> import mymodule\n   >>> mymodule.test()\n   Traceback (most recent call last):\n     File "<stdin>", line 1, in ?\n     File "./mymodule.py", line 4, in test\n       test2()\n     File "./mymodule.py", line 3, in test2\n       print spam\n   NameError: spam\n   >>> pdb.pm()\n   > ./mymodule.py(3)test2()\n   -> print spam\n   (Pdb)\n\nThe module defines the following functions; each enters the debugger\nin a slightly different way:\n\npdb.run(statement[, globals[, locals]])\n\n   Execute the *statement* (given as a string) under debugger control.\n   The debugger prompt appears before any code is executed; you can\n   set breakpoints and type ``continue``, or you can step through the\n   statement using ``step`` or ``next`` (all these commands are\n   explained below). The optional *globals* and *locals* arguments\n   specify the environment in which the code is executed; by default\n   the dictionary of the module ``__main__`` is used. (See the\n   explanation of the ``exec`` statement or the ``eval()`` built-in\n   function.)\n\npdb.runeval(expression[, globals[, locals]])\n\n   Evaluate the *expression* (given as a string) under debugger\n   control. When ``runeval()`` returns, it returns the value of the\n   expression. Otherwise this function is similar to ``run()``.\n\npdb.runcall(function[, argument, ...])\n\n   Call the *function* (a function or method object, not a string)\n   with the given arguments. When ``runcall()`` returns, it returns\n   whatever the function call returned. The debugger prompt appears\n   as soon as the function is entered.\n\npdb.set_trace()\n\n   Enter the debugger at the calling stack frame. This is useful to\n   hard-code a breakpoint at a given point in a program, even if the\n   code is not otherwise being debugged (e.g. when an assertion\n   fails).\n\npdb.post_mortem([traceback])\n\n   Enter post-mortem debugging of the given *traceback* object. If no\n   *traceback* is given, it uses the one of the exception that is\n   currently being handled (an exception must be being handled if the\n   default is to be used).\n\npdb.pm()\n\n   Enter post-mortem debugging of the traceback found in\n   ``sys.last_traceback``.\n', 'del': u'\nThe ``del`` statement\n*********************\n\n   del_stmt ::= "del" target_list\n\nDeletion is recursively defined very similarly to the way assignment\nis defined. Rather than spelling it out in full detail, here are some\nhints.\n\nDeletion of a target list recursively deletes each target, from left\nto right.\n\nDeletion of a name removes the binding of that name from the local or\nglobal namespace, depending on whether the name occurs in a ``global``\nstatement in the same code block. 
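A short illustrative sketch:\n\n   x = [1, 2, 3]\n   del x[0]    # deletion of a subscription is passed to the list\n   del x       # removes the binding of the name x\n\n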
If the name is unbound, a\n``NameError`` exception will be raised.\n\nIt is illegal to delete a name from the local namespace if it occurs\nas a free variable in a nested block.\n\nDeletion of attribute references, subscriptions and slicings is passed\nto the primary object involved; deletion of a slicing is in general\nequivalent to assignment of an empty slice of the right type (but even\nthis is determined by the sliced object).\n', 'dict': u'\nDictionary displays\n*******************\n\nA dictionary display is a possibly empty series of key/datum pairs\nenclosed in curly braces:\n\n dict_display ::= "{" [key_datum_list] "}"\n key_datum_list ::= key_datum ("," key_datum)* [","]\n key_datum ::= expression ":" expression\n\nA dictionary display yields a new dictionary object.\n\nThe key/datum pairs are evaluated from left to right to define the\nentries of the dictionary: each key object is used as a key into the\ndictionary to store the corresponding datum.\n\nRestrictions on the types of the key values are listed earlier in\nsection *The standard type hierarchy*. (To summarize, the key type\nshould be *hashable*, which excludes all mutable objects.) Clashes\nbetween duplicate keys are not detected; the last datum (textually\nrightmost in the display) stored for a given key value prevails.\n', 'dynamic-features': u'\nInteraction with dynamic features\n*********************************\n\nThere are several cases where Python statements are illegal when used\nin conjunction with nested scopes that contain free variables.\n\nIf a variable is referenced in an enclosing scope, it is illegal to\ndelete the name. An error will be reported at compile time.\n\nIf the wild card form of import --- ``import *`` --- is used in a\nfunction and the function contains or is a nested block with free\nvariables, the compiler will raise a ``SyntaxError``.\n\nIf ``exec`` is used in a function and the function contains or is a\nnested block with free variables, the compiler will raise a\n``SyntaxError`` unless the exec explicitly specifies the local\nnamespace for the ``exec``. (In other words, ``exec obj`` would be\nillegal, but ``exec obj in ns`` would be legal.)\n\nThe ``eval()``, ``execfile()``, and ``input()`` functions and the\n``exec`` statement do not have access to the full environment for\nresolving names. Names may be resolved in the local and global\nnamespaces of the caller. Free variables are not resolved in the\nnearest enclosing namespace, but in the global namespace. [1] The\n``exec`` statement and the ``eval()`` and ``execfile()`` functions\nhave optional arguments to override the global and local namespace.\nIf only one namespace is specified, it is used for both.\n', 'else': u'\nThe ``if`` statement\n********************\n\nThe ``if`` statement is used for conditional execution:\n\n if_stmt ::= "if" expression ":" suite\n ( "elif" expression ":" suite )*\n ["else" ":" suite]\n\nIt selects exactly one of the suites by evaluating the expressions one\nby one until one is found to be true (see section *Boolean operations*\nfor the definition of true and false); then that suite is executed\n(and no other part of the ``if`` statement is executed or evaluated).\nIf all expressions are false, the suite of the ``else`` clause, if\npresent, is executed.\n', 'exceptions': u'\nExceptions\n**********\n\nExceptions are a means of breaking out of the normal flow of control\nof a code block in order to handle errors or other exceptional\nconditions. 
An exception is *raised* at the point where the error is\ndetected; it may be *handled* by the surrounding code block or by any\ncode block that directly or indirectly invoked the code block where\nthe error occurred.\n\nThe Python interpreter raises an exception when it detects a run-time\nerror (such as division by zero). A Python program can also\nexplicitly raise an exception with the ``raise`` statement. Exception\nhandlers are specified with the ``try`` ... ``except`` statement. The\n``finally`` clause of such a statement can be used to specify cleanup\ncode which does not handle the exception, but is executed whether an\nexception occurred or not in the preceding code.\n\nPython uses the "termination" model of error handling: an exception\nhandler can find out what happened and continue execution at an outer\nlevel, but it cannot repair the cause of the error and retry the\nfailing operation (except by re-entering the offending piece of code\nfrom the top).\n\nWhen an exception is not handled at all, the interpreter terminates\nexecution of the program, or returns to its interactive main loop. In\neither case, it prints a stack backtrace, except when the exception is\n``SystemExit``.\n\nExceptions are identified by class instances. The ``except`` clause\nis selected depending on the class of the instance: it must reference\nthe class of the instance or a base class thereof. The instance can\nbe received by the handler and can carry additional information about\nthe exceptional condition.\n\nExceptions can also be identified by strings, in which case the\n``except`` clause is selected by object identity. An arbitrary value\ncan be raised along with the identifying string which can be passed to\nthe handler.\n\nWarning: Messages to exceptions are not part of the Python API. Their\n contents may change from one version of Python to the next without\n warning and should not be relied on by code which will run under\n multiple versions of the interpreter.\n\nSee also the description of the ``try`` statement in section *The try\nstatement* and ``raise`` statement in section *The raise statement*.\n\n-[ Footnotes ]-\n\n[1] This limitation occurs because the code that is executed by these\n operations is not available at the time the module is compiled.\n', 'exec': u'\nThe ``exec`` statement\n**********************\n\n exec_stmt ::= "exec" or_expr ["in" expression ["," expression]]\n\nThis statement supports dynamic execution of Python code. The first\nexpression should evaluate to either a string, an open file object, or\na code object. If it is a string, the string is parsed as a suite of\nPython statements which is then executed (unless a syntax error\noccurs). [1] If it is an open file, the file is parsed until EOF and\nexecuted. If it is a code object, it is simply executed. In all\ncases, the code that\'s executed is expected to be valid as file input\n(see section *File input*). Be aware that the ``return`` and\n``yield`` statements may not be used outside of function definitions\neven within the context of code passed to the ``exec`` statement.\n\nIn all cases, if the optional parts are omitted, the code is executed\nin the current scope. If only the first expression after ``in`` is\nspecified, it should be a dictionary, which will be used for both the\nglobal and the local variables. If two expressions are given, they\nare used for the global and local variables, respectively. 
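For example (a minimal sketch; the dictionary name ``ns`` is\nillustrative):\n\n   code = "y = x * 2"\n   ns = {"x": 21}\n   exec code in ns    # ns supplies both the global and local namespace\n   print ns["y"]      # prints 42\n\n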
If\nprovided, *locals* can be any mapping object.\n\nChanged in version 2.4: Formerly, *locals* was required to be a\ndictionary.\n\nAs a side effect, an implementation may insert additional keys into\nthe dictionaries given besides those corresponding to variable names\nset by the executed code. For example, the current implementation may\nadd a reference to the dictionary of the built-in module\n``__builtin__`` under the key ``__builtins__`` (!).\n\n**Programmer\'s hints:** dynamic evaluation of expressions is supported\nby the built-in function ``eval()``. The built-in functions\n``globals()`` and ``locals()`` return the current global and local\ndictionary, respectively, which may be useful to pass around for use\nby ``exec``.\n\n-[ Footnotes ]-\n\n[1] Note that the parser only accepts the Unix-style end of line\n    convention. If you are reading the code from a file, make sure to\n    use universal newline mode to convert Windows or Mac-style\n    newlines.\n', 'execmodel': u'\nExecution model\n***************\n\n\nNaming and binding\n==================\n\n*Names* refer to objects. Names are introduced by name binding\noperations. Each occurrence of a name in the program text refers to\nthe *binding* of that name established in the innermost function block\ncontaining the use.\n\nA *block* is a piece of Python program text that is executed as a\nunit. The following are blocks: a module, a function body, and a class\ndefinition. Each command typed interactively is a block. A script\nfile (a file given as standard input to the interpreter or specified\non the interpreter command line as the first argument) is a code\nblock. A script command (a command specified on the interpreter\ncommand line with the \'**-c**\' option) is a code block. The file read\nby the built-in function ``execfile()`` is a code block. The string\nargument passed to the built-in function ``eval()`` and to the\n``exec`` statement is a code block. The expression read and evaluated\nby the built-in function ``input()`` is a code block.\n\nA code block is executed in an *execution frame*. A frame contains\nsome administrative information (used for debugging) and determines\nwhere and how execution continues after the code block\'s execution has\ncompleted.\n\nA *scope* defines the visibility of a name within a block. If a local\nvariable is defined in a block, its scope includes that block. If the\ndefinition occurs in a function block, the scope extends to any blocks\ncontained within the defining one, unless a contained block introduces\na different binding for the name. The scope of names defined in a\nclass block is limited to the class block; it does not extend to the\ncode blocks of methods -- this includes generator expressions since\nthey are implemented using a function scope. This means that the\nfollowing will fail:\n\n   class A:\n       a = 42\n       b = list(a + i for i in range(10))\n\nWhen a name is used in a code block, it is resolved using the nearest\nenclosing scope. The set of all such scopes visible to a code block\nis called the block\'s *environment*.\n\nIf a name is bound in a block, it is a local variable of that block.\nIf a name is bound at the module level, it is a global variable. (The\nvariables of the module code block are local and global.) If a\nvariable is used in a code block but not defined there, it is a *free\nvariable*.\n\nWhen a name is not found at all, a ``NameError`` exception is raised.\nIf the name refers to a local variable that has not been bound, a\n``UnboundLocalError`` exception is raised. 
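For example (illustrative only):\n\n   def f():\n       print n    # UnboundLocalError: the assignment below makes n\n       n = 10     # local to f, but it is not bound yet at the print\n\n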
``UnboundLocalError`` is a\nsubclass of ``NameError``.\n\nThe following constructs bind names: formal parameters to functions,\n``import`` statements, class and function definitions (these bind the\nclass or function name in the defining block), and targets that are\nidentifiers if occurring in an assignment, ``for`` loop header, in the\nsecond position of an ``except`` clause header or after ``as`` in a\n``with`` statement. The ``import`` statement of the form ``from ...\nimport *`` binds all names defined in the imported module, except\nthose beginning with an underscore. This form may only be used at the\nmodule level.\n\nA target occurring in a ``del`` statement is also considered bound for\nthis purpose (though the actual semantics are to unbind the name). It\nis illegal to unbind a name that is referenced by an enclosing scope;\nthe compiler will report a ``SyntaxError``.\n\nEach assignment or import statement occurs within a block defined by a\nclass or function definition or at the module level (the top-level\ncode block).\n\nIf a name binding operation occurs anywhere within a code block, all\nuses of the name within the block are treated as references to the\ncurrent block. This can lead to errors when a name is used within a\nblock before it is bound. This rule is subtle. Python lacks\ndeclarations and allows name binding operations to occur anywhere\nwithin a code block. The local variables of a code block can be\ndetermined by scanning the entire text of the block for name binding\noperations.\n\nIf the global statement occurs within a block, all uses of the name\nspecified in the statement refer to the binding of that name in the\ntop-level namespace. Names are resolved in the top-level namespace by\nsearching the global namespace, i.e. the namespace of the module\ncontaining the code block, and the builtin namespace, the namespace of\nthe module ``__builtin__``. The global namespace is searched first.\nIf the name is not found there, the builtin namespace is searched.\nThe global statement must precede all uses of the name.\n\nThe built-in namespace associated with the execution of a code block\nis actually found by looking up the name ``__builtins__`` in its\nglobal namespace; this should be a dictionary or a module (in the\nlatter case the module\'s dictionary is used). By default, when in the\n``__main__`` module, ``__builtins__`` is the built-in module\n``__builtin__`` (note: no \'s\'); when in any other module,\n``__builtins__`` is an alias for the dictionary of the ``__builtin__``\nmodule itself. ``__builtins__`` can be set to a user-created\ndictionary to create a weak form of restricted execution.\n\nNote: Users should not touch ``__builtins__``; it is strictly an\n implementation detail. Users wanting to override values in the\n built-in namespace should ``import`` the ``__builtin__`` (no \'s\')\n module and modify its attributes appropriately.\n\nThe namespace for a module is automatically created the first time a\nmodule is imported. The main module for a script is always called\n``__main__``.\n\nThe global statement has the same scope as a name binding operation in\nthe same block. If the nearest enclosing scope for a free variable\ncontains a global statement, the free variable is treated as a global.\n\nA class definition is an executable statement that may use and define\nnames. These references follow the normal rules for name resolution.\nThe namespace of the class definition becomes the attribute dictionary\nof the class. 
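A brief sketch (the class name is illustrative):\n\n   class C:\n       x = 1            # stored in the attribute dictionary as C.x\n       def get_x(self):\n           return x     # NameError when called: the class scope is\n                        # not searched from inside the method\n\n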
Names defined at the class scope are not visible in\nmethods.\n\n\nInteraction with dynamic features\n---------------------------------\n\nThere are several cases where Python statements are illegal when used\nin conjunction with nested scopes that contain free variables.\n\nIf a variable is referenced in an enclosing scope, it is illegal to\ndelete the name. An error will be reported at compile time.\n\nIf the wild card form of import --- ``import *`` --- is used in a\nfunction and the function contains or is a nested block with free\nvariables, the compiler will raise a ``SyntaxError``.\n\nIf ``exec`` is used in a function and the function contains or is a\nnested block with free variables, the compiler will raise a\n``SyntaxError`` unless the exec explicitly specifies the local\nnamespace for the ``exec``. (In other words, ``exec obj`` would be\nillegal, but ``exec obj in ns`` would be legal.)\n\nThe ``eval()``, ``execfile()``, and ``input()`` functions and the\n``exec`` statement do not have access to the full environment for\nresolving names. Names may be resolved in the local and global\nnamespaces of the caller. Free variables are not resolved in the\nnearest enclosing namespace, but in the global namespace. [1] The\n``exec`` statement and the ``eval()`` and ``execfile()`` functions\nhave optional arguments to override the global and local namespace.\nIf only one namespace is specified, it is used for both.\n\n\nExceptions\n==========\n\nExceptions are a means of breaking out of the normal flow of control\nof a code block in order to handle errors or other exceptional\nconditions. An exception is *raised* at the point where the error is\ndetected; it may be *handled* by the surrounding code block or by any\ncode block that directly or indirectly invoked the code block where\nthe error occurred.\n\nThe Python interpreter raises an exception when it detects a run-time\nerror (such as division by zero). A Python program can also\nexplicitly raise an exception with the ``raise`` statement. Exception\nhandlers are specified with the ``try`` ... ``except`` statement. The\n``finally`` clause of such a statement can be used to specify cleanup\ncode which does not handle the exception, but is executed whether an\nexception occurred or not in the preceding code.\n\nPython uses the "termination" model of error handling: an exception\nhandler can find out what happened and continue execution at an outer\nlevel, but it cannot repair the cause of the error and retry the\nfailing operation (except by re-entering the offending piece of code\nfrom the top).\n\nWhen an exception is not handled at all, the interpreter terminates\nexecution of the program, or returns to its interactive main loop. In\neither case, it prints a stack backtrace, except when the exception is\n``SystemExit``.\n\nExceptions are identified by class instances. The ``except`` clause\nis selected depending on the class of the instance: it must reference\nthe class of the instance or a base class thereof. The instance can\nbe received by the handler and can carry additional information about\nthe exceptional condition.\n\nExceptions can also be identified by strings, in which case the\n``except`` clause is selected by object identity. An arbitrary value\ncan be raised along with the identifying string which can be passed to\nthe handler.\n\nWarning: Messages to exceptions are not part of the Python API. 
Their\n contents may change from one version of Python to the next without\n warning and should not be relied on by code which will run under\n multiple versions of the interpreter.\n\nSee also the description of the ``try`` statement in section *The try\nstatement* and ``raise`` statement in section *The raise statement*.\n\n-[ Footnotes ]-\n\n[1] This limitation occurs because the code that is executed by these\n operations is not available at the time the module is compiled.\n', 'exprlists': u'\nExpression lists\n****************\n\n expression_list ::= expression ( "," expression )* [","]\n\nAn expression list containing at least one comma yields a tuple. The\nlength of the tuple is the number of expressions in the list. The\nexpressions are evaluated from left to right.\n\nThe trailing comma is required only to create a single tuple (a.k.a. a\n*singleton*); it is optional in all other cases. A single expression\nwithout a trailing comma doesn\'t create a tuple, but rather yields the\nvalue of that expression. (To create an empty tuple, use an empty pair\nof parentheses: ``()``.)\n', 'floating': u'\nFloating point literals\n***********************\n\nFloating point literals are described by the following lexical\ndefinitions:\n\n floatnumber ::= pointfloat | exponentfloat\n pointfloat ::= [intpart] fraction | intpart "."\n exponentfloat ::= (intpart | pointfloat) exponent\n intpart ::= digit+\n fraction ::= "." digit+\n exponent ::= ("e" | "E") ["+" | "-"] digit+\n\nNote that the integer and exponent parts of floating point numbers can\nlook like octal integers, but are interpreted using radix 10. For\nexample, ``077e010`` is legal, and denotes the same number as\n``77e10``. The allowed range of floating point literals is\nimplementation-dependent. Some examples of floating point literals:\n\n 3.14 10. .001 1e100 3.14e-10 0e0\n\nNote that numeric literals do not include a sign; a phrase like ``-1``\nis actually an expression composed of the unary operator ``-`` and the\nliteral ``1``.\n', 'for': u'\nThe ``for`` statement\n*********************\n\nThe ``for`` statement is used to iterate over the elements of a\nsequence (such as a string, tuple or list) or other iterable object:\n\n for_stmt ::= "for" target_list "in" expression_list ":" suite\n ["else" ":" suite]\n\nThe expression list is evaluated once; it should yield an iterable\nobject. An iterator is created for the result of the\n``expression_list``. The suite is then executed once for each item\nprovided by the iterator, in the order of ascending indices. Each\nitem in turn is assigned to the target list using the standard rules\nfor assignments, and then the suite is executed. When the items are\nexhausted (which is immediately when the sequence is empty), the suite\nin the ``else`` clause, if present, is executed, and the loop\nterminates.\n\nA ``break`` statement executed in the first suite terminates the loop\nwithout executing the ``else`` clause\'s suite. A ``continue``\nstatement executed in the first suite skips the rest of the suite and\ncontinues with the next item, or with the ``else`` clause if there was\nno next item.\n\nThe suite may assign to the variable(s) in the target list; this does\nnot affect the next item assigned to it.\n\nThe target list is not deleted when the loop is finished, but if the\nsequence is empty, it will not have been assigned to at all by the\nloop. 
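For instance (illustrative only):\n\n   for i in range(3):\n       pass\n   print i    # prints 2: the target keeps its last assigned value\n\n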
Hint: the built-in function ``range()`` returns a sequence of\nintegers suitable to emulate the effect of Pascal\'s ``for i := a to b\ndo``; e.g., ``range(3)`` returns the list ``[0, 1, 2]``.\n\nWarning: There is a subtlety when the sequence is being modified by the loop\n (this can only occur for mutable sequences, i.e. lists). An internal\n counter is used to keep track of which item is used next, and this\n is incremented on each iteration. When this counter has reached the\n length of the sequence the loop terminates. This means that if the\n suite deletes the current (or a previous) item from the sequence,\n the next item will be skipped (since it gets the index of the\n current item which has already been treated). Likewise, if the\n suite inserts an item in the sequence before the current item, the\n current item will be treated again the next time through the loop.\n This can lead to nasty bugs that can be avoided by making a\n temporary copy using a slice of the whole sequence, e.g.,\n\n for x in a[:]:\n if x < 0: a.remove(x)\n', 'formatstrings': u'\nFormat String Syntax\n********************\n\nThe ``str.format()`` method and the ``Formatter`` class share the same\nsyntax for format strings (although in the case of ``Formatter``,\nsubclasses can define their own format string syntax.)\n\nFormat strings contain "replacement fields" surrounded by curly braces\n``{}``. Anything that is not contained in braces is considered literal\ntext, which is copied unchanged to the output. If you need to include\na brace character in the literal text, it can be escaped by doubling:\n``{{`` and ``}}``.\n\nThe grammar for a replacement field is as follows:\n\n replacement_field ::= "{" field_name ["!" conversion] [":" format_spec] "}"\n field_name ::= (identifier | integer) ("." attribute_name | "[" element_index "]")*\n attribute_name ::= identifier\n element_index ::= integer\n conversion ::= "r" | "s"\n format_spec ::= <described in the next section>\n\nIn less formal terms, the replacement field starts with a\n*field_name*, which can either be a number (for a positional\nargument), or an identifier (for keyword arguments). Following this\nis an optional *conversion* field, which is preceded by an exclamation\npoint ``\'!\'``, and a *format_spec*, which is preceded by a colon\n``\':\'``.\n\nThe *field_name* itself begins with either a number or a keyword. If\nit\'s a number, it refers to a positional argument, and if it\'s a\nkeyword it refers to a named keyword argument. This can be followed\nby any number of index or attribute expressions. An expression of the\nform ``\'.name\'`` selects the named attribute using ``getattr()``,\nwhile an expression of the form ``\'[index]\'`` does an index lookup\nusing ``__getitem__()``.\n\nSome simple format string examples:\n\n "First, thou shalt count to {0}" # References first positional argument\n "My quest is {name}" # References keyword argument \'name\'\n "Weight in tons {0.weight}" # \'weight\' attribute of first positional arg\n "Units destroyed: {players[0]}" # First element of keyword argument \'players\'.\n\nThe *conversion* field causes a type coercion before formatting.\nNormally, the job of formatting a value is done by the\n``__format__()`` method of the value itself. However, in some cases\nit is desirable to force a type to be formatted as a string,\noverriding its own definition of formatting. 
By converting the value\nto a string before calling ``__format__()``, the normal formatting\nlogic is bypassed.\n\nTwo conversion flags are currently supported: ``\'!s\'`` which calls\n``str()`` on the value, and ``\'!r\'`` which calls ``repr()``.\n\nSome examples:\n\n   "Harold\'s a clever {0!s}"        # Calls str() on the argument first\n   "Bring out the holy {name!r}"    # Calls repr() on the argument first\n\nThe *format_spec* field contains a specification of how the value\nshould be presented, including such details as field width, alignment,\npadding, decimal precision and so on. Each value type can define its\nown "formatting mini-language" or interpretation of the *format_spec*.\n\nMost built-in types support a common formatting mini-language, which\nis described in the next section.\n\nA *format_spec* field can also include nested replacement fields\nwithin it. These nested replacement fields can contain only a field\nname; conversion flags and format specifications are not allowed. The\nreplacement fields within the format_spec are substituted before the\n*format_spec* string is interpreted. This allows the formatting of a\nvalue to be dynamically specified.\n\nFor example, suppose you wanted to have a replacement field whose\nfield width is determined by another variable:\n\n   "A man with two {0:{1}}".format("noses", 10)\n\nThis would first evaluate the inner replacement field, making the\nformat string effectively:\n\n   "A man with two {0:10}"\n\nThen the outer replacement field would be evaluated, producing:\n\n   "noses     "\n\nWhich is substituted into the string, yielding:\n\n   "A man with two noses     "\n\n(The extra space is because we specified a field width of 10, and\nbecause left alignment is the default for strings.)\n\n\nFormat Specification Mini-Language\n==================================\n\n"Format specifications" are used within replacement fields contained\nwithin a format string to define how individual values are presented\n(see *Format String Syntax*.) They can also be passed directly to the\nbuiltin ``format()`` function. Each formattable type may define how\nthe format specification is to be interpreted.\n\nMost built-in types implement the following options for format\nspecifications, although some of the formatting options are only\nsupported by the numeric types.\n\nA general convention is that an empty format string (``""``) produces\nthe same result as if you had called ``str()`` on the value.\n\nThe general form of a *standard format specifier* is:\n\n   format_spec ::= [[fill]align][sign][#][0][width][.precision][type]\n   fill        ::= <a character other than \'}\'>\n   align       ::= "<" | ">" | "=" | "^"\n   sign        ::= "+" | "-" | " "\n   width       ::= integer\n   precision   ::= integer\n   type        ::= "b" | "c" | "d" | "e" | "E" | "f" | "F" | "g" | "G" | "n" | "o" | "x" | "X" | "%"\n\nThe *fill* character can be any character other than \'}\' (which\nsignifies the end of the field). The presence of a fill character is\nsignaled by the *next* character, which must be one of the alignment\noptions. 
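For instance (a small sketch):\n\n   "{0:*^11}".format("hi")    # yields \'****hi*****\': fill \'*\', centered\n\n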
If the second character of *format_spec* is not a valid\nalignment option, then it is assumed that both the fill character and\nthe alignment option are absent.\n\nThe meaning of the various alignment options is as follows:\n\n +-----------+------------------------------------------------------------+\n | Option | Meaning |\n +===========+============================================================+\n | ``\'<\'`` | Forces the field to be left-aligned within the available |\n | | space (This is the default.) |\n +-----------+------------------------------------------------------------+\n | ``\'>\'`` | Forces the field to be right-aligned within the available |\n | | space. |\n +-----------+------------------------------------------------------------+\n | ``\'=\'`` | Forces the padding to be placed after the sign (if any) |\n | | but before the digits. This is used for printing fields |\n | | in the form \'+000000120\'. This alignment option is only |\n | | valid for numeric types. |\n +-----------+------------------------------------------------------------+\n | ``\'^\'`` | Forces the field to be centered within the available |\n | | space. |\n +-----------+------------------------------------------------------------+\n\nNote that unless a minimum field width is defined, the field width\nwill always be the same size as the data to fill it, so that the\nalignment option has no meaning in this case.\n\nThe *sign* option is only valid for number types, and can be one of\nthe following:\n\n +-----------+------------------------------------------------------------+\n | Option | Meaning |\n +===========+============================================================+\n | ``\'+\'`` | indicates that a sign should be used for both positive as |\n | | well as negative numbers. |\n +-----------+------------------------------------------------------------+\n | ``\'-\'`` | indicates that a sign should be used only for negative |\n | | numbers (this is the default behavior). |\n +-----------+------------------------------------------------------------+\n | space | indicates that a leading space should be used on positive |\n | | numbers, and a minus sign on negative numbers. |\n +-----------+------------------------------------------------------------+\n\nThe ``\'#\'`` option is only valid for integers, and only for binary,\noctal, or hexadecimal output. If present, it specifies that the\noutput will be prefixed by ``\'0b\'``, ``\'0o\'``, or ``\'0x\'``,\nrespectively.\n\n*width* is a decimal integer defining the minimum field width. If not\nspecified, then the field width will be determined by the content.\n\nIf the *width* field is preceded by a zero (``\'0\'``) character, this\nenables zero-padding. 
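\n\nThe following calls illustrate the *sign*, ``\'#\'`` and zero-padding\noptions just described (a minimal sketch):\n\n   \'{0:+d}\'.format(42)     # \'+42\' -- \'+\' forces a sign on positive numbers\n   \'{0: d}\'.format(42)     # \' 42\' -- a space reserves room for the sign\n   \'{0:#x}\'.format(255)    # \'0xff\' -- \'#\' adds the base prefix\n   \'{0:08d}\'.format(42)    # \'00000042\' -- zero-padding via a leading \'0\'\n\nThe last call enables zero-padding by preceding the *width* with a\nzero. 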
This is equivalent to an *alignment* type of\n``\'=\'`` and a *fill* character of ``\'0\'``.\n\nThe *precision* is a decimal number indicating how many digits should\nbe displayed after the decimal point for a floating point value\nformatted with ``\'f\'`` and ``\'F\'``, or before and after the decimal\npoint for a floating point value formatted with ``\'g\'`` or ``\'G\'``.\nFor non-number types the field indicates the maximum field size - in\nother words, how many characters will be used from the field content.\nThe *precision* is ignored for integer values.\n\nFinally, the *type* determines how the data should be presented.\n\nThe available integer presentation types are:\n\n +-----------+------------------------------------------------------------+\n | Type | Meaning |\n +===========+============================================================+\n | ``\'b\'`` | Binary format. Outputs the number in base 2. |\n +-----------+------------------------------------------------------------+\n | ``\'c\'`` | Character. Converts the integer to the corresponding |\n | | unicode character before printing. |\n +-----------+------------------------------------------------------------+\n | ``\'d\'`` | Decimal Integer. Outputs the number in base 10. |\n +-----------+------------------------------------------------------------+\n | ``\'o\'`` | Octal format. Outputs the number in base 8. |\n +-----------+------------------------------------------------------------+\n | ``\'x\'`` | Hex format. Outputs the number in base 16, using lower- |\n | | case letters for the digits above 9. |\n +-----------+------------------------------------------------------------+\n | ``\'X\'`` | Hex format. Outputs the number in base 16, using upper- |\n | | case letters for the digits above 9. |\n +-----------+------------------------------------------------------------+\n | ``\'n\'`` | Number. This is the same as ``\'d\'``, except that it uses |\n | | the current locale setting to insert the appropriate |\n | | number separator characters. |\n +-----------+------------------------------------------------------------+\n | None | The same as ``\'d\'``. |\n +-----------+------------------------------------------------------------+\n\nThe available presentation types for floating point and decimal values\nare:\n\n +-----------+------------------------------------------------------------+\n | Type | Meaning |\n +===========+============================================================+\n | ``\'e\'`` | Exponent notation. Prints the number in scientific |\n | | notation using the letter \'e\' to indicate the exponent. |\n +-----------+------------------------------------------------------------+\n | ``\'E\'`` | Exponent notation. Same as ``\'e\'`` except it uses an upper |\n | | case \'E\' as the separator character. |\n +-----------+------------------------------------------------------------+\n | ``\'f\'`` | Fixed point. Displays the number as a fixed-point number. |\n +-----------+------------------------------------------------------------+\n | ``\'F\'`` | Fixed point. Same as ``\'f\'``. |\n +-----------+------------------------------------------------------------+\n | ``\'g\'`` | General format. This prints the number as a fixed-point |\n | | number, unless the number is too large, in which case it |\n | | switches to ``\'e\'`` exponent notation. Infinity and NaN |\n | | values are formatted as ``inf``, ``-inf`` and ``nan``, |\n | | respectively. 
|\n +-----------+------------------------------------------------------------+\n | ``\'G\'`` | General format. Same as ``\'g\'`` except switches to ``\'E\'`` |\n | | if the number gets too large. The representations of |\n | | infinity and NaN are uppercased, too. |\n +-----------+------------------------------------------------------------+\n | ``\'n\'`` | Number. This is the same as ``\'g\'``, except that it uses |\n | | the current locale setting to insert the appropriate |\n | | number separator characters. |\n +-----------+------------------------------------------------------------+\n | ``\'%\'`` | Percentage. Multiplies the number by 100 and displays in |\n | | fixed (``\'f\'``) format, followed by a percent sign. |\n +-----------+------------------------------------------------------------+\n | None | The same as ``\'g\'``. |\n +-----------+------------------------------------------------------------+\n', 'function': u'\nFunction definitions\n********************\n\nA function definition defines a user-defined function object (see\nsection *The standard type hierarchy*):\n\n decorated ::= decorators (classdef | funcdef)\n decorators ::= decorator+\n decorator ::= "@" dotted_name ["(" [argument_list [","]] ")"] NEWLINE\n funcdef ::= "def" funcname "(" [parameter_list] ")" ":" suite\n dotted_name ::= identifier ("." identifier)*\n parameter_list ::= (defparameter ",")*\n ( "*" identifier ["," "**" identifier]\n | "**" identifier\n | defparameter [","] )\n defparameter ::= parameter ["=" expression]\n sublist ::= parameter ("," parameter)* [","]\n parameter ::= identifier | "(" sublist ")"\n funcname ::= identifier\n\nA function definition is an executable statement. Its execution binds\nthe function name in the current local namespace to a function object\n(a wrapper around the executable code for the function). This\nfunction object contains a reference to the current global namespace\nas the global namespace to be used when the function is called.\n\nThe function definition does not execute the function body; this gets\nexecuted only when the function is called. [3]\n\nA function definition may be wrapped by one or more *decorator*\nexpressions. Decorator expressions are evaluated when the function is\ndefined, in the scope that contains the function definition. The\nresult must be a callable, which is invoked with the function object\nas the only argument. The returned value is bound to the function name\ninstead of the function object. Multiple decorators are applied in\nnested fashion. For example, the following code:\n\n @f1(arg)\n @f2\n def func(): pass\n\nis equivalent to:\n\n def func(): pass\n func = f1(arg)(f2(func))\n\nWhen one or more top-level parameters have the form *parameter* ``=``\n*expression*, the function is said to have "default parameter values."\nFor a parameter with a default value, the corresponding argument may\nbe omitted from a call, in which case the parameter\'s default value is\nsubstituted. If a parameter has a default value, all following\nparameters must also have a default value --- this is a syntactic\nrestriction that is not expressed by the grammar.\n\n**Default parameter values are evaluated when the function definition\nis executed.** This means that the expression is evaluated once, when\nthe function is defined, and that that same "pre-computed" value is\nused for each call. This is especially important to understand when a\ndefault parameter is a mutable object, such as a list or a dictionary:\nif the function modifies the object (e.g. 
by appending an item to a\nlist), the default value is in effect modified. This is generally not\nwhat was intended. A way around this is to use ``None`` as the\ndefault, and explicitly test for it in the body of the function, e.g.:\n\n def whats_on_the_telly(penguin=None):\n if penguin is None:\n penguin = []\n penguin.append("property of the zoo")\n return penguin\n\nFunction call semantics are described in more detail in section\n*Calls*. A function call always assigns values to all parameters\nmentioned in the parameter list, either from positional arguments, from\nkeyword arguments, or from default values. If the form\n"``*identifier``" is present, it is initialized to a tuple receiving\nany excess positional parameters, defaulting to the empty tuple. If\nthe form "``**identifier``" is present, it is initialized to a new\ndictionary receiving any excess keyword arguments, defaulting to a new\nempty dictionary.\n\nIt is also possible to create anonymous functions (functions not bound\nto a name), for immediate use in expressions. This uses lambda forms,\ndescribed in section *Expression lists*. Note that the lambda form is\nmerely a shorthand for a simplified function definition; a function\ndefined in a "``def``" statement can be passed around or assigned to\nanother name just like a function defined by a lambda form. The\n"``def``" form is actually more powerful since it allows the execution\nof multiple statements.\n\n**Programmer\'s note:** Functions are first-class objects. A "``def``"\nform executed inside a function definition defines a local function\nthat can be returned or passed around. Free variables used in the\nnested function can access the local variables of the function\ncontaining the def. See section *Naming and binding* for details.\n', 'global': u'\nThe ``global`` statement\n************************\n\n global_stmt ::= "global" identifier ("," identifier)*\n\nThe ``global`` statement is a declaration which holds for the entire\ncurrent code block. It means that the listed identifiers are to be\ninterpreted as globals. It would be impossible to assign to a global\nvariable without ``global``, although free variables may refer to\nglobals without being declared global.\n\nNames listed in a ``global`` statement must not be used in the same\ncode block textually preceding that ``global`` statement.\n\nNames listed in a ``global`` statement must not be defined as formal\nparameters or in a ``for`` loop control target, ``class`` definition,\nfunction definition, or ``import`` statement.\n\n(The current implementation does not enforce the latter two\nrestrictions, but programs should not abuse this freedom, as future\nimplementations may enforce them or silently change the meaning of the\nprogram.)\n\n**Programmer\'s note:** the ``global`` is a directive to the parser.\nIt applies only to code parsed at the same time as the ``global``\nstatement. In particular, a ``global`` statement contained in an\n``exec`` statement does not affect the code block *containing* the\n``exec`` statement, and code contained in an ``exec`` statement is\nunaffected by ``global`` statements in the code containing the\n``exec`` statement. The same applies to the ``eval()``,\n``execfile()`` and ``compile()`` functions.\n', 'id-classes': u'\nReserved classes of identifiers\n*******************************\n\nCertain classes of identifiers (besides keywords) have special\nmeanings. 
These classes are identified by the patterns of leading and\ntrailing underscore characters:\n\n``_*``\n Not imported by ``from module import *``. The special identifier\n ``_`` is used in the interactive interpreter to store the result of\n the last evaluation; it is stored in the ``__builtin__`` module.\n When not in interactive mode, ``_`` has no special meaning and is\n not defined. See section *The import statement*.\n\n Note: The name ``_`` is often used in conjunction with\n internationalization; refer to the documentation for the\n ``gettext`` module for more information on this convention.\n\n``__*__``\n System-defined names. These names are defined by the interpreter\n and its implementation (including the standard library);\n applications should not expect to define additional names using\n this convention. The set of names of this class defined by Python\n may be extended in future versions. See section *Special method\n names*.\n\n``__*``\n Class-private names. Names in this category, when used within the\n context of a class definition, are re-written to use a mangled form\n to help avoid name clashes between "private" attributes of base and\n derived classes. See section *Identifiers (Names)*.\n', 'identifiers': u'\nIdentifiers and keywords\n************************\n\nIdentifiers (also referred to as *names*) are described by the\nfollowing lexical definitions:\n\n identifier ::= (letter|"_") (letter | digit | "_")*\n letter ::= lowercase | uppercase\n lowercase ::= "a"..."z"\n uppercase ::= "A"..."Z"\n digit ::= "0"..."9"\n\nIdentifiers are unlimited in length. Case is significant.\n\n\nKeywords\n========\n\nThe following identifiers are used as reserved words, or *keywords* of\nthe language, and cannot be used as ordinary identifiers. They must\nbe spelled exactly as written here:\n\n and del from not while\n as elif global or with\n assert else if pass yield\n break except import print\n class exec in raise\n continue finally is return\n def for lambda try\n\nChanged in version 2.4: ``None`` became a constant and is now\nrecognized by the compiler as a name for the built-in object ``None``.\nAlthough it is not a keyword, you cannot assign a different object to\nit.\n\nChanged in version 2.5: Both ``as`` and ``with`` are only recognized\nwhen the ``with_statement`` future feature has been enabled. It will\nalways be enabled in Python 2.6. See section *The with statement* for\ndetails. Note that using ``as`` and ``with`` as identifiers will\nalways issue a warning, even when the ``with_statement`` future\ndirective is not in effect.\n\n\nReserved classes of identifiers\n===============================\n\nCertain classes of identifiers (besides keywords) have special\nmeanings. These classes are identified by the patterns of leading and\ntrailing underscore characters:\n\n``_*``\n Not imported by ``from module import *``. The special identifier\n ``_`` is used in the interactive interpreter to store the result of\n the last evaluation; it is stored in the ``__builtin__`` module.\n When not in interactive mode, ``_`` has no special meaning and is\n not defined. See section *The import statement*.\n\n Note: The name ``_`` is often used in conjunction with\n internationalization; refer to the documentation for the\n ``gettext`` module for more information on this convention.\n\n``__*__``\n System-defined names. 
These names are defined by the interpreter\n and its implementation (including the standard library);\n applications should not expect to define additional names using\n this convention. The set of names of this class defined by Python\n may be extended in future versions. See section *Special method\n names*.\n\n``__*``\n Class-private names. Names in this category, when used within the\n context of a class definition, are re-written to use a mangled form\n to help avoid name clashes between "private" attributes of base and\n derived classes. See section *Identifiers (Names)*.\n', 'if': u'\nThe ``if`` statement\n********************\n\nThe ``if`` statement is used for conditional execution:\n\n if_stmt ::= "if" expression ":" suite\n ( "elif" expression ":" suite )*\n ["else" ":" suite]\n\nIt selects exactly one of the suites by evaluating the expressions one\nby one until one is found to be true (see section *Boolean operations*\nfor the definition of true and false); then that suite is executed\n(and no other part of the ``if`` statement is executed or evaluated).\nIf all expressions are false, the suite of the ``else`` clause, if\npresent, is executed.\n', 'imaginary': u'\nImaginary literals\n******************\n\nImaginary literals are described by the following lexical definitions:\n\n imagnumber ::= (floatnumber | intpart) ("j" | "J")\n\nAn imaginary literal yields a complex number with a real part of 0.0.\nComplex numbers are represented as a pair of floating point numbers\nand have the same restrictions on their range. To create a complex\nnumber with a nonzero real part, add a floating point number to it,\ne.g., ``(3+4j)``. Some examples of imaginary literals:\n\n 3.14j 10.j 10j .001j 1e100j 3.14e-10j\n', 'import': u'\nThe ``import`` statement\n************************\n\n import_stmt ::= "import" module ["as" name] ( "," module ["as" name] )*\n | "from" relative_module "import" identifier ["as" name]\n ( "," identifier ["as" name] )*\n | "from" relative_module "import" "(" identifier ["as" name]\n ( "," identifier ["as" name] )* [","] ")"\n | "from" module "import" "*"\n module ::= (identifier ".")* identifier\n relative_module ::= "."* module | "."+\n name ::= identifier\n\nImport statements are executed in two steps: (1) find a module, and\ninitialize it if necessary; (2) define a name or names in the local\nnamespace (of the scope where the ``import`` statement occurs). The\nstatement comes in two forms differing on whether it uses the ``from``\nkeyword. The first form (without ``from``) repeats these steps for\neach identifier in the list. The form with ``from`` performs step (1)\nonce, and then performs step (2) repeatedly.\n\nTo understand how step (1) occurs, one must first understand how\nPython handles hierarchical naming of modules. To help organize\nmodules and provide a hierarchy in naming, Python has a concept of\npackages. A package can contain other packages and modules while\nmodules cannot contain other modules or packages. From a file system\nperspective, packages are directories and modules are files. The\noriginal specification for packages is still available to read,\nalthough minor details have changed since the writing of that\ndocument.\n\nOnce the name of the module is known (unless otherwise specified, the\nterm "module" will refer to both packages and modules), searching for\nthe module or package can begin. The first place checked is\n``sys.modules``, the cache of all modules that have been imported\npreviously. 
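\n\nA quick interactive illustration of this cache (a minimal sketch; it\nassumes a fresh interpreter in which ``csv`` has not yet been\nimported):\n\n   import sys\n   \'csv\' in sys.modules        # False in a fresh interpreter\n   import csv\n   \'csv\' in sys.modules        # True -- the import filled the cache\n   sys.modules[\'csv\'] is csv   # True -- later imports reuse this object\n\n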
If the module is found there then it is used in step (2)\nof import.\n\nIf the module is not found in the cache, then ``sys.meta_path`` is\nsearched (the specification for ``sys.meta_path`` can be found in\n**PEP 302**). The object is a list of *finder* objects which are\nqueried in order as to whether they know how to load the module by\ncalling their ``find_module()`` method with the name of the module. If\nthe module happens to be contained within a package (as denoted by the\nexistence of a dot in the name), then a second argument to\n``find_module()`` is given as the value of the ``__path__`` attribute\nfrom the parent package (everything up to the last dot in the name of\nthe module being imported). If a finder can find the module it returns\na *loader* (discussed later) or returns ``None``.\n\nIf none of the finders on ``sys.meta_path`` are able to find the\nmodule then some implicitly defined finders are queried.\nImplementations of Python vary in what implicit meta path finders are\ndefined. The one they all do define, though, is one that handles\n``sys.path_hooks``, ``sys.path_importer_cache``, and ``sys.path``.\n\nThe implicit finder searches for the requested module in the "paths"\nspecified in one of two places ("paths" do not have to be file system\npaths). If the module being imported is supposed to be contained\nwithin a package then the second argument passed to ``find_module()``,\n``__path__`` on the parent package, is used as the source of paths. If\nthe module is not contained in a package then ``sys.path`` is used as\nthe source of paths.\n\nOnce the source of paths is chosen it is iterated over to find a\nfinder that can handle that path. The dict at\n``sys.path_importer_cache`` caches finders for paths and is checked\nfor a finder. If the path does not have a finder cached then\n``sys.path_hooks`` is searched by calling each object in the list with\na single argument of the path, returning a finder or raising\n``ImportError``. If a finder is returned then it is cached in\n``sys.path_importer_cache`` and then used for that path entry. If no\nfinder can be found but the path exists then a value of ``None`` is\nstored in ``sys.path_importer_cache`` to signify that an implicit,\nfile-based finder that handles modules stored as individual files\nshould be used for that path. If the path does not exist then a finder\nwhich always returns ``None`` is placed in the cache for the path.\n\nIf no finder can find the module then ``ImportError`` is raised.\nOtherwise some finder returned a loader whose ``load_module()`` method\nis called with the name of the module to load (see **PEP 302** for the\noriginal definition of loaders). A loader has several responsibilities\nto perform on a module it loads. First, if the module already exists\nin ``sys.modules`` (a possibility if the loader is called outside of\nthe import machinery) then it is to use that module for initialization\nand not a new module. But if the module does not exist in\n``sys.modules`` then it is to be added to that dict before\ninitialization begins. If an error occurs during loading of the module\nand it was added to ``sys.modules`` it is to be removed from the dict.\nIf an error occurs but the module was already in ``sys.modules`` it is\nleft in the dict.\n\nThe loader must set several attributes on the module. ``__name__`` is\nto be set to the name of the module. 
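\n\nBefore continuing with the remaining module attributes, here is a\nbare-bones finder/loader pair that exercises the protocol end to end\n(a minimal sketch of the **PEP 302** interface; the ``dummy`` module\nname and its ``answer`` attribute are invented for illustration, and\nerror handling is reduced to the essentials):\n\n   import imp\n   import sys\n\n   class DummyFinder(object):\n       def find_module(self, fullname, path=None):\n           if fullname == \'dummy\':\n               return self              # act as our own loader\n           return None                  # let the other finders try\n\n       def load_module(self, fullname):\n           if fullname in sys.modules:  # reuse an existing module object\n               return sys.modules[fullname]\n           mod = imp.new_module(fullname)   # also sets __name__ for us\n           mod.__file__ = \'<dummy>\'\n           mod.__loader__ = self\n           sys.modules[fullname] = mod      # register before running the body\n           try:\n               exec \'answer = 42\' in mod.__dict__\n           except Exception:\n               del sys.modules[fullname]    # undo the registration on failure\n               raise\n           return mod\n\n   sys.meta_path.append(DummyFinder())\n   import dummy\n   print dummy.answer                       # 42\n\n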
``__file__`` is to be the "path"\nto the file unless the module is built-in (and thus listed in\n``sys.builtin_module_names``) in which case the attribute is not set.\nIf what is being imported is a package then ``__path__`` is to be set\nto a list of paths to be searched when looking for modules and\npackages contained within the package being imported. ``__package__``\nis optional but should be set to the name of the package that contains\nthe module or package (the empty string is used for modules not\ncontained in a package). ``__loader__`` is also optional but should be\nset to the loader object that is loading the module.\n\nIf an error occurs during loading then the loader raises\n``ImportError`` if some other exception is not already being\npropagated. Otherwise the loader returns the module that was loaded\nand initialized.\n\nWhen step (1) finishes without raising an exception, step (2) can\nbegin.\n\nThe first form of ``import`` statement binds the module name in the\nlocal namespace to the module object, and then goes on to import the\nnext identifier, if any. If the module name is followed by ``as``,\nthe name following ``as`` is used as the local name for the module.\n\nThe ``from`` form does not bind the module name: it goes through the\nlist of identifiers, looks each one of them up in the module found in\nstep (1), and binds the name in the local namespace to the object thus\nfound. As with the first form of ``import``, an alternate local name\ncan be supplied by specifying "``as`` localname". If a name is not\nfound, ``ImportError`` is raised. If the list of identifiers is\nreplaced by a star (``\'*\'``), all public names defined in the module\nare bound in the local namespace of the ``import`` statement.\n\nThe *public names* defined by a module are determined by checking the\nmodule\'s namespace for a variable named ``__all__``; if defined, it\nmust be a sequence of strings which are names defined or imported by\nthat module. The names given in ``__all__`` are all considered public\nand are required to exist. If ``__all__`` is not defined, the set of\npublic names includes all names found in the module\'s namespace which\ndo not begin with an underscore character (``\'_\'``). ``__all__``\nshould contain the entire public API. It is intended to avoid\naccidentally exporting items that are not part of the API (such as\nlibrary modules which were imported and used within the module).\n\nThe ``from`` form with ``*`` may only occur in a module scope. If the\nwild card form of import --- ``import *`` --- is used in a function\nand the function contains or is a nested block with free variables,\nthe compiler will raise a ``SyntaxError``.\n\nWhen specifying what module to import you do not have to specify the\nabsolute name of the module. When a module or package is contained\nwithin another package it is possible to make a relative import within\nthe same top package without having to mention the package name. By\nusing leading dots in the specified module or package after ``from``\nyou can specify how high to traverse up the current package hierarchy\nwithout specifying exact names. One leading dot means the current\npackage where the module making the import exists. Two dots means up\none package level. Three dots is up two levels, etc. So if you execute\n``from . import mod`` from a module in the ``pkg`` package then you\nwill end up importing ``pkg.mod``. If you execute ``from ..subpkg2\nimport mod`` from within ``pkg.subpkg1`` you will import\n``pkg.subpkg2.mod``. 
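\n\nA hypothetical package layout makes the dot counting easier to follow\n(all names here are invented for illustration):\n\n   pkg/\n       __init__.py\n       mod.py           # \'from . import mod\' inside pkg reaches this\n       subpkg1/\n           __init__.py\n           helper.py    # \'from .. import mod\' here imports pkg.mod\n       subpkg2/\n           __init__.py\n           mod.py       # reached from helper.py as \'from ..subpkg2 import mod\'\n\n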
The specification for relative imports is\ncontained within **PEP 328**.\n\nThe built-in function ``__import__()`` is provided to support\napplications that determine which modules need to be loaded\ndynamically; refer to *Built-in Functions* for additional information.\n\n\nFuture statements\n=================\n\nA *future statement* is a directive to the compiler that a particular\nmodule should be compiled using syntax or semantics that will be\navailable in a specified future release of Python. The future\nstatement is intended to ease migration to future versions of Python\nthat introduce incompatible changes to the language. It allows use of\nthe new features on a per-module basis before the release in which the\nfeature becomes standard.\n\n future_statement ::= "from" "__future__" "import" feature ["as" name]\n ("," feature ["as" name])*\n | "from" "__future__" "import" "(" feature ["as" name]\n ("," feature ["as" name])* [","] ")"\n feature ::= identifier\n name ::= identifier\n\nA future statement must appear near the top of the module. The only\nlines that can appear before a future statement are:\n\n* the module docstring (if any),\n\n* comments,\n\n* blank lines, and\n\n* other future statements.\n\nThe features recognized by Python 2.6 are ``unicode_literals``,\n``print_function``, ``absolute_import``, ``division``, ``generators``,\n``nested_scopes`` and ``with_statement``. ``generators``,\n``with_statement``, ``nested_scopes`` are redundant in Python version\n2.6 and above because they are always enabled.\n\nA future statement is recognized and treated specially at compile\ntime: Changes to the semantics of core constructs are often\nimplemented by generating different code. It may even be the case\nthat a new feature introduces new incompatible syntax (such as a new\nreserved word), in which case the compiler may need to parse the\nmodule differently. Such decisions cannot be pushed off until\nruntime.\n\nFor any given release, the compiler knows which feature names have\nbeen defined, and raises a compile-time error if a future statement\ncontains a feature not known to it.\n\nThe direct runtime semantics are the same as for any import statement:\nthere is a standard module ``__future__``, described later, and it\nwill be imported in the usual way at the time the future statement is\nexecuted.\n\nThe interesting runtime semantics depend on the specific feature\nenabled by the future statement.\n\nNote that there is nothing special about the statement:\n\n import __future__ [as name]\n\nThat is not a future statement; it\'s an ordinary import statement with\nno special semantics or syntax restrictions.\n\nCode compiled by an ``exec`` statement or calls to the builtin\nfunctions ``compile()`` and ``execfile()`` that occur in a module\n``M`` containing a future statement will, by default, use the new\nsyntax or semantics associated with the future statement. This can,\nstarting with Python 2.2, be controlled by optional arguments to\n``compile()`` --- see the documentation of that function for details.\n\nA future statement typed at an interactive interpreter prompt will\ntake effect for the rest of the interpreter session. 
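\n\nFor example, a module (or an interactive session) can opt in to the\nnew division semantics before they become the default (a minimal\nsketch; the results shown assume Python 2.6):\n\n   from __future__ import division\n\n   1 / 2     # 0.5 with the future statement; 0 without it\n   1 // 2    # 0 -- floor division is unaffected either way\n\n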
If an\ninterpreter is started with the *-i* option, is passed a script name\nto execute, and the script includes a future statement, it will be in\neffect in the interactive session started after the script is\nexecuted.\n', 'in': u'\nComparisons\n***********\n\nUnlike C, all comparison operations in Python have the same priority,\nwhich is lower than that of any arithmetic, shifting or bitwise\noperation. Also unlike C, expressions like ``a < b < c`` have the\ninterpretation that is conventional in mathematics:\n\n comparison ::= or_expr ( comp_operator or_expr )*\n comp_operator ::= "<" | ">" | "==" | ">=" | "<=" | "<>" | "!="\n | "is" ["not"] | ["not"] "in"\n\nComparisons yield boolean values: ``True`` or ``False``.\n\nComparisons can be chained arbitrarily, e.g., ``x < y <= z`` is\nequivalent to ``x < y and y <= z``, except that ``y`` is evaluated\nonly once (but in both cases ``z`` is not evaluated at all when ``x <\ny`` is found to be false).\n\nFormally, if *a*, *b*, *c*, ..., *y*, *z* are expressions and *op1*,\n*op2*, ..., *opN* are comparison operators, then ``a op1 b op2 c ... y\nopN z`` is equivalent to ``a op1 b and b op2 c and ... y opN z``,\nexcept that each expression is evaluated at most once.\n\nNote that ``a op1 b op2 c`` doesn\'t imply any kind of comparison\nbetween *a* and *c*, so that, e.g., ``x < y > z`` is perfectly legal\n(though perhaps not pretty).\n\nThe forms ``<>`` and ``!=`` are equivalent; for consistency with C,\n``!=`` is preferred; where ``!=`` is mentioned below ``<>`` is also\naccepted. The ``<>`` spelling is considered obsolescent.\n\nThe operators ``<``, ``>``, ``==``, ``>=``, ``<=``, and ``!=`` compare\nthe values of two objects. The objects need not have the same type.\nIf both are numbers, they are converted to a common type. Otherwise,\nobjects of different types *always* compare unequal, and are ordered\nconsistently but arbitrarily. You can control comparison behavior of\nobjects of non-builtin types by defining a ``__cmp__`` method or rich\ncomparison methods like ``__gt__``, described in section *Special\nmethod names*.\n\n(This unusual definition of comparison was used to simplify the\ndefinition of operations like sorting and the ``in`` and ``not in``\noperators. In the future, the comparison rules for objects of\ndifferent types are likely to change.)\n\nComparison of objects of the same type depends on the type:\n\n* Numbers are compared arithmetically.\n\n* Strings are compared lexicographically using the numeric equivalents\n (the result of the built-in function ``ord()``) of their characters.\n Unicode and 8-bit strings are fully interoperable in this behavior.\n [4]\n\n* Tuples and lists are compared lexicographically using comparison of\n corresponding elements. This means that to compare equal, each\n element must compare equal and the two sequences must be of the same\n type and have the same length.\n\n If not equal, the sequences are ordered the same as their first\n differing elements. For example, ``cmp([1,2,x], [1,2,y])`` returns\n the same as ``cmp(x,y)``. If the corresponding element does not\n exist, the shorter sequence is ordered first (for example, ``[1,2] <\n [1,2,3]``).\n\n* Mappings (dictionaries) compare equal if and only if their sorted\n (key, value) lists compare equal. [5] Outcomes other than equality\n are resolved consistently, but are not otherwise defined. 
[6]\n\n* Most other objects of builtin types compare unequal unless they are\n the same object; the choice whether one object is considered smaller\n or larger than another one is made arbitrarily but consistently\n within one execution of a program.\n\nThe operators ``in`` and ``not in`` test for collection membership.\n``x in s`` evaluates to true if *x* is a member of the collection *s*,\nand false otherwise. ``x not in s`` returns the negation of ``x in\ns``. The collection membership test has traditionally been bound to\nsequences; an object is a member of a collection if the collection is\na sequence and contains an element equal to that object. However, it\nmakes sense for many other object types to support membership tests\nwithout being a sequence. In particular, dictionaries (for keys) and\nsets support membership testing.\n\nFor the list and tuple types, ``x in y`` is true if and only if there\nexists an index *i* such that ``x == y[i]`` is true.\n\nFor the Unicode and string types, ``x in y`` is true if and only if\n*x* is a substring of *y*. An equivalent test is ``y.find(x) != -1``.\nNote, *x* and *y* need not be the same type; consequently, ``u\'ab\' in\n\'abc\'`` will return ``True``. Empty strings are always considered to\nbe a substring of any other string, so ``"" in "abc"`` will return\n``True``.\n\nChanged in version 2.3: Previously, *x* was required to be a string of\nlength ``1``.\n\nFor user-defined classes which define the ``__contains__()`` method,\n``x in y`` is true if and only if ``y.__contains__(x)`` is true.\n\nFor user-defined classes which do not define ``__contains__()`` and do\ndefine ``__getitem__()``, ``x in y`` is true if and only if there is a\nnon-negative integer index *i* such that ``x == y[i]``, and all lower\ninteger indices do not raise an ``IndexError`` exception. (If any other\nexception is raised, it is as if ``in`` raised that exception).\n\nThe operator ``not in`` is defined to have the inverse true value of\n``in``.\n\nThe operators ``is`` and ``is not`` test for object identity: ``x is\ny`` is true if and only if *x* and *y* are the same object. ``x is\nnot y`` yields the inverse truth value. [7]\n', 'integers': u'\nInteger and long integer literals\n*********************************\n\nInteger and long integer literals are described by the following\nlexical definitions:\n\n longinteger ::= integer ("l" | "L")\n integer ::= decimalinteger | octinteger | hexinteger | bininteger\n decimalinteger ::= nonzerodigit digit* | "0"\n octinteger ::= "0" ("o" | "O") octdigit+ | "0" octdigit+\n hexinteger ::= "0" ("x" | "X") hexdigit+\n bininteger ::= "0" ("b" | "B") bindigit+\n nonzerodigit ::= "1"..."9"\n octdigit ::= "0"..."7"\n bindigit ::= "0" | "1"\n hexdigit ::= digit | "a"..."f" | "A"..."F"\n\nAlthough both lower case ``\'l\'`` and upper case ``\'L\'`` are allowed as\nsuffix for long integers, it is strongly recommended to always use\n``\'L\'``, since the letter ``\'l\'`` looks too much like the digit\n``\'1\'``.\n\nPlain integer literals that are above the largest representable plain\ninteger (e.g., 2147483647 when using 32-bit arithmetic) are accepted\nas if they were long integers instead. 
[1] There is no limit for long\ninteger literals apart from what can be stored in available memory.\n\nSome examples of plain integer literals (first row) and long integer\nliterals (second and third rows):\n\n 7 2147483647 0177\n 3L 79228162514264337593543950336L 0377L 0x100000000L\n 79228162514264337593543950336 0xdeadbeef\n', 'lambda': u'\nExpression lists\n****************\n\n expression_list ::= expression ( "," expression )* [","]\n\nAn expression list containing at least one comma yields a tuple. The\nlength of the tuple is the number of expressions in the list. The\nexpressions are evaluated from left to right.\n\nThe trailing comma is required only to create a single tuple (a.k.a. a\n*singleton*); it is optional in all other cases. A single expression\nwithout a trailing comma doesn\'t create a tuple, but rather yields the\nvalue of that expression. (To create an empty tuple, use an empty pair\nof parentheses: ``()``.)\n', 'lists': u'\nList displays\n*************\n\nA list display is a possibly empty series of expressions enclosed in\nsquare brackets:\n\n list_display ::= "[" [expression_list | list_comprehension] "]"\n list_comprehension ::= expression list_for\n list_for ::= "for" target_list "in" old_expression_list [list_iter]\n old_expression_list ::= old_expression [("," old_expression)+ [","]]\n list_iter ::= list_for | list_if\n list_if ::= "if" old_expression [list_iter]\n\nA list display yields a new list object. Its contents are specified\nby providing either a list of expressions or a list comprehension.\nWhen a comma-separated list of expressions is supplied, its elements\nare evaluated from left to right and placed into the list object in\nthat order. When a list comprehension is supplied, it consists of a\nsingle expression followed by at least one ``for`` clause and zero or\nmore ``for`` or ``if`` clauses. In this case, the elements of the new\nlist are those that would be produced by considering each of the\n``for`` or ``if`` clauses a block, nesting from left to right, and\nevaluating the expression to produce a list element each time the\ninnermost block is reached [1].\n', 'naming': u"\nNaming and binding\n******************\n\n*Names* refer to objects. Names are introduced by name binding\noperations. Each occurrence of a name in the program text refers to\nthe *binding* of that name established in the innermost function block\ncontaining the use.\n\nA *block* is a piece of Python program text that is executed as a\nunit. The following are blocks: a module, a function body, and a class\ndefinition. Each command typed interactively is a block. A script\nfile (a file given as standard input to the interpreter or specified\non the interpreter command line as the first argument) is a code block.\nA script command (a command specified on the interpreter command line\nwith the '**-c**' option) is a code block. The file read by the\nbuilt-in function ``execfile()`` is a code block. The string argument\npassed to the built-in function ``eval()`` and to the ``exec``\nstatement is a code block. The expression read and evaluated by the\nbuilt-in function ``input()`` is a code block.\n\nA code block is executed in an *execution frame*. A frame contains\nsome administrative information (used for debugging) and determines\nwhere and how execution continues after the code block's execution has\ncompleted.\n\nA *scope* defines the visibility of a name within a block. If a local\nvariable is defined in a block, its scope includes that block. 
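\n\nThe next rule, that a function-local scope extends into blocks nested\ninside the function, can be seen in a short sketch:\n\n   def outer():\n       x = 1\n       def inner():\n           return x    # 'x' is free in inner() and found in outer()'s scope\n       return inner()\n\n   outer()    # returns 1\n\n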
If the\ndefinition occurs in a function block, the scope extends to any blocks\ncontained within the defining one, unless a contained block introduces\na different binding for the name. The scope of names defined in a\nclass block is limited to the class block; it does not extend to the\ncode blocks of methods -- this includes generator expressions since\nthey are implemented using a function scope. This means that the\nfollowing will fail:\n\n class A:\n a = 42\n b = list(a + i for i in range(10))\n\nWhen a name is used in a code block, it is resolved using the nearest\nenclosing scope. The set of all such scopes visible to a code block\nis called the block's *environment*.\n\nIf a name is bound in a block, it is a local variable of that block.\nIf a name is bound at the module level, it is a global variable. (The\nvariables of the module code block are local and global.) If a\nvariable is used in a code block but not defined there, it is a *free\nvariable*.\n\nWhen a name is not found at all, a ``NameError`` exception is raised.\nIf the name refers to a local variable that has not been bound, a\n``UnboundLocalError`` exception is raised. ``UnboundLocalError`` is a\nsubclass of ``NameError``.\n\nThe following constructs bind names: formal parameters to functions,\n``import`` statements, class and function definitions (these bind the\nclass or function name in the defining block), and targets that are\nidentifiers if occurring in an assignment, ``for`` loop header, in the\nsecond position of an ``except`` clause header or after ``as`` in a\n``with`` statement. The ``import`` statement of the form ``from ...\nimport *`` binds all names defined in the imported module, except\nthose beginning with an underscore. This form may only be used at the\nmodule level.\n\nA target occurring in a ``del`` statement is also considered bound for\nthis purpose (though the actual semantics are to unbind the name). It\nis illegal to unbind a name that is referenced by an enclosing scope;\nthe compiler will report a ``SyntaxError``.\n\nEach assignment or import statement occurs within a block defined by a\nclass or function definition or at the module level (the top-level\ncode block).\n\nIf a name binding operation occurs anywhere within a code block, all\nuses of the name within the block are treated as references to the\ncurrent block. This can lead to errors when a name is used within a\nblock before it is bound. This rule is subtle. Python lacks\ndeclarations and allows name binding operations to occur anywhere\nwithin a code block. The local variables of a code block can be\ndetermined by scanning the entire text of the block for name binding\noperations.\n\nIf the global statement occurs within a block, all uses of the name\nspecified in the statement refer to the binding of that name in the\ntop-level namespace. Names are resolved in the top-level namespace by\nsearching the global namespace, i.e. the namespace of the module\ncontaining the code block, and the builtin namespace, the namespace of\nthe module ``__builtin__``. The global namespace is searched first.\nIf the name is not found there, the builtin namespace is searched.\nThe global statement must precede all uses of the name.\n\nThe built-in namespace associated with the execution of a code block\nis actually found by looking up the name ``__builtins__`` in its\nglobal namespace; this should be a dictionary or a module (in the\nlatter case the module's dictionary is used). 
By default, when in the\n``__main__`` module, ``__builtins__`` is the built-in module\n``__builtin__`` (note: no 's'); when in any other module,\n``__builtins__`` is an alias for the dictionary of the ``__builtin__``\nmodule itself. ``__builtins__`` can be set to a user-created\ndictionary to create a weak form of restricted execution.\n\nNote: Users should not touch ``__builtins__``; it is strictly an\n implementation detail. Users wanting to override values in the\n built-in namespace should ``import`` the ``__builtin__`` (no 's')\n module and modify its attributes appropriately.\n\nThe namespace for a module is automatically created the first time a\nmodule is imported. The main module for a script is always called\n``__main__``.\n\nThe global statement has the same scope as a name binding operation in\nthe same block. If the nearest enclosing scope for a free variable\ncontains a global statement, the free variable is treated as a global.\n\nA class definition is an executable statement that may use and define\nnames. These references follow the normal rules for name resolution.\nThe namespace of the class definition becomes the attribute dictionary\nof the class. Names defined at the class scope are not visible in\nmethods.\n\n\nInteraction with dynamic features\n=================================\n\nThere are several cases where Python statements are illegal when used\nin conjunction with nested scopes that contain free variables.\n\nIf a variable is referenced in an enclosing scope, it is illegal to\ndelete the name. An error will be reported at compile time.\n\nIf the wild card form of import --- ``import *`` --- is used in a\nfunction and the function contains or is a nested block with free\nvariables, the compiler will raise a ``SyntaxError``.\n\nIf ``exec`` is used in a function and the function contains or is a\nnested block with free variables, the compiler will raise a\n``SyntaxError`` unless the exec explicitly specifies the local\nnamespace for the ``exec``. (In other words, ``exec obj`` would be\nillegal, but ``exec obj in ns`` would be legal.)\n\nThe ``eval()``, ``execfile()``, and ``input()`` functions and the\n``exec`` statement do not have access to the full environment for\nresolving names. Names may be resolved in the local and global\nnamespaces of the caller. Free variables are not resolved in the\nnearest enclosing namespace, but in the global namespace. [1] The\n``exec`` statement and the ``eval()`` and ``execfile()`` functions\nhave optional arguments to override the global and local namespace.\nIf only one namespace is specified, it is used for both.\n", 'numbers': u"\nNumeric literals\n****************\n\nThere are four types of numeric literals: plain integers, long\nintegers, floating point numbers, and imaginary numbers. 
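\n\nOne example of each kind:\n\n   42       # plain integer\n   42L      # long integer\n   3.14     # floating point number\n   4j       # imaginary number\n\n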
There are no\ncomplex literals (complex numbers can be formed by adding a real\nnumber and an imaginary number).\n\nNote that numeric literals do not include a sign; a phrase like ``-1``\nis actually an expression composed of the unary operator '``-``' and\nthe literal ``1``.\n", 'numeric-types': u'\nEmulating numeric types\n***********************\n\nThe following methods can be defined to emulate numeric objects.\nMethods corresponding to operations that are not supported by the\nparticular kind of number implemented (e.g., bitwise operations for\nnon-integral numbers) should be left undefined.\n\nobject.__add__(self, other)\nobject.__sub__(self, other)\nobject.__mul__(self, other)\nobject.__floordiv__(self, other)\nobject.__mod__(self, other)\nobject.__divmod__(self, other)\nobject.__pow__(self, other[, modulo])\nobject.__lshift__(self, other)\nobject.__rshift__(self, other)\nobject.__and__(self, other)\nobject.__xor__(self, other)\nobject.__or__(self, other)\n\n These methods are called to implement the binary arithmetic\n operations (``+``, ``-``, ``*``, ``//``, ``%``, ``divmod()``,\n ``pow()``, ``**``, ``<<``, ``>>``, ``&``, ``^``, ``|``). For\n instance, to evaluate the expression ``x + y``, where *x* is an\n instance of a class that has an ``__add__()`` method,\n ``x.__add__(y)`` is called. The ``__divmod__()`` method should be\n the equivalent to using ``__floordiv__()`` and ``__mod__()``; it\n should not be related to ``__truediv__()`` (described below). Note\n that ``__pow__()`` should be defined to accept an optional third\n argument if the ternary version of the built-in ``pow()`` function\n is to be supported.\n\n If one of those methods does not support the operation with the\n supplied arguments, it should return ``NotImplemented``.\n\nobject.__div__(self, other)\nobject.__truediv__(self, other)\n\n The division operator (``/``) is implemented by these methods. The\n ``__truediv__()`` method is used when ``__future__.division`` is in\n effect, otherwise ``__div__()`` is used. If only one of these two\n methods is defined, the object will not support division in the\n alternate context; ``TypeError`` will be raised instead.\n\nobject.__radd__(self, other)\nobject.__rsub__(self, other)\nobject.__rmul__(self, other)\nobject.__rdiv__(self, other)\nobject.__rtruediv__(self, other)\nobject.__rfloordiv__(self, other)\nobject.__rmod__(self, other)\nobject.__rdivmod__(self, other)\nobject.__rpow__(self, other)\nobject.__rlshift__(self, other)\nobject.__rrshift__(self, other)\nobject.__rand__(self, other)\nobject.__rxor__(self, other)\nobject.__ror__(self, other)\n\n These methods are called to implement the binary arithmetic\n operations (``+``, ``-``, ``*``, ``/``, ``%``, ``divmod()``,\n ``pow()``, ``**``, ``<<``, ``>>``, ``&``, ``^``, ``|``) with\n reflected (swapped) operands. These functions are only called if\n the left operand does not support the corresponding operation and\n the operands are of different types. [3] For instance, to evaluate\n the expression ``x - y``, where *y* is an instance of a class that\n has an ``__rsub__()`` method, ``y.__rsub__(x)`` is called if\n ``x.__sub__(y)`` returns *NotImplemented*.\n\n Note that ternary ``pow()`` will not try calling ``__rpow__()``\n (the coercion rules would become too complicated).\n\n Note: If the right operand\'s type is a subclass of the left operand\'s\n type and that subclass provides the reflected method for the\n operation, this method will be called before the left operand\'s\n non-reflected method. 
This behavior allows subclasses to\n override their ancestors\' operations.\n\nobject.__iadd__(self, other)\nobject.__isub__(self, other)\nobject.__imul__(self, other)\nobject.__idiv__(self, other)\nobject.__itruediv__(self, other)\nobject.__ifloordiv__(self, other)\nobject.__imod__(self, other)\nobject.__ipow__(self, other[, modulo])\nobject.__ilshift__(self, other)\nobject.__irshift__(self, other)\nobject.__iand__(self, other)\nobject.__ixor__(self, other)\nobject.__ior__(self, other)\n\n These methods are called to implement the augmented arithmetic\n assignments (``+=``, ``-=``, ``*=``, ``/=``, ``//=``, ``%=``,\n ``**=``, ``<<=``, ``>>=``, ``&=``, ``^=``, ``|=``). These methods\n should attempt to do the operation in-place (modifying *self*) and\n return the result (which could be, but does not have to be,\n *self*). If a specific method is not defined, the augmented\n assignment falls back to the normal methods. For instance, to\n execute the statement ``x += y``, where *x* is an instance of a\n class that has an ``__iadd__()`` method, ``x.__iadd__(y)`` is\n called. If *x* is an instance of a class that does not define a\n ``__iadd__()`` method, ``x.__add__(y)`` and ``y.__radd__(x)`` are\n considered, as with the evaluation of ``x + y``.\n\nobject.__neg__(self)\nobject.__pos__(self)\nobject.__abs__(self)\nobject.__invert__(self)\n\n Called to implement the unary arithmetic operations (``-``, ``+``,\n ``abs()`` and ``~``).\n\nobject.__complex__(self)\nobject.__int__(self)\nobject.__long__(self)\nobject.__float__(self)\n\n Called to implement the built-in functions ``complex()``,\n ``int()``, ``long()``, and ``float()``. Should return a value of\n the appropriate type.\n\nobject.__oct__(self)\nobject.__hex__(self)\n\n Called to implement the built-in functions ``oct()`` and ``hex()``.\n Should return a string value.\n\nobject.__index__(self)\n\n Called to implement ``operator.index()``. Also called whenever\n Python needs an integer object (such as in slicing). Must return\n an integer (int or long).\n\n New in version 2.5.\n\nobject.__coerce__(self, other)\n\n Called to implement "mixed-mode" numeric arithmetic. Should either\n return a 2-tuple containing *self* and *other* converted to a\n common numeric type, or ``None`` if conversion is impossible. When\n the common type would be the type of ``other``, it is sufficient to\n return ``None``, since the interpreter will also ask the other\n object to attempt a coercion (but sometimes, if the implementation\n of the other type cannot be changed, it is useful to do the\n conversion to the other type here). A return value of\n ``NotImplemented`` is equivalent to returning ``None``.\n', 'objects': u'\nObjects, values and types\n*************************\n\n*Objects* are Python\'s abstraction for data. All data in a Python\nprogram is represented by objects or by relations between objects. (In\na sense, and in conformance to Von Neumann\'s model of a "stored\nprogram computer," code is also represented by objects.)\n\nEvery object has an identity, a type and a value. An object\'s\n*identity* never changes once it has been created; you may think of it\nas the object\'s address in memory. The \'``is``\' operator compares the\nidentity of two objects; the ``id()`` function returns an integer\nrepresenting its identity (currently implemented as its address). An\nobject\'s *type* is also unchangeable. 
[1] An object\'s type determines\nthe operations that the object supports (e.g., "does it have a\nlength?") and also defines the possible values for objects of that\ntype. The ``type()`` function returns an object\'s type (which is an\nobject itself). The *value* of some objects can change. Objects\nwhose value can change are said to be *mutable*; objects whose value\nis unchangeable once they are created are called *immutable*. (The\nvalue of an immutable container object that contains a reference to a\nmutable object can change when the latter\'s value is changed; however\nthe container is still considered immutable, because the collection of\nobjects it contains cannot be changed. So, immutability is not\nstrictly the same as having an unchangeable value, it is more subtle.)\nAn object\'s mutability is determined by its type; for instance,\nnumbers, strings and tuples are immutable, while dictionaries and\nlists are mutable.\n\nObjects are never explicitly destroyed; however, when they become\nunreachable they may be garbage-collected. An implementation is\nallowed to postpone garbage collection or omit it altogether --- it is\na matter of implementation quality how garbage collection is\nimplemented, as long as no objects are collected that are still\nreachable. (Implementation note: CPython currently uses a reference-\ncounting scheme with (optional) delayed detection of cyclically linked\ngarbage, which collects most objects as soon as they become\nunreachable, but is not guaranteed to collect garbage containing\ncircular references. See the documentation of the ``gc`` module for\ninformation on controlling the collection of cyclic garbage. Other\nimplementations act differently and CPython may change.)\n\nNote that the use of the implementation\'s tracing or debugging\nfacilities may keep objects alive that would normally be collectable.\nAlso note that catching an exception with a \'``try``...``except``\'\nstatement may keep objects alive.\n\nSome objects contain references to "external" resources such as open\nfiles or windows. It is understood that these resources are freed\nwhen the object is garbage-collected, but since garbage collection is\nnot guaranteed to happen, such objects also provide an explicit way to\nrelease the external resource, usually a ``close()`` method. Programs\nare strongly recommended to explicitly close such objects. The\n\'``try``...``finally``\' statement provides a convenient way to do\nthis.\n\nSome objects contain references to other objects; these are called\n*containers*. Examples of containers are tuples, lists and\ndictionaries. The references are part of a container\'s value. In\nmost cases, when we talk about the value of a container, we imply the\nvalues, not the identities of the contained objects; however, when we\ntalk about the mutability of a container, only the identities of the\nimmediately contained objects are implied. So, if an immutable\ncontainer (like a tuple) contains a reference to a mutable object, its\nvalue changes if that mutable object is changed.\n\nTypes affect almost all aspects of object behavior. Even the\nimportance of object identity is affected in some sense: for immutable\ntypes, operations that compute new values may actually return a\nreference to any existing object with the same type and value, while\nfor mutable objects this is not allowed. 
E.g., after ``a = 1; b =\n1``, ``a`` and ``b`` may or may not refer to the same object with the\nvalue one, depending on the implementation, but after ``c = []; d =\n[]``, ``c`` and ``d`` are guaranteed to refer to two different,\nunique, newly created empty lists. (Note that ``c = d = []`` assigns\nthe same object to both ``c`` and ``d``.)\n', 'operator-summary': u'\nSummary\n*******\n\nThe following table summarizes the operator precedences in Python,\nfrom lowest precedence (least binding) to highest precedence (most\nbinding). Operators in the same box have the same precedence. Unless\nthe syntax is explicitly given, operators are binary. Operators in\nthe same box group left to right (except for comparisons, including\ntests, which all have the same precedence and chain from left to right\n--- see section *Comparisons* --- and exponentiation, which groups\nfrom right to left).\n\n+-------------------------------------------------+---------------------------------------+\n| Operator | Description |\n+=================================================+=======================================+\n| ``lambda`` | Lambda expression |\n+-------------------------------------------------+---------------------------------------+\n| ``or`` | Boolean OR |\n+-------------------------------------------------+---------------------------------------+\n| ``and`` | Boolean AND |\n+-------------------------------------------------+---------------------------------------+\n| ``not`` *x* | Boolean NOT |\n+-------------------------------------------------+---------------------------------------+\n| ``in``, ``not`` ``in``, ``is``, ``is not``, | Comparisons, including membership |\n| ``<``, ``<=``, ``>``, ``>=``, ``<>``, ``!=``, | tests and identity tests, |\n| ``==`` | |\n+-------------------------------------------------+---------------------------------------+\n| ``|`` | Bitwise OR |\n+-------------------------------------------------+---------------------------------------+\n| ``^`` | Bitwise XOR |\n+-------------------------------------------------+---------------------------------------+\n| ``&`` | Bitwise AND |\n+-------------------------------------------------+---------------------------------------+\n| ``<<``, ``>>`` | Shifts |\n+-------------------------------------------------+---------------------------------------+\n| ``+``, ``-`` | Addition and subtraction |\n+-------------------------------------------------+---------------------------------------+\n| ``*``, ``/``, ``//``, ``%`` | Multiplication, division, remainder |\n+-------------------------------------------------+---------------------------------------+\n| ``+x``, ``-x``, ``~x`` | Positive, negative, bitwise NOT |\n+-------------------------------------------------+---------------------------------------+\n| ``**`` | Exponentiation [8] |\n+-------------------------------------------------+---------------------------------------+\n| ``x[index]``, ``x[index:index]``, | Subscription, slicing, call, |\n| ``x(arguments...)``, ``x.attribute`` | attribute reference |\n+-------------------------------------------------+---------------------------------------+\n| ``(expressions...)``, ``[expressions...]``, | Binding or tuple display, list |\n| ``{key:datum...}``, ```expressions...``` | display, dictionary display, string |\n| | conversion |\n+-------------------------------------------------+---------------------------------------+\n\n-[ Footnotes ]-\n\n[1] In Python 2.3 and later releases, a list comprehension "leaks" the\n control variables of 
each ``for`` it contains into the containing\n scope. However, this behavior is deprecated, and relying on it\n will not work in Python 3.0.\n\n[2] While ``abs(x%y) < abs(y)`` is true mathematically, for floats it\n may not be true numerically due to roundoff. For example, and\n assuming a platform on which a Python float is an IEEE 754 double-\n precision number, in order that ``-1e-100 % 1e100`` have the same\n sign as ``1e100``, the computed result is ``-1e-100 + 1e100``,\n which is numerically exactly equal to ``1e100``. Function\n ``fmod()`` in the ``math`` module returns a result whose sign\n matches the sign of the first argument instead, and so returns\n ``-1e-100`` in this case. Which approach is more appropriate\n depends on the application.\n\n[3] If x is very close to an exact integer multiple of y, it\'s\n possible for ``floor(x/y)`` to be one larger than ``(x-x%y)/y``\n due to rounding. In such cases, Python returns the latter result,\n in order to preserve that ``divmod(x,y)[0] * y + x % y`` be very\n close to ``x``.\n\n[4] While comparisons between unicode strings make sense at the byte\n level, they may be counter-intuitive to users. For example, the\n strings ``u"\\u00C7"`` and ``u"\\u0043\\u0327"`` compare differently,\n even though they both represent the same unicode character (LATIN\n CAPITAL LETTER C WITH CEDILLA). To compare strings in a human\n recognizable way, compare using ``unicodedata.normalize()``.\n\n[5] The implementation computes this efficiently, without constructing\n lists or sorting.\n\n[6] Earlier versions of Python used lexicographic comparison of the\n sorted (key, value) lists, but this was very expensive for the\n common case of comparing for equality. An even earlier version of\n Python compared dictionaries by identity only, but this caused\n surprises because people expected to be able to test a dictionary\n for emptiness by comparing it to ``{}``.\n\n[7] Due to automatic garbage-collection, free lists, and the dynamic\n nature of descriptors, you may notice seemingly unusual behaviour\n in certain uses of the ``is`` operator, like those involving\n comparisons between instance methods, or constants. Check their\n documentation for more info.\n\n[8] The power operator ``**`` binds less tightly than an arithmetic or\n bitwise unary operator on its right, that is, ``2**-1`` is\n ``0.5``.\n', 'pass': u'\nThe ``pass`` statement\n**********************\n\n pass_stmt ::= "pass"\n\n``pass`` is a null operation --- when it is executed, nothing happens.\nIt is useful as a placeholder when a statement is required\nsyntactically, but no code needs to be executed, for example:\n\n def f(arg): pass # a function that does nothing (yet)\n\n class C: pass # a class with no methods (yet)\n', 'power': u'\nThe power operator\n******************\n\nThe power operator binds more tightly than unary operators on its\nleft; it binds less tightly than unary operators on its right. The\nsyntax is:\n\n power ::= primary ["**" u_expr]\n\nThus, in an unparenthesized sequence of power and unary operators, the\noperators are evaluated from right to left (this does not constrain\nthe evaluation order for the operands): ``-1**2`` results in ``-1``.\n\nThe power operator has the same semantics as the built-in ``pow()``\nfunction, when called with two arguments: it yields its left argument\nraised to the power of its right argument. The numeric arguments are\nfirst converted to a common type. 
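For example, when an integer is raised to a float power, both operands are converted to float first (interactive sketch):\n\n   >>> 2 ** 3          # int ** int stays int\n   8\n   >>> 2 ** 3.0        # operands converted to a common type (float)\n   8.0\n\n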
The result type is that of the\narguments after coercion.\n\nWith mixed operand types, the coercion rules for binary arithmetic\noperators apply. For int and long int operands, the result has the\nsame type as the operands (after coercion) unless the second argument\nis negative; in that case, all arguments are converted to float and a\nfloat result is delivered. For example, ``10**2`` returns ``100``, but\n``10**-2`` returns ``0.01``. (This last feature was added in Python\n2.2. In Python 2.1 and before, if both arguments were of integer types\nand the second argument was negative, an exception was raised).\n\nRaising ``0.0`` to a negative power results in a\n``ZeroDivisionError``. Raising a negative number to a fractional power\nresults in a ``ValueError``.\n', 'print': u'\nThe ``print`` statement\n***********************\n\n print_stmt ::= "print" ([expression ("," expression)* [","]]\n | ">>" expression [("," expression)+ [","]])\n\n``print`` evaluates each expression in turn and writes the resulting\nobject to standard output (see below). If an object is not a string,\nit is first converted to a string using the rules for string\nconversions. The (resulting or original) string is then written. A\nspace is written before each object is (converted and) written, unless\nthe output system believes it is positioned at the beginning of a\nline. This is the case (1) when no characters have yet been written\nto standard output, (2) when the last character written to standard\noutput is ``\'\\n\'``, or (3) when the last write operation on standard\noutput was not a ``print`` statement. (In some cases it may be\nfunctional to write an empty string to standard output for this\nreason.)\n\nNote: Objects which act like file objects but which are not the built-in\n file objects often do not properly emulate this aspect of the file\n object\'s behavior, so it is best not to rely on this.\n\nA ``\'\\n\'`` character is written at the end, unless the ``print``\nstatement ends with a comma. This is the only action if the statement\ncontains just the keyword ``print``.\n\nStandard output is defined as the file object named ``stdout`` in the\nbuilt-in module ``sys``. If no such object exists, or if it does not\nhave a ``write()`` method, a ``RuntimeError`` exception is raised.\n\n``print`` also has an extended form, defined by the second portion of\nthe syntax described above. This form is sometimes referred to as\n"``print`` chevron." In this form, the first expression after the\n``>>`` must evaluate to a "file-like" object, specifically an object\nthat has a ``write()`` method as described above. With this extended\nform, the subsequent expressions are printed to this file object. If\nthe first expression evaluates to ``None``, then ``sys.stdout`` is\nused as the file for output.\n', 'raise': u'\nThe ``raise`` statement\n***********************\n\n raise_stmt ::= "raise" [expression ["," expression ["," expression]]]\n\nIf no expressions are present, ``raise`` re-raises the last exception\nthat was active in the current scope. If no exception is active in\nthe current scope, a ``TypeError`` exception is raised indicating that\nthis is an error (if running under IDLE, a ``Queue.Empty`` exception\nis raised instead).\n\nOtherwise, ``raise`` evaluates the expressions to get three objects,\nusing ``None`` as the value of omitted expressions. 
The first two\nobjects are used to determine the *type* and *value* of the exception.\n\nIf the first object is an instance, the type of the exception is the\nclass of the instance, the instance itself is the value, and the\nsecond object must be ``None``.\n\nIf the first object is a class, it becomes the type of the exception.\nThe second object is used to determine the exception value: If it is\nan instance of the class, the instance becomes the exception value. If\nthe second object is a tuple, it is used as the argument list for the\nclass constructor; if it is ``None``, an empty argument list is used,\nand any other object is treated as a single argument to the\nconstructor. The instance so created by calling the constructor is\nused as the exception value.\n\nIf a third object is present and not ``None``, it must be a traceback\nobject (see section *The standard type hierarchy*), and it is\nsubstituted instead of the current location as the place where the\nexception occurred. If the third object is present and not a\ntraceback object or ``None``, a ``TypeError`` exception is raised.\nThe three-expression form of ``raise`` is useful to re-raise an\nexception transparently in an except clause, but ``raise`` with no\nexpressions should be preferred if the exception to be re-raised was\nthe most recently active exception in the current scope.\n\nAdditional information on exceptions can be found in section\n*Exceptions*, and information about handling exceptions is in section\n*The try statement*.\n', 'return': u'\nThe ``return`` statement\n************************\n\n return_stmt ::= "return" [expression_list]\n\n``return`` may only occur syntactically nested in a function\ndefinition, not within a nested class definition.\n\nIf an expression list is present, it is evaluated, else ``None`` is\nsubstituted.\n\n``return`` leaves the current function call with the expression list\n(or ``None``) as return value.\n\nWhen ``return`` passes control out of a ``try`` statement with a\n``finally`` clause, that ``finally`` clause is executed before really\nleaving the function.\n\nIn a generator function, the ``return`` statement is not allowed to\ninclude an **expression_list**. In that context, a bare ``return``\nindicates that the generator is done and will cause ``StopIteration``\nto be raised.\n', 'sequence-methods': u'\nAdditional methods for emulation of sequence types\n**************************************************\n\nThe following optional methods can be defined to further emulate\nsequence objects. Immutable sequences methods should at most only\ndefine ``__getslice__()``; mutable sequences might define all three\nmethods.\n\nobject.__getslice__(self, i, j)\n\n Deprecated since version 2.0: Support slice objects as parameters\n to the ``__getitem__()`` method. (However, built-in types in\n CPython currently still implement ``__getslice__()``. Therefore,\n you have to override it in derived classes when implementing\n slicing.)\n\n Called to implement evaluation of ``self[i:j]``. The returned\n object should be of the same type as *self*. Note that missing *i*\n or *j* in the slice expression are replaced by zero or\n ``sys.maxint``, respectively. If negative indexes are used in the\n slice, the length of the sequence is added to that index. If the\n instance does not implement the ``__len__()`` method, an\n ``AttributeError`` is raised. No guarantee is made that indexes\n adjusted this way are not still negative. 
Indexes which are\n greater than the length of the sequence are not modified. If no\n ``__getslice__()`` is found, a slice object is created, and\n passed to ``__getitem__()`` instead.\n\nobject.__setslice__(self, i, j, sequence)\n\n Called to implement assignment to ``self[i:j]``. Same notes for *i*\n and *j* as for ``__getslice__()``.\n\n This method is deprecated. If no ``__setslice__()`` is found, or\n for extended slicing of the form ``self[i:j:k]``, a slice object is\n created, and passed to ``__setitem__()``, instead of\n ``__setslice__()`` being called.\n\nobject.__delslice__(self, i, j)\n\n Called to implement deletion of ``self[i:j]``. Same notes for *i*\n and *j* as for ``__getslice__()``. This method is deprecated. If no\n ``__delslice__()`` is found, or for extended slicing of the form\n ``self[i:j:k]``, a slice object is created, and passed to\n ``__delitem__()``, instead of ``__delslice__()`` being called.\n\nNotice that these methods are only invoked when a single slice with a\nsingle colon is used, and the slice method is available. For slice\noperations involving extended slice notation, or in absence of the\nslice methods, ``__getitem__()``, ``__setitem__()`` or\n``__delitem__()`` is called with a slice object as argument.\n\nThe following example demonstrates how to make your program or module\ncompatible with earlier versions of Python (assuming that methods\n``__getitem__()``, ``__setitem__()`` and ``__delitem__()`` support\nslice objects as arguments):\n\n class MyClass:\n ...\n def __getitem__(self, index):\n ...\n def __setitem__(self, index, value):\n ...\n def __delitem__(self, index):\n ...\n\n if sys.version_info < (2, 0):\n # They won\'t be defined if version is at least 2.0 final\n\n def __getslice__(self, i, j):\n return self[max(0, i):max(0, j):]\n def __setslice__(self, i, j, seq):\n self[max(0, i):max(0, j):] = seq\n def __delslice__(self, i, j):\n del self[max(0, i):max(0, j):]\n ...\n\nNote the calls to ``max()``; these are necessary because of the\nhandling of negative indices before the ``__*slice__()`` methods are\ncalled. When negative indexes are used, the ``__*item__()`` methods\nreceive them as provided, but the ``__*slice__()`` methods get a\n"cooked" form of the index values. For each negative index value, the\nlength of the sequence is added to the index before calling the method\n(which may still result in a negative index); this is the customary\nhandling of negative indexes by the built-in sequence types, and the\n``__*item__()`` methods are expected to do this as well. However,\nsince they should already be doing that, negative indexes cannot be\npassed in; they must be constrained to the bounds of the sequence\nbefore being passed to the ``__*item__()`` methods. Calling ``max(0,\ni)`` conveniently returns the proper value.\n', 'sequence-types': u"\nEmulating container types\n*************************\n\nThe following methods can be defined to implement container objects.\nContainers usually are sequences (such as lists or tuples) or mappings\n(like dictionaries), but can represent other containers as well. The\nfirst set of methods is used either to emulate a sequence or to\nemulate a mapping; the difference is that for a sequence, the\nallowable keys should be the integers *k* for which ``0 <= k < N``\nwhere *N* is the length of the sequence, or slice objects, which\ndefine a range of items. (For backwards compatibility, the method\n``__getslice__()`` (see below) can also be defined to handle simple,\nbut not extended slices.) 
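As a minimal sketch, a read-only sequence that follows these rules might look like this (the ``Squares`` class is hypothetical):\n\n   class Squares:\n       # emulates a read-only sequence of the first n squares\n       def __init__(self, n):\n           self.n = n\n       def __len__(self):\n           return self.n\n       def __getitem__(self, k):\n           if not 0 <= k < self.n:\n               raise IndexError(k)   # lets for loops detect the end\n           return k * k\n\n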
It is also recommended that mappings provide\nthe methods ``keys()``, ``values()``, ``items()``, ``has_key()``,\n``get()``, ``clear()``, ``setdefault()``, ``iterkeys()``,\n``itervalues()``, ``iteritems()``, ``pop()``, ``popitem()``,\n``copy()``, and ``update()`` behaving similar to those for Python's\nstandard dictionary objects. The ``UserDict`` module provides a\n``DictMixin`` class to help create those methods from a base set of\n``__getitem__()``, ``__setitem__()``, ``__delitem__()``, and\n``keys()``. Mutable sequences should provide methods ``append()``,\n``count()``, ``index()``, ``extend()``, ``insert()``, ``pop()``,\n``remove()``, ``reverse()`` and ``sort()``, like Python standard list\nobjects. Finally, sequence types should implement addition (meaning\nconcatenation) and multiplication (meaning repetition) by defining the\nmethods ``__add__()``, ``__radd__()``, ``__iadd__()``, ``__mul__()``,\n``__rmul__()`` and ``__imul__()`` described below; they should not\ndefine ``__coerce__()`` or other numerical operators. It is\nrecommended that both mappings and sequences implement the\n``__contains__()`` method to allow efficient use of the ``in``\noperator; for mappings, ``in`` should be equivalent of ``has_key()``;\nfor sequences, it should search through the values. It is further\nrecommended that both mappings and sequences implement the\n``__iter__()`` method to allow efficient iteration through the\ncontainer; for mappings, ``__iter__()`` should be the same as\n``iterkeys()``; for sequences, it should iterate through the values.\n\nobject.__len__(self)\n\n Called to implement the built-in function ``len()``. Should return\n the length of the object, an integer ``>=`` 0. Also, an object\n that doesn't define a ``__nonzero__()`` method and whose\n ``__len__()`` method returns zero is considered to be false in a\n Boolean context.\n\nobject.__getitem__(self, key)\n\n Called to implement evaluation of ``self[key]``. For sequence\n types, the accepted keys should be integers and slice objects.\n Note that the special interpretation of negative indexes (if the\n class wishes to emulate a sequence type) is up to the\n ``__getitem__()`` method. If *key* is of an inappropriate type,\n ``TypeError`` may be raised; if of a value outside the set of\n indexes for the sequence (after any special interpretation of\n negative values), ``IndexError`` should be raised. For mapping\n types, if *key* is missing (not in the container), ``KeyError``\n should be raised.\n\n Note: ``for`` loops expect that an ``IndexError`` will be raised for\n illegal indexes to allow proper detection of the end of the\n sequence.\n\nobject.__setitem__(self, key, value)\n\n Called to implement assignment to ``self[key]``. Same note as for\n ``__getitem__()``. This should only be implemented for mappings if\n the objects support changes to the values for keys, or if new keys\n can be added, or for sequences if elements can be replaced. The\n same exceptions should be raised for improper *key* values as for\n the ``__getitem__()`` method.\n\nobject.__delitem__(self, key)\n\n Called to implement deletion of ``self[key]``. Same note as for\n ``__getitem__()``. This should only be implemented for mappings if\n the objects support removal of keys, or for sequences if elements\n can be removed from the sequence. 
The same exceptions should be\n raised for improper *key* values as for the ``__getitem__()``\n method.\n\nobject.__iter__(self)\n\n This method is called when an iterator is required for a container.\n This method should return a new iterator object that can iterate\n over all the objects in the container. For mappings, it should\n iterate over the keys of the container, and should also be made\n available as the method ``iterkeys()``.\n\n Iterator objects also need to implement this method; they are\n required to return themselves. For more information on iterator\n objects, see *Iterator Types*.\n\nobject.__reversed__(self)\n\n Called (if present) by the ``reversed()`` builtin to implement\n reverse iteration. It should return a new iterator object that\n iterates over all the objects in the container in reverse order.\n\n If the ``__reversed__()`` method is not provided, the\n ``reversed()`` builtin will fall back to using the sequence\n protocol (``__len__()`` and ``__getitem__()``). Objects should\n normally only provide ``__reversed__()`` if they do not support the\n sequence protocol and an efficient implementation of reverse\n iteration is possible.\n\n New in version 2.6.\n\nThe membership test operators (``in`` and ``not in``) are normally\nimplemented as an iteration through a sequence. However, container\nobjects can supply the following special method with a more efficient\nimplementation, which also does not require the object be a sequence.\n\nobject.__contains__(self, item)\n\n Called to implement membership test operators. Should return true\n if *item* is in *self*, false otherwise. For mapping objects, this\n should consider the keys of the mapping rather than the values or\n the key-item pairs.\n", 'shifting': u'\nShifting operations\n*******************\n\nThe shifting operations have lower priority than the arithmetic\noperations:\n\n shift_expr ::= a_expr | shift_expr ( "<<" | ">>" ) a_expr\n\nThese operators accept plain or long integers as arguments. The\narguments are converted to a common type. They shift the first\nargument to the left or right by the number of bits given by the\nsecond argument.\n\nA right shift by *n* bits is defined as division by ``pow(2, n)``. A\nleft shift by *n* bits is defined as multiplication with ``pow(2,\nn)``. Negative shift counts raise a ``ValueError`` exception.\n', 'slicings': u'\nSlicings\n********\n\nA slicing selects a range of items in a sequence object (e.g., a\nstring, tuple or list). Slicings may be used as expressions or as\ntargets in assignment or ``del`` statements. The syntax for a\nslicing:\n\n slicing ::= simple_slicing | extended_slicing\n simple_slicing ::= primary "[" short_slice "]"\n extended_slicing ::= primary "[" slice_list "]"\n slice_list ::= slice_item ("," slice_item)* [","]\n slice_item ::= expression | proper_slice | ellipsis\n proper_slice ::= short_slice | long_slice\n short_slice ::= [lower_bound] ":" [upper_bound]\n long_slice ::= short_slice ":" [stride]\n lower_bound ::= expression\n upper_bound ::= expression\n stride ::= expression\n ellipsis ::= "..."\n\nThere is ambiguity in the formal syntax here: anything that looks like\nan expression list also looks like a slice list, so any subscription\ncan be interpreted as a slicing. Rather than further complicating the\nsyntax, this is disambiguated by defining that in this case the\ninterpretation as a subscription takes priority over the\ninterpretation as a slicing (this is the case if the slice list\ncontains no proper slice nor ellipses). 
Similarly, when the slice\nlist has exactly one short slice and no trailing comma, the\ninterpretation as a simple slicing takes priority over that as an\nextended slicing.\n\nThe semantics for a simple slicing are as follows. The primary must\nevaluate to a sequence object. The lower and upper bound expressions,\nif present, must evaluate to plain integers; defaults are zero and the\n``sys.maxint``, respectively. If either bound is negative, the\nsequence\'s length is added to it. The slicing now selects all items\nwith index *k* such that ``i <= k < j`` where *i* and *j* are the\nspecified lower and upper bounds. This may be an empty sequence. It\nis not an error if *i* or *j* lie outside the range of valid indexes\n(such items don\'t exist so they aren\'t selected).\n\nThe semantics for an extended slicing are as follows. The primary\nmust evaluate to a mapping object, and it is indexed with a key that\nis constructed from the slice list, as follows. If the slice list\ncontains at least one comma, the key is a tuple containing the\nconversion of the slice items; otherwise, the conversion of the lone\nslice item is the key. The conversion of a slice item that is an\nexpression is that expression. The conversion of an ellipsis slice\nitem is the built-in ``Ellipsis`` object. The conversion of a proper\nslice is a slice object (see section *The standard type hierarchy*)\nwhose ``start``, ``stop`` and ``step`` attributes are the values of\nthe expressions given as lower bound, upper bound and stride,\nrespectively, substituting ``None`` for missing expressions.\n', 'specialattrs': u"\nSpecial Attributes\n******************\n\nThe implementation adds a few special read-only attributes to several\nobject types, where they are relevant. Some of these are not reported\nby the ``dir()`` built-in function.\n\nobject.__dict__\n\n A dictionary or other mapping object used to store an object's\n (writable) attributes.\n\nobject.__methods__\n\n Deprecated since version 2.2: Use the built-in function ``dir()``\n to get a list of an object's attributes. This attribute is no\n longer available.\n\nobject.__members__\n\n Deprecated since version 2.2: Use the built-in function ``dir()``\n to get a list of an object's attributes. This attribute is no\n longer available.\n\ninstance.__class__\n\n The class to which a class instance belongs.\n\nclass.__bases__\n\n The tuple of base classes of a class object. If there are no base\n classes, this will be an empty tuple.\n\nclass.__name__\n\n The name of the class or type.\n\nThe following attributes are only supported by *new-style class*es.\n\nclass.__mro__\n\n This attribute is a tuple of classes that are considered when\n looking for base classes during method resolution.\n\nclass.mro()\n\n This method can be overridden by a metaclass to customize the\n method resolution order for its instances. It is called at class\n instantiation, and its result is stored in ``__mro__``.\n\nclass.__subclasses__()\n\n Each new-style class keeps a list of weak references to its\n immediate subclasses. This method returns a list of all those\n references still alive. 
Example:\n\n >>> int.__subclasses__()\n [<type 'bool'>]\n\n-[ Footnotes ]-\n\n[1] Additional information on these special methods may be found in\n the Python Reference Manual (*Basic customization*).\n\n[2] As a consequence, the list ``[1, 2]`` is considered equal to\n ``[1.0, 2.0]``, and similarly for tuples.\n\n[3] They must have since the parser can't tell the type of the\n operands.\n\n[4] To format only a tuple you should therefore provide a singleton\n tuple whose only element is the tuple to be formatted.\n\n[5] These numbers are fairly arbitrary. They are intended to avoid\n printing endless strings of meaningless digits without hampering\n correct use and without having to know the exact precision of\n floating point values on a particular machine.\n\n[6] The advantage of leaving the newline on is that returning an empty\n string is then an unambiguous EOF indication. It is also possible\n (in cases where it might matter, for example, if you want to make\n an exact copy of a file while scanning its lines) to tell whether\n the last line of a file ended in a newline or not (yes this\n happens!).\n", 'specialnames': u'\nSpecial method names\n********************\n\nA class can implement certain operations that are invoked by special\nsyntax (such as arithmetic operations or subscripting and slicing) by\ndefining methods with special names. This is Python\'s approach to\n*operator overloading*, allowing classes to define their own behavior\nwith respect to language operators. For instance, if a class defines\na method named ``__getitem__()``, and ``x`` is an instance of this\nclass, then ``x[i]`` is roughly equivalent to ``x.__getitem__(i)`` for\nold-style classes and ``type(x).__getitem__(x, i)`` for new-style\nclasses. Except where mentioned, attempts to execute an operation\nraise an exception when no appropriate method is defined (typically\n``AttributeError`` or ``TypeError``).\n\nWhen implementing a class that emulates any built-in type, it is\nimportant that the emulation only be implemented to the degree that it\nmakes sense for the object being modelled. For example, some\nsequences may work well with retrieval of individual elements, but\nextracting a slice may not make sense. (One example of this is the\n``NodeList`` interface in the W3C\'s Document Object Model.)\n\n\nBasic customization\n===================\n\nobject.__new__(cls[, ...])\n\n Called to create a new instance of class *cls*. ``__new__()`` is a\n static method (special-cased so you need not declare it as such)\n that takes the class of which an instance was requested as its\n first argument. The remaining arguments are those passed to the\n object constructor expression (the call to the class). 
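For example, a minimal sketch of the usual pattern for an immutable type (the ``UpperStr`` class is hypothetical):\n\n      class UpperStr(str):\n          # str is immutable, so the value must be chosen in __new__(),\n          # not in __init__()\n          def __new__(cls, value):\n              return super(UpperStr, cls).__new__(cls, value.upper())\n\n      >>> UpperStr("hello")\n      \'HELLO\'\n\n 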
The return\n value of ``__new__()`` should be the new object instance (usually\n an instance of *cls*).\n\n Typical implementations create a new instance of the class by\n invoking the superclass\'s ``__new__()`` method using\n ``super(currentclass, cls).__new__(cls[, ...])`` with appropriate\n arguments and then modifying the newly-created instance as\n necessary before returning it.\n\n If ``__new__()`` returns an instance of *cls*, then the new\n instance\'s ``__init__()`` method will be invoked like\n ``__init__(self[, ...])``, where *self* is the new instance and the\n remaining arguments are the same as were passed to ``__new__()``.\n\n If ``__new__()`` does not return an instance of *cls*, then the new\n instance\'s ``__init__()`` method will not be invoked.\n\n ``__new__()`` is intended mainly to allow subclasses of immutable\n types (like int, str, or tuple) to customize instance creation. It\n is also commonly overridden in custom metaclasses in order to\n customize class creation.\n\nobject.__init__(self[, ...])\n\n Called when the instance is created. The arguments are those\n passed to the class constructor expression. If a base class has an\n ``__init__()`` method, the derived class\'s ``__init__()`` method,\n if any, must explicitly call it to ensure proper initialization of\n the base class part of the instance; for example:\n ``BaseClass.__init__(self, [args...])``. As a special constraint\n on constructors, no value may be returned; doing so will cause a\n ``TypeError`` to be raised at runtime.\n\nobject.__del__(self)\n\n Called when the instance is about to be destroyed. This is also\n called a destructor. If a base class has a ``__del__()`` method,\n the derived class\'s ``__del__()`` method, if any, must explicitly\n call it to ensure proper deletion of the base class part of the\n instance. Note that it is possible (though not recommended!) for\n the ``__del__()`` method to postpone destruction of the instance by\n creating a new reference to it. It may then be called at a later\n time when this new reference is deleted. It is not guaranteed that\n ``__del__()`` methods are called for objects that still exist when\n the interpreter exits.\n\n Note: ``del x`` doesn\'t directly call ``x.__del__()`` --- the former\n decrements the reference count for ``x`` by one, and the latter\n is only called when ``x``\'s reference count reaches zero. Some\n common situations that may prevent the reference count of an\n object from going to zero include: circular references between\n objects (e.g., a doubly-linked list or a tree data structure with\n parent and child pointers); a reference to the object on the\n stack frame of a function that caught an exception (the traceback\n stored in ``sys.exc_traceback`` keeps the stack frame alive); or\n a reference to the object on the stack frame that raised an\n unhandled exception in interactive mode (the traceback stored in\n ``sys.last_traceback`` keeps the stack frame alive). The first\n situation can only be remedied by explicitly breaking the cycles;\n the latter two situations can be resolved by storing ``None`` in\n ``sys.exc_traceback`` or ``sys.last_traceback``. 
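To make the reference-counting point above concrete, consider a CPython session sketch (the class ``C`` is hypothetical):\n\n      >>> class C:\n      ...     def __del__(self):\n      ...         print "collected"\n      ...\n      >>> x = C()\n      >>> y = x      # a second reference\n      >>> del x      # no output: the reference count is still nonzero\n      >>> del y\n      collected\n\n 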
Circular\n references which are garbage are detected when the option cycle\n detector is enabled (it\'s on by default), but can only be cleaned\n up if there are no Python-level ``__del__()`` methods involved.\n Refer to the documentation for the ``gc`` module for more\n information about how ``__del__()`` methods are handled by the\n cycle detector, particularly the description of the ``garbage``\n value.\n\n Warning: Due to the precarious circumstances under which ``__del__()``\n methods are invoked, exceptions that occur during their execution\n are ignored, and a warning is printed to ``sys.stderr`` instead.\n Also, when ``__del__()`` is invoked in response to a module being\n deleted (e.g., when execution of the program is done), other\n globals referenced by the ``__del__()`` method may already have\n been deleted or in the process of being torn down (e.g. the\n import machinery shutting down). For this reason, ``__del__()``\n methods should do the absolute minimum needed to maintain\n external invariants. Starting with version 1.5, Python\n guarantees that globals whose name begins with a single\n underscore are deleted from their module before other globals are\n deleted; if no other references to such globals exist, this may\n help in assuring that imported modules are still available at the\n time when the ``__del__()`` method is called.\n\nobject.__repr__(self)\n\n Called by the ``repr()`` built-in function and by string\n conversions (reverse quotes) to compute the "official" string\n representation of an object. If at all possible, this should look\n like a valid Python expression that could be used to recreate an\n object with the same value (given an appropriate environment). If\n this is not possible, a string of the form ``<...some useful\n description...>`` should be returned. The return value must be a\n string object. If a class defines ``__repr__()`` but not\n ``__str__()``, then ``__repr__()`` is also used when an "informal"\n string representation of instances of that class is required.\n\n This is typically used for debugging, so it is important that the\n representation is information-rich and unambiguous.\n\nobject.__str__(self)\n\n Called by the ``str()`` built-in function and by the ``print``\n statement to compute the "informal" string representation of an\n object. This differs from ``__repr__()`` in that it does not have\n to be a valid Python expression: a more convenient or concise\n representation may be used instead. The return value must be a\n string object.\n\nobject.__lt__(self, other)\nobject.__le__(self, other)\nobject.__eq__(self, other)\nobject.__ne__(self, other)\nobject.__gt__(self, other)\nobject.__ge__(self, other)\n\n New in version 2.1.\n\n These are the so-called "rich comparison" methods, and are called\n for comparison operators in preference to ``__cmp__()`` below. The\n correspondence between operator symbols and method names is as\n follows: ``x<y`` calls ``x.__lt__(y)``, ``x<=y`` calls\n ``x.__le__(y)``, ``x==y`` calls ``x.__eq__(y)``, ``x!=y`` and\n ``x<>y`` call ``x.__ne__(y)``, ``x>y`` calls ``x.__gt__(y)``, and\n ``x>=y`` calls ``x.__ge__(y)``.\n\n A rich comparison method may return the singleton\n ``NotImplemented`` if it does not implement the operation for a\n given pair of arguments. By convention, ``False`` and ``True`` are\n returned for a successful comparison. 
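For example (the ``Point`` class is hypothetical; note that ``__ne__()`` is defined alongside ``__eq__()``, as recommended below):\n\n      class Point(object):\n          def __init__(self, x, y):\n              self.x, self.y = x, y\n          def __eq__(self, other):\n              if not isinstance(other, Point):\n                  return NotImplemented   # let the other operand try\n              return self.x == other.x and self.y == other.y\n          def __ne__(self, other):\n              result = self.__eq__(other)\n              if result is NotImplemented:\n                  return result\n              return not result\n\n 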
However, these methods can\n return any value, so if the comparison operator is used in a\n Boolean context (e.g., in the condition of an ``if`` statement),\n Python will call ``bool()`` on the value to determine if the result\n is true or false.\n\n There are no implied relationships among the comparison operators.\n The truth of ``x==y`` does not imply that ``x!=y`` is false.\n Accordingly, when defining ``__eq__()``, one should also define\n ``__ne__()`` so that the operators will behave as expected. See\n the paragraph on ``__hash__()`` for some important notes on\n creating *hashable* objects which support custom comparison\n operations and are usable as dictionary keys.\n\n There are no swapped-argument versions of these methods (to be used\n when the left argument does not support the operation but the right\n argument does); rather, ``__lt__()`` and ``__gt__()`` are each\n other\'s reflection, ``__le__()`` and ``__ge__()`` are each other\'s\n reflection, and ``__eq__()`` and ``__ne__()`` are their own\n reflection.\n\n Arguments to rich comparison methods are never coerced.\n\nobject.__cmp__(self, other)\n\n Called by comparison operations if rich comparison (see above) is\n not defined. Should return a negative integer if ``self < other``,\n zero if ``self == other``, a positive integer if ``self > other``.\n If no ``__cmp__()``, ``__eq__()`` or ``__ne__()`` operation is\n defined, class instances are compared by object identity\n ("address"). See also the description of ``__hash__()`` for some\n important notes on creating *hashable* objects which support custom\n comparison operations and are usable as dictionary keys. (Note: the\n restriction that exceptions are not propagated by ``__cmp__()`` has\n been removed since Python 1.5.)\n\nobject.__rcmp__(self, other)\n\n Changed in version 2.1: No longer supported.\n\nobject.__hash__(self)\n\n Called by built-in function ``hash()`` and for operations on\n members of hashed collections including ``set``, ``frozenset``, and\n ``dict``. ``__hash__()`` should return an integer. The only\n required property is that objects which compare equal have the same\n hash value; it is advised to somehow mix together (e.g. using\n exclusive or) the hash values for the components of the object that\n also play a part in comparison of objects.\n\n If a class does not define a ``__cmp__()`` or ``__eq__()`` method\n it should not define a ``__hash__()`` operation either; if it\n defines ``__cmp__()`` or ``__eq__()`` but not ``__hash__()``, its\n instances will not be usable in hashed collections. If a class\n defines mutable objects and implements a ``__cmp__()`` or\n ``__eq__()`` method, it should not implement ``__hash__()``, since\n hashable collection implementations require that an object\'s hash\n value is immutable (if the object\'s hash value changes, it will be\n in the wrong hash bucket).\n\n User-defined classes have ``__cmp__()`` and ``__hash__()`` methods\n by default; with them, all objects compare unequal (except with\n themselves) and ``x.__hash__()`` returns ``id(x)``.\n\n Classes which inherit a ``__hash__()`` method from a parent class\n but change the meaning of ``__cmp__()`` or ``__eq__()`` such that\n the hash value returned is no longer appropriate (e.g. 
by switching\n to a value-based concept of equality instead of the default\n identity based equality) can explicitly flag themselves as being\n unhashable by setting ``__hash__ = None`` in the class definition.\n Doing so means that not only will instances of the class raise an\n appropriate ``TypeError`` when a program attempts to retrieve their\n hash value, but they will also be correctly identified as\n unhashable when checking ``isinstance(obj, collections.Hashable)``\n (unlike classes which define their own ``__hash__()`` to explicitly\n raise ``TypeError``).\n\n Changed in version 2.5: ``__hash__()`` may now also return a long\n integer object; the 32-bit integer is then derived from the hash of\n that object.\n\n Changed in version 2.6: ``__hash__`` may now be set to ``None`` to\n explicitly flag instances of a class as unhashable.\n\nobject.__nonzero__(self)\n\n Called to implement truth value testing and the built-in operation\n ``bool()``; should return ``False`` or ``True``, or their integer\n equivalents ``0`` or ``1``. When this method is not defined,\n ``__len__()`` is called, if it is defined, and the object is\n considered true if its result is nonzero. If a class defines\n neither ``__len__()`` nor ``__nonzero__()``, all its instances are\n considered true.\n\nobject.__unicode__(self)\n\n Called to implement ``unicode()`` builtin; should return a Unicode\n object. When this method is not defined, string conversion is\n attempted, and the result of string conversion is converted to\n Unicode using the system default encoding.\n\n\nCustomizing attribute access\n============================\n\nThe following methods can be defined to customize the meaning of\nattribute access (use of, assignment to, or deletion of ``x.name``)\nfor class instances.\n\nobject.__getattr__(self, name)\n\n Called when an attribute lookup has not found the attribute in the\n usual places (i.e. it is not an instance attribute nor is it found\n in the class tree for ``self``). ``name`` is the attribute name.\n This method should return the (computed) attribute value or raise\n an ``AttributeError`` exception.\n\n Note that if the attribute is found through the normal mechanism,\n ``__getattr__()`` is not called. (This is an intentional asymmetry\n between ``__getattr__()`` and ``__setattr__()``.) This is done both\n for efficiency reasons and because otherwise ``__getattr__()``\n would have no way to access other attributes of the instance. Note\n that at least for instance variables, you can fake total control by\n not inserting any values in the instance attribute dictionary (but\n instead inserting them in another object). See the\n ``__getattribute__()`` method below for a way to actually get total\n control in new-style classes.\n\nobject.__setattr__(self, name, value)\n\n Called when an attribute assignment is attempted. This is called\n instead of the normal mechanism (i.e. store the value in the\n instance dictionary). *name* is the attribute name, *value* is the\n value to be assigned to it.\n\n If ``__setattr__()`` wants to assign to an instance attribute, it\n should not simply execute ``self.name = value`` --- this would\n cause a recursive call to itself. Instead, it should insert the\n value in the dictionary of instance attributes, e.g.,\n ``self.__dict__[name] = value``. 
For new-style classes, rather\n than accessing the instance dictionary, it should call the base\n class method with the same name, for example,\n ``object.__setattr__(self, name, value)``.\n\nobject.__delattr__(self, name)\n\n Like ``__setattr__()`` but for attribute deletion instead of\n assignment. This should only be implemented if ``del obj.name`` is\n meaningful for the object.\n\n\nMore attribute access for new-style classes\n-------------------------------------------\n\nThe following methods only apply to new-style classes.\n\nobject.__getattribute__(self, name)\n\n Called unconditionally to implement attribute accesses for\n instances of the class. If the class also defines\n ``__getattr__()``, the latter will not be called unless\n ``__getattribute__()`` either calls it explicitly or raises an\n ``AttributeError``. This method should return the (computed)\n attribute value or raise an ``AttributeError`` exception. In order\n to avoid infinite recursion in this method, its implementation\n should always call the base class method with the same name to\n access any attributes it needs, for example,\n ``object.__getattribute__(self, name)``.\n\n Note: This method may still be bypassed when looking up special methods\n as the result of implicit invocation via language syntax or\n builtin functions. See *Special method lookup for new-style\n classes*.\n\n\nImplementing Descriptors\n------------------------\n\nThe following methods only apply when an instance of the class\ncontaining the method (a so-called *descriptor* class) appears in the\nclass dictionary of another new-style class, known as the *owner*\nclass. In the examples below, "the attribute" refers to the attribute\nwhose name is the key of the property in the owner class\'\n``__dict__``. Descriptors can only be implemented as new-style\nclasses themselves.\n\nobject.__get__(self, instance, owner)\n\n Called to get the attribute of the owner class (class attribute\n access) or of an instance of that class (instance attribute\n access). *owner* is always the owner class, while *instance* is the\n instance that the attribute was accessed through, or ``None`` when\n the attribute is accessed through the *owner*. This method should\n return the (computed) attribute value or raise an\n ``AttributeError`` exception.\n\nobject.__set__(self, instance, value)\n\n Called to set the attribute on an instance *instance* of the owner\n class to a new value, *value*.\n\nobject.__delete__(self, instance)\n\n Called to delete the attribute on an instance *instance* of the\n owner class.\n\n\nInvoking Descriptors\n--------------------\n\nIn general, a descriptor is an object attribute with "binding\nbehavior", one whose attribute access has been overridden by methods\nin the descriptor protocol: ``__get__()``, ``__set__()``, and\n``__delete__()``. If any of those methods are defined for an object,\nit is said to be a descriptor.\n\nThe default behavior for attribute access is to get, set, or delete\nthe attribute from an object\'s dictionary. For instance, ``a.x`` has a\nlookup chain starting with ``a.__dict__[\'x\']``, then\n``type(a).__dict__[\'x\']``, and continuing through the base classes of\n``type(a)`` excluding metaclasses.\n\nHowever, if the looked-up value is an object defining one of the\ndescriptor methods, then Python may override the default behavior and\ninvoke the descriptor method instead. Where this occurs in the\nprecedence chain depends on which descriptor methods were defined and\nhow they were called. 
Note that descriptors are only invoked for new-style\nobjects or classes (ones that subclass ``object()`` or\n``type()``).\n\nThe starting point for descriptor invocation is a binding, ``a.x``.\nHow the arguments are assembled depends on ``a``:\n\nDirect Call\n The simplest and least common call is when user code directly\n invokes a descriptor method: ``x.__get__(a)``.\n\nInstance Binding\n If binding to a new-style object instance, ``a.x`` is transformed\n into the call: ``type(a).__dict__[\'x\'].__get__(a, type(a))``.\n\nClass Binding\n If binding to a new-style class, ``A.x`` is transformed into the\n call: ``A.__dict__[\'x\'].__get__(None, A)``.\n\nSuper Binding\n If ``a`` is an instance of ``super``, then the binding ``super(B,\n obj).m()`` searches ``obj.__class__.__mro__`` for the base class\n ``A`` immediately preceding ``B`` and then invokes the descriptor\n with the call: ``A.__dict__[\'m\'].__get__(obj, A)``.\n\nFor instance bindings, the precedence of descriptor invocation depends\non which descriptor methods are defined. Normally, data\ndescriptors define both ``__get__()`` and ``__set__()``, while non-\ndata descriptors have just the ``__get__()`` method. Data descriptors\nalways override a redefinition in an instance dictionary. In\ncontrast, non-data descriptors can be overridden by instances. [2]\n\nPython methods (including ``staticmethod()`` and ``classmethod()``)\nare implemented as non-data descriptors. Accordingly, instances can\nredefine and override methods. This allows individual instances to\nacquire behaviors that differ from other instances of the same class.\n\nThe ``property()`` function is implemented as a data descriptor.\nAccordingly, instances cannot override the behavior of a property.\n\n\n__slots__\n---------\n\nBy default, instances of both old and new-style classes have a\ndictionary for attribute storage. This wastes space for objects\nhaving very few instance variables. The space consumption can become\nacute when creating large numbers of instances.\n\nThe default can be overridden by defining *__slots__* in a new-style\nclass definition. The *__slots__* declaration takes a sequence of\ninstance variables and reserves just enough space in each instance to\nhold a value for each variable. Space is saved because *__dict__* is\nnot created for each instance.\n\n__slots__\n\n This class variable can be assigned a string, iterable, or sequence\n of strings with variable names used by instances. If defined in a\n new-style class, *__slots__* reserves space for the declared\n variables and prevents the automatic creation of *__dict__* and\n *__weakref__* for each instance.\n\n New in version 2.2.\n\nNotes on using *__slots__*\n\n* When inheriting from a class without *__slots__*, the *__dict__*\n attribute of that class will always be accessible, so a *__slots__*\n definition in the subclass is meaningless.\n\n* Without a *__dict__* variable, instances cannot be assigned new\n variables not listed in the *__slots__* definition. Attempts to\n assign to an unlisted variable name raise ``AttributeError``. 
If\n dynamic assignment of new variables is desired, then add\n ``\'__dict__\'`` to the sequence of strings in the *__slots__*\n declaration.\n\n Changed in version 2.3: Previously, adding ``\'__dict__\'`` to the\n *__slots__* declaration would not enable the assignment of new\n attributes not specifically listed in the sequence of instance\n variable names.\n\n* Without a *__weakref__* variable for each instance, classes defining\n *__slots__* do not support weak references to its instances. If weak\n reference support is needed, then add ``\'__weakref__\'`` to the\n sequence of strings in the *__slots__* declaration.\n\n Changed in version 2.3: Previously, adding ``\'__weakref__\'`` to the\n *__slots__* declaration would not enable support for weak\n references.\n\n* *__slots__* are implemented at the class level by creating\n descriptors (*Implementing Descriptors*) for each variable name. As\n a result, class attributes cannot be used to set default values for\n instance variables defined by *__slots__*; otherwise, the class\n attribute would overwrite the descriptor assignment.\n\n* If a class defines a slot also defined in a base class, the instance\n variable defined by the base class slot is inaccessible (except by\n retrieving its descriptor directly from the base class). This\n renders the meaning of the program undefined. In the future, a\n check may be added to prevent this.\n\n* The action of a *__slots__* declaration is limited to the class\n where it is defined. As a result, subclasses will have a *__dict__*\n unless they also define *__slots__*.\n\n* Nonempty *__slots__* does not work for classes derived from\n "variable-length" built-in types such as ``long``, ``str`` and\n ``tuple``.\n\n* Any non-string iterable may be assigned to *__slots__*. Mappings may\n also be used; however, in the future, special meaning may be\n assigned to the values corresponding to each key.\n\n* *__class__* assignment works only if both classes have the same\n *__slots__*.\n\n Changed in version 2.6: Previously, *__class__* assignment raised an\n error if either new or old class had *__slots__*.\n\n\nCustomizing class creation\n==========================\n\nBy default, new-style classes are constructed using ``type()``. A\nclass definition is read into a separate namespace and the value of\nclass name is bound to the result of ``type(name, bases, dict)``.\n\nWhen the class definition is read, if *__metaclass__* is defined then\nthe callable assigned to it will be called instead of ``type()``. This\nallows classes or functions to be written which monitor or alter the\nclass creation process:\n\n* Modifying the class dictionary prior to the class being created.\n\n* Returning an instance of another class -- essentially performing the\n role of a factory function.\n\nThese steps will have to be performed in the metaclass\'s ``__new__()``\nmethod -- ``type.__new__()`` can then be called from this method to\ncreate a class with different properties. This example adds a new\nelement to the class dictionary before creating the class:\n\n class metacls(type):\n def __new__(mcs, name, bases, dict):\n dict[\'foo\'] = \'metacls was here\'\n return type.__new__(mcs, name, bases, dict)\n\nYou can of course also override other class methods (or add new\nmethods); for example defining a custom ``__call__()`` method in the\nmetaclass allows custom behavior when the class is called, e.g. 
not\nalways creating a new instance.\n\n__metaclass__\n\n This variable can be any callable accepting arguments for ``name``,\n ``bases``, and ``dict``. Upon class creation, the callable is used\n instead of the built-in ``type()``.\n\n New in version 2.2.\n\nThe appropriate metaclass is determined by the following precedence\nrules:\n\n* If ``dict[\'__metaclass__\']`` exists, it is used.\n\n* Otherwise, if there is at least one base class, its metaclass is\n used (this looks for a *__class__* attribute first and if not found,\n uses its type).\n\n* Otherwise, if a global variable named __metaclass__ exists, it is\n used.\n\n* Otherwise, the old-style, classic metaclass (types.ClassType) is\n used.\n\nThe potential uses for metaclasses are boundless. Some ideas that have\nbeen explored include logging, interface checking, automatic\ndelegation, automatic property creation, proxies, frameworks, and\nautomatic resource locking/synchronization.\n\n\nEmulating callable objects\n==========================\n\nobject.__call__(self[, args...])\n\n Called when the instance is "called" as a function; if this method\n is defined, ``x(arg1, arg2, ...)`` is a shorthand for\n ``x.__call__(arg1, arg2, ...)``.\n\n\nEmulating container types\n=========================\n\nThe following methods can be defined to implement container objects.\nContainers usually are sequences (such as lists or tuples) or mappings\n(like dictionaries), but can represent other containers as well. The\nfirst set of methods is used either to emulate a sequence or to\nemulate a mapping; the difference is that for a sequence, the\nallowable keys should be the integers *k* for which ``0 <= k < N``\nwhere *N* is the length of the sequence, or slice objects, which\ndefine a range of items. (For backwards compatibility, the method\n``__getslice__()`` (see below) can also be defined to handle simple,\nbut not extended slices.) It is also recommended that mappings provide\nthe methods ``keys()``, ``values()``, ``items()``, ``has_key()``,\n``get()``, ``clear()``, ``setdefault()``, ``iterkeys()``,\n``itervalues()``, ``iteritems()``, ``pop()``, ``popitem()``,\n``copy()``, and ``update()`` behaving similar to those for Python\'s\nstandard dictionary objects. The ``UserDict`` module provides a\n``DictMixin`` class to help create those methods from a base set of\n``__getitem__()``, ``__setitem__()``, ``__delitem__()``, and\n``keys()``. Mutable sequences should provide methods ``append()``,\n``count()``, ``index()``, ``extend()``, ``insert()``, ``pop()``,\n``remove()``, ``reverse()`` and ``sort()``, like Python standard list\nobjects. Finally, sequence types should implement addition (meaning\nconcatenation) and multiplication (meaning repetition) by defining the\nmethods ``__add__()``, ``__radd__()``, ``__iadd__()``, ``__mul__()``,\n``__rmul__()`` and ``__imul__()`` described below; they should not\ndefine ``__coerce__()`` or other numerical operators. It is\nrecommended that both mappings and sequences implement the\n``__contains__()`` method to allow efficient use of the ``in``\noperator; for mappings, ``in`` should be equivalent of ``has_key()``;\nfor sequences, it should search through the values. 
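A small mapping-style sketch following these recommendations (the ``LowerDict`` class is hypothetical and deliberately incomplete):\n\n   class LowerDict(object):\n       # a mapping with case-insensitive string keys\n       def __init__(self):\n           self._data = {}\n       def __setitem__(self, key, value):\n           self._data[key.lower()] = value\n       def __getitem__(self, key):\n           return self._data[key.lower()]    # raises KeyError if missing\n       def __contains__(self, key):\n           return key.lower() in self._data  # makes ``in`` efficient\n       def __iter__(self):\n           return iter(self._data)           # iterates over the keys\n\n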
It is further\nrecommended that both mappings and sequences implement the\n``__iter__()`` method to allow efficient iteration through the\ncontainer; for mappings, ``__iter__()`` should be the same as\n``iterkeys()``; for sequences, it should iterate through the values.\n\nobject.__len__(self)\n\n Called to implement the built-in function ``len()``. Should return\n the length of the object, an integer ``>=`` 0. Also, an object\n that doesn\'t define a ``__nonzero__()`` method and whose\n ``__len__()`` method returns zero is considered to be false in a\n Boolean context.\n\nobject.__getitem__(self, key)\n\n Called to implement evaluation of ``self[key]``. For sequence\n types, the accepted keys should be integers and slice objects.\n Note that the special interpretation of negative indexes (if the\n class wishes to emulate a sequence type) is up to the\n ``__getitem__()`` method. If *key* is of an inappropriate type,\n ``TypeError`` may be raised; if of a value outside the set of\n indexes for the sequence (after any special interpretation of\n negative values), ``IndexError`` should be raised. For mapping\n types, if *key* is missing (not in the container), ``KeyError``\n should be raised.\n\n Note: ``for`` loops expect that an ``IndexError`` will be raised for\n illegal indexes to allow proper detection of the end of the\n sequence.\n\nobject.__setitem__(self, key, value)\n\n Called to implement assignment to ``self[key]``. Same note as for\n ``__getitem__()``. This should only be implemented for mappings if\n the objects support changes to the values for keys, or if new keys\n can be added, or for sequences if elements can be replaced. The\n same exceptions should be raised for improper *key* values as for\n the ``__getitem__()`` method.\n\nobject.__delitem__(self, key)\n\n Called to implement deletion of ``self[key]``. Same note as for\n ``__getitem__()``. This should only be implemented for mappings if\n the objects support removal of keys, or for sequences if elements\n can be removed from the sequence. The same exceptions should be\n raised for improper *key* values as for the ``__getitem__()``\n method.\n\nobject.__iter__(self)\n\n This method is called when an iterator is required for a container.\n This method should return a new iterator object that can iterate\n over all the objects in the container. For mappings, it should\n iterate over the keys of the container, and should also be made\n available as the method ``iterkeys()``.\n\n Iterator objects also need to implement this method; they are\n required to return themselves. For more information on iterator\n objects, see *Iterator Types*.\n\nobject.__reversed__(self)\n\n Called (if present) by the ``reversed()`` builtin to implement\n reverse iteration. It should return a new iterator object that\n iterates over all the objects in the container in reverse order.\n\n If the ``__reversed__()`` method is not provided, the\n ``reversed()`` builtin will fall back to using the sequence\n protocol (``__len__()`` and ``__getitem__()``). Objects should\n normally only provide ``__reversed__()`` if they do not support the\n sequence protocol and an efficient implementation of reverse\n iteration is possible.\n\n New in version 2.6.\n\nThe membership test operators (``in`` and ``not in``) are normally\nimplemented as an iteration through a sequence. 
However, container\nobjects can supply the following special method with a more efficient\nimplementation, which also does not require the object be a sequence.\n\nobject.__contains__(self, item)\n\n Called to implement membership test operators. Should return true\n if *item* is in *self*, false otherwise. For mapping objects, this\n should consider the keys of the mapping rather than the values or\n the key-item pairs.\n\n\nAdditional methods for emulation of sequence types\n==================================================\n\nThe following optional methods can be defined to further emulate\nsequence objects. Immutable sequence methods should at most only\ndefine ``__getslice__()``; mutable sequences might define all three\nmethods.\n\nobject.__getslice__(self, i, j)\n\n Deprecated since version 2.0: Support slice objects as parameters\n to the ``__getitem__()`` method. (However, built-in types in\n CPython currently still implement ``__getslice__()``. Therefore,\n you have to override it in derived classes when implementing\n slicing.)\n\n Called to implement evaluation of ``self[i:j]``. The returned\n object should be of the same type as *self*. Note that missing *i*\n or *j* in the slice expression are replaced by zero or\n ``sys.maxint``, respectively. If negative indexes are used in the\n slice, the length of the sequence is added to that index. If the\n instance does not implement the ``__len__()`` method, an\n ``AttributeError`` is raised. No guarantee is made that indexes\n adjusted this way are not still negative. Indexes which are\n greater than the length of the sequence are not modified. If no\n ``__getslice__()`` is found, a slice object is created and\n passed to ``__getitem__()`` instead.\n\nobject.__setslice__(self, i, j, sequence)\n\n Called to implement assignment to ``self[i:j]``. Same notes for *i*\n and *j* as for ``__getslice__()``.\n\n This method is deprecated. If no ``__setslice__()`` is found, or\n for extended slicing of the form ``self[i:j:k]``, a slice object is\n created, and passed to ``__setitem__()``, instead of\n ``__setslice__()`` being called.\n\nobject.__delslice__(self, i, j)\n\n Called to implement deletion of ``self[i:j]``. Same notes for *i*\n and *j* as for ``__getslice__()``. This method is deprecated. If no\n ``__delslice__()`` is found, or for extended slicing of the form\n ``self[i:j:k]``, a slice object is created, and passed to\n ``__delitem__()``, instead of ``__delslice__()`` being called.\n\nNotice that these methods are only invoked when a single slice with a\nsingle colon is used, and the slice method is available. 
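\n\nA small probe class (invented here) makes the dispatch visible:\n\n   class Probe:\n       """Reports which slicing hook the interpreter invoked."""\n       def __getslice__(self, i, j):\n           return (\'getslice\', i, j)\n       def __getitem__(self, key):\n           return (\'getitem\', key)\n\n   >>> p = Probe()\n   >>> p[1:3]     # simple slice: __getslice__() is preferred\n   (\'getslice\', 1, 3)\n   >>> p[1:3:1]   # extended slice: a slice object is passed instead\n   (\'getitem\', slice(1, 3, 1))\n\n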
For slice\noperations involving extended slice notation, or in absence of the\nslice methods, ``__getitem__()``, ``__setitem__()`` or\n``__delitem__()`` is called with a slice object as argument.\n\nThe following example demonstrates how to make your program or module\ncompatible with earlier versions of Python (assuming that methods\n``__getitem__()``, ``__setitem__()`` and ``__delitem__()`` support\nslice objects as arguments):\n\n   class MyClass:\n       ...\n       def __getitem__(self, index):\n           ...\n       def __setitem__(self, index, value):\n           ...\n       def __delitem__(self, index):\n           ...\n\n       if sys.version_info < (2, 0):\n           # They won\'t be defined if version is at least 2.0 final\n\n           def __getslice__(self, i, j):\n               return self[max(0, i):max(0, j):]\n           def __setslice__(self, i, j, seq):\n               self[max(0, i):max(0, j):] = seq\n           def __delslice__(self, i, j):\n               del self[max(0, i):max(0, j):]\n   ...\n\nNote the calls to ``max()``; these are necessary because of the\nhandling of negative indices before the ``__*slice__()`` methods are\ncalled. When negative indexes are used, the ``__*item__()`` methods\nreceive them as provided, but the ``__*slice__()`` methods get a\n"cooked" form of the index values. For each negative index value, the\nlength of the sequence is added to the index before calling the method\n(which may still result in a negative index); this is the customary\nhandling of negative indexes by the built-in sequence types, and the\n``__*item__()`` methods are expected to do this as well. However,\nsince they should already be doing that, negative indexes cannot be\npassed in; they must be constrained to the bounds of the sequence\nbefore being passed to the ``__*item__()`` methods. Calling ``max(0,\ni)`` conveniently returns the proper value.\n\n\nEmulating numeric types\n=======================\n\nThe following methods can be defined to emulate numeric objects.\nMethods corresponding to operations that are not supported by the\nparticular kind of number implemented (e.g., bitwise operations for\nnon-integral numbers) should be left undefined.\n\nobject.__add__(self, other)\nobject.__sub__(self, other)\nobject.__mul__(self, other)\nobject.__floordiv__(self, other)\nobject.__mod__(self, other)\nobject.__divmod__(self, other)\nobject.__pow__(self, other[, modulo])\nobject.__lshift__(self, other)\nobject.__rshift__(self, other)\nobject.__and__(self, other)\nobject.__xor__(self, other)\nobject.__or__(self, other)\n\n These methods are called to implement the binary arithmetic\n operations (``+``, ``-``, ``*``, ``//``, ``%``, ``divmod()``,\n ``pow()``, ``**``, ``<<``, ``>>``, ``&``, ``^``, ``|``). For\n instance, to evaluate the expression ``x + y``, where *x* is an\n instance of a class that has an ``__add__()`` method,\n ``x.__add__(y)`` is called. The ``__divmod__()`` method should be\n equivalent to using ``__floordiv__()`` and ``__mod__()``; it\n should not be related to ``__truediv__()`` (described below). Note\n that ``__pow__()`` should be defined to accept an optional third\n argument if the ternary version of the built-in ``pow()`` function\n is to be supported.\n\n If one of those methods does not support the operation with the\n supplied arguments, it should return ``NotImplemented``.\n\nobject.__div__(self, other)\nobject.__truediv__(self, other)\n\n The division operator (``/``) is implemented by these methods. The\n ``__truediv__()`` method is used when ``__future__.division`` is in\n effect, otherwise ``__div__()`` is used. 
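\n\n For instance (a hedged sketch; the ``Ratio`` class is invented\n here), a type that wants ``/`` to behave the same way with and\n without ``from __future__ import division`` can alias one hook to\n the other:\n\n      class Ratio(object):\n          def __init__(self, num, den):\n              self.num, self.den = num, den\n          def __div__(self, other):\n              # (a/b) / (c/d) == (a*d) / (b*c)\n              return Ratio(self.num * other.den, self.den * other.num)\n          __truediv__ = __div__   # same behaviour under true division\n\n 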
If only one of these two\n methods is defined, the object will not support division in the\n alternate context; ``TypeError`` will be raised instead.\n\nobject.__radd__(self, other)\nobject.__rsub__(self, other)\nobject.__rmul__(self, other)\nobject.__rdiv__(self, other)\nobject.__rtruediv__(self, other)\nobject.__rfloordiv__(self, other)\nobject.__rmod__(self, other)\nobject.__rdivmod__(self, other)\nobject.__rpow__(self, other)\nobject.__rlshift__(self, other)\nobject.__rrshift__(self, other)\nobject.__rand__(self, other)\nobject.__rxor__(self, other)\nobject.__ror__(self, other)\n\n These methods are called to implement the binary arithmetic\n operations (``+``, ``-``, ``*``, ``/``, ``%``, ``divmod()``,\n ``pow()``, ``**``, ``<<``, ``>>``, ``&``, ``^``, ``|``) with\n reflected (swapped) operands. These functions are only called if\n the left operand does not support the corresponding operation and\n the operands are of different types. [3] For instance, to evaluate\n the expression ``x - y``, where *y* is an instance of a class that\n has an ``__rsub__()`` method, ``y.__rsub__(x)`` is called if\n ``x.__sub__(y)`` returns *NotImplemented*.\n\n Note that ternary ``pow()`` will not try calling ``__rpow__()``\n (the coercion rules would become too complicated).\n\n Note: If the right operand\'s type is a subclass of the left operand\'s\n type and that subclass provides the reflected method for the\n operation, this method will be called before the left operand\'s\n non-reflected method. This behavior allows subclasses to\n override their ancestors\' operations.\n\nobject.__iadd__(self, other)\nobject.__isub__(self, other)\nobject.__imul__(self, other)\nobject.__idiv__(self, other)\nobject.__itruediv__(self, other)\nobject.__ifloordiv__(self, other)\nobject.__imod__(self, other)\nobject.__ipow__(self, other[, modulo])\nobject.__ilshift__(self, other)\nobject.__irshift__(self, other)\nobject.__iand__(self, other)\nobject.__ixor__(self, other)\nobject.__ior__(self, other)\n\n These methods are called to implement the augmented arithmetic\n assignments (``+=``, ``-=``, ``*=``, ``/=``, ``//=``, ``%=``,\n ``**=``, ``<<=``, ``>>=``, ``&=``, ``^=``, ``|=``). These methods\n should attempt to do the operation in-place (modifying *self*) and\n return the result (which could be, but does not have to be,\n *self*). If a specific method is not defined, the augmented\n assignment falls back to the normal methods. For instance, to\n execute the statement ``x += y``, where *x* is an instance of a\n class that has an ``__iadd__()`` method, ``x.__iadd__(y)`` is\n called. If *x* is an instance of a class that does not define a\n ``__iadd__()`` method, ``x.__add__(y)`` and ``y.__radd__(x)`` are\n considered, as with the evaluation of ``x + y``.\n\nobject.__neg__(self)\nobject.__pos__(self)\nobject.__abs__(self)\nobject.__invert__(self)\n\n Called to implement the unary arithmetic operations (``-``, ``+``,\n ``abs()`` and ``~``).\n\nobject.__complex__(self)\nobject.__int__(self)\nobject.__long__(self)\nobject.__float__(self)\n\n Called to implement the built-in functions ``complex()``,\n ``int()``, ``long()``, and ``float()``. Should return a value of\n the appropriate type.\n\nobject.__oct__(self)\nobject.__hex__(self)\n\n Called to implement the built-in functions ``oct()`` and ``hex()``.\n Should return a string value.\n\nobject.__index__(self)\n\n Called to implement ``operator.index()``. Also called whenever\n Python needs an integer object (such as in slicing). 
Must return\n an integer (int or long).\n\n New in version 2.5.\n\nobject.__coerce__(self, other)\n\n Called to implement "mixed-mode" numeric arithmetic. Should either\n return a 2-tuple containing *self* and *other* converted to a\n common numeric type, or ``None`` if conversion is impossible. When\n the common type would be the type of ``other``, it is sufficient to\n return ``None``, since the interpreter will also ask the other\n object to attempt a coercion (but sometimes, if the implementation\n of the other type cannot be changed, it is useful to do the\n conversion to the other type here). A return value of\n ``NotImplemented`` is equivalent to returning ``None``.\n\n\nCoercion rules\n==============\n\nThis section used to document the rules for coercion. As the language\nhas evolved, the coercion rules have become hard to document\nprecisely; documenting what one version of one particular\nimplementation does is undesirable. Instead, here are some informal\nguidelines regarding coercion. In Python 3.0, coercion will not be\nsupported.\n\n* If the left operand of a % operator is a string or Unicode object,\n no coercion takes place and the string formatting operation is\n invoked instead.\n\n* It is no longer recommended to define a coercion operation. Mixed-\n mode operations on types that don\'t define coercion pass the\n original arguments to the operation.\n\n* New-style classes (those derived from ``object``) never invoke the\n ``__coerce__()`` method in response to a binary operator; the only\n time ``__coerce__()`` is invoked is when the built-in function\n ``coerce()`` is called.\n\n* For most intents and purposes, an operator that returns\n ``NotImplemented`` is treated the same as one that is not\n implemented at all.\n\n* Below, ``__op__()`` and ``__rop__()`` are used to signify the\n generic method names corresponding to an operator; ``__iop__()`` is\n used for the corresponding in-place operator. For example, for the\n operator \'``+``\', ``__add__()`` and ``__radd__()`` are used for the\n left and right variant of the binary operator, and ``__iadd__()``\n for the in-place variant.\n\n* For objects *x* and *y*, first ``x.__op__(y)`` is tried. If this is\n not implemented or returns ``NotImplemented``, ``y.__rop__(x)`` is\n tried. If this is also not implemented or returns\n ``NotImplemented``, a ``TypeError`` exception is raised. But see\n the following exception:\n\n* Exception to the previous item: if the left operand is an instance\n of a built-in type or a new-style class, and the right operand is an\n instance of a proper subclass of that type or class and overrides\n the base\'s ``__rop__()`` method, the right operand\'s ``__rop__()``\n method is tried *before* the left operand\'s ``__op__()`` method.\n\n This is done so that a subclass can completely override binary\n operators. Otherwise, the left operand\'s ``__op__()`` method would\n always accept the right operand: when an instance of a given class\n is expected, an instance of a subclass of that class is always\n acceptable.\n\n* When either operand type defines a coercion, this coercion is called\n before that type\'s ``__op__()`` or ``__rop__()`` method is called,\n but no sooner. If the coercion returns an object of a different\n type for the operand whose coercion is invoked, part of the process\n is redone using the new object.\n\n* When an in-place operator (like \'``+=``\') is used, if the left\n operand implements ``__iop__()``, it is invoked without any\n coercion. 
When the operation falls back to ``__op__()`` and/or\n ``__rop__()``, the normal coercion rules apply.\n\n* In ``x + y``, if *x* is a sequence that implements sequence\n concatenation, sequence concatenation is invoked.\n\n* In ``x * y``, if one operand is a sequence that implements sequence\n repetition, and the other is an integer (``int`` or ``long``),\n sequence repetition is invoked.\n\n* Rich comparisons (implemented by methods ``__eq__()`` and so on)\n never use coercion. Three-way comparison (implemented by\n ``__cmp__()``) does use coercion under the same conditions as other\n binary operations use it.\n\n* In the current implementation, the built-in numeric types ``int``,\n ``long`` and ``float`` do not use coercion; the type ``complex``\n however does use coercion for binary operators and rich comparisons,\n despite the above rules. The difference can become apparent when\n subclassing these types. Over time, the type ``complex`` may be\n fixed to avoid coercion. All these types implement a\n ``__coerce__()`` method, for use by the built-in ``coerce()``\n function.\n\n\nWith Statement Context Managers\n===============================\n\nNew in version 2.5.\n\nA *context manager* is an object that defines the runtime context to\nbe established when executing a ``with`` statement. The context\nmanager handles the entry into, and the exit from, the desired runtime\ncontext for the execution of the block of code. Context managers are\nnormally invoked using the ``with`` statement (described in section\n*The with statement*), but can also be used by directly invoking their\nmethods.\n\nTypical uses of context managers include saving and restoring various\nkinds of global state, locking and unlocking resources, closing opened\nfiles, etc.\n\nFor more information on context managers, see *Context Manager Types*.\n\nobject.__enter__(self)\n\n Enter the runtime context related to this object. The ``with``\n statement will bind this method\'s return value to the target(s)\n specified in the ``as`` clause of the statement, if any.\n\nobject.__exit__(self, exc_type, exc_value, traceback)\n\n Exit the runtime context related to this object. The parameters\n describe the exception that caused the context to be exited. If the\n context was exited without an exception, all three arguments will\n be ``None``.\n\n If an exception is supplied, and the method wishes to suppress the\n exception (i.e., prevent it from being propagated), it should\n return a true value. Otherwise, the exception will be processed\n normally upon exit from this method.\n\n Note that ``__exit__()`` methods should not reraise the passed-in\n exception; this is the caller\'s responsibility.\n\nSee also:\n\n **PEP 0343** - The "with" statement\n The specification, background, and examples for the Python\n ``with`` statement.\n\n\nSpecial method lookup for old-style classes\n===========================================\n\nFor old-style classes, special methods are always looked up in exactly\nthe same way as any other method or attribute. This is the case\nregardless of whether the method is being looked up explicitly as in\n``x.__getitem__(i)`` or implicitly as in ``x[i]``.\n\nThis behaviour means that special methods may exhibit different\nbehaviour for different instances of a single old-style class if the\nappropriate special attributes are set differently:\n\n >>> class C:\n ... 
pass\n ...\n >>> c1 = C()\n >>> c2 = C()\n >>> c1.__len__ = lambda: 5\n >>> c2.__len__ = lambda: 9\n >>> len(c1)\n 5\n >>> len(c2)\n 9\n\n\nSpecial method lookup for new-style classes\n===========================================\n\nFor new-style classes, implicit invocations of special methods are\nonly guaranteed to work correctly if defined on an object\'s type, not\nin the object\'s instance dictionary. That behaviour is the reason why\nthe following code raises an exception (unlike the equivalent example\nwith old-style classes):\n\n >>> class C(object):\n ... pass\n ...\n >>> c = C()\n >>> c.__len__ = lambda: 5\n >>> len(c)\n Traceback (most recent call last):\n File "<stdin>", line 1, in <module>\n TypeError: object of type \'C\' has no len()\n\nThe rationale behind this behaviour lies with a number of special\nmethods such as ``__hash__()`` and ``__repr__()`` that are implemented\nby all objects, including type objects. If the implicit lookup of\nthese methods used the conventional lookup process, they would fail\nwhen invoked on the type object itself:\n\n >>> 1 .__hash__() == hash(1)\n True\n >>> int.__hash__() == hash(int)\n Traceback (most recent call last):\n File "<stdin>", line 1, in <module>\n TypeError: descriptor \'__hash__\' of \'int\' object needs an argument\n\nIncorrectly attempting to invoke an unbound method of a class in this\nway is sometimes referred to as \'metaclass confusion\', and is avoided\nby bypassing the instance when looking up special methods:\n\n >>> type(1).__hash__(1) == hash(1)\n True\n >>> type(int).__hash__(int) == hash(int)\n True\n\nIn addition to bypassing any instance attributes in the interest of\ncorrectness, implicit special method lookup generally also bypasses\nthe ``__getattribute__()`` method even of the object\'s metaclass:\n\n >>> class Meta(type):\n ... def __getattribute__(*args):\n ... print "Metaclass getattribute invoked"\n ... return type.__getattribute__(*args)\n ...\n >>> class C(object):\n ... __metaclass__ = Meta\n ... def __len__(self):\n ... return 10\n ... def __getattribute__(*args):\n ... print "Class getattribute invoked"\n ... return object.__getattribute__(*args)\n ...\n >>> c = C()\n >>> c.__len__() # Explicit lookup via instance\n Class getattribute invoked\n 10\n >>> type(c).__len__(c) # Explicit lookup via type\n Metaclass getattribute invoked\n 10\n >>> len(c) # Implicit lookup\n 10\n\nBypassing the ``__getattribute__()`` machinery in this fashion\nprovides significant scope for speed optimisations within the\ninterpreter, at the cost of some flexibility in the handling of\nspecial methods (the special method *must* be set on the class object\nitself in order to be consistently invoked by the interpreter).\n\n-[ Footnotes ]-\n\n[1] It *is* possible in some cases to change an object\'s type, under\n certain controlled conditions. It generally isn\'t a good idea\n though, since it can lead to some very strange behaviour if it is\n handled incorrectly.\n\n[2] A descriptor can define any combination of ``__get__()``,\n ``__set__()`` and ``__delete__()``. If it does not define\n ``__get__()``, then accessing the attribute even on an instance\n will return the descriptor object itself. 
If the descriptor\n defines ``__set__()`` and/or ``__delete__()``, it is a data\n descriptor; if it defines neither, it is a non-data descriptor.\n\n[3] For operands of the same type, it is assumed that if the non-\n reflected method (such as ``__add__()``) fails the operation is\n not supported, which is why the reflected method is not called.\n', 'string-conversions': u'\nString conversions\n******************\n\nA string conversion is an expression list enclosed in reverse (a.k.a.\nbackward) quotes:\n\n string_conversion ::= "`" expression_list "`"\n\nA string conversion evaluates the contained expression list and\nconverts the resulting object into a string according to rules\nspecific to its type.\n\nIf the object is a string, a number, ``None``, or a tuple, list or\ndictionary containing only objects whose type is one of these, the\nresulting string is a valid Python expression which can be passed to\nthe built-in function ``eval()`` to yield an expression with the same\nvalue (or an approximation, if floating point numbers are involved).\n\n(In particular, converting a string adds quotes around it and converts\n"funny" characters to escape sequences that are safe to print.)\n\nRecursive objects (for example, lists or dictionaries that contain a\nreference to themselves, directly or indirectly) use ``...`` to\nindicate a recursive reference, and the result cannot be passed to\n``eval()`` to get an equal value (``SyntaxError`` will be raised\ninstead).\n\nThe built-in function ``repr()`` performs exactly the same conversion\nof its argument as enclosing it in parentheses and reverse quotes\ndoes. The built-in function ``str()`` performs a similar but more\nuser-friendly conversion.\n', 'string-methods': u'\nString Methods\n**************\n\nBelow are listed the string methods which both 8-bit strings and\nUnicode objects support. Note that none of these methods take keyword\narguments.\n\nIn addition, Python\'s strings support the sequence type methods\ndescribed in the *Sequence Types --- str, unicode, list, tuple,\nbuffer, xrange* section. To output formatted strings use template\nstrings or the ``%`` operator described in the *String Formatting\nOperations* section. Also, see the ``re`` module for string functions\nbased on regular expressions.\n\nstr.capitalize()\n\n Return a copy of the string with only its first character\n capitalized.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.center(width[, fillchar])\n\n Return the string centered in a string of length *width*. Padding\n is done using the specified *fillchar* (default is a space).\n\n Changed in version 2.4: Support for the *fillchar* argument.\n\nstr.count(sub[, start[, end]])\n\n Return the number of non-overlapping occurrences of substring *sub*\n in the range [*start*, *end*]. Optional arguments *start* and\n *end* are interpreted as in slice notation.\n\nstr.decode([encoding[, errors]])\n\n Decodes the string using the codec registered for *encoding*.\n *encoding* defaults to the default string encoding. *errors* may\n be given to set a different error handling scheme. The default is\n ``\'strict\'``, meaning that encoding errors raise ``UnicodeError``.\n Other possible values are ``\'ignore\'``, ``\'replace\'`` and any other\n name registered via ``codecs.register_error()``, see section *Codec\n Base Classes*.\n\n New in version 2.2.\n\n Changed in version 2.3: Support for other error handling schemes\n added.\n\nstr.encode([encoding[, errors]])\n\n Return an encoded version of the string. 
Default encoding is the\n current default string encoding. *errors* may be given to set a\n different error handling scheme. The default for *errors* is\n ``\'strict\'``, meaning that encoding errors raise a\n ``UnicodeError``. Other possible values are ``\'ignore\'``,\n ``\'replace\'``, ``\'xmlcharrefreplace\'``, ``\'backslashreplace\'`` and\n any other name registered via ``codecs.register_error()``, see\n section *Codec Base Classes*. For a list of possible encodings, see\n section *Standard Encodings*.\n\n New in version 2.0.\n\n Changed in version 2.3: Support for ``\'xmlcharrefreplace\'`` and\n ``\'backslashreplace\'`` and other error handling schemes added.\n\nstr.endswith(suffix[, start[, end]])\n\n Return ``True`` if the string ends with the specified *suffix*,\n otherwise return ``False``. *suffix* can also be a tuple of\n suffixes to look for. With optional *start*, test beginning at\n that position. With optional *end*, stop comparing at that\n position.\n\n Changed in version 2.5: Accept tuples as *suffix*.\n\nstr.expandtabs([tabsize])\n\n Return a copy of the string where all tab characters are replaced\n by one or more spaces, depending on the current column and the\n given tab size. The column number is reset to zero after each\n newline occurring in the string. If *tabsize* is not given, a tab\n size of ``8`` characters is assumed. This doesn\'t understand other\n non-printing characters or escape sequences.\n\nstr.find(sub[, start[, end]])\n\n Return the lowest index in the string where substring *sub* is\n found, such that *sub* is contained in the range [*start*, *end*].\n Optional arguments *start* and *end* are interpreted as in slice\n notation. Return ``-1`` if *sub* is not found.\n\nstr.format(format_string, *args, **kwargs)\n\n Perform a string formatting operation. The *format_string*\n argument can contain literal text or replacement fields delimited\n by braces ``{}``. Each replacement field contains either the\n numeric index of a positional argument, or the name of a keyword\n argument. 
Returns a copy of *format_string* where each replacement\n field is replaced with the string value of the corresponding\n argument.\n\n >>> "The sum of 1 + 2 is {0}".format(1+2)\n \'The sum of 1 + 2 is 3\'\n\n See *Format String Syntax* for a description of the various\n formatting options that can be specified in format strings.\n\n This method of string formatting is the new standard in Python 3.0,\n and should be preferred to the ``%`` formatting described in\n *String Formatting Operations* in new code.\n\n New in version 2.6.\n\nstr.index(sub[, start[, end]])\n\n Like ``find()``, but raise ``ValueError`` when the substring is not\n found.\n\nstr.isalnum()\n\n Return true if all characters in the string are alphanumeric and\n there is at least one character, false otherwise.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.isalpha()\n\n Return true if all characters in the string are alphabetic and\n there is at least one character, false otherwise.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.isdigit()\n\n Return true if all characters in the string are digits and there is\n at least one character, false otherwise.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.islower()\n\n Return true if all cased characters in the string are lowercase and\n there is at least one cased character, false otherwise.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.isspace()\n\n Return true if there are only whitespace characters in the string\n and there is at least one character, false otherwise.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.istitle()\n\n Return true if the string is a titlecased string and there is at\n least one character, for example uppercase characters may only\n follow uncased characters and lowercase characters only cased ones.\n Return false otherwise.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.isupper()\n\n Return true if all cased characters in the string are uppercase and\n there is at least one cased character, false otherwise.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.join(seq)\n\n Return a string which is the concatenation of the strings in the\n sequence *seq*. The separator between elements is the string\n providing this method.\n\nstr.ljust(width[, fillchar])\n\n Return the string left justified in a string of length *width*.\n Padding is done using the specified *fillchar* (default is a\n space). The original string is returned if *width* is less than\n ``len(s)``.\n\n Changed in version 2.4: Support for the *fillchar* argument.\n\nstr.lower()\n\n Return a copy of the string converted to lowercase.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.lstrip([chars])\n\n Return a copy of the string with leading characters removed. The\n *chars* argument is a string specifying the set of characters to be\n removed. If omitted or ``None``, the *chars* argument defaults to\n removing whitespace. The *chars* argument is not a prefix; rather,\n all combinations of its values are stripped:\n\n >>> \' spacious \'.lstrip()\n \'spacious \'\n >>> \'www.example.com\'.lstrip(\'cmowz.\')\n \'example.com\'\n\n Changed in version 2.2.2: Support for the *chars* argument.\n\nstr.partition(sep)\n\n Split the string at the first occurrence of *sep*, and return a\n 3-tuple containing the part before the separator, the separator\n itself, and the part after the separator. 
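\n\n For example:\n\n      >>> \'user=guido\'.partition(\'=\')\n      (\'user\', \'=\', \'guido\')\n\n 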
If the separator is not\n found, return a 3-tuple containing the string itself, followed by\n two empty strings.\n\n New in version 2.5.\n\nstr.replace(old, new[, count])\n\n Return a copy of the string with all occurrences of substring *old*\n replaced by *new*. If the optional argument *count* is given, only\n the first *count* occurrences are replaced.\n\nstr.rfind(sub[, start[, end]])\n\n Return the highest index in the string where substring *sub* is\n found, such that *sub* is contained within ``s[start:end]``.\n Optional arguments *start* and *end* are interpreted as in slice\n notation. Return ``-1`` on failure.\n\nstr.rindex(sub[, start[, end]])\n\n Like ``rfind()`` but raises ``ValueError`` when the substring *sub*\n is not found.\n\nstr.rjust(width[, fillchar])\n\n Return the string right justified in a string of length *width*.\n Padding is done using the specified *fillchar* (default is a\n space). The original string is returned if *width* is less than\n ``len(s)``.\n\n Changed in version 2.4: Support for the *fillchar* argument.\n\nstr.rpartition(sep)\n\n Split the string at the last occurrence of *sep*, and return a\n 3-tuple containing the part before the separator, the separator\n itself, and the part after the separator. If the separator is not\n found, return a 3-tuple containing two empty strings, followed by\n the string itself.\n\n New in version 2.5.\n\nstr.rsplit([sep[, maxsplit]])\n\n Return a list of the words in the string, using *sep* as the\n delimiter string. If *maxsplit* is given, at most *maxsplit* splits\n are done, the *rightmost* ones. If *sep* is not specified or\n ``None``, any whitespace string is a separator. Except for\n splitting from the right, ``rsplit()`` behaves like ``split()``\n which is described in detail below.\n\n New in version 2.4.\n\nstr.rstrip([chars])\n\n Return a copy of the string with trailing characters removed. The\n *chars* argument is a string specifying the set of characters to be\n removed. If omitted or ``None``, the *chars* argument defaults to\n removing whitespace. The *chars* argument is not a suffix; rather,\n all combinations of its values are stripped:\n\n >>> \' spacious \'.rstrip()\n \' spacious\'\n >>> \'mississippi\'.rstrip(\'ipz\')\n \'mississ\'\n\n Changed in version 2.2.2: Support for the *chars* argument.\n\nstr.split([sep[, maxsplit]])\n\n Return a list of the words in the string, using *sep* as the\n delimiter string. If *maxsplit* is given, at most *maxsplit*\n splits are done (thus, the list will have at most ``maxsplit+1``\n elements). If *maxsplit* is not specified, then there is no limit\n on the number of splits (all possible splits are made).\n\n If *sep* is given, consecutive delimiters are not grouped together\n and are deemed to delimit empty strings (for example,\n ``\'1,,2\'.split(\',\')`` returns ``[\'1\', \'\', \'2\']``). The *sep*\n argument may consist of multiple characters (for example,\n ``\'1<>2<>3\'.split(\'<>\')`` returns ``[\'1\', \'2\', \'3\']``). Splitting\n an empty string with a specified separator returns ``[\'\']``.\n\n If *sep* is not specified or is ``None``, a different splitting\n algorithm is applied: runs of consecutive whitespace are regarded\n as a single separator, and the result will contain no empty strings\n at the start or end if the string has leading or trailing\n whitespace. 
Consequently, splitting an empty string or a string\n consisting of just whitespace with a ``None`` separator returns\n ``[]``.\n\n For example, ``\' 1 2 3 \'.split()`` returns ``[\'1\', \'2\', \'3\']``,\n and ``\' 1 2 3 \'.split(None, 1)`` returns ``[\'1\', \'2 3 \']``.\n\nstr.splitlines([keepends])\n\n Return a list of the lines in the string, breaking at line\n boundaries. Line breaks are not included in the resulting list\n unless *keepends* is given and true.\n\nstr.startswith(prefix[, start[, end]])\n\n Return ``True`` if string starts with the *prefix*, otherwise\n return ``False``. *prefix* can also be a tuple of prefixes to look\n for. With optional *start*, test string beginning at that\n position. With optional *end*, stop comparing string at that\n position.\n\n Changed in version 2.5: Accept tuples as *prefix*.\n\nstr.strip([chars])\n\n Return a copy of the string with the leading and trailing\n characters removed. The *chars* argument is a string specifying the\n set of characters to be removed. If omitted or ``None``, the\n *chars* argument defaults to removing whitespace. The *chars*\n argument is not a prefix or suffix; rather, all combinations of its\n values are stripped:\n\n >>> \' spacious \'.strip()\n \'spacious\'\n >>> \'www.example.com\'.strip(\'cmowz.\')\n \'example\'\n\n Changed in version 2.2.2: Support for the *chars* argument.\n\nstr.swapcase()\n\n Return a copy of the string with uppercase characters converted to\n lowercase and vice versa.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.title()\n\n Return a titlecased version of the string: words start with\n uppercase characters, all remaining cased characters are lowercase.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.translate(table[, deletechars])\n\n Return a copy of the string where all characters occurring in the\n optional argument *deletechars* are removed, and the remaining\n characters have been mapped through the given translation table,\n which must be a string of length 256.\n\n You can use the ``maketrans()`` helper function in the ``string``\n module to create a translation table. For string objects, set the\n *table* argument to ``None`` for translations that only delete\n characters:\n\n >>> \'read this short text\'.translate(None, \'aeiou\')\n \'rd ths shrt txt\'\n\n New in version 2.6: Support for a ``None`` *table* argument.\n\n For Unicode objects, the ``translate()`` method does not accept the\n optional *deletechars* argument. Instead, it returns a copy of the\n *s* where all characters have been mapped through the given\n translation table which must be a mapping of Unicode ordinals to\n Unicode ordinals, Unicode strings or ``None``. Unmapped characters\n are left untouched. Characters mapped to ``None`` are deleted.\n Note, a more flexible approach is to create a custom character\n mapping codec using the ``codecs`` module (see ``encodings.cp1251``\n for an example).\n\nstr.upper()\n\n Return a copy of the string converted to uppercase.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.zfill(width)\n\n Return the numeric string left filled with zeros in a string of\n length *width*. A sign prefix is handled correctly. The original\n string is returned if *width* is less than ``len(s)``.\n\n New in version 2.2.2.\n\nThe following methods are present only on unicode objects:\n\nunicode.isnumeric()\n\n Return ``True`` if there are only numeric characters in S,\n ``False`` otherwise. 
Numeric characters include digit characters,\n and all characters that have the Unicode numeric value property,\n e.g. U+2155, VULGAR FRACTION ONE FIFTH.\n\nunicode.isdecimal()\n\n Return ``True`` if there are only decimal characters in S,\n ``False`` otherwise. Decimal characters include digit characters,\n and all characters that can be used to form decimal-radix\n numbers, e.g. U+0660, ARABIC-INDIC DIGIT ZERO.\n', 'strings': u'\nString literals\n***************\n\nString literals are described by the following lexical definitions:\n\n stringliteral ::= [stringprefix](shortstring | longstring)\n stringprefix ::= "r" | "u" | "ur" | "R" | "U" | "UR" | "Ur" | "uR"\n shortstring ::= "\'" shortstringitem* "\'" | \'"\' shortstringitem* \'"\'\n longstring ::= "\'\'\'" longstringitem* "\'\'\'"\n | \'"""\' longstringitem* \'"""\'\n shortstringitem ::= shortstringchar | escapeseq\n longstringitem ::= longstringchar | escapeseq\n shortstringchar ::= <any source character except "\\" or newline or the quote>\n longstringchar ::= <any source character except "\\">\n escapeseq ::= "\\" <any ASCII character>\n\nOne syntactic restriction not indicated by these productions is that\nwhitespace is not allowed between the **stringprefix** and the rest of\nthe string literal. The source character set is defined by the\nencoding declaration; it is ASCII if no encoding declaration is given\nin the source file; see section *Encoding declarations*.\n\nIn plain English: String literals can be enclosed in matching single\nquotes (``\'``) or double quotes (``"``). They can also be enclosed in\nmatching groups of three single or double quotes (these are generally\nreferred to as *triple-quoted strings*). The backslash (``\\``)\ncharacter is used to escape characters that otherwise have a special\nmeaning, such as newline, backslash itself, or the quote character.\nString literals may optionally be prefixed with a letter ``\'r\'`` or\n``\'R\'``; such strings are called *raw strings* and use different rules\nfor interpreting backslash escape sequences. A prefix of ``\'u\'`` or\n``\'U\'`` makes the string a Unicode string. Unicode strings use the\nUnicode character set as defined by the Unicode Consortium and ISO\n10646. Some additional escape sequences, described below, are\navailable in Unicode strings. The two prefix characters may be\ncombined; in this case, ``\'u\'`` must appear before ``\'r\'``.\n\nIn triple-quoted strings, unescaped newlines and quotes are allowed\n(and are retained), except that three unescaped quotes in a row\nterminate the string. (A "quote" is the character used to open the\nstring, i.e. either ``\'`` or ``"``.)\n\nUnless an ``\'r\'`` or ``\'R\'`` prefix is present, escape sequences in\nstrings are interpreted according to rules similar to those used by\nStandard C. 
The recognized escape sequences are:\n\n+-------------------+-----------------------------------+---------+\n| Escape Sequence | Meaning | Notes |\n+===================+===================================+=========+\n| ``\\newline`` | Ignored | |\n+-------------------+-----------------------------------+---------+\n| ``\\\\`` | Backslash (``\\``) | |\n+-------------------+-----------------------------------+---------+\n| ``\\\'`` | Single quote (``\'``) | |\n+-------------------+-----------------------------------+---------+\n| ``\\"`` | Double quote (``"``) | |\n+-------------------+-----------------------------------+---------+\n| ``\\a`` | ASCII Bell (BEL) | |\n+-------------------+-----------------------------------+---------+\n| ``\\b`` | ASCII Backspace (BS) | |\n+-------------------+-----------------------------------+---------+\n| ``\\f`` | ASCII Formfeed (FF) | |\n+-------------------+-----------------------------------+---------+\n| ``\\n`` | ASCII Linefeed (LF) | |\n+-------------------+-----------------------------------+---------+\n| ``\\N{name}`` | Character named *name* in the | |\n| | Unicode database (Unicode only) | |\n+-------------------+-----------------------------------+---------+\n| ``\\r`` | ASCII Carriage Return (CR) | |\n+-------------------+-----------------------------------+---------+\n| ``\\t`` | ASCII Horizontal Tab (TAB) | |\n+-------------------+-----------------------------------+---------+\n| ``\\uxxxx`` | Character with 16-bit hex value | (1) |\n| | *xxxx* (Unicode only) | |\n+-------------------+-----------------------------------+---------+\n| ``\\Uxxxxxxxx`` | Character with 32-bit hex value | (2) |\n| | *xxxxxxxx* (Unicode only) | |\n+-------------------+-----------------------------------+---------+\n| ``\\v`` | ASCII Vertical Tab (VT) | |\n+-------------------+-----------------------------------+---------+\n| ``\\ooo`` | Character with octal value *ooo* | (3,5) |\n+-------------------+-----------------------------------+---------+\n| ``\\xhh`` | Character with hex value *hh* | (4,5) |\n+-------------------+-----------------------------------+---------+\n\nNotes:\n\n1. Individual code units which form parts of a surrogate pair can be\n encoded using this escape sequence.\n\n2. Any Unicode character can be encoded this way, but characters\n outside the Basic Multilingual Plane (BMP) will be encoded using a\n surrogate pair if Python is compiled to use 16-bit code units (the\n default). Individual code units which form parts of a surrogate\n pair can be encoded using this escape sequence.\n\n3. As in Standard C, up to three octal digits are accepted.\n\n4. Unlike in Standard C, exactly two hex digits are required.\n\n5. In a string literal, hexadecimal and octal escapes denote the byte\n with the given value; it is not necessary that the byte encodes a\n character in the source character set. In a Unicode literal, these\n escapes denote a Unicode character with the given value.\n\nUnlike Standard C, all unrecognized escape sequences are left in the\nstring unchanged, i.e., *the backslash is left in the string*. (This\nbehavior is useful when debugging: if an escape sequence is mistyped,\nthe resulting output is more easily recognized as broken.) 
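\n\nFor instance, an unrecognized escape such as ``\\q`` keeps its\nbackslash:\n\n   >>> \'\\q\' == \'\\\\q\'\n   True\n   >>> len(\'\\q\')\n   2\n\n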
It is also\nimportant to note that the escape sequences marked as "(Unicode only)"\nin the table above fall into the category of unrecognized escapes for\nnon-Unicode string literals.\n\nWhen an ``\'r\'`` or ``\'R\'`` prefix is present, a character following a\nbackslash is included in the string without change, and *all\nbackslashes are left in the string*. For example, the string literal\n``r"\\n"`` consists of two characters: a backslash and a lowercase\n``\'n\'``. String quotes can be escaped with a backslash, but the\nbackslash remains in the string; for example, ``r"\\""`` is a valid\nstring literal consisting of two characters: a backslash and a double\nquote; ``r"\\"`` is not a valid string literal (even a raw string\ncannot end in an odd number of backslashes). Specifically, *a raw\nstring cannot end in a single backslash* (since the backslash would\nescape the following quote character). Note also that a single\nbackslash followed by a newline is interpreted as those two characters\nas part of the string, *not* as a line continuation.\n\nWhen an ``\'r\'`` or ``\'R\'`` prefix is used in conjunction with a\n``\'u\'`` or ``\'U\'`` prefix, then the ``\\uXXXX`` and ``\\UXXXXXXXX``\nescape sequences are processed while *all other backslashes are left\nin the string*. For example, the string literal ``ur"\\u0062\\n"``\nconsists of three Unicode characters: \'LATIN SMALL LETTER B\', \'REVERSE\nSOLIDUS\', and \'LATIN SMALL LETTER N\'. Backslashes can be escaped with\na preceding backslash; however, both remain in the string. As a\nresult, ``\\uXXXX`` escape sequences are only recognized when there are\nan odd number of backslashes.\n', 'subscriptions': u'\nSubscriptions\n*************\n\nA subscription selects an item of a sequence (string, tuple or list)\nor mapping (dictionary) object:\n\n subscription ::= primary "[" expression_list "]"\n\nThe primary must evaluate to an object of a sequence or mapping type.\n\nIf the primary is a mapping, the expression list must evaluate to an\nobject whose value is one of the keys of the mapping, and the\nsubscription selects the value in the mapping that corresponds to that\nkey. (The expression list is a tuple except if it has exactly one\nitem.)\n\nIf the primary is a sequence, the expression (list) must evaluate to a\nplain integer. If this value is negative, the length of the sequence\nis added to it (so that, e.g., ``x[-1]`` selects the last item of\n``x``.) The resulting value must be a nonnegative integer less than\nthe number of items in the sequence, and the subscription selects the\nitem whose index is that value (counting from zero).\n\nA string\'s items are characters. A character is not a separate data\ntype but a string of exactly one character.\n', 'truth': u"\nTruth Value Testing\n*******************\n\nAny object can be tested for truth value, for use in an ``if`` or\n``while`` condition or as operand of the Boolean operations below. The\nfollowing values are considered false:\n\n* ``None``\n\n* ``False``\n\n* zero of any numeric type, for example, ``0``, ``0L``, ``0.0``,\n ``0j``.\n\n* any empty sequence, for example, ``''``, ``()``, ``[]``.\n\n* any empty mapping, for example, ``{}``.\n\n* instances of user-defined classes, if the class defines a\n ``__nonzero__()`` or ``__len__()`` method, when that method returns\n the integer zero or ``bool`` value ``False``. 
[1]\n\nAll other values are considered true --- so objects of many types are\nalways true.\n\nOperations and built-in functions that have a Boolean result always\nreturn ``0`` or ``False`` for false and ``1`` or ``True`` for true,\nunless otherwise stated. (Important exception: the Boolean operations\n``or`` and ``and`` always return one of their operands.)\n", 'try': u'\nThe ``try`` statement\n*********************\n\nThe ``try`` statement specifies exception handlers and/or cleanup code\nfor a group of statements:\n\n try_stmt ::= try1_stmt | try2_stmt\n try1_stmt ::= "try" ":" suite\n ("except" [expression [("as" | ",") target]] ":" suite)+\n ["else" ":" suite]\n ["finally" ":" suite]\n try2_stmt ::= "try" ":" suite\n "finally" ":" suite\n\nChanged in version 2.5: In previous versions of Python,\n``try``...``except``...``finally`` did not work. ``try``...``except``\nhad to be nested in ``try``...``finally``.\n\nThe ``except`` clause(s) specify one or more exception handlers. When\nno exception occurs in the ``try`` clause, no exception handler is\nexecuted. When an exception occurs in the ``try`` suite, a search for\nan exception handler is started. This search inspects the except\nclauses in turn until one is found that matches the exception. An\nexpression-less except clause, if present, must be last; it matches\nany exception. For an except clause with an expression, that\nexpression is evaluated, and the clause matches the exception if the\nresulting object is "compatible" with the exception. An object is\ncompatible with an exception if it is the class or a base class of the\nexception object, a tuple containing an item compatible with the\nexception, or, in the (deprecated) case of string exceptions, is the\nraised string itself (note that the object identities must match, i.e.\nit must be the same string object, not just a string with the same\nvalue).\n\nIf no except clause matches the exception, the search for an exception\nhandler continues in the surrounding code and on the invocation stack.\n[1]\n\nIf the evaluation of an expression in the header of an except clause\nraises an exception, the original search for a handler is canceled and\na search starts for the new exception in the surrounding code and on\nthe call stack (it is treated as if the entire ``try`` statement\nraised the exception).\n\nWhen a matching except clause is found, the exception is assigned to\nthe target specified in that except clause, if present, and the except\nclause\'s suite is executed. All except clauses must have an\nexecutable block. When the end of this block is reached, execution\ncontinues normally after the entire try statement. (This means that\nif two nested handlers exist for the same exception, and the exception\noccurs in the try clause of the inner handler, the outer handler will\nnot handle the exception.)\n\nBefore an except clause\'s suite is executed, details about the\nexception are assigned to three variables in the ``sys`` module:\n``sys.exc_type`` receives the object identifying the exception;\n``sys.exc_value`` receives the exception\'s parameter;\n``sys.exc_traceback`` receives a traceback object (see section *The\nstandard type hierarchy*) identifying the point in the program where\nthe exception occurred. These details are also available through the\n``sys.exc_info()`` function, which returns a tuple ``(exc_type,\nexc_value, exc_traceback)``. Use of the corresponding variables is\ndeprecated in favor of this function, since their use is unsafe in a\nthreaded program. 
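\n\nA minimal sketch of the preferred style (the handler below is invented\nfor this example):\n\n   import sys\n\n   try:\n       1 / 0\n   except ZeroDivisionError:\n       # sys.exc_info() is thread-safe, unlike the sys.exc_* variables.\n       exc_type, exc_value, exc_traceback = sys.exc_info()\n       print \'handled %s: %s\' % (exc_type.__name__, exc_value)\n\n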
As of Python 1.5, the variables are restored to\ntheir previous values (before the call) when returning from a function\nthat handled an exception.\n\nThe optional ``else`` clause is executed if and when control flows off\nthe end of the ``try`` clause. [2] Exceptions in the ``else`` clause\nare not handled by the preceding ``except`` clauses.\n\nIf ``finally`` is present, it specifies a \'cleanup\' handler. The\n``try`` clause is executed, including any ``except`` and ``else``\nclauses. If an exception occurs in any of the clauses and is not\nhandled, the exception is temporarily saved. The ``finally`` clause is\nexecuted. If there is a saved exception, it is re-raised at the end\nof the ``finally`` clause. If the ``finally`` clause raises another\nexception or executes a ``return`` or ``break`` statement, the saved\nexception is lost. The exception information is not available to the\nprogram during execution of the ``finally`` clause.\n\nWhen a ``return``, ``break`` or ``continue`` statement is executed in\nthe ``try`` suite of a ``try``...``finally`` statement, the\n``finally`` clause is also executed \'on the way out.\' A ``continue``\nstatement is illegal in the ``finally`` clause. (The reason is a\nproblem with the current implementation --- this restriction may be\nlifted in the future).\n\nAdditional information on exceptions can be found in section\n*Exceptions*, and information on using the ``raise`` statement to\ngenerate exceptions may be found in section *The raise statement*.\n', 'types': u'\nThe standard type hierarchy\n***************************\n\nBelow is a list of the types that are built into Python. Extension\nmodules (written in C, Java, or other languages, depending on the\nimplementation) can define additional types. Future versions of\nPython may add types to the type hierarchy (e.g., rational numbers,\nefficiently stored arrays of integers, etc.).\n\nSome of the type descriptions below contain a paragraph listing\n\'special attributes.\' These are attributes that provide access to the\nimplementation and are not intended for general use. Their definition\nmay change in the future.\n\nNone\n This type has a single value. There is a single object with this\n value. This object is accessed through the built-in name ``None``.\n It is used to signify the absence of a value in many situations,\n e.g., it is returned from functions that don\'t explicitly return\n anything. Its truth value is false.\n\nNotImplemented\n This type has a single value. There is a single object with this\n value. This object is accessed through the built-in name\n ``NotImplemented``. Numeric methods and rich comparison methods may\n return this value if they do not implement the operation for the\n operands provided. (The interpreter will then try the reflected\n operation, or some other fallback, depending on the operator.) Its\n truth value is true.\n\nEllipsis\n This type has a single value. There is a single object with this\n value. This object is accessed through the built-in name\n ``Ellipsis``. It is used to indicate the presence of the ``...``\n syntax in a slice. Its truth value is true.\n\n``numbers.Number``\n These are created by numeric literals and returned as results by\n arithmetic operators and arithmetic built-in functions. 
Numeric\n objects are immutable; once created their value never changes.\n Python numbers are of course strongly related to mathematical\n numbers, but subject to the limitations of numerical representation\n in computers.\n\n Python distinguishes between integers, floating point numbers, and\n complex numbers:\n\n ``numbers.Integral``\n These represent elements from the mathematical set of integers\n (positive and negative).\n\n There are three types of integers:\n\n Plain integers\n These represent numbers in the range -2147483648 through\n 2147483647. (The range may be larger on machines with a\n larger natural word size, but not smaller.) When the result\n of an operation would fall outside this range, the result is\n normally returned as a long integer (in some cases, the\n exception ``OverflowError`` is raised instead). For the\n purpose of shift and mask operations, integers are assumed to\n have a binary, 2\'s complement notation using 32 or more bits,\n and hiding no bits from the user (i.e., all 4294967296\n different bit patterns correspond to different values).\n\n Long integers\n These represent numbers in an unlimited range, subject to\n available (virtual) memory only. For the purpose of shift\n and mask operations, a binary representation is assumed, and\n negative numbers are represented in a variant of 2\'s\n complement which gives the illusion of an infinite string of\n sign bits extending to the left.\n\n Booleans\n These represent the truth values False and True. The two\n objects representing the values False and True are the only\n Boolean objects. The Boolean type is a subtype of plain\n integers, and Boolean values behave like the values 0 and 1,\n respectively, in almost all contexts, the exception being\n that when converted to a string, the strings ``"False"`` or\n ``"True"`` are returned, respectively.\n\n The rules for integer representation are intended to give the\n most meaningful interpretation of shift and mask operations\n involving negative integers and the least surprises when\n switching between the plain and long integer domains. Any\n operation, if it yields a result in the plain integer domain,\n will yield the same result in the long integer domain or when\n using mixed operands. The switch between domains is transparent\n to the programmer.\n\n ``numbers.Real`` (``float``)\n These represent machine-level double precision floating point\n numbers. You are at the mercy of the underlying machine\n architecture (and C or Java implementation) for the accepted\n range and handling of overflow. Python does not support single-\n precision floating point numbers; the savings in processor and\n memory usage that are usually the reason for using these is\n dwarfed by the overhead of using objects in Python, so there is\n no reason to complicate the language with two kinds of floating\n point numbers.\n\n ``numbers.Complex``\n These represent complex numbers as a pair of machine-level\n double precision floating point numbers. The same caveats apply\n as for floating point numbers. The real and imaginary parts of a\n complex number ``z`` can be retrieved through the read-only\n attributes ``z.real`` and ``z.imag``.\n\nSequences\n These represent finite ordered sets indexed by non-negative\n numbers. The built-in function ``len()`` returns the number of\n items of a sequence. When the length of a sequence is *n*, the\n index set contains the numbers 0, 1, ..., *n*-1. 
Item *i* of\n sequence *a* is selected by ``a[i]``.\n\n Sequences also support slicing: ``a[i:j]`` selects all items with\n index *k* such that *i* ``<=`` *k* ``<`` *j*. When used as an\n expression, a slice is a sequence of the same type. This implies\n that the index set is renumbered so that it starts at 0.\n\n Some sequences also support "extended slicing" with a third "step"\n parameter: ``a[i:j:k]`` selects all items of *a* with index *x*\n where ``x = i + n*k``, *n* ``>=`` ``0`` and *i* ``<=`` *x* ``<``\n *j*.\n\n Sequences are distinguished according to their mutability:\n\n Immutable sequences\n An object of an immutable sequence type cannot change once it is\n created. (If the object contains references to other objects,\n these other objects may be mutable and may be changed; however,\n the collection of objects directly referenced by an immutable\n object cannot change.)\n\n The following types are immutable sequences:\n\n Strings\n The items of a string are characters. There is no separate\n character type; a character is represented by a string of one\n item. Characters represent (at least) 8-bit bytes. The\n built-in functions ``chr()`` and ``ord()`` convert between\n characters and nonnegative integers representing the byte\n values. Bytes with the values 0-127 usually represent the\n corresponding ASCII values, but the interpretation of values\n is up to the program. The string data type is also used to\n represent arrays of bytes, e.g., to hold data read from a\n file.\n\n (On systems whose native character set is not ASCII, strings\n may use EBCDIC in their internal representation, provided the\n functions ``chr()`` and ``ord()`` implement a mapping between\n ASCII and EBCDIC, and string comparison preserves the ASCII\n order. Or perhaps someone can propose a better rule?)\n\n Unicode\n The items of a Unicode object are Unicode code units. A\n Unicode code unit is represented by a Unicode object of one\n item and can hold either a 16-bit or 32-bit value\n representing a Unicode ordinal (the maximum value for the\n ordinal is given in ``sys.maxunicode``, and depends on how\n Python is configured at compile time). Surrogate pairs may\n be present in the Unicode object, and will be reported as two\n separate items. The built-in functions ``unichr()`` and\n ``ord()`` convert between code units and nonnegative integers\n representing the Unicode ordinals as defined in the Unicode\n Standard 3.0. Conversion from and to other encodings are\n possible through the Unicode method ``encode()`` and the\n built-in function ``unicode()``.\n\n Tuples\n The items of a tuple are arbitrary Python objects. Tuples of\n two or more items are formed by comma-separated lists of\n expressions. A tuple of one item (a \'singleton\') can be\n formed by affixing a comma to an expression (an expression by\n itself does not create a tuple, since parentheses must be\n usable for grouping of expressions). An empty tuple can be\n formed by an empty pair of parentheses.\n\n Mutable sequences\n Mutable sequences can be changed after they are created. The\n subscription and slicing notations can be used as the target of\n assignment and ``del`` (delete) statements.\n\n There is currently a single intrinsic mutable sequence type:\n\n Lists\n The items of a list are arbitrary Python objects. Lists are\n formed by placing a comma-separated list of expressions in\n square brackets. 
(Note that there are no special cases needed\n to form lists of length 0 or 1.)\n\n The extension module ``array`` provides an additional example of\n a mutable sequence type.\n\nSet types\n These represent unordered, finite sets of unique, immutable\n objects. As such, they cannot be indexed by any subscript. However,\n they can be iterated over, and the built-in function ``len()``\n returns the number of items in a set. Common uses for sets are fast\n membership testing, removing duplicates from a sequence, and\n computing mathematical operations such as intersection, union,\n difference, and symmetric difference.\n\n For set elements, the same immutability rules apply as for\n dictionary keys. Note that numeric types obey the normal rules for\n numeric comparison: if two numbers compare equal (e.g., ``1`` and\n ``1.0``), only one of them can be contained in a set.\n\n There are currently two intrinsic set types:\n\n Sets\n These represent a mutable set. They are created by the built-in\n ``set()`` constructor and can be modified afterwards by several\n methods, such as ``add()``.\n\n Frozen sets\n These represent an immutable set. They are created by the\n built-in ``frozenset()`` constructor. As a frozenset is\n immutable and *hashable*, it can be used again as an element of\n another set, or as a dictionary key.\n\nMappings\n These represent finite sets of objects indexed by arbitrary index\n sets. The subscript notation ``a[k]`` selects the item indexed by\n ``k`` from the mapping ``a``; this can be used in expressions and\n as the target of assignments or ``del`` statements. The built-in\n function ``len()`` returns the number of items in a mapping.\n\n There is currently a single intrinsic mapping type:\n\n Dictionaries\n These represent finite sets of objects indexed by nearly\n arbitrary values. The only types of values not acceptable as\n keys are values containing lists or dictionaries or other\n mutable types that are compared by value rather than by object\n identity, the reason being that the efficient implementation of\n dictionaries requires a key\'s hash value to remain constant.\n Numeric types used for keys obey the normal rules for numeric\n comparison: if two numbers compare equal (e.g., ``1`` and\n ``1.0``) then they can be used interchangeably to index the same\n dictionary entry.\n\n Dictionaries are mutable; they can be created by the ``{...}``\n notation (see section *Dictionary displays*).\n\n The extension modules ``dbm``, ``gdbm``, and ``bsddb`` provide\n additional examples of mapping types.\n\nCallable types\n These are the types to which the function call operation (see\n section *Calls*) can be applied:\n\n User-defined functions\n A user-defined function object is created by a function\n definition (see section *Function definitions*). 
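As a minimal sketch (assuming a CPython 2.x interpreter; the name ``scale`` is purely illustrative):\n\n >>> def scale(x, factor=2):\n ...     return x * factor\n ...\n >>> scale(3)\n 6\n >>> scale(3, factor=10)\n 30\n\n 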
It should be\n called with an argument list containing the same number of items\n as the function\'s formal parameter list.\n\n Special attributes:\n\n +-------------------------+---------------------------------+-------------+\n | Attribute | Meaning | |\n +=========================+=================================+=============+\n | ``func_doc`` | The function\'s documentation | Writable |\n | | string, or ``None`` if | |\n | | unavailable | |\n +-------------------------+---------------------------------+-------------+\n | ``__doc__`` | Another way of spelling | Writable |\n | | ``func_doc`` | |\n +-------------------------+---------------------------------+-------------+\n | ``func_name`` | The function\'s name | Writable |\n +-------------------------+---------------------------------+-------------+\n | ``__name__`` | Another way of spelling | Writable |\n | | ``func_name`` | |\n +-------------------------+---------------------------------+-------------+\n | ``__module__`` | The name of the module the | Writable |\n | | function was defined in, or | |\n | | ``None`` if unavailable. | |\n +-------------------------+---------------------------------+-------------+\n | ``func_defaults`` | A tuple containing default | Writable |\n | | argument values for those | |\n | | arguments that have defaults, | |\n | | or ``None`` if no arguments | |\n | | have a default value | |\n +-------------------------+---------------------------------+-------------+\n | ``func_code`` | The code object representing | Writable |\n | | the compiled function body. | |\n +-------------------------+---------------------------------+-------------+\n | ``func_globals`` | A reference to the dictionary | Read-only |\n | | that holds the function\'s | |\n | | global variables --- the global | |\n | | namespace of the module in | |\n | | which the function was defined. | |\n +-------------------------+---------------------------------+-------------+\n | ``func_dict`` | The namespace supporting | Writable |\n | | arbitrary function attributes. | |\n +-------------------------+---------------------------------+-------------+\n | ``func_closure`` | ``None`` or a tuple of cells | Read-only |\n | | that contain bindings for the | |\n | | function\'s free variables. | |\n +-------------------------+---------------------------------+-------------+\n\n Most of the attributes labelled "Writable" check the type of the\n assigned value.\n\n Changed in version 2.4: ``func_name`` is now writable.\n\n Function objects also support getting and setting arbitrary\n attributes, which can be used, for example, to attach metadata\n to functions. Regular attribute dot-notation is used to get and\n set such attributes. *Note that the current implementation only\n supports function attributes on user-defined functions. 
Function\n attributes on built-in functions may be supported in the\n future.*\n\n Additional information about a function\'s definition can be\n retrieved from its code object; see the description of internal\n types below.\n\n User-defined methods\n A user-defined method object combines a class, a class instance\n (or ``None``) and any callable object (normally a user-defined\n function).\n\n Special read-only attributes: ``im_self`` is the class instance\n object, ``im_func`` is the function object; ``im_class`` is the\n class of ``im_self`` for bound methods or the class that asked\n for the method for unbound methods; ``__doc__`` is the method\'s\n documentation (same as ``im_func.__doc__``); ``__name__`` is the\n method name (same as ``im_func.__name__``); ``__module__`` is\n the name of the module the method was defined in, or ``None`` if\n unavailable.\n\n Changed in version 2.2: ``im_self`` used to refer to the class\n that defined the method.\n\n Changed in version 2.6: For 3.0 forward-compatibility,\n ``im_func`` is also available as ``__func__``, and ``im_self``\n as ``__self__``.\n\n Methods also support accessing (but not setting) the arbitrary\n function attributes on the underlying function object.\n\n User-defined method objects may be created when getting an\n attribute of a class (perhaps via an instance of that class), if\n that attribute is a user-defined function object, an unbound\n user-defined method object, or a class method object. When the\n attribute is a user-defined method object, a new method object\n is only created if the class from which it is being retrieved is\n the same as, or a derived class of, the class stored in the\n original method object; otherwise, the original method object is\n used as it is.\n\n When a user-defined method object is created by retrieving a\n user-defined function object from a class, its ``im_self``\n attribute is ``None`` and the method object is said to be\n unbound. When one is created by retrieving a user-defined\n function object from a class via one of its instances, its\n ``im_self`` attribute is the instance, and the method object is\n said to be bound. In either case, the new method\'s ``im_class``\n attribute is the class from which the retrieval takes place, and\n its ``im_func`` attribute is the original function object.\n\n When a user-defined method object is created by retrieving\n another method object from a class or instance, the behaviour is\n the same as for a function object, except that the ``im_func``\n attribute of the new instance is not the original method object\n but its ``im_func`` attribute.\n\n When a user-defined method object is created by retrieving a\n class method object from a class or instance, its ``im_self``\n attribute is the class itself (the same as the ``im_class``\n attribute), and its ``im_func`` attribute is the function object\n underlying the class method.\n\n When an unbound user-defined method object is called, the\n underlying function (``im_func``) is called, with the\n restriction that the first argument must be an instance of the\n proper class (``im_class``) or of a derived class thereof.\n\n When a bound user-defined method object is called, the\n underlying function (``im_func``) is called, inserting the class\n instance (``im_self``) in front of the argument list. 
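A short interactive illustration of the bound and unbound cases (assuming CPython 2.x; the names ``C`` and ``f`` are illustrative):\n\n >>> class C(object):\n ...     def f(self, arg):\n ...         return arg\n ...\n >>> C.f.im_self is None          # unbound: retrieved from the class\n True\n >>> x = C()\n >>> x.f.im_self is x             # bound: retrieved via an instance\n True\n >>> x.f.im_func is C.f.im_func   # both wrap the same function object\n True\n\n 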
For\n instance, when ``C`` is a class which contains a definition for\n a function ``f()``, and ``x`` is an instance of ``C``, calling\n ``x.f(1)`` is equivalent to calling ``C.f(x, 1)``.\n\n When a user-defined method object is derived from a class method\n object, the "class instance" stored in ``im_self`` will actually\n be the class itself, so that calling either ``x.f(1)`` or\n ``C.f(1)`` is equivalent to calling ``f(C,1)`` where ``f`` is\n the underlying function.\n\n Note that the transformation from function object to (unbound or\n bound) method object happens each time the attribute is\n retrieved from the class or instance. In some cases, a fruitful\n optimization is to assign the attribute to a local variable and\n call that local variable. Also notice that this transformation\n only happens for user-defined functions; other callable objects\n (and all non-callable objects) are retrieved without\n transformation. It is also important to note that user-defined\n functions which are attributes of a class instance are not\n converted to bound methods; this *only* happens when the\n function is an attribute of the class.\n\n Generator functions\n A function or method which uses the ``yield`` statement (see\n section *The yield statement*) is called a *generator function*.\n Such a function, when called, always returns an iterator object\n which can be used to execute the body of the function: calling\n the iterator\'s ``next()`` method will cause the function to\n execute until it provides a value using the ``yield`` statement.\n When the function executes a ``return`` statement or falls off\n the end, a ``StopIteration`` exception is raised and the\n iterator will have reached the end of the set of values to be\n returned.\n\n Built-in functions\n A built-in function object is a wrapper around a C function.\n Examples of built-in functions are ``len()`` and ``math.sin()``\n (``math`` is a standard built-in module). The number and type of\n the arguments are determined by the C function. Special read-\n only attributes: ``__doc__`` is the function\'s documentation\n string, or ``None`` if unavailable; ``__name__`` is the\n function\'s name; ``__self__`` is set to ``None`` (but see the\n next item); ``__module__`` is the name of the module the\n function was defined in or ``None`` if unavailable.\n\n Built-in methods\n This is really a different disguise of a built-in function, this\n time containing an object passed to the C function as an\n implicit extra argument. An example of a built-in method is\n ``alist.append()``, assuming *alist* is a list object. In this\n case, the special read-only attribute ``__self__`` is set to the\n object denoted by *alist*.\n\n Class Types\n Class types, or "new-style classes," are callable. These\n objects normally act as factories for new instances of\n themselves, but variations are possible for class types that\n override ``__new__()``. The arguments of the call are passed to\n ``__new__()`` and, in the typical case, to ``__init__()`` to\n initialize the new instance.\n\n Classic Classes\n Class objects are described below. When a class object is\n called, a new class instance (also described below) is created\n and returned. This implies a call to the class\'s ``__init__()``\n method if it has one. Any arguments are passed on to the\n ``__init__()`` method. If there is no ``__init__()`` method,\n the class must be called without arguments.\n\n Class instances\n Class instances are described below. 
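As a quick sketch of the ``__call__()`` hook described next (assuming CPython 2.x; ``Adder`` is an illustrative name):\n\n >>> class Adder(object):\n ...     def __init__(self, n):\n ...         self.n = n\n ...     def __call__(self, x):\n ...         return self.n + x\n ...\n >>> add3 = Adder(3)\n >>> add3(4)\n 7\n\n 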
Class instances are\n callable only when the class has a ``__call__()`` method;\n ``x(arguments)`` is a shorthand for ``x.__call__(arguments)``.\n\nModules\n Modules are imported by the ``import`` statement (see section *The\n import statement*). A module object has a namespace implemented by\n a dictionary object (this is the dictionary referenced by the\n func_globals attribute of functions defined in the module).\n Attribute references are translated to lookups in this dictionary,\n e.g., ``m.x`` is equivalent to ``m.__dict__["x"]``. A module object\n does not contain the code object used to initialize the module\n (since it isn\'t needed once the initialization is done).\n\n Attribute assignment updates the module\'s namespace dictionary,\n e.g., ``m.x = 1`` is equivalent to ``m.__dict__["x"] = 1``.\n\n Special read-only attribute: ``__dict__`` is the module\'s namespace\n as a dictionary object.\n\n Predefined (writable) attributes: ``__name__`` is the module\'s\n name; ``__doc__`` is the module\'s documentation string, or ``None``\n if unavailable; ``__file__`` is the pathname of the file from which\n the module was loaded, if it was loaded from a file. The\n ``__file__`` attribute is not present for C modules that are\n statically linked into the interpreter; for extension modules\n loaded dynamically from a shared library, it is the pathname of the\n shared library file.\n\nClasses\n Both class types (new-style classes) and class objects (old-\n style/classic classes) are typically created by class definitions\n (see section *Class definitions*). A class has a namespace\n implemented by a dictionary object. Class attribute references are\n translated to lookups in this dictionary, e.g., ``C.x`` is\n translated to ``C.__dict__["x"]`` (although for new-style classes\n in particular there are a number of hooks which allow for other\n means of locating attributes). When the attribute name is not found\n there, the attribute search continues in the base classes. For\n old-style classes, the search is depth-first, left-to-right in the\n order of occurrence in the base class list. New-style classes use\n the more complex C3 method resolution order which behaves correctly\n even in the presence of \'diamond\' inheritance structures where\n there are multiple inheritance paths leading back to a common\n ancestor. Additional details on the C3 MRO used by new-style\n classes can be found in the documentation accompanying the 2.3\n release at http://www.python.org/download/releases/2.3/mro/.\n\n When a class attribute reference (for class ``C``, say) would yield\n a user-defined function object or an unbound user-defined method\n object whose associated class is either ``C`` or one of its base\n classes, it is transformed into an unbound user-defined method\n object whose ``im_class`` attribute is ``C``. When it would yield a\n class method object, it is transformed into a bound user-defined\n method object whose ``im_class`` and ``im_self`` attributes are\n both ``C``. 
When it would yield a static method object, it is\n transformed into the object wrapped by the static method object.\n See section *Implementing Descriptors* for another way in which\n attributes retrieved from a class may differ from those actually\n contained in its ``__dict__`` (note that only new-style classes\n support descriptors).\n\n Class attribute assignments update the class\'s dictionary, never\n the dictionary of a base class.\n\n A class object can be called (see above) to yield a class instance\n (see below).\n\n Special attributes: ``__name__`` is the class name; ``__module__``\n is the module name in which the class was defined; ``__dict__`` is\n the dictionary containing the class\'s namespace; ``__bases__`` is a\n tuple (possibly empty or a singleton) containing the base classes,\n in the order of their occurrence in the base class list;\n ``__doc__`` is the class\'s documentation string, or None if\n undefined.\n\nClass instances\n A class instance is created by calling a class object (see above).\n A class instance has a namespace implemented as a dictionary which\n is the first place in which attribute references are searched.\n When an attribute is not found there, and the instance\'s class has\n an attribute by that name, the search continues with the class\n attributes. If a class attribute is found that is a user-defined\n function object or an unbound user-defined method object whose\n associated class is the class (call it ``C``) of the instance for\n which the attribute reference was initiated or one of its bases, it\n is transformed into a bound user-defined method object whose\n ``im_class`` attribute is ``C`` and whose ``im_self`` attribute is\n the instance. Static method and class method objects are also\n transformed, as if they had been retrieved from class ``C``; see\n above under "Classes". See section *Implementing Descriptors* for\n another way in which attributes of a class retrieved via its\n instances may differ from the objects actually stored in the\n class\'s ``__dict__``. If no class attribute is found, and the\n object\'s class has a ``__getattr__()`` method, that is called to\n satisfy the lookup.\n\n Attribute assignments and deletions update the instance\'s\n dictionary, never a class\'s dictionary. If the class has a\n ``__setattr__()`` or ``__delattr__()`` method, this is called\n instead of updating the instance dictionary directly.\n\n Class instances can pretend to be numbers, sequences, or mappings\n if they have methods with certain special names. See section\n *Special method names*.\n\n Special attributes: ``__dict__`` is the attribute dictionary;\n ``__class__`` is the instance\'s class.\n\nFiles\n A file object represents an open file. File objects are created by\n the ``open()`` built-in function, and also by ``os.popen()``,\n ``os.fdopen()``, and the ``makefile()`` method of socket objects\n (and perhaps by other functions or methods provided by extension\n modules). The objects ``sys.stdin``, ``sys.stdout`` and\n ``sys.stderr`` are initialized to file objects corresponding to the\n interpreter\'s standard input, output and error streams. See *File\n Objects* for complete documentation of file objects.\n\nInternal types\n A few types used internally by the interpreter are exposed to the\n user. Their definitions may change with future versions of the\n interpreter, but they are mentioned here for completeness.\n\n Code objects\n Code objects represent *byte-compiled* executable Python code,\n or *bytecode*. 
The difference between a code object and a\n function object is that the function object contains an explicit\n reference to the function\'s globals (the module in which it was\n defined), while a code object contains no context; also the\n default argument values are stored in the function object, not\n in the code object (because they represent values calculated at\n run-time). Unlike function objects, code objects are immutable\n and contain no references (directly or indirectly) to mutable\n objects.\n\n Special read-only attributes: ``co_name`` gives the function\n name; ``co_argcount`` is the number of positional arguments\n (including arguments with default values); ``co_nlocals`` is the\n number of local variables used by the function (including\n arguments); ``co_varnames`` is a tuple containing the names of\n the local variables (starting with the argument names);\n ``co_cellvars`` is a tuple containing the names of local\n variables that are referenced by nested functions;\n ``co_freevars`` is a tuple containing the names of free\n variables; ``co_code`` is a string representing the sequence of\n bytecode instructions; ``co_consts`` is a tuple containing the\n literals used by the bytecode; ``co_names`` is a tuple\n containing the names used by the bytecode; ``co_filename`` is\n the filename from which the code was compiled;\n ``co_firstlineno`` is the first line number of the function;\n ``co_lnotab`` is a string encoding the mapping from bytecode\n offsets to line numbers (for details see the source code of the\n interpreter); ``co_stacksize`` is the required stack size\n (including local variables); ``co_flags`` is an integer encoding\n a number of flags for the interpreter.\n\n The following flag bits are defined for ``co_flags``: bit\n ``0x04`` is set if the function uses the ``*arguments`` syntax\n to accept an arbitrary number of positional arguments; bit\n ``0x08`` is set if the function uses the ``**keywords`` syntax\n to accept arbitrary keyword arguments; bit ``0x20`` is set if\n the function is a generator.\n\n Future feature declarations (``from __future__ import\n division``) also use bits in ``co_flags`` to indicate whether a\n code object was compiled with a particular feature enabled: bit\n ``0x2000`` is set if the function was compiled with future\n division enabled; bits ``0x10`` and ``0x1000`` were used in\n earlier versions of Python.\n\n Other bits in ``co_flags`` are reserved for internal use.\n\n If a code object represents a function, the first item in\n ``co_consts`` is the documentation string of the function, or\n ``None`` if undefined.\n\n Frame objects\n Frame objects represent execution frames. 
They may occur in\n traceback objects (see below).\n\n Special read-only attributes: ``f_back`` is to the previous\n stack frame (towards the caller), or ``None`` if this is the\n bottom stack frame; ``f_code`` is the code object being executed\n in this frame; ``f_locals`` is the dictionary used to look up\n local variables; ``f_globals`` is used for global variables;\n ``f_builtins`` is used for built-in (intrinsic) names;\n ``f_restricted`` is a flag indicating whether the function is\n executing in restricted execution mode; ``f_lasti`` gives the\n precise instruction (this is an index into the bytecode string\n of the code object).\n\n Special writable attributes: ``f_trace``, if not ``None``, is a\n function called at the start of each source code line (this is\n used by the debugger); ``f_exc_type``, ``f_exc_value``,\n ``f_exc_traceback`` represent the last exception raised in the\n parent frame provided another exception was ever raised in the\n current frame (in all other cases they are None); ``f_lineno``\n is the current line number of the frame --- writing to this from\n within a trace function jumps to the given line (only for the\n bottom-most frame). A debugger can implement a Jump command\n (aka Set Next Statement) by writing to f_lineno.\n\n Traceback objects\n Traceback objects represent a stack trace of an exception. A\n traceback object is created when an exception occurs. When the\n search for an exception handler unwinds the execution stack, at\n each unwound level a traceback object is inserted in front of\n the current traceback. When an exception handler is entered,\n the stack trace is made available to the program. (See section\n *The try statement*.) It is accessible as ``sys.exc_traceback``,\n and also as the third item of the tuple returned by\n ``sys.exc_info()``. The latter is the preferred interface,\n since it works correctly when the program is using multiple\n threads. When the program contains no suitable handler, the\n stack trace is written (nicely formatted) to the standard error\n stream; if the interpreter is interactive, it is also made\n available to the user as ``sys.last_traceback``.\n\n Special read-only attributes: ``tb_next`` is the next level in\n the stack trace (towards the frame where the exception\n occurred), or ``None`` if there is no next level; ``tb_frame``\n points to the execution frame of the current level;\n ``tb_lineno`` gives the line number where the exception\n occurred; ``tb_lasti`` indicates the precise instruction. The\n line number and last instruction in the traceback may differ\n from the line number of its frame object if the exception\n occurred in a ``try`` statement with no matching except clause\n or with a finally clause.\n\n Slice objects\n Slice objects are used to represent slices when *extended slice\n syntax* is used. This is a slice using two colons, or multiple\n slices or ellipses separated by commas, e.g., ``a[i:j:step]``,\n ``a[i:j, k:l]``, or ``a[..., i:j]``. They are also created by\n the built-in ``slice()`` function.\n\n Special read-only attributes: ``start`` is the lower bound;\n ``stop`` is the upper bound; ``step`` is the step value; each is\n ``None`` if omitted. These attributes can have any type.\n\n Slice objects support one method:\n\n slice.indices(self, length)\n\n This method takes a single integer argument *length* and\n computes information about the extended slice that the slice\n object would describe if applied to a sequence of *length*\n items. 
It returns a tuple of three integers; respectively\n these are the *start* and *stop* indices and the *step* or\n stride length of the slice. Missing or out-of-bounds indices\n are handled in a manner consistent with regular slices.\n\n New in version 2.3.\n\n Static method objects\n Static method objects provide a way of defeating the\n transformation of function objects to method objects described\n above. A static method object is a wrapper around any other\n object, usually a user-defined method object. When a static\n method object is retrieved from a class or a class instance, the\n object actually returned is the wrapped object, which is not\n subject to any further transformation. Static method objects are\n not themselves callable, although the objects they wrap usually\n are. Static method objects are created by the built-in\n ``staticmethod()`` constructor.\n\n Class method objects\n A class method object, like a static method object, is a wrapper\n around another object that alters the way in which that object\n is retrieved from classes and class instances. The behaviour of\n class method objects upon such retrieval is described above,\n under "User-defined methods". Class method objects are created\n by the built-in ``classmethod()`` constructor.\n', 'typesfunctions': u'\nFunctions\n*********\n\nFunction objects are created by function definitions. The only\noperation on a function object is to call it: ``func(argument-list)``.\n\nThere are really two flavors of function objects: built-in functions\nand user-defined functions. Both support the same operation (to call\nthe function), but the implementation is different, hence the\ndifferent object types.\n\nSee *Function definitions* for more information.\n', 'typesmapping': u'\nMapping Types --- ``dict``\n**************************\n\nA *mapping* object maps *hashable* values to arbitrary objects.\nMappings are mutable objects. There is currently only one standard\nmapping type, the *dictionary*. (For other containers see the built\nin ``list``, ``set``, and ``tuple`` classes, and the ``collections``\nmodule.)\n\nA dictionary\'s keys are *almost* arbitrary values. Values that are\nnot *hashable*, that is, values containing lists, dictionaries or\nother mutable types (that are compared by value rather than by object\nidentity) may not be used as keys. Numeric types used for keys obey\nthe normal rules for numeric comparison: if two numbers compare equal\n(such as ``1`` and ``1.0``) then they can be used interchangeably to\nindex the same dictionary entry. (Note however, that since computers\nstore floating-point numbers as approximations it is usually unwise to\nuse them as dictionary keys.)\n\nDictionaries can be created by placing a comma-separated list of\n``key: value`` pairs within braces, for example: ``{\'jack\': 4098,\n\'sjoerd\': 4127}`` or ``{4098: \'jack\', 4127: \'sjoerd\'}``, or by the\n``dict`` constructor.\n\nclass dict([arg])\n\n Return a new dictionary initialized from an optional positional\n argument or from a set of keyword arguments. If no arguments are\n given, return a new empty dictionary. If the positional argument\n *arg* is a mapping object, return a dictionary mapping the same\n keys to the same values as does the mapping object. Otherwise the\n positional argument must be a sequence, a container that supports\n iteration, or an iterator object. The elements of the argument\n must each also be of one of those kinds, and each must in turn\n contain exactly two objects. 
The first is used as a key in the new\n dictionary, and the second as the key\'s value. If a given key is\n seen more than once, the last value associated with it is retained\n in the new dictionary.\n\n If keyword arguments are given, the keywords themselves with their\n associated values are added as items to the dictionary. If a key is\n specified both in the positional argument and as a keyword\n argument, the value associated with the keyword is retained in the\n dictionary. For example, these all return a dictionary equal to\n ``{"one": 2, "two": 3}``:\n\n * ``dict(one=2, two=3)``\n\n * ``dict({\'one\': 2, \'two\': 3})``\n\n * ``dict(zip((\'one\', \'two\'), (2, 3)))``\n\n * ``dict([[\'two\', 3], [\'one\', 2]])``\n\n The first example only works for keys that are valid Python\n identifiers; the others work with any valid keys.\n\n New in version 2.2.\n\n Changed in version 2.3: Support for building a dictionary from\n keyword arguments added.\n\n These are the operations that dictionaries support (and therefore,\n custom mapping types should support too):\n\n len(d)\n\n Return the number of items in the dictionary *d*.\n\n d[key]\n\n Return the item of *d* with key *key*. Raises a ``KeyError`` if\n *key* is not in the map.\n\n New in version 2.5: If a subclass of dict defines a method\n ``__missing__()``, if the key *key* is not present, the\n ``d[key]`` operation calls that method with the key *key* as\n argument. The ``d[key]`` operation then returns or raises\n whatever is returned or raised by the ``__missing__(key)`` call\n if the key is not present. No other operations or methods invoke\n ``__missing__()``. If ``__missing__()`` is not defined,\n ``KeyError`` is raised. ``__missing__()`` must be a method; it\n cannot be an instance variable. For an example, see\n ``collections.defaultdict``.\n\n d[key] = value\n\n Set ``d[key]`` to *value*.\n\n del d[key]\n\n Remove ``d[key]`` from *d*. Raises a ``KeyError`` if *key* is\n not in the map.\n\n key in d\n\n Return ``True`` if *d* has a key *key*, else ``False``.\n\n New in version 2.2.\n\n key not in d\n\n Equivalent to ``not key in d``.\n\n New in version 2.2.\n\n iter(d)\n\n Return an iterator over the keys of the dictionary. This is a\n shortcut for ``iterkeys()``.\n\n clear()\n\n Remove all items from the dictionary.\n\n copy()\n\n Return a shallow copy of the dictionary.\n\n fromkeys(seq[, value])\n\n Create a new dictionary with keys from *seq* and values set to\n *value*.\n\n ``fromkeys()`` is a class method that returns a new dictionary.\n *value* defaults to ``None``.\n\n New in version 2.3.\n\n get(key[, default])\n\n Return the value for *key* if *key* is in the dictionary, else\n *default*. If *default* is not given, it defaults to ``None``,\n so that this method never raises a ``KeyError``.\n\n has_key(key)\n\n Test for the presence of *key* in the dictionary. ``has_key()``\n is deprecated in favor of ``key in d``.\n\n items()\n\n Return a copy of the dictionary\'s list of ``(key, value)``\n pairs.\n\n Note: Keys and values are listed in an arbitrary order which is non-\n random, varies across Python implementations, and depends on\n the dictionary\'s history of insertions and deletions. If\n ``items()``, ``keys()``, ``values()``, ``iteritems()``,\n ``iterkeys()``, and ``itervalues()`` are called with no\n intervening modifications to the dictionary, the lists will\n directly correspond. This allows the creation of ``(value,\n key)`` pairs using ``zip()``: ``pairs = zip(d.values(),\n d.keys())``. 
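For instance (a sketch, assuming CPython 2.x and no intervening modifications to the dictionary):\n\n >>> d = {\'jack\': 4098, \'sjoerd\': 4127}\n >>> pairs = zip(d.values(), d.keys())\n >>> sorted(pairs) == sorted((v, k) for (k, v) in d.items())\n True\n\n 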
The same relationship holds for the\n ``iterkeys()`` and ``itervalues()`` methods: ``pairs =\n zip(d.itervalues(), d.iterkeys())`` provides the same value\n for ``pairs``. Another way to create the same list is ``pairs\n = [(v, k) for (k, v) in d.iteritems()]``.\n\n iteritems()\n\n Return an iterator over the dictionary\'s ``(key, value)`` pairs.\n See the note for ``dict.items()``.\n\n Using ``iteritems()`` while adding or deleting entries in the\n dictionary will raise a ``RuntimeError``.\n\n New in version 2.2.\n\n iterkeys()\n\n Return an iterator over the dictionary\'s keys. See the note for\n ``dict.items()``.\n\n Using ``iterkeys()`` while adding or deleting entries in the\n dictionary will raise a ``RuntimeError``.\n\n New in version 2.2.\n\n itervalues()\n\n Return an iterator over the dictionary\'s values. See the note\n for ``dict.items()``.\n\n Using ``itervalues()`` while adding or deleting entries in the\n dictionary will raise a ``RuntimeError``.\n\n New in version 2.2.\n\n keys()\n\n Return a copy of the dictionary\'s list of keys. See the note\n for ``dict.items()``.\n\n pop(key[, default])\n\n If *key* is in the dictionary, remove it and return its value,\n else return *default*. If *default* is not given and *key* is\n not in the dictionary, a ``KeyError`` is raised.\n\n New in version 2.3.\n\n popitem()\n\n Remove and return an arbitrary ``(key, value)`` pair from the\n dictionary.\n\n ``popitem()`` is useful to destructively iterate over a\n dictionary, as often used in set algorithms. If the dictionary\n is empty, calling ``popitem()`` raises a ``KeyError``.\n\n setdefault(key[, default])\n\n If *key* is in the dictionary, return its value. If not, insert\n *key* with a value of *default* and return *default*. *default*\n defaults to ``None``.\n\n update([other])\n\n Update the dictionary with the key/value pairs from *other*,\n overwriting existing keys. Return ``None``.\n\n ``update()`` accepts either another dictionary object or an\n iterable of key/value pairs (as a tuple or other iterable of\n length two). If keyword arguments are specified, the dictionary\n is then updated with those key/value pairs: ``d.update(red=1,\n blue=2)``.\n\n Changed in version 2.4: Allowed the argument to be an iterable\n of key/value pairs and allowed keyword arguments.\n\n values()\n\n Return a copy of the dictionary\'s list of values. See the note\n for ``dict.items()``.\n', 'typesmethods': u"\nMethods\n*******\n\nMethods are functions that are called using the attribute notation.\nThere are two flavors: built-in methods (such as ``append()`` on\nlists) and class instance methods. Built-in methods are described\nwith the types that support them.\n\nThe implementation adds two special read-only attributes to class\ninstance methods: ``m.im_self`` is the object on which the method\noperates, and ``m.im_func`` is the function implementing the method.\nCalling ``m(arg-1, arg-2, ..., arg-n)`` is completely equivalent to\ncalling ``m.im_func(m.im_self, arg-1, arg-2, ..., arg-n)``.\n\nClass instance methods are either *bound* or *unbound*, referring to\nwhether the method was accessed through an instance or a class,\nrespectively. When a method is unbound, its ``im_self`` attribute\nwill be ``None`` and if called, an explicit ``self`` object must be\npassed as the first argument. 
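A brief sketch of both flavors (assuming CPython 2.x; ``C`` is an illustrative name):\n\n >>> class C:\n ...     def greet(self):\n ...         return 'hello'\n ...\n >>> c = C()\n >>> c.greet()        # bound: self is supplied implicitly\n 'hello'\n >>> C.greet(c)       # unbound: self is passed explicitly\n 'hello'\n >>> C.greet('oops')\n Traceback (most recent call last):\n   ...\n TypeError: unbound method greet() must be called with C instance as first argument (got str instance instead)\n\n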
In this case, ``self`` must be an\ninstance of the unbound method's class (or a subclass of that class),\notherwise a ``TypeError`` is raised.\n\nLike function objects, method objects support getting arbitrary\nattributes. However, since method attributes are actually stored on\nthe underlying function object (``meth.im_func``), setting method\nattributes on either bound or unbound methods is disallowed.\nAttempting to set a method attribute results in a ``TypeError`` being\nraised. In order to set a method attribute, you need to explicitly\nset it on the underlying function object:\n\n class C:\n     def method(self):\n         pass\n\n c = C()\n c.method.im_func.whoami = 'my name is c'\n\nSee *The standard type hierarchy* for more information.\n", 'typesmodules': u"\nModules\n*******\n\nThe only special operation on a module is attribute access:\n``m.name``, where *m* is a module and *name* accesses a name defined\nin *m*'s symbol table. Module attributes can be assigned to. (Note\nthat the ``import`` statement is not, strictly speaking, an operation\non a module object; ``import foo`` does not require a module object\nnamed *foo* to exist, rather it requires an (external) *definition*\nfor a module named *foo* somewhere.)\n\nA special member of every module is ``__dict__``. This is the\ndictionary containing the module's symbol table. Modifying this\ndictionary will actually change the module's symbol table, but direct\nassignment to the ``__dict__`` attribute is not possible (you can\nwrite ``m.__dict__['a'] = 1``, which defines ``m.a`` to be ``1``, but\nyou can't write ``m.__dict__ = {}``). Modifying ``__dict__`` directly\nis not recommended.\n\nModules built into the interpreter are written like this: ``<module\n'sys' (built-in)>``. If loaded from a file, they are written as\n``<module 'os' from '/usr/local/lib/pythonX.Y/os.pyc'>``.\n", 'typesseq': u'\nSequence Types --- ``str``, ``unicode``, ``list``, ``tuple``, ``buffer``, ``xrange``\n************************************************************************************\n\nThere are six sequence types: strings, Unicode strings, lists, tuples,\nbuffers, and xrange objects.\n\nFor other containers see the built in ``dict`` and ``set`` classes,\nand the ``collections`` module.\n\nString literals are written in single or double quotes: ``\'xyzzy\'``,\n``"frobozz"``. See *String literals* for more about string literals.\nUnicode strings are much like strings, but are specified in the syntax\nusing a preceding ``\'u\'`` character: ``u\'abc\'``, ``u"def"``. In\naddition to the functionality described here, there are also string-\nspecific methods described in the *String Methods* section. Lists are\nconstructed with square brackets, separating items with commas: ``[a,\nb, c]``. Tuples are constructed by the comma operator (not within\nsquare brackets), with or without enclosing parentheses, but an empty\ntuple must have the enclosing parentheses, such as ``a, b, c`` or\n``()``. A single item tuple must have a trailing comma, such as\n``(d,)``.\n\nBuffer objects are not directly supported by Python syntax, but can be\ncreated by calling the builtin function ``buffer()``. They don\'t\nsupport concatenation or repetition.\n\nObjects of type xrange are similar to buffers in that there is no\nspecific syntax to create them, but they are created using the\n``xrange()`` function. 
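For example (a sketch, assuming CPython 2.x):\n\n >>> r = xrange(1000000)    # no million-element list is materialized\n >>> len(r)\n 1000000\n >>> r[0], r[999999]        # indexing is supported\n (0, 999999)\n >>> list(xrange(4))        # expand explicitly when a real list is needed\n [0, 1, 2, 3]\n\n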
They don\'t support slicing, concatenation or\nrepetition, and using ``in``, ``not in``, ``min()`` or ``max()`` on\nthem is inefficient.\n\nMost sequence types support the following operations. The ``in`` and\n``not in`` operations have the same priorities as the comparison\noperations. The ``+`` and ``*`` operations have the same priority as\nthe corresponding numeric operations. [3] Additional methods are\nprovided for *Mutable Sequence Types*.\n\nThis table lists the sequence operations sorted in ascending priority\n(operations in the same box have the same priority). In the table,\n*s* and *t* are sequences of the same type; *n*, *i* and *j* are\nintegers:\n\n+--------------------+----------------------------------+------------+\n| Operation | Result | Notes |\n+====================+==================================+============+\n| ``x in s`` | ``True`` if an item of *s* is | (1) |\n| | equal to *x*, else ``False`` | |\n+--------------------+----------------------------------+------------+\n| ``x not in s`` | ``False`` if an item of *s* is | (1) |\n| | equal to *x*, else ``True`` | |\n+--------------------+----------------------------------+------------+\n| ``s + t`` | the concatenation of *s* and *t* | (6) |\n+--------------------+----------------------------------+------------+\n| ``s * n, n * s`` | *n* shallow copies of *s* | (2) |\n| | concatenated | |\n+--------------------+----------------------------------+------------+\n| ``s[i]`` | *i*\'th item of *s*, origin 0 | (3) |\n+--------------------+----------------------------------+------------+\n| ``s[i:j]`` | slice of *s* from *i* to *j* | (3)(4) |\n+--------------------+----------------------------------+------------+\n| ``s[i:j:k]`` | slice of *s* from *i* to *j* | (3)(5) |\n| | with step *k* | |\n+--------------------+----------------------------------+------------+\n| ``len(s)`` | length of *s* | |\n+--------------------+----------------------------------+------------+\n| ``min(s)`` | smallest item of *s* | |\n+--------------------+----------------------------------+------------+\n| ``max(s)`` | largest item of *s* | |\n+--------------------+----------------------------------+------------+\n\nSequence types also support comparisons. In particular, tuples and\nlists are compared lexicographically by comparing corresponding\nelements. This means that to compare equal, every element must compare\nequal and the two sequences must be of the same type and have the same\nlength. (For full details see *Comparisons* in the language\nreference.)\n\nNotes:\n\n1. When *s* is a string or Unicode string object the ``in`` and ``not\n in`` operations act like a substring test. In Python versions\n before 2.3, *x* had to be a string of length 1. In Python 2.3 and\n beyond, *x* may be a string of any length.\n\n2. Values of *n* less than ``0`` are treated as ``0`` (which yields an\n empty sequence of the same type as *s*). Note also that the copies\n are shallow; nested structures are not copied. This often haunts\n new Python programmers; consider:\n\n >>> lists = [[]] * 3\n >>> lists\n [[], [], []]\n >>> lists[0].append(3)\n >>> lists\n [[3], [3], [3]]\n\n What has happened is that ``[[]]`` is a one-element list containing\n an empty list, so all three elements of ``[[]] * 3`` are (pointers\n to) this single empty list. Modifying any of the elements of\n ``lists`` modifies this single list. 
You can create a list of\n different lists this way:\n\n >>> lists = [[] for i in range(3)]\n >>> lists[0].append(3)\n >>> lists[1].append(5)\n >>> lists[2].append(7)\n >>> lists\n [[3], [5], [7]]\n\n3. If *i* or *j* is negative, the index is relative to the end of the\n string: ``len(s) + i`` or ``len(s) + j`` is substituted. But note\n that ``-0`` is still ``0``.\n\n4. The slice of *s* from *i* to *j* is defined as the sequence of\n items with index *k* such that ``i <= k < j``. If *i* or *j* is\n greater than ``len(s)``, use ``len(s)``. If *i* is omitted or\n ``None``, use ``0``. If *j* is omitted or ``None``, use\n ``len(s)``. If *i* is greater than or equal to *j*, the slice is\n empty.\n\n5. The slice of *s* from *i* to *j* with step *k* is defined as the\n sequence of items with index ``x = i + n*k`` such that ``0 <= n <\n (j-i)/k``. In other words, the indices are ``i``, ``i+k``,\n ``i+2*k``, ``i+3*k`` and so on, stopping when *j* is reached (but\n never including *j*). If *i* or *j* is greater than ``len(s)``,\n use ``len(s)``. If *i* or *j* are omitted or ``None``, they become\n "end" values (which end depends on the sign of *k*). Note, *k*\n cannot be zero. If *k* is ``None``, it is treated like ``1``.\n\n6. If *s* and *t* are both strings, some Python implementations such\n as CPython can usually perform an in-place optimization for\n assignments of the form ``s=s+t`` or ``s+=t``. When applicable,\n this optimization makes quadratic run-time much less likely. This\n optimization is both version and implementation dependent. For\n performance sensitive code, it is preferable to use the\n ``str.join()`` method which assures consistent linear concatenation\n performance across versions and implementations.\n\n Changed in version 2.4: Formerly, string concatenation never\n occurred in-place.\n\n\nString Methods\n==============\n\nBelow are listed the string methods which both 8-bit strings and\nUnicode objects support. Note that none of these methods take keyword\narguments.\n\nIn addition, Python\'s strings support the sequence type methods\ndescribed in the *Sequence Types --- str, unicode, list, tuple,\nbuffer, xrange* section. To output formatted strings use template\nstrings or the ``%`` operator described in the *String Formatting\nOperations* section. Also, see the ``re`` module for string functions\nbased on regular expressions.\n\nstr.capitalize()\n\n Return a copy of the string with only its first character\n capitalized.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.center(width[, fillchar])\n\n Return centered in a string of length *width*. Padding is done\n using the specified *fillchar* (default is a space).\n\n Changed in version 2.4: Support for the *fillchar* argument.\n\nstr.count(sub[, start[, end]])\n\n Return the number of non-overlapping occurrences of substring *sub*\n in the range [*start*, *end*]. Optional arguments *start* and\n *end* are interpreted as in slice notation.\n\nstr.decode([encoding[, errors]])\n\n Decodes the string using the codec registered for *encoding*.\n *encoding* defaults to the default string encoding. *errors* may\n be given to set a different error handling scheme. 
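A round-trip sketch (assuming CPython 2.x):\n\n >>> \'Python\'.decode(\'ascii\')\n u\'Python\'\n >>> u\'Python\'.encode(\'ascii\')\n \'Python\'\n\n 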
The default is\n ``\'strict\'``, meaning that encoding errors raise ``UnicodeError``.\n Other possible values are ``\'ignore\'``, ``\'replace\'`` and any other\n name registered via ``codecs.register_error()``, see section *Codec\n Base Classes*.\n\n New in version 2.2.\n\n Changed in version 2.3: Support for other error handling schemes\n added.\n\nstr.encode([encoding[, errors]])\n\n Return an encoded version of the string. Default encoding is the\n current default string encoding. *errors* may be given to set a\n different error handling scheme. The default for *errors* is\n ``\'strict\'``, meaning that encoding errors raise a\n ``UnicodeError``. Other possible values are ``\'ignore\'``,\n ``\'replace\'``, ``\'xmlcharrefreplace\'``, ``\'backslashreplace\'`` and\n any other name registered via ``codecs.register_error()``, see\n section *Codec Base Classes*. For a list of possible encodings, see\n section *Standard Encodings*.\n\n New in version 2.0.\n\n Changed in version 2.3: Support for ``\'xmlcharrefreplace\'`` and\n ``\'backslashreplace\'`` and other error handling schemes added.\n\nstr.endswith(suffix[, start[, end]])\n\n Return ``True`` if the string ends with the specified *suffix*,\n otherwise return ``False``. *suffix* can also be a tuple of\n suffixes to look for. With optional *start*, test beginning at\n that position. With optional *end*, stop comparing at that\n position.\n\n Changed in version 2.5: Accept tuples as *suffix*.\n\nstr.expandtabs([tabsize])\n\n Return a copy of the string where all tab characters are replaced\n by one or more spaces, depending on the current column and the\n given tab size. The column number is reset to zero after each\n newline occurring in the string. If *tabsize* is not given, a tab\n size of ``8`` characters is assumed. This doesn\'t understand other\n non-printing characters or escape sequences.\n\nstr.find(sub[, start[, end]])\n\n Return the lowest index in the string where substring *sub* is\n found, such that *sub* is contained in the range [*start*, *end*].\n Optional arguments *start* and *end* are interpreted as in slice\n notation. Return ``-1`` if *sub* is not found.\n\nstr.format(format_string, *args, **kwargs)\n\n Perform a string formatting operation. The *format_string*\n argument can contain literal text or replacement fields delimited\n by braces ``{}``. Each replacement field contains either the\n numeric index of a positional argument, or the name of a keyword\n argument. 
Returns a copy of *format_string* where each replacement\n field is replaced with the string value of the corresponding\n argument.\n\n >>> "The sum of 1 + 2 is {0}".format(1+2)\n \'The sum of 1 + 2 is 3\'\n\n See *Format String Syntax* for a description of the various\n formatting options that can be specified in format strings.\n\n This method of string formatting is the new standard in Python 3.0,\n and should be preferred to the ``%`` formatting described in\n *String Formatting Operations* in new code.\n\n New in version 2.6.\n\nstr.index(sub[, start[, end]])\n\n Like ``find()``, but raise ``ValueError`` when the substring is not\n found.\n\nstr.isalnum()\n\n Return true if all characters in the string are alphanumeric and\n there is at least one character, false otherwise.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.isalpha()\n\n Return true if all characters in the string are alphabetic and\n there is at least one character, false otherwise.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.isdigit()\n\n Return true if all characters in the string are digits and there is\n at least one character, false otherwise.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.islower()\n\n Return true if all cased characters in the string are lowercase and\n there is at least one cased character, false otherwise.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.isspace()\n\n Return true if there are only whitespace characters in the string\n and there is at least one character, false otherwise.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.istitle()\n\n Return true if the string is a titlecased string and there is at\n least one character, for example uppercase characters may only\n follow uncased characters and lowercase characters only cased ones.\n Return false otherwise.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.isupper()\n\n Return true if all cased characters in the string are uppercase and\n there is at least one cased character, false otherwise.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.join(seq)\n\n Return a string which is the concatenation of the strings in the\n sequence *seq*. The separator between elements is the string\n providing this method.\n\nstr.ljust(width[, fillchar])\n\n Return the string left justified in a string of length *width*.\n Padding is done using the specified *fillchar* (default is a\n space). The original string is returned if *width* is less than\n ``len(s)``.\n\n Changed in version 2.4: Support for the *fillchar* argument.\n\nstr.lower()\n\n Return a copy of the string converted to lowercase.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.lstrip([chars])\n\n Return a copy of the string with leading characters removed. The\n *chars* argument is a string specifying the set of characters to be\n removed. If omitted or ``None``, the *chars* argument defaults to\n removing whitespace. The *chars* argument is not a prefix; rather,\n all combinations of its values are stripped:\n\n >>> \' spacious \'.lstrip()\n \'spacious \'\n >>> \'www.example.com\'.lstrip(\'cmowz.\')\n \'example.com\'\n\n Changed in version 2.2.2: Support for the *chars* argument.\n\nstr.partition(sep)\n\n Split the string at the first occurrence of *sep*, and return a\n 3-tuple containing the part before the separator, the separator\n itself, and the part after the separator. 
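For instance (assuming Python 2.5 or later):\n\n >>> \'key=value\'.partition(\'=\')\n (\'key\', \'=\', \'value\')\n >>> \'no separator here\'.partition(\'=\')\n (\'no separator here\', \'\', \'\')\n\n 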
If the separator is not\n found, return a 3-tuple containing the string itself, followed by\n two empty strings.\n\n New in version 2.5.\n\nstr.replace(old, new[, count])\n\n Return a copy of the string with all occurrences of substring *old*\n replaced by *new*. If the optional argument *count* is given, only\n the first *count* occurrences are replaced.\n\nstr.rfind(sub[, start[, end]])\n\n Return the highest index in the string where substring *sub* is\n found, such that *sub* is contained within s[start,end]. Optional\n arguments *start* and *end* are interpreted as in slice notation.\n Return ``-1`` on failure.\n\nstr.rindex(sub[, start[, end]])\n\n Like ``rfind()`` but raises ``ValueError`` when the substring *sub*\n is not found.\n\nstr.rjust(width[, fillchar])\n\n Return the string right justified in a string of length *width*.\n Padding is done using the specified *fillchar* (default is a\n space). The original string is returned if *width* is less than\n ``len(s)``.\n\n Changed in version 2.4: Support for the *fillchar* argument.\n\nstr.rpartition(sep)\n\n Split the string at the last occurrence of *sep*, and return a\n 3-tuple containing the part before the separator, the separator\n itself, and the part after the separator. If the separator is not\n found, return a 3-tuple containing two empty strings, followed by\n the string itself.\n\n New in version 2.5.\n\nstr.rsplit([sep[, maxsplit]])\n\n Return a list of the words in the string, using *sep* as the\n delimiter string. If *maxsplit* is given, at most *maxsplit* splits\n are done, the *rightmost* ones. If *sep* is not specified or\n ``None``, any whitespace string is a separator. Except for\n splitting from the right, ``rsplit()`` behaves like ``split()``\n which is described in detail below.\n\n New in version 2.4.\n\nstr.rstrip([chars])\n\n Return a copy of the string with trailing characters removed. The\n *chars* argument is a string specifying the set of characters to be\n removed. If omitted or ``None``, the *chars* argument defaults to\n removing whitespace. The *chars* argument is not a suffix; rather,\n all combinations of its values are stripped:\n\n >>> \' spacious \'.rstrip()\n \' spacious\'\n >>> \'mississippi\'.rstrip(\'ipz\')\n \'mississ\'\n\n Changed in version 2.2.2: Support for the *chars* argument.\n\nstr.split([sep[, maxsplit]])\n\n Return a list of the words in the string, using *sep* as the\n delimiter string. If *maxsplit* is given, at most *maxsplit*\n splits are done (thus, the list will have at most ``maxsplit+1``\n elements). If *maxsplit* is not specified, then there is no limit\n on the number of splits (all possible splits are made).\n\n If *sep* is given, consecutive delimiters are not grouped together\n and are deemed to delimit empty strings (for example,\n ``\'1,,2\'.split(\',\')`` returns ``[\'1\', \'\', \'2\']``). The *sep*\n argument may consist of multiple characters (for example,\n ``\'1<>2<>3\'.split(\'<>\')`` returns ``[\'1\', \'2\', \'3\']``). Splitting\n an empty string with a specified separator returns ``[\'\']``.\n\n If *sep* is not specified or is ``None``, a different splitting\n algorithm is applied: runs of consecutive whitespace are regarded\n as a single separator, and the result will contain no empty strings\n at the start or end if the string has leading or trailing\n whitespace. 
Consequently, splitting an empty string or a string\n consisting of just whitespace with a ``None`` separator returns\n ``[]``.\n\n For example, ``\' 1 2 3 \'.split()`` returns ``[\'1\', \'2\', \'3\']``,\n and ``\' 1 2 3 \'.split(None, 1)`` returns ``[\'1\', \'2 3 \']``.\n\nstr.splitlines([keepends])\n\n Return a list of the lines in the string, breaking at line\n boundaries. Line breaks are not included in the resulting list\n unless *keepends* is given and true.\n\nstr.startswith(prefix[, start[, end]])\n\n Return ``True`` if string starts with the *prefix*, otherwise\n return ``False``. *prefix* can also be a tuple of prefixes to look\n for. With optional *start*, test string beginning at that\n position. With optional *end*, stop comparing string at that\n position.\n\n Changed in version 2.5: Accept tuples as *prefix*.\n\nstr.strip([chars])\n\n Return a copy of the string with the leading and trailing\n characters removed. The *chars* argument is a string specifying the\n set of characters to be removed. If omitted or ``None``, the\n *chars* argument defaults to removing whitespace. The *chars*\n argument is not a prefix or suffix; rather, all combinations of its\n values are stripped:\n\n >>> \' spacious \'.strip()\n \'spacious\'\n >>> \'www.example.com\'.strip(\'cmowz.\')\n \'example\'\n\n Changed in version 2.2.2: Support for the *chars* argument.\n\nstr.swapcase()\n\n Return a copy of the string with uppercase characters converted to\n lowercase and vice versa.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.title()\n\n Return a titlecased version of the string: words start with\n uppercase characters, all remaining cased characters are lowercase.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.translate(table[, deletechars])\n\n Return a copy of the string where all characters occurring in the\n optional argument *deletechars* are removed, and the remaining\n characters have been mapped through the given translation table,\n which must be a string of length 256.\n\n You can use the ``maketrans()`` helper function in the ``string``\n module to create a translation table. For string objects, set the\n *table* argument to ``None`` for translations that only delete\n characters:\n\n >>> \'read this short text\'.translate(None, \'aeiou\')\n \'rd ths shrt txt\'\n\n New in version 2.6: Support for a ``None`` *table* argument.\n\n For Unicode objects, the ``translate()`` method does not accept the\n optional *deletechars* argument. Instead, it returns a copy of the\n *s* where all characters have been mapped through the given\n translation table which must be a mapping of Unicode ordinals to\n Unicode ordinals, Unicode strings or ``None``. Unmapped characters\n are left untouched. Characters mapped to ``None`` are deleted.\n Note, a more flexible approach is to create a custom character\n mapping codec using the ``codecs`` module (see ``encodings.cp1251``\n for an example).\n\nstr.upper()\n\n Return a copy of the string converted to uppercase.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.zfill(width)\n\n Return the numeric string left filled with zeros in a string of\n length *width*. A sign prefix is handled correctly. The original\n string is returned if *width* is less than ``len(s)``.\n\n New in version 2.2.2.\n\nThe following methods are present only on unicode objects:\n\nunicode.isnumeric()\n\n Return ``True`` if there are only numeric characters in S,\n ``False`` otherwise. 
Numeric characters include digit characters,\n and all characters that have the Unicode numeric value property,\n e.g. U+2155, VULGAR FRACTION ONE FIFTH.\n\nunicode.isdecimal()\n\n Return ``True`` if there are only decimal characters in S,\n ``False`` otherwise. Decimal characters include digit characters,\n and all characters that can be used to form decimal-radix\n numbers, e.g. U+0660, ARABIC-INDIC DIGIT ZERO.\n\n\nString Formatting Operations\n============================\n\nString and Unicode objects have one unique built-in operation: the\n``%`` operator (modulo). This is also known as the string\n*formatting* or *interpolation* operator. Given ``format % values``\n(where *format* is a string or Unicode object), ``%`` conversion\nspecifications in *format* are replaced with zero or more elements of\n*values*. The effect is similar to using ``sprintf`` in the C\nlanguage. If *format* is a Unicode object, or if any of the objects\nbeing converted using the ``%s`` conversion are Unicode objects, the\nresult will also be a Unicode object.\n\nIf *format* requires a single argument, *values* may be a single non-\ntuple object. [4] Otherwise, *values* must be a tuple with exactly\nthe number of items specified by the format string, or a single\nmapping object (for example, a dictionary).\n\nA conversion specifier contains two or more characters and has the\nfollowing components, which must occur in this order:\n\n1. The ``\'%\'`` character, which marks the start of the specifier.\n\n2. Mapping key (optional), consisting of a parenthesised sequence of\n characters (for example, ``(somename)``).\n\n3. Conversion flags (optional), which affect the result of some\n conversion types.\n\n4. Minimum field width (optional). If specified as an ``\'*\'``\n (asterisk), the actual width is read from the next element of the\n tuple in *values*, and the object to convert comes after the\n minimum field width and optional precision.\n\n5. Precision (optional), given as a ``\'.\'`` (dot) followed by the\n precision. If specified as ``\'*\'`` (an asterisk), the actual\n precision is read from the next element of the tuple in *values*,\n and the value to convert comes after the precision.\n\n6. Length modifier (optional).\n\n7. Conversion type.\n\nWhen the right argument is a dictionary (or other mapping type), then\nthe formats in the string *must* include a parenthesised mapping key\ninto that dictionary inserted immediately after the ``\'%\'`` character.\nThe mapping key selects the value to be formatted from the mapping.\nFor example:\n\n>>> print \'%(language)s has %(#)03d quote types.\' % \\\n... {\'language\': "Python", "#": 2}\nPython has 002 quote types.\n\nIn this case no ``*`` specifiers may occur in a format (since they\nrequire a sequential parameter list).\n\nThe conversion flag characters are:\n\n+-----------+-----------------------------------------------------------------------+\n| Flag | Meaning |\n+===========+=======================================================================+\n| ``\'#\'`` | The value conversion will use the "alternate form" (where defined |\n| | below). |\n+-----------+-----------------------------------------------------------------------+\n| ``\'0\'`` | The conversion will be zero padded for numeric values. |\n+-----------+-----------------------------------------------------------------------+\n| ``\'-\'`` | The converted value is left adjusted (overrides the ``\'0\'`` |\n| | conversion if both are given). 
|\n+-----------+-----------------------------------------------------------------------+\n| ``\' \'`` | (a space) A blank should be left before a positive number (or empty |\n| | string) produced by a signed conversion. |\n+-----------+-----------------------------------------------------------------------+\n| ``\'+\'`` | A sign character (``\'+\'`` or ``\'-\'``) will precede the conversion |\n| | (overrides a "space" flag). |\n+-----------+-----------------------------------------------------------------------+\n\nA length modifier (``h``, ``l``, or ``L``) may be present, but is\nignored as it is not necessary for Python -- so e.g. ``%ld`` is\nidentical to ``%d``.\n\nThe conversion types are:\n\n+--------------+-------------------------------------------------------+---------+\n| Conversion | Meaning | Notes |\n+==============+=======================================================+=========+\n| ``\'d\'`` | Signed integer decimal. | |\n+--------------+-------------------------------------------------------+---------+\n| ``\'i\'`` | Signed integer decimal. | |\n+--------------+-------------------------------------------------------+---------+\n| ``\'o\'`` | Signed octal value. | (1) |\n+--------------+-------------------------------------------------------+---------+\n| ``\'u\'`` | Obsolete type -- it is identical to ``\'d\'``. | (7) |\n+--------------+-------------------------------------------------------+---------+\n| ``\'x\'`` | Signed hexadecimal (lowercase). | (2) |\n+--------------+-------------------------------------------------------+---------+\n| ``\'X\'`` | Signed hexadecimal (uppercase). | (2) |\n+--------------+-------------------------------------------------------+---------+\n| ``\'e\'`` | Floating point exponential format (lowercase). | (3) |\n+--------------+-------------------------------------------------------+---------+\n| ``\'E\'`` | Floating point exponential format (uppercase). | (3) |\n+--------------+-------------------------------------------------------+---------+\n| ``\'f\'`` | Floating point decimal format. | (3) |\n+--------------+-------------------------------------------------------+---------+\n| ``\'F\'`` | Floating point decimal format. | (3) |\n+--------------+-------------------------------------------------------+---------+\n| ``\'g\'`` | Floating point format. Uses lowercase exponential | (4) |\n| | format if exponent is less than -4 or not less than | |\n| | precision, decimal format otherwise. | |\n+--------------+-------------------------------------------------------+---------+\n| ``\'G\'`` | Floating point format. Uses uppercase exponential | (4) |\n| | format if exponent is less than -4 or not less than | |\n| | precision, decimal format otherwise. | |\n+--------------+-------------------------------------------------------+---------+\n| ``\'c\'`` | Single character (accepts integer or single character | |\n| | string). | |\n+--------------+-------------------------------------------------------+---------+\n| ``\'r\'`` | String (converts any python object using ``repr()``). | (5) |\n+--------------+-------------------------------------------------------+---------+\n| ``\'s\'`` | String (converts any python object using ``str()``). | (6) |\n+--------------+-------------------------------------------------------+---------+\n| ``\'%\'`` | No argument is converted, results in a ``\'%\'`` | |\n| | character in the result. | |\n+--------------+-------------------------------------------------------+---------+\n\nNotes:\n\n1. 
The alternate form causes a leading zero (``\'0\'``) to be inserted\n between left-hand padding and the formatting of the number if the\n leading character of the result is not already a zero.\n\n2. The alternate form causes a leading ``\'0x\'`` or ``\'0X\'`` (depending\n on whether the ``\'x\'`` or ``\'X\'`` format was used) to be inserted\n between left-hand padding and the formatting of the number if the\n leading character of the result is not already a zero.\n\n3. The alternate form causes the result to always contain a decimal\n point, even if no digits follow it.\n\n The precision determines the number of digits after the decimal\n point and defaults to 6.\n\n4. The alternate form causes the result to always contain a decimal\n point, and trailing zeroes are not removed as they would otherwise\n be.\n\n The precision determines the number of significant digits before\n and after the decimal point and defaults to 6.\n\n5. The ``%r`` conversion was added in Python 2.0.\n\n The precision determines the maximal number of characters used.\n\n6. If the object or format provided is a ``unicode`` string, the\n resulting string will also be ``unicode``.\n\n The precision determines the maximal number of characters used.\n\n7. See **PEP 237**.\n\nSince Python strings have an explicit length, ``%s`` conversions do\nnot assume that ``\'\\0\'`` is the end of the string.\n\nFor safety reasons, floating point precisions are clipped to 50;\n``%f`` conversions for numbers whose absolute value is over 1e50 are\nreplaced by ``%g`` conversions. [5] All other errors raise\nexceptions.\n\nAdditional string operations are defined in standard modules\n``string`` and ``re``.\n\n\nXRange Type\n===========\n\nThe ``xrange`` type is an immutable sequence which is commonly used\nfor looping. The advantage of the ``xrange`` type is that an\n``xrange`` object will always take the same amount of memory, no\nmatter the size of the range it represents. There are no consistent\nperformance advantages.\n\nXRange objects have very little behavior: they only support indexing,\niteration, and the ``len()`` function.\n\n\nMutable Sequence Types\n======================\n\nList objects support additional operations that allow in-place\nmodification of the object. Other mutable sequence types (when added\nto the language) should also support these operations. Strings and\ntuples are immutable sequence types: such objects cannot be modified\nonce created. 
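A minimal doctest illustrating the distinction just drawn (an editorial example, not part of the original reference text):

>>> s = [1, 2, 3]
>>> s[0] = 99                 # lists are mutable; item assignment works
>>> s
[99, 2, 3]
>>> t = (1, 2, 3)
>>> t[0] = 99                 # tuples are immutable; item assignment fails
Traceback (most recent call last):
  ...
TypeError: 'tuple' object does not support item assignment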
The following operations are defined on mutable sequence\ntypes (where *x* is an arbitrary object):\n\n+--------------------------------+----------------------------------+-----------------------+\n| Operation | Result | Notes |\n+================================+==================================+=======================+\n| ``s[i] = x`` | item *i* of *s* is replaced by | |\n| | *x* | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s[i:j] = t`` | slice of *s* from *i* to *j* is | |\n| | replaced by the contents of the | |\n| | iterable *t* | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``del s[i:j]`` | same as ``s[i:j] = []`` | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s[i:j:k] = t`` | the elements of ``s[i:j:k]`` are | (1) |\n| | replaced by those of *t* | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``del s[i:j:k]`` | removes the elements of | |\n| | ``s[i:j:k]`` from the list | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.append(x)`` | same as ``s[len(s):len(s)] = | (2) |\n| | [x]`` | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.extend(x)`` | same as ``s[len(s):len(s)] = x`` | (3) |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.count(x)`` | return number of *i*\'s for which | |\n| | ``s[i] == x`` | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.index(x[, i[, j]])`` | return smallest *k* such that | (4) |\n| | ``s[k] == x`` and ``i <= k < j`` | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.insert(i, x)`` | same as ``s[i:i] = [x]`` | (5) |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.pop([i])`` | same as ``x = s[i]; del s[i]; | (6) |\n| | return x`` | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.remove(x)`` | same as ``del s[s.index(x)]`` | (4) |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.reverse()`` | reverses the items of *s* in | (7) |\n| | place | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.sort([cmp[, key[, | sort the items of *s* in place | (7)(8)(9)(10) |\n| reverse]]])`` | | |\n+--------------------------------+----------------------------------+-----------------------+\n\nNotes:\n\n1. *t* must have the same length as the slice it is replacing.\n\n2. The C implementation of Python has historically accepted multiple\n parameters and implicitly joined them into a tuple; this no longer\n works in Python 2.0. Use of this misfeature has been deprecated\n since Python 1.4.\n\n3. *x* can be any iterable object.\n\n4. Raises ``ValueError`` when *x* is not found in *s*. When a negative\n index is passed as the second or third parameter to the ``index()``\n method, the list length is added, as for slice indices. If it is\n still negative, it is truncated to zero, as for slice indices.\n\n Changed in version 2.3: Previously, ``index()`` didn\'t have\n arguments for specifying start and stop positions.\n\n5. 
When a negative index is passed as the first parameter to the\n ``insert()`` method, the list length is added, as for slice\n indices. If it is still negative, it is truncated to zero, as for\n slice indices.\n\n Changed in version 2.3: Previously, all negative indices were\n truncated to zero.\n\n6. The ``pop()`` method is only supported by the list and array types.\n The optional argument *i* defaults to ``-1``, so that by default\n the last item is removed and returned.\n\n7. The ``sort()`` and ``reverse()`` methods modify the list in place\n for economy of space when sorting or reversing a large list. To\n remind you that they operate by side effect, they don\'t return the\n sorted or reversed list.\n\n8. The ``sort()`` method takes optional arguments for controlling the\n comparisons.\n\n *cmp* specifies a custom comparison function of two arguments (list\n items) which should return a negative, zero or positive number\n depending on whether the first argument is considered smaller than,\n equal to, or larger than the second argument: ``cmp=lambda x,y:\n cmp(x.lower(), y.lower())``. The default value is ``None``.\n\n *key* specifies a function of one argument that is used to extract\n a comparison key from each list element: ``key=str.lower``. The\n default value is ``None``.\n\n *reverse* is a boolean value. If set to ``True``, then the list\n elements are sorted as if each comparison were reversed.\n\n In general, the *key* and *reverse* conversion processes are much\n faster than specifying an equivalent *cmp* function. This is\n because *cmp* is called multiple times for each list element while\n *key* and *reverse* touch each element only once.\n\n Changed in version 2.3: Support for ``None`` as an equivalent to\n omitting *cmp* was added.\n\n Changed in version 2.4: Support for *key* and *reverse* was added.\n\n9. Starting with Python 2.3, the ``sort()`` method is guaranteed to be\n stable. A sort is stable if it guarantees not to change the\n relative order of elements that compare equal --- this is helpful\n for sorting in multiple passes (for example, sort by department,\n then by salary grade).\n\n10. While a list is being sorted, the effect of attempting to mutate,\n or even inspect, the list is undefined. The C implementation of\n Python 2.3 and newer makes the list appear empty for the duration,\n and raises ``ValueError`` if it can detect that the list has been\n mutated during a sort.\n', 'typesseq-mutable': u"\nMutable Sequence Types\n**********************\n\nList objects support additional operations that allow in-place\nmodification of the object. Other mutable sequence types (when added\nto the language) should also support these operations. Strings and\ntuples are immutable sequence types: such objects cannot be modified\nonce created. 
The following operations are defined on mutable sequence\ntypes (where *x* is an arbitrary object):\n\n+--------------------------------+----------------------------------+-----------------------+\n| Operation | Result | Notes |\n+================================+==================================+=======================+\n| ``s[i] = x`` | item *i* of *s* is replaced by | |\n| | *x* | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s[i:j] = t`` | slice of *s* from *i* to *j* is | |\n| | replaced by the contents of the | |\n| | iterable *t* | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``del s[i:j]`` | same as ``s[i:j] = []`` | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s[i:j:k] = t`` | the elements of ``s[i:j:k]`` are | (1) |\n| | replaced by those of *t* | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``del s[i:j:k]`` | removes the elements of | |\n| | ``s[i:j:k]`` from the list | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.append(x)`` | same as ``s[len(s):len(s)] = | (2) |\n| | [x]`` | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.extend(x)`` | same as ``s[len(s):len(s)] = x`` | (3) |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.count(x)`` | return number of *i*'s for which | |\n| | ``s[i] == x`` | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.index(x[, i[, j]])`` | return smallest *k* such that | (4) |\n| | ``s[k] == x`` and ``i <= k < j`` | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.insert(i, x)`` | same as ``s[i:i] = [x]`` | (5) |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.pop([i])`` | same as ``x = s[i]; del s[i]; | (6) |\n| | return x`` | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.remove(x)`` | same as ``del s[s.index(x)]`` | (4) |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.reverse()`` | reverses the items of *s* in | (7) |\n| | place | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.sort([cmp[, key[, | sort the items of *s* in place | (7)(8)(9)(10) |\n| reverse]]])`` | | |\n+--------------------------------+----------------------------------+-----------------------+\n\nNotes:\n\n1. *t* must have the same length as the slice it is replacing.\n\n2. The C implementation of Python has historically accepted multiple\n parameters and implicitly joined them into a tuple; this no longer\n works in Python 2.0. Use of this misfeature has been deprecated\n since Python 1.4.\n\n3. *x* can be any iterable object.\n\n4. Raises ``ValueError`` when *x* is not found in *s*. When a negative\n index is passed as the second or third parameter to the ``index()``\n method, the list length is added, as for slice indices. If it is\n still negative, it is truncated to zero, as for slice indices.\n\n Changed in version 2.3: Previously, ``index()`` didn't have\n arguments for specifying start and stop positions.\n\n5. 
When a negative index is passed as the first parameter to the\n ``insert()`` method, the list length is added, as for slice\n indices. If it is still negative, it is truncated to zero, as for\n slice indices.\n\n Changed in version 2.3: Previously, all negative indices were\n truncated to zero.\n\n6. The ``pop()`` method is only supported by the list and array types.\n The optional argument *i* defaults to ``-1``, so that by default\n the last item is removed and returned.\n\n7. The ``sort()`` and ``reverse()`` methods modify the list in place\n for economy of space when sorting or reversing a large list. To\n remind you that they operate by side effect, they don't return the\n sorted or reversed list.\n\n8. The ``sort()`` method takes optional arguments for controlling the\n comparisons.\n\n *cmp* specifies a custom comparison function of two arguments (list\n items) which should return a negative, zero or positive number\n depending on whether the first argument is considered smaller than,\n equal to, or larger than the second argument: ``cmp=lambda x,y:\n cmp(x.lower(), y.lower())``. The default value is ``None``.\n\n *key* specifies a function of one argument that is used to extract\n a comparison key from each list element: ``key=str.lower``. The\n default value is ``None``.\n\n *reverse* is a boolean value. If set to ``True``, then the list\n elements are sorted as if each comparison were reversed.\n\n In general, the *key* and *reverse* conversion processes are much\n faster than specifying an equivalent *cmp* function. This is\n because *cmp* is called multiple times for each list element while\n *key* and *reverse* touch each element only once.\n\n Changed in version 2.3: Support for ``None`` as an equivalent to\n omitting *cmp* was added.\n\n Changed in version 2.4: Support for *key* and *reverse* was added.\n\n9. Starting with Python 2.3, the ``sort()`` method is guaranteed to be\n stable. A sort is stable if it guarantees not to change the\n relative order of elements that compare equal --- this is helpful\n for sorting in multiple passes (for example, sort by department,\n then by salary grade).\n\n10. While a list is being sorted, the effect of attempting to mutate,\n or even inspect, the list is undefined. The C implementation of\n Python 2.3 and newer makes the list appear empty for the duration,\n and raises ``ValueError`` if it can detect that the list has been\n mutated during a sort.\n", 'unary': u'\nUnary arithmetic and bitwise operations\n***************************************\n\nAll unary arithmetic and bitwise operations have the same priority:\n\n u_expr ::= power | "-" u_expr | "+" u_expr | "~" u_expr\n\nThe unary ``-`` (minus) operator yields the negation of its numeric\nargument.\n\nThe unary ``+`` (plus) operator yields its numeric argument unchanged.\n\nThe unary ``~`` (invert) operator yields the bitwise inversion of its\nplain or long integer argument. The bitwise inversion of ``x`` is\ndefined as ``-(x+1)``. 
It only applies to integral numbers.\n\nIn all three cases, if the argument does not have the proper type, a\n``TypeError`` exception is raised.\n', 'while': u'\nThe ``while`` statement\n***********************\n\nThe ``while`` statement is used for repeated execution as long as an\nexpression is true:\n\n while_stmt ::= "while" expression ":" suite\n ["else" ":" suite]\n\nThis repeatedly tests the expression and, if it is true, executes the\nfirst suite; if the expression is false (which may be the first time\nit is tested) the suite of the ``else`` clause, if present, is\nexecuted and the loop terminates.\n\nA ``break`` statement executed in the first suite terminates the loop\nwithout executing the ``else`` clause\'s suite. A ``continue``\nstatement executed in the first suite skips the rest of the suite and\ngoes back to testing the expression.\n', 'with': u'\nThe ``with`` statement\n**********************\n\nNew in version 2.5.\n\nThe ``with`` statement is used to wrap the execution of a block with\nmethods defined by a context manager (see section *With Statement\nContext Managers*). This allows common\n``try``...``except``...``finally`` usage patterns to be encapsulated\nfor convenient reuse.\n\n with_stmt ::= "with" expression ["as" target] ":" suite\n\nThe execution of the ``with`` statement proceeds as follows:\n\n1. The context expression is evaluated to obtain a context manager.\n\n2. The context manager\'s ``__enter__()`` method is invoked.\n\n3. If a target was included in the ``with`` statement, the return\n value from ``__enter__()`` is assigned to it.\n\n Note: The ``with`` statement guarantees that if the ``__enter__()``\n method returns without an error, then ``__exit__()`` will always\n be called. Thus, if an error occurs during the assignment to the\n target list, it will be treated the same as an error occurring\n within the suite would be. See step 5 below.\n\n4. The suite is executed.\n\n5. The context manager\'s ``__exit__()`` method is invoked. If an\n exception caused the suite to be exited, its type, value, and\n traceback are passed as arguments to ``__exit__()``. Otherwise,\n three ``None`` arguments are supplied.\n\n If the suite was exited due to an exception, and the return value\n from the ``__exit__()`` method was false, the exception is\n reraised. If the return value was true, the exception is\n suppressed, and execution continues with the statement following\n the ``with`` statement.\n\n If the suite was exited for any reason other than an exception, the\n return value from ``__exit__()`` is ignored, and execution proceeds\n at the normal location for the kind of exit that was taken.\n\nNote: In Python 2.5, the ``with`` statement is only allowed when the\n ``with_statement`` feature has been enabled. It is always enabled\n in Python 2.6.\n\nSee also:\n\n **PEP 0343** - The "with" statement\n The specification, background, and examples for the Python\n ``with`` statement.\n', 'yield': u'\nThe ``yield`` statement\n***********************\n\n yield_stmt ::= yield_expression\n\nThe ``yield`` statement is only used when defining a generator\nfunction, and is only used in the body of the generator function.\nUsing a ``yield`` statement in a function definition is sufficient to\ncause that definition to create a generator function instead of a\nnormal function.\n\nWhen a generator function is called, it returns an iterator known as a\ngenerator iterator, or more commonly, a generator. 
The body of the\ngenerator function is executed by calling the generator\'s ``next()``\nmethod repeatedly until it raises an exception.\n\nWhen a ``yield`` statement is executed, the state of the generator is\nfrozen and the value of **expression_list** is returned to\n``next()``\'s caller. By "frozen" we mean that all local state is\nretained, including the current bindings of local variables, the\ninstruction pointer, and the internal evaluation stack: enough\ninformation is saved so that the next time ``next()`` is invoked, the\nfunction can proceed exactly as if the ``yield`` statement were just\nanother external call.\n\nAs of Python version 2.5, the ``yield`` statement is now allowed in\nthe ``try`` clause of a ``try`` ... ``finally`` construct. If the\ngenerator is not resumed before it is finalized (by reaching a zero\nreference count or by being garbage collected), the generator-\niterator\'s ``close()`` method will be called, allowing any pending\n``finally`` clauses to execute.\n\nNote: In Python 2.2, the ``yield`` statement was only allowed when the\n ``generators`` feature has been enabled. This ``__future__`` import\n statement was used to enable the feature:\n\n from __future__ import generators\n\nSee also:\n\n **PEP 0255** - Simple Generators\n The proposal for adding generators and the ``yield`` statement\n to Python.\n\n **PEP 0342** - Coroutines via Enhanced Generators\n The proposal that, among other generator enhancements, proposed\n allowing ``yield`` to appear inside a ``try`` ... ``finally``\n block.\n'}
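A short doctest summarising the generator behaviour described in the ``yield`` topic above (an editorial illustration in Python 2 syntax; not part of the original topic text):

>>> def countdown(n):
...     try:
...         while n > 0:
...             yield n
...             n = n - 1
...     finally:
...         print 'cleanup'
...
>>> g = countdown(2)
>>> g.next()
2
>>> g.next()
1
>>> g.close()    # finalizing the generator runs the pending finally clause
cleanup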
lgpl-2.1
rs2/pandas
pandas/io/sql.py
1
62655
""" Collection of query wrappers / abstractions to both facilitate data retrieval and to reduce dependency on DB-specific API. """ from contextlib import contextmanager from datetime import date, datetime, time from functools import partial import re from typing import Iterator, Optional, Union, overload import warnings import numpy as np import pandas._libs.lib as lib from pandas.core.dtypes.common import is_datetime64tz_dtype, is_dict_like, is_list_like from pandas.core.dtypes.dtypes import DatetimeTZDtype from pandas.core.dtypes.missing import isna from pandas.core.api import DataFrame, Series from pandas.core.base import PandasObject from pandas.core.tools.datetimes import to_datetime class SQLAlchemyRequired(ImportError): pass class DatabaseError(IOError): pass # ----------------------------------------------------------------------------- # -- Helper functions _SQLALCHEMY_INSTALLED = None def _is_sqlalchemy_connectable(con): global _SQLALCHEMY_INSTALLED if _SQLALCHEMY_INSTALLED is None: try: import sqlalchemy _SQLALCHEMY_INSTALLED = True except ImportError: _SQLALCHEMY_INSTALLED = False if _SQLALCHEMY_INSTALLED: import sqlalchemy # noqa: F811 return isinstance(con, sqlalchemy.engine.Connectable) else: return False def _convert_params(sql, params): """Convert SQL and params args to DBAPI2.0 compliant format.""" args = [sql] if params is not None: if hasattr(params, "keys"): # test if params is a mapping args += [params] else: args += [list(params)] return args def _process_parse_dates_argument(parse_dates): """Process parse_dates argument for read_sql functions""" # handle non-list entries for parse_dates gracefully if parse_dates is True or parse_dates is None or parse_dates is False: parse_dates = [] elif not hasattr(parse_dates, "__iter__"): parse_dates = [parse_dates] return parse_dates def _handle_date_column(col, utc=None, format=None): if isinstance(format, dict): return to_datetime(col, errors="ignore", **format) else: # Allow passing of formatting string for integers # GH17855 if format is None and ( issubclass(col.dtype.type, np.floating) or issubclass(col.dtype.type, np.integer) ): format = "s" if format in ["D", "d", "h", "m", "s", "ms", "us", "ns"]: return to_datetime(col, errors="coerce", unit=format, utc=utc) elif is_datetime64tz_dtype(col.dtype): # coerce to UTC timezone # GH11216 return to_datetime(col, utc=True) else: return to_datetime(col, errors="coerce", format=format, utc=utc) def _parse_date_columns(data_frame, parse_dates): """ Force non-datetime columns to be read as such. Supports both string formatted and integer timestamp columns. """ parse_dates = _process_parse_dates_argument(parse_dates) # we want to coerce datetime64_tz dtypes for now to UTC # we could in theory do a 'nice' conversion from a FixedOffset tz # GH11216 for col_name, df_col in data_frame.items(): if is_datetime64tz_dtype(df_col.dtype) or col_name in parse_dates: try: fmt = parse_dates[col_name] except TypeError: fmt = None data_frame[col_name] = _handle_date_column(df_col, format=fmt) return data_frame def _wrap_result(data, columns, index_col=None, coerce_float=True, parse_dates=None): """Wrap result set of query in a DataFrame.""" frame = DataFrame.from_records(data, columns=columns, coerce_float=coerce_float) frame = _parse_date_columns(frame, parse_dates) if index_col is not None: frame.set_index(index_col, inplace=True) return frame def execute(sql, con, cur=None, params=None): """ Execute the given SQL query using the provided connection object. 
Parameters ---------- sql : string SQL query to be executed. con : SQLAlchemy connectable(engine/connection) or sqlite3 connection Using SQLAlchemy makes it possible to use any DB supported by the library. If a DBAPI2 object, only sqlite3 is supported. cur : deprecated, cursor is obtained from connection, default: None params : list or tuple, optional, default: None List of parameters to pass to execute method. Returns ------- Results Iterable """ if cur is None: pandas_sql = pandasSQL_builder(con) else: pandas_sql = pandasSQL_builder(cur, is_cursor=True) args = _convert_params(sql, params) return pandas_sql.execute(*args) # ----------------------------------------------------------------------------- # -- Read and write to DataFrames @overload def read_sql_table( table_name, con, schema=None, index_col=None, coerce_float=True, parse_dates=None, columns=None, chunksize: None = None, ) -> DataFrame: ... @overload def read_sql_table( table_name, con, schema=None, index_col=None, coerce_float=True, parse_dates=None, columns=None, chunksize: int = 1, ) -> Iterator[DataFrame]: ... def read_sql_table( table_name, con, schema=None, index_col=None, coerce_float=True, parse_dates=None, columns=None, chunksize: Optional[int] = None, ) -> Union[DataFrame, Iterator[DataFrame]]: """ Read SQL database table into a DataFrame. Given a table name and a SQLAlchemy connectable, returns a DataFrame. This function does not support DBAPI connections. Parameters ---------- table_name : str Name of SQL table in database. con : SQLAlchemy connectable or str A database URI could be provided as a str. SQLite DBAPI connection mode not supported. schema : str, default None Name of SQL schema in database to query (if database flavor supports this). Uses default schema if None (default). index_col : str or list of str, optional, default: None Column(s) to set as index(MultiIndex). coerce_float : bool, default True Attempts to convert values of non-string, non-numeric objects (like decimal.Decimal) to floating point. Can result in loss of precision. parse_dates : list or dict, default None - List of column names to parse as dates. - Dict of ``{column_name: format string}`` where format string is strftime compatible in case of parsing string times or is one of (D, s, ns, ms, us) in case of parsing integer timestamps. - Dict of ``{column_name: arg dict}``, where the arg dict corresponds to the keyword arguments of :func:`pandas.to_datetime` Especially useful with databases without native Datetime support, such as SQLite. columns : list, default None List of column names to select from SQL table. chunksize : int, default None If specified, returns an iterator where `chunksize` is the number of rows to include in each chunk. Returns ------- DataFrame or Iterator[DataFrame] A SQL table is returned as a two-dimensional data structure with labeled axes. See Also -------- read_sql_query : Read SQL query into a DataFrame. read_sql : Read SQL query or database table into a DataFrame. Notes ----- Any datetime values with time zone information will be converted to UTC. Examples -------- >>> pd.read_sql_table('table_name', 'postgres:///db_name') # doctest:+SKIP """ con = _engine_builder(con) if not _is_sqlalchemy_connectable(con): raise NotImplementedError( "read_sql_table only supported for SQLAlchemy connectable." 
) import sqlalchemy from sqlalchemy.schema import MetaData meta = MetaData(con, schema=schema) try: meta.reflect(only=[table_name], views=True) except sqlalchemy.exc.InvalidRequestError as err: raise ValueError(f"Table {table_name} not found") from err pandas_sql = SQLDatabase(con, meta=meta) table = pandas_sql.read_table( table_name, index_col=index_col, coerce_float=coerce_float, parse_dates=parse_dates, columns=columns, chunksize=chunksize, ) if table is not None: return table else: raise ValueError(f"Table {table_name} not found", con) @overload def read_sql_query( sql, con, index_col=None, coerce_float=True, params=None, parse_dates=None, chunksize: None = None, ) -> DataFrame: ... @overload def read_sql_query( sql, con, index_col=None, coerce_float=True, params=None, parse_dates=None, chunksize: int = 1, ) -> Iterator[DataFrame]: ... def read_sql_query( sql, con, index_col=None, coerce_float=True, params=None, parse_dates=None, chunksize: Optional[int] = None, ) -> Union[DataFrame, Iterator[DataFrame]]: """ Read SQL query into a DataFrame. Returns a DataFrame corresponding to the result set of the query string. Optionally provide an `index_col` parameter to use one of the columns as the index, otherwise default integer index will be used. Parameters ---------- sql : str SQL query or SQLAlchemy Selectable (select or text object) SQL query to be executed. con : SQLAlchemy connectable, str, or sqlite3 connection Using SQLAlchemy makes it possible to use any DB supported by that library. If a DBAPI2 object, only sqlite3 is supported. index_col : str or list of str, optional, default: None Column(s) to set as index(MultiIndex). coerce_float : bool, default True Attempts to convert values of non-string, non-numeric objects (like decimal.Decimal) to floating point. Useful for SQL result sets. params : list, tuple or dict, optional, default: None List of parameters to pass to execute method. The syntax used to pass parameters is database driver dependent. Check your database driver documentation for which of the five syntax styles, described in PEP 249's paramstyle, is supported. Eg. for psycopg2, uses %(name)s so use params={'name' : 'value'}. parse_dates : list or dict, default: None - List of column names to parse as dates. - Dict of ``{column_name: format string}`` where format string is strftime compatible in case of parsing string times, or is one of (D, s, ns, ms, us) in case of parsing integer timestamps. - Dict of ``{column_name: arg dict}``, where the arg dict corresponds to the keyword arguments of :func:`pandas.to_datetime` Especially useful with databases without native Datetime support, such as SQLite. chunksize : int, default None If specified, return an iterator where `chunksize` is the number of rows to include in each chunk. Returns ------- DataFrame or Iterator[DataFrame] See Also -------- read_sql_table : Read SQL database table into a DataFrame. read_sql : Read SQL query or database table into a DataFrame. Notes ----- Any datetime values with time zone information parsed via the `parse_dates` parameter will be converted to UTC. """ pandas_sql = pandasSQL_builder(con) return pandas_sql.read_query( sql, index_col=index_col, params=params, coerce_float=coerce_float, parse_dates=parse_dates, chunksize=chunksize, ) @overload def read_sql( sql, con, index_col=None, coerce_float=True, params=None, parse_dates=None, columns=None, chunksize: None = None, ) -> DataFrame: ... 
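# ---------------------------------------------------------------------------
# Editorial note (not part of pandas): a sketch of how the ``chunksize``
# overloads above play out at a call site; the database file and table name
# below are hypothetical.
#
#   import sqlite3
#   import pandas as pd
#
#   con = sqlite3.connect("example.db")
#   # chunksize omitted (None): a single DataFrame with the full result set
#   frame = pd.read_sql_query("SELECT * FROM trades", con)
#   # chunksize=500: an iterator yielding DataFrames of at most 500 rows each
#   for chunk in pd.read_sql_query("SELECT * FROM trades", con, chunksize=500):
#       print(len(chunk))
# ---------------------------------------------------------------------------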
@overload def read_sql( sql, con, index_col=None, coerce_float=True, params=None, parse_dates=None, columns=None, chunksize: int = 1, ) -> Iterator[DataFrame]: ... def read_sql( sql, con, index_col=None, coerce_float=True, params=None, parse_dates=None, columns=None, chunksize: Optional[int] = None, ) -> Union[DataFrame, Iterator[DataFrame]]: """ Read SQL query or database table into a DataFrame. This function is a convenience wrapper around ``read_sql_table`` and ``read_sql_query`` (for backward compatibility). It will delegate to the specific function depending on the provided input. A SQL query will be routed to ``read_sql_query``, while a database table name will be routed to ``read_sql_table``. Note that the delegated function might have more specific notes about their functionality not listed here. Parameters ---------- sql : str or SQLAlchemy Selectable (select or text object) SQL query to be executed or a table name. con : SQLAlchemy connectable, str, or sqlite3 connection Using SQLAlchemy makes it possible to use any DB supported by that library. If a DBAPI2 object, only sqlite3 is supported. The user is responsible for engine disposal and connection closure for the SQLAlchemy connectable; str connections are closed automatically. See `here <https://docs.sqlalchemy.org/en/13/core/connections.html>`_. index_col : str or list of str, optional, default: None Column(s) to set as index(MultiIndex). coerce_float : bool, default True Attempts to convert values of non-string, non-numeric objects (like decimal.Decimal) to floating point, useful for SQL result sets. params : list, tuple or dict, optional, default: None List of parameters to pass to execute method. The syntax used to pass parameters is database driver dependent. Check your database driver documentation for which of the five syntax styles, described in PEP 249's paramstyle, is supported. Eg. for psycopg2, uses %(name)s so use params={'name' : 'value'}. parse_dates : list or dict, default: None - List of column names to parse as dates. - Dict of ``{column_name: format string}`` where format string is strftime compatible in case of parsing string times, or is one of (D, s, ns, ms, us) in case of parsing integer timestamps. - Dict of ``{column_name: arg dict}``, where the arg dict corresponds to the keyword arguments of :func:`pandas.to_datetime` Especially useful with databases without native Datetime support, such as SQLite. columns : list, default: None List of column names to select from SQL table (only used when reading a table). chunksize : int, default None If specified, return an iterator where `chunksize` is the number of rows to include in each chunk. Returns ------- DataFrame or Iterator[DataFrame] See Also -------- read_sql_table : Read SQL database table into a DataFrame. read_sql_query : Read SQL query into a DataFrame. 
""" pandas_sql = pandasSQL_builder(con) if isinstance(pandas_sql, SQLiteDatabase): return pandas_sql.read_query( sql, index_col=index_col, params=params, coerce_float=coerce_float, parse_dates=parse_dates, chunksize=chunksize, ) try: _is_table_name = pandas_sql.has_table(sql) except Exception: # using generic exception to catch errors from sql drivers (GH24988) _is_table_name = False if _is_table_name: pandas_sql.meta.reflect(only=[sql]) return pandas_sql.read_table( sql, index_col=index_col, coerce_float=coerce_float, parse_dates=parse_dates, columns=columns, chunksize=chunksize, ) else: return pandas_sql.read_query( sql, index_col=index_col, params=params, coerce_float=coerce_float, parse_dates=parse_dates, chunksize=chunksize, ) def to_sql( frame, name, con, schema=None, if_exists="fail", index=True, index_label=None, chunksize=None, dtype=None, method=None, ) -> None: """ Write records stored in a DataFrame to a SQL database. Parameters ---------- frame : DataFrame, Series name : str Name of SQL table. con : SQLAlchemy connectable(engine/connection) or database string URI or sqlite3 DBAPI2 connection Using SQLAlchemy makes it possible to use any DB supported by that library. If a DBAPI2 object, only sqlite3 is supported. schema : str, optional Name of SQL schema in database to write to (if database flavor supports this). If None, use default schema (default). if_exists : {'fail', 'replace', 'append'}, default 'fail' - fail: If table exists, do nothing. - replace: If table exists, drop it, recreate it, and insert data. - append: If table exists, insert data. Create if does not exist. index : boolean, default True Write DataFrame index as a column. index_label : str or sequence, optional Column label for index column(s). If None is given (default) and `index` is True, then the index names are used. A sequence should be given if the DataFrame uses MultiIndex. chunksize : int, optional Specify the number of rows in each batch to be written at a time. By default, all rows will be written at once. dtype : dict or scalar, optional Specifying the datatype for columns. If a dictionary is used, the keys should be the column names and the values should be the SQLAlchemy types or strings for the sqlite3 fallback mode. If a scalar is provided, it will be applied to all columns. method : {None, 'multi', callable}, optional Controls the SQL insertion clause used: - None : Uses standard SQL ``INSERT`` clause (one per row). - 'multi': Pass multiple values in a single ``INSERT`` clause. - callable with signature ``(pd_table, conn, keys, data_iter)``. Details and a sample callable implementation can be found in the section :ref:`insert method <io.sql.method>`. .. versionadded:: 0.24.0 """ if if_exists not in ("fail", "replace", "append"): raise ValueError(f"'{if_exists}' is not valid for if_exists") pandas_sql = pandasSQL_builder(con, schema=schema) if isinstance(frame, Series): frame = frame.to_frame() elif not isinstance(frame, DataFrame): raise NotImplementedError( "'frame' argument should be either a Series or a DataFrame" ) pandas_sql.to_sql( frame, name, if_exists=if_exists, index=index, index_label=index_label, schema=schema, chunksize=chunksize, dtype=dtype, method=method, ) def has_table(table_name, con, schema=None): """ Check if DataBase has named table. Parameters ---------- table_name: string Name of SQL table. con: SQLAlchemy connectable(engine/connection) or sqlite3 DBAPI2 connection Using SQLAlchemy makes it possible to use any DB supported by that library. 
If a DBAPI2 object, only sqlite3 is supported. schema : string, default None Name of SQL schema in database to write to (if database flavor supports this). If None, use default schema (default). Returns ------- boolean """ pandas_sql = pandasSQL_builder(con, schema=schema) return pandas_sql.has_table(table_name) table_exists = has_table def _engine_builder(con): """ Returns a SQLAlchemy engine from a URI (if con is a string), else it just returns con without modifying it. """ global _SQLALCHEMY_INSTALLED if isinstance(con, str): try: import sqlalchemy except ImportError: _SQLALCHEMY_INSTALLED = False else: con = sqlalchemy.create_engine(con) return con return con def pandasSQL_builder(con, schema=None, meta=None, is_cursor=False): """ Convenience function to return the correct PandasSQL subclass based on the provided parameters. """ # When support for DBAPI connections is removed, # is_cursor should not be necessary. con = _engine_builder(con) if _is_sqlalchemy_connectable(con): return SQLDatabase(con, schema=schema, meta=meta) elif isinstance(con, str): raise ImportError("Using URI string without sqlalchemy installed.") else: return SQLiteDatabase(con, is_cursor=is_cursor) class SQLTable(PandasObject): """ For mapping Pandas tables to SQL tables. Uses the fact that the table is reflected by SQLAlchemy to do better type conversions. Also holds various flags needed to avoid having to pass them between functions all the time. """ # TODO: support for multiIndex def __init__( self, name, pandas_sql_engine, frame=None, index=True, if_exists="fail", prefix="pandas", index_label=None, schema=None, keys=None, dtype=None, ): self.name = name self.pd_sql = pandas_sql_engine self.prefix = prefix self.frame = frame self.index = self._index_name(index, index_label) self.schema = schema self.if_exists = if_exists self.keys = keys self.dtype = dtype if frame is not None: # We want to initialize based on a dataframe self.table = self._create_table_setup() else: # no data provided, read-only mode self.table = self.pd_sql.get_table(self.name, self.schema) if self.table is None: raise ValueError(f"Could not init table '{name}'") def exists(self): return self.pd_sql.has_table(self.name, self.schema) def sql_schema(self): from sqlalchemy.schema import CreateTable return str(CreateTable(self.table).compile(self.pd_sql.connectable)) def _execute_create(self): # Inserting table into database, add to MetaData object self.table = self.table.tometadata(self.pd_sql.meta) self.table.create() def create(self): if self.exists(): if self.if_exists == "fail": raise ValueError(f"Table '{self.name}' already exists.") elif self.if_exists == "replace": self.pd_sql.drop_table(self.name, self.schema) self._execute_create() elif self.if_exists == "append": pass else: raise ValueError(f"'{self.if_exists}' is not valid for if_exists") else: self._execute_create() def _execute_insert(self, conn, keys, data_iter): """ Execute SQL statement inserting data Parameters ---------- conn : sqlalchemy.engine.Engine or sqlalchemy.engine.Connection keys : list of str Column names data_iter : generator of list Each item contains a list of values to be inserted """ data = [dict(zip(keys, row)) for row in data_iter] conn.execute(self.table.insert(), data) def _execute_insert_multi(self, conn, keys, data_iter): """ Alternative to _execute_insert for DBs that support multivalue INSERT. Note: multi-value insert is usually faster for analytics DBs and tables containing a few columns, but performance degrades quickly as the number of columns increases. 
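Editorial note (illustrative, not from the original source): this method emits a single statement of the general form ``INSERT INTO tbl (a, b) VALUES (?, ?), (?, ?), ...`` (placeholder style varies by driver), rather than one ``INSERT`` per row as ``_execute_insert`` does.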
""" data = [dict(zip(keys, row)) for row in data_iter] conn.execute(self.table.insert(data)) def insert_data(self): if self.index is not None: temp = self.frame.copy() temp.index.names = self.index try: temp.reset_index(inplace=True) except ValueError as err: raise ValueError(f"duplicate name in index/columns: {err}") from err else: temp = self.frame column_names = list(map(str, temp.columns)) ncols = len(column_names) data_list = [None] * ncols for i, (_, ser) in enumerate(temp.items()): vals = ser._values if vals.dtype.kind == "M": d = vals.to_pydatetime() elif vals.dtype.kind == "m": # store as integers, see GH#6921, GH#7076 d = vals.view("i8").astype(object) else: d = vals.astype(object) assert isinstance(d, np.ndarray), type(d) if ser._can_hold_na: # Note: this will miss timedeltas since they are converted to int mask = isna(d) d[mask] = None data_list[i] = d return column_names, data_list def insert(self, chunksize=None, method=None): # set insert method if method is None: exec_insert = self._execute_insert elif method == "multi": exec_insert = self._execute_insert_multi elif callable(method): exec_insert = partial(method, self) else: raise ValueError(f"Invalid parameter `method`: {method}") keys, data_list = self.insert_data() nrows = len(self.frame) if nrows == 0: return if chunksize is None: chunksize = nrows elif chunksize == 0: raise ValueError("chunksize argument should be non-zero") chunks = int(nrows / chunksize) + 1 with self.pd_sql.run_transaction() as conn: for i in range(chunks): start_i = i * chunksize end_i = min((i + 1) * chunksize, nrows) if start_i >= end_i: break chunk_iter = zip(*[arr[start_i:end_i] for arr in data_list]) exec_insert(conn, keys, chunk_iter) def _query_iterator( self, result, chunksize, columns, coerce_float=True, parse_dates=None ): """Return generator through chunked result set.""" while True: data = result.fetchmany(chunksize) if not data: break else: self.frame = DataFrame.from_records( data, columns=columns, coerce_float=coerce_float ) self._harmonize_columns(parse_dates=parse_dates) if self.index is not None: self.frame.set_index(self.index, inplace=True) yield self.frame def read(self, coerce_float=True, parse_dates=None, columns=None, chunksize=None): if columns is not None and len(columns) > 0: from sqlalchemy import select cols = [self.table.c[n] for n in columns] if self.index is not None: for idx in self.index[::-1]: cols.insert(0, self.table.c[idx]) sql_select = select(cols) else: sql_select = self.table.select() result = self.pd_sql.execute(sql_select) column_names = result.keys() if chunksize is not None: return self._query_iterator( result, chunksize, column_names, coerce_float=coerce_float, parse_dates=parse_dates, ) else: data = result.fetchall() self.frame = DataFrame.from_records( data, columns=column_names, coerce_float=coerce_float ) self._harmonize_columns(parse_dates=parse_dates) if self.index is not None: self.frame.set_index(self.index, inplace=True) return self.frame def _index_name(self, index, index_label): # for writing: index=True to include index in sql table if index is True: nlevels = self.frame.index.nlevels # if index_label is specified, set this as index name(s) if index_label is not None: if not isinstance(index_label, list): index_label = [index_label] if len(index_label) != nlevels: raise ValueError( "Length of 'index_label' should match number of " f"levels, which is {nlevels}" ) else: return index_label # return the used column labels for the index columns if ( nlevels == 1 and "index" not in 
self.frame.columns and self.frame.index.name is None ): return ["index"] else: return [ l if l is not None else f"level_{i}" for i, l in enumerate(self.frame.index.names) ] # for reading: index=(list of) string to specify column to set as index elif isinstance(index, str): return [index] elif isinstance(index, list): return index else: return None def _get_column_names_and_types(self, dtype_mapper): column_names_and_types = [] if self.index is not None: for i, idx_label in enumerate(self.index): idx_type = dtype_mapper(self.frame.index._get_level_values(i)) column_names_and_types.append((str(idx_label), idx_type, True)) column_names_and_types += [ (str(self.frame.columns[i]), dtype_mapper(self.frame.iloc[:, i]), False) for i in range(len(self.frame.columns)) ] return column_names_and_types def _create_table_setup(self): from sqlalchemy import Column, PrimaryKeyConstraint, Table column_names_and_types = self._get_column_names_and_types(self._sqlalchemy_type) columns = [ Column(name, typ, index=is_index) for name, typ, is_index in column_names_and_types ] if self.keys is not None: if not is_list_like(self.keys): keys = [self.keys] else: keys = self.keys pkc = PrimaryKeyConstraint(*keys, name=self.name + "_pk") columns.append(pkc) schema = self.schema or self.pd_sql.meta.schema # At this point, attach to new metadata, only attach to self.meta # once table is created. from sqlalchemy.schema import MetaData meta = MetaData(self.pd_sql, schema=schema) return Table(self.name, meta, *columns, schema=schema) def _harmonize_columns(self, parse_dates=None): """ Make the DataFrame's column types align with the SQL table column types. Need to work around limited NA value support. Floats are always fine, ints must always be floats if there are Null values. Booleans are hard because converting bool column with None replaces all Nones with false. Therefore only convert bool if there are no NA values. Datetimes should already be converted to np.datetime64 if supported, but here we also force conversion if required. """ parse_dates = _process_parse_dates_argument(parse_dates) for sql_col in self.table.columns: col_name = sql_col.name try: df_col = self.frame[col_name] # Handle date parsing upfront; don't try to convert columns # twice if col_name in parse_dates: try: fmt = parse_dates[col_name] except TypeError: fmt = None self.frame[col_name] = _handle_date_column(df_col, format=fmt) continue # the type the dataframe column should have col_type = self._get_dtype(sql_col.type) if ( col_type is datetime or col_type is date or col_type is DatetimeTZDtype ): # Convert tz-aware Datetime SQL columns to UTC utc = col_type is DatetimeTZDtype self.frame[col_name] = _handle_date_column(df_col, utc=utc) elif col_type is float: # floats support NA, can always convert! self.frame[col_name] = df_col.astype(col_type, copy=False) elif len(df_col) == df_col.count(): # No NA values, can convert ints and bools if col_type is np.dtype("int64") or col_type is bool: self.frame[col_name] = df_col.astype(col_type, copy=False) except KeyError: pass # this column not in results def _sqlalchemy_type(self, col): dtype = self.dtype or {} if col.name in dtype: return self.dtype[col.name] # Infer type of column, while ignoring missing values. # Needed for inserting typed data containing NULLs, GH 8778. 
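# Editorial note (illustrative): ``lib.infer_dtype`` returns a string label
# such as "integer", "floating", "boolean", "datetime64", "date", "time",
# "timedelta64" or "complex"; the branches below map each label onto a
# SQLAlchemy column type, falling back to ``Text``.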
col_type = lib.infer_dtype(col, skipna=True) from sqlalchemy.types import ( TIMESTAMP, BigInteger, Boolean, Date, DateTime, Float, Integer, Text, Time, ) if col_type == "datetime64" or col_type == "datetime": # GH 9086: TIMESTAMP is the suggested type if the column contains # timezone information try: if col.dt.tz is not None: return TIMESTAMP(timezone=True) except AttributeError: # The column is actually a DatetimeIndex # GH 26761 or an Index with date-like data e.g. 9999-01-01 if getattr(col, "tz", None) is not None: return TIMESTAMP(timezone=True) return DateTime if col_type == "timedelta64": warnings.warn( "the 'timedelta' type is not supported, and will be " "written as integer values (ns frequency) to the database.", UserWarning, stacklevel=8, ) return BigInteger elif col_type == "floating": if col.dtype == "float32": return Float(precision=23) else: return Float(precision=53) elif col_type == "integer": if col.dtype == "int32": return Integer else: return BigInteger elif col_type == "boolean": return Boolean elif col_type == "date": return Date elif col_type == "time": return Time elif col_type == "complex": raise ValueError("Complex datatypes not supported") return Text def _get_dtype(self, sqltype): from sqlalchemy.types import TIMESTAMP, Boolean, Date, DateTime, Float, Integer if isinstance(sqltype, Float): return float elif isinstance(sqltype, Integer): # TODO: Refine integer size. return np.dtype("int64") elif isinstance(sqltype, TIMESTAMP): # we have a timezone capable type if not sqltype.timezone: return datetime return DatetimeTZDtype elif isinstance(sqltype, DateTime): # Caution: np.datetime64 is also a subclass of np.number. return datetime elif isinstance(sqltype, Date): return date elif isinstance(sqltype, Boolean): return bool return object class PandasSQL(PandasObject): """ Subclasses Should define read_sql and to_sql. """ def read_sql(self, *args, **kwargs): raise ValueError( "PandasSQL must be created with an SQLAlchemy " "connectable or sqlite connection" ) def to_sql(self, *args, **kwargs): raise ValueError( "PandasSQL must be created with an SQLAlchemy " "connectable or sqlite connection" ) class SQLDatabase(PandasSQL): """ This class enables conversion between DataFrame and SQL databases using SQLAlchemy to handle DataBase abstraction. Parameters ---------- engine : SQLAlchemy connectable Connectable to connect with the database. Using SQLAlchemy makes it possible to use any DB supported by that library. schema : string, default None Name of SQL schema in database to write to (if database flavor supports this). If None, use default schema (default). meta : SQLAlchemy MetaData object, default None If provided, this MetaData object is used instead of a newly created. This allows to specify database flavor specific arguments in the MetaData object. """ def __init__(self, engine, schema=None, meta=None): self.connectable = engine if not meta: from sqlalchemy.schema import MetaData meta = MetaData(self.connectable, schema=schema) self.meta = meta @contextmanager def run_transaction(self): with self.connectable.begin() as tx: if hasattr(tx, "execute"): yield tx else: yield self.connectable def execute(self, *args, **kwargs): """Simple passthrough to SQLAlchemy connectable""" return self.connectable.execution_options(no_parameters=True).execute( *args, **kwargs ) def read_table( self, table_name, index_col=None, coerce_float=True, parse_dates=None, columns=None, schema=None, chunksize=None, ): """ Read SQL database table into a DataFrame. 
Parameters ---------- table_name : string Name of SQL table in database. index_col : string, optional, default: None Column to set as index. coerce_float : boolean, default True Attempts to convert values of non-string, non-numeric objects (like decimal.Decimal) to floating point. This can result in loss of precision. parse_dates : list or dict, default: None - List of column names to parse as dates. - Dict of ``{column_name: format string}`` where format string is strftime compatible in case of parsing string times, or is one of (D, s, ns, ms, us) in case of parsing integer timestamps. - Dict of ``{column_name: arg}``, where the arg corresponds to the keyword arguments of :func:`pandas.to_datetime`. Especially useful with databases without native Datetime support, such as SQLite. columns : list, default: None List of column names to select from SQL table. schema : string, default None Name of SQL schema in database to query (if database flavor supports this). If specified, this overwrites the default schema of the SQL database object. chunksize : int, default None If specified, return an iterator where `chunksize` is the number of rows to include in each chunk. Returns ------- DataFrame See Also -------- pandas.read_sql_table SQLDatabase.read_query """ table = SQLTable(table_name, self, index=index_col, schema=schema) return table.read( coerce_float=coerce_float, parse_dates=parse_dates, columns=columns, chunksize=chunksize, ) @staticmethod def _query_iterator( result, chunksize, columns, index_col=None, coerce_float=True, parse_dates=None ): """Return generator through chunked result set""" while True: data = result.fetchmany(chunksize) if not data: break else: yield _wrap_result( data, columns, index_col=index_col, coerce_float=coerce_float, parse_dates=parse_dates, ) def read_query( self, sql, index_col=None, coerce_float=True, parse_dates=None, params=None, chunksize=None, ): """ Read SQL query into a DataFrame. Parameters ---------- sql : string SQL query to be executed. index_col : string, optional, default: None Column name to use as index for the returned DataFrame object. coerce_float : boolean, default True Attempt to convert values of non-string, non-numeric objects (like decimal.Decimal) to floating point, useful for SQL result sets. params : list, tuple or dict, optional, default: None List of parameters to pass to execute method. The syntax used to pass parameters is database driver dependent. Check your database driver documentation for which of the five syntax styles, described in PEP 249's paramstyle, is supported. Eg. for psycopg2, uses %(name)s so use params={'name' : 'value'} parse_dates : list or dict, default: None - List of column names to parse as dates. - Dict of ``{column_name: format string}`` where format string is strftime compatible in case of parsing string times, or is one of (D, s, ns, ms, us) in case of parsing integer timestamps. - Dict of ``{column_name: arg dict}``, where the arg dict corresponds to the keyword arguments of :func:`pandas.to_datetime` Especially useful with databases without native Datetime support, such as SQLite. chunksize : int, default None If specified, return an iterator where `chunksize` is the number of rows to include in each chunk. Returns ------- DataFrame See Also -------- read_sql_table : Read SQL database table into a DataFrame. 
read_sql """ args = _convert_params(sql, params) result = self.execute(*args) columns = result.keys() if chunksize is not None: return self._query_iterator( result, chunksize, columns, index_col=index_col, coerce_float=coerce_float, parse_dates=parse_dates, ) else: data = result.fetchall() frame = _wrap_result( data, columns, index_col=index_col, coerce_float=coerce_float, parse_dates=parse_dates, ) return frame read_sql = read_query def to_sql( self, frame, name, if_exists="fail", index=True, index_label=None, schema=None, chunksize=None, dtype=None, method=None, ): """ Write records stored in a DataFrame to a SQL database. Parameters ---------- frame : DataFrame name : string Name of SQL table. if_exists : {'fail', 'replace', 'append'}, default 'fail' - fail: If table exists, do nothing. - replace: If table exists, drop it, recreate it, and insert data. - append: If table exists, insert data. Create if it does not exist. index : boolean, default True Write DataFrame index as a column. index_label : string or sequence, default None Column label for index column(s). If None is given (default) and `index` is True, then the index names are used. A sequence should be given if the DataFrame uses MultiIndex. schema : string, default None Name of SQL schema in database to write to (if database flavor supports this). If specified, this overwrites the default schema of the SQLDatabase object. chunksize : int, default None If not None, then rows will be written in batches of this size at a time. If None, all rows will be written at once. dtype : single type or dict of column name to SQL type, default None Optionally, specify the datatype for columns. The SQL type should be a SQLAlchemy type. If all columns are of the same type, one single value can be used. method : {None, 'multi', callable}, default None Controls the SQL insertion clause used: * None : Uses standard SQL ``INSERT`` clause (one per row). * 'multi': Pass multiple values in a single ``INSERT`` clause. * callable with signature ``(pd_table, conn, keys, data_iter)``. Details and a sample callable implementation can be found in the section :ref:`insert method <io.sql.method>`. .. versionadded:: 0.24.0 """ if dtype and not is_dict_like(dtype): dtype = {col_name: dtype for col_name in frame} if dtype is not None: from sqlalchemy.types import TypeEngine, to_instance for col, my_type in dtype.items(): if not isinstance(to_instance(my_type), TypeEngine): raise ValueError(f"The type of {col} is not a SQLAlchemy type") table = SQLTable( name, self, frame=frame, index=index, if_exists=if_exists, index_label=index_label, schema=schema, dtype=dtype, ) table.create() from sqlalchemy import exc try: table.insert(chunksize, method=method) except exc.SQLAlchemyError as err: # GH34431 msg = "(1054, \"Unknown column 'inf' in 'field list'\")" err_text = str(err.orig) if re.search(msg, err_text): raise ValueError("inf cannot be used with MySQL") from err else: raise err if not name.isdigit() and not name.islower(): # check for potential case sensitivity issues (GH7815) # Only check when name is not a number and name is not lower case engine = self.connectable.engine with self.connectable.connect() as conn: table_names = engine.table_names( schema=schema or self.meta.schema, connection=conn ) if name not in table_names: msg = ( f"The provided table name '{name}' is not found exactly as " "such in the database after writing the table, possibly " "due to case sensitivity issues. Consider using lower " "case table names."
) warnings.warn(msg, UserWarning) @property def tables(self): return self.meta.tables def has_table(self, name, schema=None): return self.connectable.run_callable( self.connectable.dialect.has_table, name, schema or self.meta.schema ) def get_table(self, table_name, schema=None): schema = schema or self.meta.schema if schema: tbl = self.meta.tables.get(".".join([schema, table_name])) else: tbl = self.meta.tables.get(table_name) # Avoid casting double-precision floats into decimals from sqlalchemy import Numeric for column in tbl.columns: if isinstance(column.type, Numeric): column.type.asdecimal = False return tbl def drop_table(self, table_name, schema=None): schema = schema or self.meta.schema if self.has_table(table_name, schema): self.meta.reflect(only=[table_name], schema=schema) self.get_table(table_name, schema).drop() self.meta.clear() def _create_sql_schema(self, frame, table_name, keys=None, dtype=None): table = SQLTable( table_name, self, frame=frame, index=False, keys=keys, dtype=dtype ) return str(table.sql_schema()) # ---- SQL without SQLAlchemy --- # sqlite-specific sql strings and handler class # dictionary used for readability purposes _SQL_TYPES = { "string": "TEXT", "floating": "REAL", "integer": "INTEGER", "datetime": "TIMESTAMP", "date": "DATE", "time": "TIME", "boolean": "INTEGER", } def _get_unicode_name(name): try: uname = str(name).encode("utf-8", "strict").decode("utf-8") except UnicodeError as err: raise ValueError(f"Cannot convert identifier to UTF-8: '{name}'") from err return uname def _get_valid_sqlite_name(name): # See https://stackoverflow.com/questions/6514274/how-do-you-escape-strings\ # -for-sqlite-table-column-names-in-python # Ensure the string can be encoded as UTF-8. # Ensure the string does not include any NUL characters. # Replace all " with "". # Wrap the entire thing in double quotes. uname = _get_unicode_name(name) if not len(uname): raise ValueError("Empty table or column name specified") nul_index = uname.find("\x00") if nul_index >= 0: raise ValueError("SQLite identifier cannot contain NULs") return '"' + uname.replace('"', '""') + '"' _SAFE_NAMES_WARNING = ( "The spaces in these column names will not be changed. " "In pandas versions < 0.14, spaces were converted to underscores." ) class SQLiteTable(SQLTable): """ Patch the SQLTable for fallback support. Instead of a table variable just use the Create Table statement. """ def __init__(self, *args, **kwargs): # GH 8341 # register an adapter callable for datetime.time object import sqlite3 # this will transform time(12,34,56,789) into '12:34:56.000789' # (this is what sqlalchemy does) sqlite3.register_adapter(time, lambda _: _.strftime("%H:%M:%S.%f")) super().__init__(*args, **kwargs) def sql_schema(self): return str(";\n".join(self.table)) def _execute_create(self): with self.pd_sql.run_transaction() as conn: for stmt in self.table: conn.execute(stmt) def insert_statement(self, *, num_rows): names = list(map(str, self.frame.columns)) wld = "?" 
# wildcard char escape = _get_valid_sqlite_name if self.index is not None: for idx in self.index[::-1]: names.insert(0, idx) bracketed_names = [escape(column) for column in names] col_names = ",".join(bracketed_names) row_wildcards = ",".join([wld] * len(names)) wildcards = ",".join(f"({row_wildcards})" for _ in range(num_rows)) insert_statement = ( f"INSERT INTO {escape(self.name)} ({col_names}) VALUES {wildcards}" ) return insert_statement def _execute_insert(self, conn, keys, data_iter): data_list = list(data_iter) conn.executemany(self.insert_statement(num_rows=1), data_list) def _execute_insert_multi(self, conn, keys, data_iter): data_list = list(data_iter) flattened_data = [x for row in data_list for x in row] conn.execute(self.insert_statement(num_rows=len(data_list)), flattened_data) def _create_table_setup(self): """ Return a list of SQL statements that creates a table reflecting the structure of a DataFrame. The first entry will be a CREATE TABLE statement while the rest will be CREATE INDEX statements. """ column_names_and_types = self._get_column_names_and_types(self._sql_type_name) pat = re.compile(r"\s+") column_names = [col_name for col_name, _, _ in column_names_and_types] if any(map(pat.search, column_names)): warnings.warn(_SAFE_NAMES_WARNING, stacklevel=6) escape = _get_valid_sqlite_name create_tbl_stmts = [ escape(cname) + " " + ctype for cname, ctype, _ in column_names_and_types ] if self.keys is not None and len(self.keys): if not is_list_like(self.keys): keys = [self.keys] else: keys = self.keys cnames_br = ", ".join(escape(c) for c in keys) create_tbl_stmts.append( f"CONSTRAINT {self.name}_pk PRIMARY KEY ({cnames_br})" ) create_stmts = [ "CREATE TABLE " + escape(self.name) + " (\n" + ",\n ".join(create_tbl_stmts) + "\n)" ] ix_cols = [cname for cname, _, is_index in column_names_and_types if is_index] if len(ix_cols): cnames = "_".join(ix_cols) cnames_br = ",".join(escape(c) for c in ix_cols) create_stmts.append( "CREATE INDEX " + escape("ix_" + self.name + "_" + cnames) + "ON " + escape(self.name) + " (" + cnames_br + ")" ) return create_stmts def _sql_type_name(self, col): dtype = self.dtype or {} if col.name in dtype: return dtype[col.name] # Infer type of column, while ignoring missing values. # Needed for inserting typed data containing NULLs, GH 8778. col_type = lib.infer_dtype(col, skipna=True) if col_type == "timedelta64": warnings.warn( "the 'timedelta' type is not supported, and will be " "written as integer values (ns frequency) to the database.", UserWarning, stacklevel=8, ) col_type = "integer" elif col_type == "datetime64": col_type = "datetime" elif col_type == "empty": col_type = "string" elif col_type == "complex": raise ValueError("Complex datatypes not supported") if col_type not in _SQL_TYPES: col_type = "string" return _SQL_TYPES[col_type] class SQLiteDatabase(PandasSQL): """ Version of SQLDatabase to support SQLite connections (fallback without SQLAlchemy). This should only be used internally. 
Parameters ---------- con : sqlite connection object """ def __init__(self, con, is_cursor=False): self.is_cursor = is_cursor self.con = con @contextmanager def run_transaction(self): cur = self.con.cursor() try: yield cur self.con.commit() except Exception: self.con.rollback() raise finally: cur.close() def execute(self, *args, **kwargs): if self.is_cursor: cur = self.con else: cur = self.con.cursor() try: cur.execute(*args, **kwargs) return cur except Exception as exc: try: self.con.rollback() except Exception as inner_exc: # pragma: no cover ex = DatabaseError( f"Execution failed on sql: {args[0]}\n{exc}\nunable to rollback" ) raise ex from inner_exc ex = DatabaseError(f"Execution failed on sql '{args[0]}': {exc}") raise ex from exc @staticmethod def _query_iterator( cursor, chunksize, columns, index_col=None, coerce_float=True, parse_dates=None ): """Return generator through chunked result set""" while True: data = cursor.fetchmany(chunksize) if type(data) == tuple: data = list(data) if not data: cursor.close() break else: yield _wrap_result( data, columns, index_col=index_col, coerce_float=coerce_float, parse_dates=parse_dates, ) def read_query( self, sql, index_col=None, coerce_float=True, params=None, parse_dates=None, chunksize=None, ): args = _convert_params(sql, params) cursor = self.execute(*args) columns = [col_desc[0] for col_desc in cursor.description] if chunksize is not None: return self._query_iterator( cursor, chunksize, columns, index_col=index_col, coerce_float=coerce_float, parse_dates=parse_dates, ) else: data = self._fetchall_as_list(cursor) cursor.close() frame = _wrap_result( data, columns, index_col=index_col, coerce_float=coerce_float, parse_dates=parse_dates, ) return frame def _fetchall_as_list(self, cur): result = cur.fetchall() if not isinstance(result, list): result = list(result) return result def to_sql( self, frame, name, if_exists="fail", index=True, index_label=None, schema=None, chunksize=None, dtype=None, method=None, ): """ Write records stored in a DataFrame to a SQL database. Parameters ---------- frame: DataFrame name: string Name of SQL table. if_exists: {'fail', 'replace', 'append'}, default 'fail' fail: If table exists, do nothing. replace: If table exists, drop it, recreate it, and insert data. append: If table exists, insert data. Create if it does not exist. index : boolean, default True Write DataFrame index as a column index_label : string or sequence, default None Column label for index column(s). If None is given (default) and `index` is True, then the index names are used. A sequence should be given if the DataFrame uses MultiIndex. schema : string, default None Ignored parameter included for compatibility with SQLAlchemy version of ``to_sql``. chunksize : int, default None If not None, then rows will be written in batches of this size at a time. If None, all rows will be written at once. dtype : single type or dict of column name to SQL type, default None Optional specifying the datatype for columns. The SQL type should be a string. If all columns are of the same type, one single value can be used. method : {None, 'multi', callable}, default None Controls the SQL insertion clause used: * None : Uses standard SQL ``INSERT`` clause (one per row). * 'multi': Pass multiple values in a single ``INSERT`` clause. * callable with signature ``(pd_table, conn, keys, data_iter)``. Details and a sample callable implementation can be found in the section :ref:`insert method <io.sql.method>`. .. 
versionadded:: 0.24.0 """ if dtype and not is_dict_like(dtype): dtype = {col_name: dtype for col_name in frame} if dtype is not None: for col, my_type in dtype.items(): if not isinstance(my_type, str): raise ValueError(f"{col} ({my_type}) not a string") table = SQLiteTable( name, self, frame=frame, index=index, if_exists=if_exists, index_label=index_label, dtype=dtype, ) table.create() table.insert(chunksize, method) def has_table(self, name, schema=None): # TODO(wesm): unused? # escape = _get_valid_sqlite_name # esc_name = escape(name) wld = "?" query = f"SELECT name FROM sqlite_master WHERE type='table' AND name={wld};" return len(self.execute(query, [name]).fetchall()) > 0 def get_table(self, table_name, schema=None): return None # not supported in fallback mode def drop_table(self, name, schema=None): drop_sql = f"DROP TABLE {_get_valid_sqlite_name(name)}" self.execute(drop_sql) def _create_sql_schema(self, frame, table_name, keys=None, dtype=None): table = SQLiteTable( table_name, self, frame=frame, index=False, keys=keys, dtype=dtype ) return str(table.sql_schema()) def get_schema(frame, name, keys=None, con=None, dtype=None): """ Get the SQL db table schema for the given frame. Parameters ---------- frame : DataFrame name : string name of SQL table keys : string or sequence, default: None columns to use as primary key con: an open SQL database connection object or a SQLAlchemy connectable Using SQLAlchemy makes it possible to use any DB supported by that library, default: None If a DBAPI2 object, only sqlite3 is supported. dtype : dict of column name to SQL type, default None Optionally, specify the datatype for columns. The SQL type should be a SQLAlchemy type, or a string for sqlite3 fallback connection. """ pandas_sql = pandasSQL_builder(con=con) return pandas_sql._create_sql_schema(frame, name, keys=keys, dtype=dtype)
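# Added usage sketch (not part of pandas itself; the table name "demo" and the
# sample frame are made up): exercising the public helpers above through the
# sqlite3 fallback path, i.e. SQLiteDatabase/SQLiteTable rather than SQLAlchemy.
import sqlite3
import pandas as pd
from pandas.io.sql import get_schema

df = pd.DataFrame({"a": [1, 2], "b": [0.5, None]})
con = sqlite3.connect(":memory:")
print(get_schema(df, "demo", con=con))   # CREATE TABLE statement inferred from dtypes
df.to_sql("demo", con, index=False, if_exists="replace")
print(con.execute("SELECT * FROM demo").fetchall())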
bsd-3-clause
AmperificSuperKANG/lge-kernel-loki
tools/perf/scripts/python/sched-migration.py
11215
11670
#!/usr/bin/python # # Cpu task migration overview toy # # Copyright (C) 2010 Frederic Weisbecker <[email protected]> # # perf script event handlers have been generated by perf script -g python # # This software is distributed under the terms of the GNU General # Public License ("GPL") version 2 as published by the Free Software # Foundation. import os import sys from collections import defaultdict from UserList import UserList sys.path.append(os.environ['PERF_EXEC_PATH'] + \ '/scripts/python/Perf-Trace-Util/lib/Perf/Trace') sys.path.append('scripts/python/Perf-Trace-Util/lib/Perf/Trace') from perf_trace_context import * from Core import * from SchedGui import * threads = { 0 : "idle"} def thread_name(pid): return "%s:%d" % (threads[pid], pid) class RunqueueEventUnknown: @staticmethod def color(): return None def __repr__(self): return "unknown" class RunqueueEventSleep: @staticmethod def color(): return (0, 0, 0xff) def __init__(self, sleeper): self.sleeper = sleeper def __repr__(self): return "%s gone to sleep" % thread_name(self.sleeper) class RunqueueEventWakeup: @staticmethod def color(): return (0xff, 0xff, 0) def __init__(self, wakee): self.wakee = wakee def __repr__(self): return "%s woke up" % thread_name(self.wakee) class RunqueueEventFork: @staticmethod def color(): return (0, 0xff, 0) def __init__(self, child): self.child = child def __repr__(self): return "new forked task %s" % thread_name(self.child) class RunqueueMigrateIn: @staticmethod def color(): return (0, 0xf0, 0xff) def __init__(self, new): self.new = new def __repr__(self): return "task migrated in %s" % thread_name(self.new) class RunqueueMigrateOut: @staticmethod def color(): return (0xff, 0, 0xff) def __init__(self, old): self.old = old def __repr__(self): return "task migrated out %s" % thread_name(self.old) class RunqueueSnapshot: def __init__(self, tasks = [0], event = RunqueueEventUnknown()): self.tasks = tuple(tasks) self.event = event def sched_switch(self, prev, prev_state, next): event = RunqueueEventUnknown() if taskState(prev_state) == "R" and next in self.tasks \ and prev in self.tasks: return self if taskState(prev_state) != "R": event = RunqueueEventSleep(prev) next_tasks = list(self.tasks[:]) if prev in self.tasks: if taskState(prev_state) != "R": next_tasks.remove(prev) elif taskState(prev_state) == "R": next_tasks.append(prev) if next not in next_tasks: next_tasks.append(next) return RunqueueSnapshot(next_tasks, event) def migrate_out(self, old): if old not in self.tasks: return self next_tasks = [task for task in self.tasks if task != old] return RunqueueSnapshot(next_tasks, RunqueueMigrateOut(old)) def __migrate_in(self, new, event): if new in self.tasks: self.event = event return self next_tasks = self.tasks[:] + tuple([new]) return RunqueueSnapshot(next_tasks, event) def migrate_in(self, new): return self.__migrate_in(new, RunqueueMigrateIn(new)) def wake_up(self, new): return self.__migrate_in(new, RunqueueEventWakeup(new)) def wake_up_new(self, new): return self.__migrate_in(new, RunqueueEventFork(new)) def load(self): """ Provide the number of tasks on the runqueue. 
Don't count idle""" return len(self.tasks) - 1 def __repr__(self): ret = self.tasks.__repr__() ret += self.origin_tostring() return ret class TimeSlice: def __init__(self, start, prev): self.start = start self.prev = prev self.end = start # cpus that triggered the event self.event_cpus = [] if prev is not None: self.total_load = prev.total_load self.rqs = prev.rqs.copy() else: self.rqs = defaultdict(RunqueueSnapshot) self.total_load = 0 def __update_total_load(self, old_rq, new_rq): diff = new_rq.load() - old_rq.load() self.total_load += diff def sched_switch(self, ts_list, prev, prev_state, next, cpu): old_rq = self.prev.rqs[cpu] new_rq = old_rq.sched_switch(prev, prev_state, next) if old_rq is new_rq: return self.rqs[cpu] = new_rq self.__update_total_load(old_rq, new_rq) ts_list.append(self) self.event_cpus = [cpu] def migrate(self, ts_list, new, old_cpu, new_cpu): if old_cpu == new_cpu: return old_rq = self.prev.rqs[old_cpu] out_rq = old_rq.migrate_out(new) self.rqs[old_cpu] = out_rq self.__update_total_load(old_rq, out_rq) new_rq = self.prev.rqs[new_cpu] in_rq = new_rq.migrate_in(new) self.rqs[new_cpu] = in_rq self.__update_total_load(new_rq, in_rq) ts_list.append(self) if old_rq is not out_rq: self.event_cpus.append(old_cpu) self.event_cpus.append(new_cpu) def wake_up(self, ts_list, pid, cpu, fork): old_rq = self.prev.rqs[cpu] if fork: new_rq = old_rq.wake_up_new(pid) else: new_rq = old_rq.wake_up(pid) if new_rq is old_rq: return self.rqs[cpu] = new_rq self.__update_total_load(old_rq, new_rq) ts_list.append(self) self.event_cpus = [cpu] def next(self, t): self.end = t return TimeSlice(t, self) class TimeSliceList(UserList): def __init__(self, arg = []): self.data = arg def get_time_slice(self, ts): if len(self.data) == 0: slice = TimeSlice(ts, TimeSlice(-1, None)) else: slice = self.data[-1].next(ts) return slice def find_time_slice(self, ts): start = 0 end = len(self.data) found = -1 searching = True while searching: if start == end or start == end - 1: searching = False i = (end + start) / 2 if self.data[i].start <= ts and self.data[i].end >= ts: found = i end = i continue if self.data[i].end < ts: start = i elif self.data[i].start > ts: end = i return found def set_root_win(self, win): self.root_win = win def mouse_down(self, cpu, t): idx = self.find_time_slice(t) if idx == -1: return ts = self[idx] rq = ts.rqs[cpu] raw = "CPU: %d\n" % cpu raw += "Last event : %s\n" % rq.event.__repr__() raw += "Timestamp : %d.%06d\n" % (ts.start / (10 ** 9), (ts.start % (10 ** 9)) / 1000) raw += "Duration : %6d us\n" % ((ts.end - ts.start) / (10 ** 6)) raw += "Load = %d\n" % rq.load() for t in rq.tasks: raw += "%s \n" % thread_name(t) self.root_win.update_summary(raw) def update_rectangle_cpu(self, slice, cpu): rq = slice.rqs[cpu] if slice.total_load != 0: load_rate = rq.load() / float(slice.total_load) else: load_rate = 0 red_power = int(0xff - (0xff * load_rate)) color = (0xff, red_power, red_power) top_color = None if cpu in slice.event_cpus: top_color = rq.event.color() self.root_win.paint_rectangle_zone(cpu, color, top_color, slice.start, slice.end) def fill_zone(self, start, end): i = self.find_time_slice(start) if i == -1: return for i in xrange(i, len(self.data)): timeslice = self.data[i] if timeslice.start > end: return for cpu in timeslice.rqs: self.update_rectangle_cpu(timeslice, cpu) def interval(self): if len(self.data) == 0: return (0, 0) return (self.data[0].start, self.data[-1].end) def nr_rectangles(self): last_ts = self.data[-1] max_cpu = 0 for cpu in last_ts.rqs: if cpu > 
max_cpu: max_cpu = cpu return max_cpu class SchedEventProxy: def __init__(self): self.current_tsk = defaultdict(lambda : -1) self.timeslices = TimeSliceList() def sched_switch(self, headers, prev_comm, prev_pid, prev_prio, prev_state, next_comm, next_pid, next_prio): """ Ensure the task we sched out this cpu is really the one we logged. Otherwise we may have missed traces """ on_cpu_task = self.current_tsk[headers.cpu] if on_cpu_task != -1 and on_cpu_task != prev_pid: print "Sched switch event rejected ts: %s cpu: %d prev: %s(%d) next: %s(%d)" % \ (headers.ts_format(), headers.cpu, prev_comm, prev_pid, next_comm, next_pid) threads[prev_pid] = prev_comm threads[next_pid] = next_comm self.current_tsk[headers.cpu] = next_pid ts = self.timeslices.get_time_slice(headers.ts()) ts.sched_switch(self.timeslices, prev_pid, prev_state, next_pid, headers.cpu) def migrate(self, headers, pid, prio, orig_cpu, dest_cpu): ts = self.timeslices.get_time_slice(headers.ts()) ts.migrate(self.timeslices, pid, orig_cpu, dest_cpu) def wake_up(self, headers, comm, pid, success, target_cpu, fork): if success == 0: return ts = self.timeslices.get_time_slice(headers.ts()) ts.wake_up(self.timeslices, pid, target_cpu, fork) def trace_begin(): global parser parser = SchedEventProxy() def trace_end(): app = wx.App(False) timeslices = parser.timeslices frame = RootFrame(timeslices, "Migration") app.MainLoop() def sched__sched_stat_runtime(event_name, context, common_cpu, common_secs, common_nsecs, common_pid, common_comm, comm, pid, runtime, vruntime): pass def sched__sched_stat_iowait(event_name, context, common_cpu, common_secs, common_nsecs, common_pid, common_comm, comm, pid, delay): pass def sched__sched_stat_sleep(event_name, context, common_cpu, common_secs, common_nsecs, common_pid, common_comm, comm, pid, delay): pass def sched__sched_stat_wait(event_name, context, common_cpu, common_secs, common_nsecs, common_pid, common_comm, comm, pid, delay): pass def sched__sched_process_fork(event_name, context, common_cpu, common_secs, common_nsecs, common_pid, common_comm, parent_comm, parent_pid, child_comm, child_pid): pass def sched__sched_process_wait(event_name, context, common_cpu, common_secs, common_nsecs, common_pid, common_comm, comm, pid, prio): pass def sched__sched_process_exit(event_name, context, common_cpu, common_secs, common_nsecs, common_pid, common_comm, comm, pid, prio): pass def sched__sched_process_free(event_name, context, common_cpu, common_secs, common_nsecs, common_pid, common_comm, comm, pid, prio): pass def sched__sched_migrate_task(event_name, context, common_cpu, common_secs, common_nsecs, common_pid, common_comm, comm, pid, prio, orig_cpu, dest_cpu): headers = EventHeaders(common_cpu, common_secs, common_nsecs, common_pid, common_comm) parser.migrate(headers, pid, prio, orig_cpu, dest_cpu) def sched__sched_switch(event_name, context, common_cpu, common_secs, common_nsecs, common_pid, common_comm, prev_comm, prev_pid, prev_prio, prev_state, next_comm, next_pid, next_prio): headers = EventHeaders(common_cpu, common_secs, common_nsecs, common_pid, common_comm) parser.sched_switch(headers, prev_comm, prev_pid, prev_prio, prev_state, next_comm, next_pid, next_prio) def sched__sched_wakeup_new(event_name, context, common_cpu, common_secs, common_nsecs, common_pid, common_comm, comm, pid, prio, success, target_cpu): headers = EventHeaders(common_cpu, common_secs, common_nsecs, common_pid, common_comm) parser.wake_up(headers, comm, pid, success, target_cpu, 1) def sched__sched_wakeup(event_name, 
context, common_cpu, common_secs, common_nsecs, common_pid, common_comm, comm, pid, prio, success, target_cpu): headers = EventHeaders(common_cpu, common_secs, common_nsecs, common_pid, common_comm) parser.wake_up(headers, comm, pid, success, target_cpu, 0) def sched__sched_wait_task(event_name, context, common_cpu, common_secs, common_nsecs, common_pid, common_comm, comm, pid, prio): pass def sched__sched_kthread_stop_ret(event_name, context, common_cpu, common_secs, common_nsecs, common_pid, common_comm, ret): pass def sched__sched_kthread_stop(event_name, context, common_cpu, common_secs, common_nsecs, common_pid, common_comm, comm, pid): pass def trace_unhandled(event_name, context, common_cpu, common_secs, common_nsecs, common_pid, common_comm): pass
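# Added illustration (synthetic pids and timestamps; assumes only the classes
# defined above and drives them by hand, without perf): how TimeSliceList and
# RunqueueSnapshot accumulate scheduler events.
ts_list = TimeSliceList([])
first = ts_list.get_time_slice(1000)
first.wake_up(ts_list, pid=42, cpu=0, fork=1)          # task 42 forked onto cpu 0
second = ts_list.get_time_slice(2000)
second.migrate(ts_list, new=42, old_cpu=0, new_cpu=1)  # then migrated to cpu 1
print ts_list.find_time_slice(1500)                    # -> 0, the covering slice
# In normal use these structures are fed by perf's scripting engine over a
# recording of the sched:* tracepoints, roughly (command line is an assumption):
#   perf script -s tools/perf/scripts/python/sched-migration.py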
gpl-2.0
JuanjoA/l10n-spain
l10n_es_aeat_mod340/__init__.py
14
1171
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (c) 2011 Ting (http://www.ting.es) # Copyright (c) 2011-2013 Acysos S.L. (http://acysos.com) # Ignacio Ibeas Izquierdo <[email protected]> # # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as published # by the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## from openerp import report from . import wizard from . import models
agpl-3.0
jaredly/django-feedback
test_settings.py
2
5566
# Django settings for demoproject project. from os.path import abspath, dirname, join demoproject_dir = dirname(abspath(__file__)) DEBUG = True ADMINS = ( # ('Your Name', '[email protected]'), ) MANAGERS = ADMINS DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'. 'NAME': 'test.sqlite3', # Or path to database file if using sqlite3. 'USER': '', # Not used with sqlite3. 'PASSWORD': '', # Not used with sqlite3. 'HOST': '', # Set to empty string for localhost. Not used with sqlite3. 'PORT': '', # Set to empty string for default. Not used with sqlite3. } } # Local time zone for this installation. Choices can be found here: # http://en.wikipedia.org/wiki/List_of_tz_zones_by_name # although not all choices may be available on all operating systems. # In a Windows environment this must be set to your system time zone. TIME_ZONE = 'America/Chicago' # Language code for this installation. All choices can be found here: # http://www.i18nguy.com/unicode/language-identifiers.html LANGUAGE_CODE = 'en-us' SITE_ID = 1 # If you set this to False, Django will make some optimizations so as not # to load the internationalization machinery. USE_I18N = True # If you set this to False, Django will not format dates, numbers and # calendars according to the current locale. USE_L10N = True # If you set this to False, Django will not use timezone-aware datetimes. USE_TZ = True # Absolute filesystem path to the directory that will hold user-uploaded files. # Example: "/home/media/media.lawrence.com/media/" MEDIA_ROOT = '' # URL that handles the media served from MEDIA_ROOT. Make sure to use a # trailing slash. # Examples: "http://media.lawrence.com/media/", "http://example.com/media/" MEDIA_URL = '' # Absolute path to the directory static files should be collected to. # Don't put anything in this directory yourself; store your static files # in apps' "static/" subdirectories and in STATICFILES_DIRS. # Example: "/home/media/media.lawrence.com/static/" STATIC_ROOT = '' # URL prefix for static files. # Example: "http://media.lawrence.com/static/" STATIC_URL = '/static/' # Additional locations of static files STATICFILES_DIRS = ( # Put strings here, like "/home/html/static" or "C:/www/django/static". # Always use forward slashes, even on Windows. # Don't forget to use absolute paths, not relative paths. ) # List of finder classes that know how to find static files in # various locations. STATICFILES_FINDERS = ( 'django.contrib.staticfiles.finders.FileSystemFinder', 'django.contrib.staticfiles.finders.AppDirectoriesFinder', # 'django.contrib.staticfiles.finders.DefaultStorageFinder', ) # Make this unique, and don't share it with anybody. SECRET_KEY = 'k9i055*z@6@9$7xvyw(8y4sk_w0@1ltf2$y-^zu^&asfasfww' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'APP_DIRS': True, }, ] MIDDLEWARE_CLASSES = ( 'django.middleware.common.CommonMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', # Uncomment the next line for simple clickjacking protection: # 'django.middleware.clickjacking.XFrameOptionsMiddleware', ) ROOT_URLCONF = 'feedback.urls' # Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'demoproject.wsgi.application' INSTALLED_APPS = ( 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.sites', 'django.contrib.messages', 'django.contrib.staticfiles', 'feedback', # Uncomment the next line to enable the admin: # 'django.contrib.admin', # Uncomment the next line to enable admin documentation: # 'django.contrib.admindocs', ) FEEDBACK_EMAIL = "[email protected]" # A sample logging configuration. The only tangible logging # performed by this configuration is to send an email to # the site admins on every HTTP 500 error when DEBUG=False. # See http://docs.djangoproject.com/en/dev/topics/logging for # more details on how to customize your logging configuration. LOGGING = { 'version': 1, 'disable_existing_loggers': False, 'filters': { 'require_debug_false': { '()': 'django.utils.log.RequireDebugFalse' } }, 'formatters': { 'verbose': { 'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s' }, 'simple': { 'format': '\033[22;32m%(levelname)s\033[0;0m %(message)s' }, }, 'handlers': { 'mail_admins': { 'level': 'ERROR', 'filters': ['require_debug_false'], 'class': 'django.utils.log.AdminEmailHandler' }, 'console': { 'level': 'DEBUG', 'class': 'logging.StreamHandler', 'formatter': 'simple' }, }, 'loggers': { 'django.request': { 'handlers': ['mail_admins'], 'level': 'ERROR', 'propagate': True, }, 'javascript_error': { 'handlers': ['mail_admins', 'console'], 'level': 'ERROR', 'propagate': True, }, } }
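# Added sketch (assumes Django is installed so the handler and filter classes
# referenced in LOGGING resolve; the message text is invented): this module is
# normally selected via --settings=test_settings or DJANGO_SETTINGS_MODULE,
# but the LOGGING dict can also be wired up directly for a standalone check.
import logging.config
logging.config.dictConfig(LOGGING)
logging.getLogger('javascript_error').error('Uncaught ReferenceError: x is not defined')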
mit
IKholopov/HackUPC2017
hackupc/env/lib/python3.5/site-packages/social_core/backends/odnoklassniki.py
4
7024
""" Odnoklassniki OAuth2 and Iframe Application backends, docs at: https://python-social-auth.readthedocs.io/en/latest/backends/odnoklassnikiru.html """ from hashlib import md5 from six.moves.urllib_parse import unquote from .base import BaseAuth from .oauth import BaseOAuth2 from ..exceptions import AuthFailed class OdnoklassnikiOAuth2(BaseOAuth2): """Odnoklassniki authentication backend""" name = 'odnoklassniki-oauth2' ID_KEY = 'uid' ACCESS_TOKEN_METHOD = 'POST' SCOPE_SEPARATOR = ';' AUTHORIZATION_URL = 'https://connect.ok.ru/oauth/authorize' ACCESS_TOKEN_URL = 'https://api.ok.ru/oauth/token.do' EXTRA_DATA = [('refresh_token', 'refresh_token'), ('expires_in', 'expires')] def get_user_details(self, response): """Return user details from Odnoklassniki request""" fullname, first_name, last_name = self.get_user_names( fullname=unquote(response['name']), first_name=unquote(response['first_name']), last_name=unquote(response['last_name']) ) return { 'username': response['uid'], 'email': response.get('email', ''), 'fullname': fullname, 'first_name': first_name, 'last_name': last_name } def user_data(self, access_token, *args, **kwargs): """Return user data from Odnoklassniki REST API""" data = {'access_token': access_token, 'method': 'users.getCurrentUser'} key, secret = self.get_key_and_secret() public_key = self.setting('PUBLIC_NAME') return odnoklassniki_api(self, data, 'https://api.ok.ru/', public_key, secret, 'oauth') class OdnoklassnikiApp(BaseAuth): """Odnoklassniki iframe app authentication backend""" name = 'odnoklassniki-app' ID_KEY = 'uid' def extra_data(self, user, uid, response, details=None, *args, **kwargs): return dict([(key, value) for key, value in response.items() if key in response['extra_data_list']]) def get_user_details(self, response): fullname, first_name, last_name = self.get_user_names( fullname=unquote(response['name']), first_name=unquote(response['first_name']), last_name=unquote(response['last_name']) ) return { 'username': response['uid'], 'email': '', 'fullname': fullname, 'first_name': first_name, 'last_name': last_name } def auth_complete(self, *args, **kwargs): self.verify_auth_sig() response = self.get_response() fields = ('uid', 'first_name', 'last_name', 'name') + \ self.setting('EXTRA_USER_DATA_LIST', ()) data = { 'method': 'users.getInfo', 'uids': '{0}'.format(response['logged_user_id']), 'fields': ','.join(fields), } client_key, client_secret = self.get_key_and_secret() public_key = self.setting('PUBLIC_NAME') details = odnoklassniki_api(self, data, response['api_server'], public_key, client_secret, 'iframe_nosession') if len(details) == 1 and 'uid' in details[0]: details = details[0] auth_data_fields = self.setting('EXTRA_AUTH_DATA_LIST', ('api_server', 'apiconnection', 'session_key', 'authorized', 'session_secret_key')) for field in auth_data_fields: details[field] = response[field] details['extra_data_list'] = fields + auth_data_fields kwargs.update({'backend': self, 'response': details}) else: raise AuthFailed(self, 'Cannot get user details: API error') return self.strategy.authenticate(*args, **kwargs) def get_auth_sig(self): secret_key = self.setting('SECRET') hash_source = '{0:s}{1:s}{2:s}'.format(self.data['logged_user_id'], self.data['session_key'], secret_key) return md5(hash_source.encode('utf-8')).hexdigest() def get_response(self): fields = ('logged_user_id', 'api_server', 'application_key', 'session_key', 'session_secret_key', 'authorized', 'apiconnection') return dict((name, self.data[name]) for name in fields if name in self.data) def 
verify_auth_sig(self): correct_key = self.get_auth_sig() key = self.data['auth_sig'].lower() if correct_key != key: raise AuthFailed(self, 'Wrong authorization key') def odnoklassniki_oauth_sig(data, client_secret): """ Calculates signature of request data access_token value must be included Algorithm is described at https://apiok.ru/wiki/pages/viewpage.action?pageId=12878032, search for "little bit different way" """ suffix = md5( '{0:s}{1:s}'.format(data['access_token'], client_secret).encode('utf-8') ).hexdigest() check_list = sorted(['{0:s}={1:s}'.format(key, value) for key, value in data.items() if key != 'access_token']) return md5((''.join(check_list) + suffix).encode('utf-8')).hexdigest() def odnoklassniki_iframe_sig(data, client_secret_or_session_secret): """ Calculates signature as described at: https://apiok.ru/wiki/display/ok/Authentication+and+Authorization If API method requires session context, request is signed with session secret key. Otherwise it is signed with application secret key """ param_list = sorted(['{0:s}={1:s}'.format(key, value) for key, value in data.items()]) return md5( (''.join(param_list) + client_secret_or_session_secret).encode('utf-8') ).hexdigest() def odnoklassniki_api(backend, data, api_url, public_key, client_secret, request_type='oauth'): """Calls Odnoklassniki REST API method https://apiok.ru/wiki/display/ok/Odnoklassniki+Rest+API""" data.update({ 'application_key': public_key, 'format': 'JSON' }) if request_type == 'oauth': data['sig'] = odnoklassniki_oauth_sig(data, client_secret) elif request_type == 'iframe_session': data['sig'] = odnoklassniki_iframe_sig(data, data['session_secret_key']) elif request_type == 'iframe_nosession': data['sig'] = odnoklassniki_iframe_sig(data, client_secret) else: msg = 'Unknown request type {0}. How should it be signed?' raise AuthFailed(backend, msg.format(request_type)) return backend.get_json(api_url + 'fb.do', params=data)
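# Added worked example (token, key, and secret are invented): computing an
# OAuth request signature with odnoklassniki_oauth_sig above. The sorted
# "key=value" pairs (access_token excluded) are concatenated and salted with
# md5(access_token + client_secret), then hashed again.
data = {
    'access_token': 'token123',
    'application_key': 'CBAXXXXXX',
    'method': 'users.getCurrentUser',
    'format': 'JSON',
}
print(odnoklassniki_oauth_sig(data, 'app-secret'))  # deterministic md5 hexdigest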
apache-2.0
jtoppins/beaker
Server/bkr/server/pools.py
1
18936
# This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. import re from flask import jsonify, request from bkr.server import identity from bkr.server.app import app from bkr.server.model import System, SystemPool, SystemAccessPolicy, \ SystemAccessPolicyRule, User, Group, SystemPermission, Activity from bkr.server.flask_util import auth_required, \ convert_internal_errors, read_json_request, BadRequest400, \ Forbidden403, MethodNotAllowed405, NotFound404, Conflict409, \ UnsupportedMediaType415, request_wants_json, render_tg_template, \ json_collection from bkr.server.util import absolute_url from sqlalchemy.orm import contains_eager from sqlalchemy.orm.exc import NoResultFound from turbogears.database import session from bkr.server.systems import _get_system_by_FQDN, _edit_access_policy_rules import datetime from bkr.server.bexceptions import DatabaseLookupError @app.route('/pools/', methods=['GET']) def get_pools(): """ Returns a pageable JSON collection of system pools in Beaker. Refer to :ref:`pageable-json-collections`. The following fields are supported for filtering and sorting: ``id`` ID of the pool. ``name`` Name of the pool. ``owner.user_name`` Username of the pool owner (if the pool is owned by a user rather than by a group). ``owner.group_name`` Name of the pool's owning group (if the pool is owned by a group rather than by a user). """ query = SystemPool.query.order_by(SystemPool.name) # join User and Group for sorting/filtering and also for eager loading query = query\ .outerjoin(SystemPool.owning_user)\ .options(contains_eager(SystemPool.owning_user))\ .outerjoin(SystemPool.owning_group)\ .options(contains_eager(SystemPool.owning_group)) json_result = json_collection(query, columns={ 'id': SystemPool.id, 'name': SystemPool.name, 'owner.user_name': User.user_name, 'owner.group_name': Group.group_name, }) if request_wants_json(): return jsonify(json_result) return render_tg_template('bkr.server.templates.backgrid', { 'title': u'Pools', 'grid_collection_type': 'SystemPools', 'grid_collection_data': json_result, 'grid_collection_url': request.path, 'grid_view_type': 'PoolsView', 'grid_add_label': 'Create', 'grid_add_view_type': 'PoolCreateModal' if not identity.current.anonymous else 'null', }) _typeahead_split_pattern = re.compile(r'[-\s]+') @app.route('/pools/+typeahead') def pools_typeahead(): if 'q' in request.args: pools = SystemPool.query.filter(SystemPool.name.like('%%%s%%' % request.args['q'])) else: pools = SystemPool.query data = [{'name': pool.name, 'tokens': _typeahead_split_pattern.split(pool.name.strip())} for pool in pools.values(SystemPool.name)] return jsonify(data=data) def _get_pool_by_name(pool_name, lockmode=False): """Get system pool by name, reporting HTTP 404 if the system pool is not found""" try: return SystemPool.by_name(pool_name, lockmode) except NoResultFound: raise NotFound404('System pool %s does not exist' % pool_name) @app.route('/pools/<pool_name>/', methods=['GET']) def get_pool(pool_name): """ Provides detailed information about a system pool in JSON format. :param pool_name: System pool's name. 
""" pool = _get_pool_by_name(pool_name) if request_wants_json(): return jsonify(pool.__json__()) return render_tg_template('bkr.server.templates.system_pool', { 'title': pool.name, 'system_pool': pool, }) def _get_owner(data): if data is None: data = {} user_name = data.get('user_name') group_name = data.get('group_name') if user_name and group_name: raise Forbidden403('System pool can have either an user or a group as owner') if user_name: owner = User.by_user_name(user_name) if owner is None: raise BadRequest400('No such user %s' % user_name) owner_type = 'user' if group_name: try: owner = Group.by_name(group_name) except NoResultFound: raise BadRequest400('No such group %r' % group_name) owner_type = 'group' return owner, owner_type @app.route('/pools/', methods=['POST']) @auth_required def create_pool(): """ Creates a new system pool in Beaker. The request must be :mimetype:`application/x-www-form-urlencoded` or :mimetype:`application/json`. :jsonparam string name: Name for the system pool. :jsonparam string description: Description of the system pool. :jsonparam object owner: JSON object containing a ``user_name`` key or ``group_name`` key identifying the owner for the system pool. :status 201: The system pool was successfully created. """ owner = None description = None u = identity.current.user if request.json: if 'name' not in request.json: raise BadRequest400('Missing pool name key') new_name = request.json['name'] if 'owner' in request.json: owner = request.json['owner'] if 'description' in request.json: description = request.json['description'] elif request.form: if 'name' not in request.form: raise BadRequest400('Missing pool name parameter') new_name = request.form['name'] if 'owner' in request.form: owner = request.form['owner'] if 'description' in request.form: description = request.form['description'] else: raise UnsupportedMediaType415 with convert_internal_errors(): if SystemPool.query.filter(SystemPool.name == new_name).count() != 0: raise Conflict409('System pool with name %r already exists' % new_name) pool = SystemPool(name=new_name, description=description) session.add(pool) if owner: owner, owner_type = _get_owner(owner) if owner_type == 'user': pool.owning_user = owner else: pool.owning_group = owner else: pool.owning_user = u # new systems pool are visible to everybody by default pool.access_policy = SystemAccessPolicy() pool.access_policy.add_rule(SystemPermission.view, everybody=True) pool.record_activity(user=u, service=u'HTTP', action=u'Created', field=u'Pool', new=unicode(pool)) response = jsonify(pool.__json__()) response.status_code = 201 response.headers.add('Location', absolute_url(pool.href)) return response @app.route('/pools/<pool_name>/', methods=['PATCH']) @auth_required def update_pool(pool_name): """ Updates attributes of an existing system pool. The request body must be a JSON object containing one or more of the following keys. :param pool_name: System pool's name. :jsonparam string name: New name for the system pool. :jsonparam string description: Description of the system pool. :jsonparam object owner: JSON object containing a ``user_name`` key or ``group_name`` key identifying the new owner for the system pool. :status 200: System pool was updated. :status 400: Invalid data was given. 
""" pool = _get_pool_by_name(pool_name) if not pool.can_edit(identity.current.user): raise Forbidden403('Cannot edit system pool') data = read_json_request(request) # helper for recording activity below def record_activity(field, old, new, action=u'Changed'): pool.record_activity(user=identity.current.user, service=u'HTTP', action=action, field=field, old=old, new=new) with convert_internal_errors(): renamed = False if 'name' in data: new_name = data['name'] if new_name != pool.name: if SystemPool.query.filter(SystemPool.name == new_name).count(): raise Conflict409('System pool %s already exists' % new_name) record_activity(u'Name', pool.name, new_name) pool.name = new_name renamed = True if 'description' in data: new_description = data['description'] if new_description != pool.description: record_activity(u'Description', pool.description, new_description) pool.description = new_description if 'owner' in data: new_owner, owner_type = _get_owner(data['owner']) if owner_type == 'user': pool.change_owner(user=new_owner) else: pool.change_owner(group=new_owner) response = jsonify(pool.__json__()) if renamed: response.headers.add('Location', absolute_url(pool.href)) return response # For compat only. Separate function so that it doesn't appear in the docs. @app.route('/pools/<pool_name>/', methods=['POST']) def update_system_pool_post(pool_name): return update_pool(pool_name) @app.route('/pools/<pool_name>/systems/', methods=['POST']) @auth_required def add_system_to_pool(pool_name): """ Add a system to a system pool :param pool_name: System pool's name. :jsonparam fqdn: System's fully-qualified domain name. """ u = identity.current.user data = read_json_request(request) pool = _get_pool_by_name(pool_name, lockmode='update') if 'fqdn' not in data: raise BadRequest400('System FQDN not specified') try: system = System.by_fqdn(data['fqdn'], u) except DatabaseLookupError: raise BadRequest400("System '%s' does not exist" % data['fqdn']) if not pool in system.pools: if pool.can_edit(u) and system.can_edit(u): system.record_activity(user=u, service=u'HTTP', action=u'Added', field=u'Pool', old=None, new=unicode(pool)) system.pools.append(pool) system.date_modified = datetime.datetime.utcnow() pool.record_activity(user=u, service=u'HTTP', action=u'Added', field=u'System', old=None, new=unicode(system)) else: if not pool.can_edit(u): raise Forbidden403('You do not have permission to ' 'add systems to pool %s' % pool.name) if not system.can_edit(u): raise Forbidden403('You do not have permission to ' 'modify system %s' % system.fqdn) return '', 204 @app.route('/pools/<pool_name>/systems/', methods=['DELETE']) @auth_required def remove_system_from_pool(pool_name): """ Remove a system from a system pool :param pool_name: System pool's name. 
:queryparam fqdn: System's fully-qualified domain name """ if 'fqdn' not in request.args: raise MethodNotAllowed405 fqdn = request.args['fqdn'] system = _get_system_by_FQDN(fqdn) u = identity.current.user pool = _get_pool_by_name(pool_name, lockmode='update') if pool in system.pools: if pool.can_edit(u) or system.can_edit(u): if system.active_access_policy == pool.access_policy: system.active_access_policy = system.custom_access_policy system.record_activity(user=u, service=u'HTTP', field=u'Active Access Policy', action=u'Changed', old = pool.access_policy, new = system.custom_access_policy) system.pools.remove(pool) system.record_activity(user=u, service=u'HTTP', action=u'Removed', field=u'Pool', old=unicode(pool), new=None) system.date_modified = datetime.datetime.utcnow() pool.record_activity(user=u, service=u'HTTP', action=u'Removed', field=u'System', old=unicode(system), new=None) else: raise Forbidden403('You do not have permission to modify system %s ' 'or remove systems from pool %s' % (system.fqdn, pool.name)) else: raise BadRequest400('System %s is not in pool %s' % (system.fqdn, pool.name)) return '', 204 @app.route('/pools/<pool_name>/access-policy/', methods=['GET']) def get_access_policy(pool_name): """ Get access policy for pool :param pool_name: System pool's name. """ pool = _get_pool_by_name(pool_name) rules = pool.access_policy.rules return jsonify({ 'id': pool.access_policy.id, 'rules': [ {'id': rule.id, 'user': rule.user.user_name if rule.user else None, 'group': rule.group.group_name if rule.group else None, 'everybody': rule.everybody, 'permission': unicode(rule.permission)} for rule in rules], 'possible_permissions': [ {'value': unicode(permission), 'label': unicode(permission.label)} for permission in SystemPermission], }) @app.route('/pools/<pool_name>/access-policy/', methods=['POST', 'PUT']) @auth_required def save_access_policy(pool_name): """ Updates the access policy for a system pool. :param pool_name: System pool's name. :jsonparam array rules: List of rules to include in the new policy. This replaces all existing rules in the policy. Each rule is a JSON object with ``user``, ``group``, and ``everybody`` keys. """ pool = _get_pool_by_name(pool_name) if not pool.can_edit_policy(identity.current.user): raise Forbidden403('Cannot edit system pool policy') data = read_json_request(request) _edit_access_policy_rules(pool, pool.access_policy, data['rules']) return jsonify(pool.access_policy.__json__()) @app.route('/pools/<pool_name>/access-policy/rules/', methods=['POST']) @auth_required def add_access_policy_rule(pool_name): """ Adds a new rule to the access policy for a system pool. Each rule in the policy grants a permission to a single user, a group of users, or to everybody. See :ref:`system-access-policies-api` for a description of the expected JSON parameters. :param pool_name: System pool's name.
""" pool = _get_pool_by_name(pool_name) if not pool.can_edit_policy(identity.current.user): raise Forbidden403('Cannot edit system pool policy') policy = pool.access_policy rule = read_json_request(request) if rule.get('user', None): user = User.by_user_name(rule['user']) if not user: raise BadRequest400("User '%s' does not exist" % rule['user']) else: user = None if rule.get('group', None): try: group = Group.by_name(rule['group']) except NoResultFound: raise BadRequest400("Group '%s' does not exist" % rule['group']) else: group = None try: permission = SystemPermission.from_string(rule['permission']) except ValueError: raise BadRequest400('Invalid permission') new_rule = policy.add_rule(user=user, group=group, everybody=rule['everybody'], permission=permission) pool.record_activity(user=identity.current.user, service=u'HTTP', field=u'Access Policy Rule', action=u'Added', new=repr(new_rule)) return '', 204 @app.route('/pools/<pool_name>/access-policy/rules/', methods=['DELETE']) @auth_required def delete_access_policy_rules(pool_name): """ Deletes one or more matching rules from a system pool's access policy. See :ref:`system-access-policies-api` for description of the expected query parameters :param pool_name: System pool's name. """ pool = _get_pool_by_name(pool_name) if not pool.can_edit_policy(identity.current.user): raise Forbidden403('Cannot edit system policy') policy = pool.access_policy query = SystemAccessPolicyRule.query.filter(SystemAccessPolicyRule.policy == policy) if 'permission' in request.args: query = query.filter(SystemAccessPolicyRule.permission.in_( request.args.getlist('permission', type=SystemPermission.from_string))) else: raise MethodNotAllowed405 if 'user' in request.args: query = query.join(SystemAccessPolicyRule.user)\ .filter(User.user_name.in_(request.args.getlist('user'))) elif 'group' in request.args: query = query.join(SystemAccessPolicyRule.group)\ .filter(Group.group_name.in_(request.args.getlist('group'))) elif 'everybody' in request.args: query = query.filter(SystemAccessPolicyRule.everybody) else: raise MethodNotAllowed405 for rule in query: rule.record_deletion(service=u'HTTP') session.delete(rule) return '', 204 @app.route('/pools/<pool_name>/', methods=['DELETE']) @auth_required def delete_pool(pool_name): """ Deletes a system pool :param pool_name: System pool's name """ pool = _get_pool_by_name(pool_name, lockmode='update') u = identity.current.user if not pool.can_edit(u): raise Forbidden403('Cannot delete pool %s' % pool_name) systems = System.query.filter(System.pools.contains(pool)) System.record_bulk_activity(systems, user=identity.current.user, service=u'HTTP', action=u'Removed', field=u'Pool', old=unicode(pool), new=None) # Since we are deleting the pool, we will have to change the active # access policy for all systems using the pool's policy to their # custom policy systems = System.query.filter(System.active_access_policy == pool.access_policy) for system in systems: system.active_access_policy = system.custom_access_policy System.record_bulk_activity(systems, user=identity.current.user, service=u'HTTP', field=u'Active Access Policy', action=u'Changed', old = 'Pool policy: %s' % pool_name, new = 'Custom access policy') session.delete(pool) activity = Activity(u, u'HTTP', u'Deleted', u'Pool', pool_name) session.add(activity) return '', 204
gpl-2.0
rdelval/aurora
src/test/python/apache/thermos/core/test_runner_integration.py
14
6126
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import os from textwrap import dedent from apache.thermos.config.schema import Process, Resources, SequentialTask, Task, Tasks from apache.thermos.testing.runner import RunnerTestBase from gen.apache.thermos.ttypes import ProcessState, TaskState class TestRunnerBasic(RunnerTestBase): portmap = {'named_port': 8123} # noqa @classmethod def task(cls): hello_template = Process(cmdline="echo 1") t1 = hello_template(name="t1", cmdline="echo 1 port {{thermos.ports[named_port]}}") t2 = hello_template(name="t2") t3 = hello_template(name="t3") t4 = hello_template(name="t4") t5 = hello_template(name="t5") t6 = hello_template(name="t6") tsk = Task(name="complex", processes=[t1, t2, t3, t4, t5, t6]) # three waves of tasks: t1 t2, t3 t4, t5 t6 tsk = tsk(constraints=[ {'order': ['t1', 't3']}, {'order': ['t1', 't4']}, {'order': ['t2', 't3']}, {'order': ['t2', 't4']}, {'order': ['t3', 't5']}, {'order': ['t3', 't6']}, {'order': ['t4', 't5']}, {'order': ['t4', 't6']}]) return tsk def test_runner_state_success(self): assert self.state.statuses[-1].state == TaskState.SUCCESS def test_runner_header_populated(self): header = self.state.header assert header is not None, 'header should be populated.' assert header.task_id == self.runner.task_id, 'header task id must be set!' assert header.sandbox == os.path.join(self.runner.tempdir, 'sandbox', header.task_id), ( 'header sandbox must be set!') assert header.hostname, 'header hostname must be set!' assert header.launch_time_ms, 'header launch time must be set' def test_runner_has_allocated_name_ports(self): ports = self.state.header.ports assert 'named_port' in ports, 'ephemeral port was either not allocated, or not checkpointed!' assert ports['named_port'] == 8123 def test_runner_has_expected_processes(self): processes = self.state.processes process_names = set(['t%d' % k for k in range(1, 7)]) actual_process_names = set(processes.keys()) assert process_names == actual_process_names, "runner didn't run expected set of processes!"
for process in processes: assert processes[process][-1].process == process def test_runner_processes_have_expected_output(self): for process in self.state.processes: history = self.state.processes[process] assert history[-1].state == ProcessState.SUCCESS if len(history) > 1: for run in range(len(history) - 1): assert history[run].state != ProcessState.SUCCESS, ( "nonterminal processes must not be in SUCCESS state!") def test_runner_processes_have_monotonically_increasing_timestamps(self): for process in self.state.processes: for run in self.state.processes[process]: assert run.fork_time < run.start_time assert run.start_time < run.stop_time class TestConcurrencyBasic(RunnerTestBase): @classmethod def task(cls): hello_template = Process(cmdline="sleep 1") tsk = Task( name="complex", processes=[hello_template(name="process1"), hello_template(name="process2"), hello_template(name="process3")], resources=Resources(cpu=1.0, ram=16 * 1024 * 1024, disk=16 * 1024), max_concurrency=1) return tsk def test_runner_state_success(self): assert self.state.statuses[-1].state == TaskState.SUCCESS # TODO(wickman) This needs a better test. def test_runner_processes_separated_temporally_due_to_concurrency_limit(self): runs = [] for process in self.state.processes: assert len(self.state.processes[process]) == 1, 'Expect one run per task' assert self.state.processes[process][0].state == ProcessState.SUCCESS runs.append(self.state.processes[process][0].start_time) runs.sort() assert runs[1] - runs[0] > 1.0 assert runs[2] - runs[1] > 1.0 class TestRunnerEnvironment(RunnerTestBase): @classmethod def task(cls): setup_bashrc = Process(name="setup_bashrc", cmdline=dedent( """ mkdir -p .profile.d cat <<EOF > .thermos_profile for i in .profile.d/*.sh ; do if [ -r "\\$i" ]; then . \\$i fi done EOF """)) setup_foo = Process(name="setup_foo", cmdline=dedent( """ cat <<EOF > .profile.d/setup_foo.sh export FOO=1 EOF """)) setup_bar = Process(name="setup_bar", cmdline=dedent( """ cat <<EOF > .profile.d/setup_bar.sh export BAR=2 EOF """)) foo_recipe = SequentialTask(processes=[setup_bashrc, setup_foo]) bar_recipe = SequentialTask(processes=[setup_bashrc, setup_bar]) all_recipes = Tasks.combine(foo_recipe, bar_recipe) run = Process(name="run", cmdline=dedent( """ echo $FOO $BAR > expected_output.txt """)) my_task = Task(processes=[run], resources=Resources(cpu=1.0, ram=16 * 1024 * 1024, disk=16 * 1024)) return Tasks.concat(all_recipes, my_task, name="my_task") def test_runner_state_success(self): assert self.state.statuses[-1].state == TaskState.SUCCESS def test_runner_processes_have_expected_output(self): expected_output_file = os.path.join(self.runner.sandbox, self.runner.task_id, 'expected_output.txt') assert os.path.exists(expected_output_file) with open(expected_output_file, 'rb') as fp: assert fp.read().strip() == b"1 2"
apache-2.0
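The constraint list in TestRunnerBasic pins t1/t2 before t3/t4, and those before t5/t6, i.e. three sequential waves. A standalone sketch (plain Python, not Thermos code) that recovers those waves from the same pairs with a Kahn-style topological sort:

```python
# Derive scheduling "waves" from Thermos-style order constraints; the pairs
# are copied from the task above, the algorithm is a plain Kahn topological
# sort grouped by depth.
from collections import defaultdict

constraints = [('t1', 't3'), ('t1', 't4'), ('t2', 't3'), ('t2', 't4'),
               ('t3', 't5'), ('t3', 't6'), ('t4', 't5'), ('t4', 't6')]
processes = {'t1', 't2', 't3', 't4', 't5', 't6'}

successors = defaultdict(set)
indegree = {name: 0 for name in processes}
for before, after in constraints:
    if after not in successors[before]:
        successors[before].add(after)
        indegree[after] += 1

waves = []
ready = sorted(name for name, deg in indegree.items() if deg == 0)
while ready:
    waves.append(ready)
    next_ready = set()
    for name in ready:
        for succ in successors[name]:
            indegree[succ] -= 1
            if indegree[succ] == 0:
                next_ready.add(succ)
    ready = sorted(next_ready)

print(waves)  # [['t1', 't2'], ['t3', 't4'], ['t5', 't6']]
```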
diogommartins/pox
pox/lib/packet/ethernet.py
45
5486
# Copyright 2011,2012,2013 James McCauley # Copyright 2008 (C) Nicira, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at: # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # This file is derived from the packet library in NOX, which was # developed by Nicira, Inc. #====================================================================== # Ethernet header # #====================================================================== import struct from packet_base import packet_base from packet_utils import ethtype_to_str from pox.lib.addresses import * ETHER_ANY = EthAddr(b"\x00\x00\x00\x00\x00\x00") ETHER_BROADCAST = EthAddr(b"\xff\xff\xff\xff\xff\xff") BRIDGE_GROUP_ADDRESS = EthAddr(b"\x01\x80\xC2\x00\x00\x00") LLDP_MULTICAST = EthAddr(b"\x01\x80\xc2\x00\x00\x0e") PAE_MULTICAST = EthAddr(b'\x01\x80\xc2\x00\x00\x03') # 802.1x Port # Access Entity NDP_MULTICAST = EthAddr(b'\x01\x23\x20\x00\x00\x01') # Nicira discovery # multicast class ethernet(packet_base): "Ethernet packet struct" resolve_names = False MIN_LEN = 14 IP_TYPE = 0x0800 ARP_TYPE = 0x0806 RARP_TYPE = 0x8035 VLAN_TYPE = 0x8100 LLDP_TYPE = 0x88cc PAE_TYPE = 0x888e # 802.1x Port Access Entity #MPLS_UNICAST_TYPE = 0x8847 #MPLS_MULTICAST_TYPE = 0x8848 MPLS_TYPE = 0x8847 MPLS_MC_TYPE = 0x8848 # Multicast IPV6_TYPE = 0x86dd PPP_TYPE = 0x880b LWAPP_TYPE = 0x88bb GSMP_TYPE = 0x880c IPX_TYPE = 0x8137 IPX_TYPE = 0x8137 WOL_TYPE = 0x0842 TRILL_TYPE = 0x22f3 JUMBO_TYPE = 0x8870 SCSI_TYPE = 0x889a ATA_TYPE = 0x88a2 QINQ_TYPE = 0x9100 INVALID_TYPE = 0xffff type_parsers = {} def __init__(self, raw=None, prev=None, **kw): packet_base.__init__(self) if len(ethernet.type_parsers) == 0: from vlan import vlan ethernet.type_parsers[ethernet.VLAN_TYPE] = vlan from arp import arp ethernet.type_parsers[ethernet.ARP_TYPE] = arp ethernet.type_parsers[ethernet.RARP_TYPE] = arp from ipv4 import ipv4 ethernet.type_parsers[ethernet.IP_TYPE] = ipv4 from ipv6 import ipv6 ethernet.type_parsers[ethernet.IPV6_TYPE] = ipv6 from lldp import lldp ethernet.type_parsers[ethernet.LLDP_TYPE] = lldp from eapol import eapol ethernet.type_parsers[ethernet.PAE_TYPE] = eapol from mpls import mpls ethernet.type_parsers[ethernet.MPLS_TYPE] = mpls ethernet.type_parsers[ethernet.MPLS_MC_TYPE] = mpls from llc import llc ethernet._llc = llc self.prev = prev self.dst = ETHER_ANY self.src = ETHER_ANY self.type = 0 self.next = b'' if raw is not None: self.parse(raw) self._init(kw) def parse (self, raw): assert isinstance(raw, bytes) self.next = None # In case of unfinished parsing self.raw = raw alen = len(raw) if alen < ethernet.MIN_LEN: self.msg('warning eth packet data too short to parse header: data len %u' % (alen,)) return self.dst = EthAddr(raw[:6]) self.src = EthAddr(raw[6:12]) self.type = struct.unpack('!H', raw[12:ethernet.MIN_LEN])[0] self.hdr_len = ethernet.MIN_LEN self.payload_len = alen - self.hdr_len self.next = ethernet.parse_next(self, self.type, raw, ethernet.MIN_LEN) self.parsed = True @staticmethod def parse_next (prev, typelen, raw, offset=0, allow_llc=True): parser = ethernet.type_parsers.get(typelen) if parser is not None: return 
parser(raw[offset:], prev) elif typelen < 1536 and allow_llc: return ethernet._llc(raw[offset:], prev) else: return raw[offset:] @staticmethod def getNameForType (ethertype): """ Returns a string name for a numeric ethertype """ return ethtype_to_str(ethertype) @property def effective_ethertype (self): return self._get_effective_ethertype(self) @staticmethod def _get_effective_ethertype (self): """ Get the "effective" ethertype of a packet. This means that if the payload is something like a VLAN or SNAP header, we want the type from that deeper header. This is kind of ugly here in the packet library, but it should make user code somewhat simpler. """ if not self.parsed: return ethernet.INVALID_TYPE if self.type == ethernet.VLAN_TYPE or type(self.payload) == ethernet._llc: try: return self.payload.effective_ethertype except: return ethernet.INVALID_TYPE return self.type def _to_str(self): s = ''.join(('[',str(EthAddr(self.src)),'>',str(EthAddr(self.dst)),' ', ethernet.getNameForType(self.type),']')) return s def hdr(self, payload): dst = self.dst src = self.src if type(dst) is EthAddr: dst = dst.toRaw() if type(src) is EthAddr: src = src.toRaw() return struct.pack('!6s6sH', dst, src, self.type)
apache-2.0
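The parse method above fixes the header at 14 bytes: two 6-byte MACs followed by a big-endian ethertype, with anything below 1536 handed to the LLC parser. A standalone sketch of that header split, using only the standard library instead of POX's EthAddr class:

```python
# Unpack a 14-byte Ethernet II header the same way ethernet.parse does above;
# stdlib only, so MACs come back as hex strings instead of EthAddr objects.
import struct

def fmt_mac(mac):
    return ':'.join('%02x' % b for b in bytearray(mac))

def parse_eth_header(raw):
    if len(raw) < 14:
        raise ValueError("too short for an Ethernet header: %d bytes" % len(raw))
    # Same layout as the '!6s6sH' pack in hdr() above.
    dst, src, ethtype = struct.unpack('!6s6sH', raw[:14])
    # Values below 1536 are 802.3 lengths, not ethertypes -- the code above
    # routes those frames to its LLC parser.
    if ethtype >= 1536:
        kind = 'ethertype 0x%04x' % ethtype
    else:
        kind = 'LLC length %d' % ethtype
    return fmt_mac(dst), fmt_mac(src), kind

frame = (b'\xff\xff\xff\xff\xff\xff'   # broadcast destination
         b'\x00\x11\x22\x33\x44\x55'   # source MAC
         b'\x08\x00'                   # IPv4 ethertype
         b'payload...')
print(parse_eth_header(frame))
```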
Shemahmforash/playcert
playcert/lib/event.py
1
1199
import logging

from playcert.lib.artist import Artist
from playcert.cache import cache_data_in_hash

log = logging.getLogger(__name__)


class Event(object):

    def __init__(self, title, when, venue, artist_name=None, redis=None):
        self.title = title
        self.when = when
        self.venue = venue

        # to use cache in this class (not mandatory)
        self.redis = redis

        if artist_name:
            log.debug(
                'No need to find artist name, already have it from eventful %s',
                artist_name)
            self.artist = Artist(artist_name, self.redis)
        else:
            # finds artist from event title
            self.artist = self.find_artist()

    def cache_hash_key(self):
        return 'text.artist'

    def cache_key(self):
        return self.title

    @cache_data_in_hash
    def find_artist(self):
        # finds and creates artist from event title
        return Artist.create_artist_from_text(
            self.title, self.venue, self.redis)

    def __repr__(self):
        return 'Event(%s, %s, %s, %s)' % \
            (self.title.encode('utf-8'), self.when.encode('utf-8'),
             self.venue.encode('utf-8'), self.artist)
gpl-2.0
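find_artist above is wrapped by cache_data_in_hash, which keys results by cache_hash_key()/cache_key() so repeated events reuse the artist lookup. A rough in-memory imitation of that decorator follows; the real one in playcert.cache persists to Redis, which this sketch replaces with a module-level dict:

```python
# In-memory imitation of playcert's cache_data_in_hash decorator: results are
# filed under (instance.cache_hash_key(), instance.cache_key()). The dict
# backing store is an assumption for the sketch; the original uses Redis.
from functools import wraps

_CACHE = {}

def cache_data_in_hash(method):
    @wraps(method)
    def wrapper(self, *args, **kwargs):
        key = (self.cache_hash_key(), self.cache_key())
        if key not in _CACHE:
            _CACHE[key] = method(self, *args, **kwargs)
        return _CACHE[key]
    return wrapper

class Event(object):
    def __init__(self, title):
        self.title = title

    def cache_hash_key(self):
        return 'text.artist'

    def cache_key(self):
        return self.title

    @cache_data_in_hash
    def find_artist(self):
        print('expensive lookup for %s' % self.title)
        return 'artist-for-' + self.title

Event('Gig A').find_artist()   # prints once, then caches
Event('Gig A').find_artist()   # served from the hash, no print
```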
sacherjj/array_devices
baud_test.py
1
2376
from __future__ import division
import serial
import time
from array_devices import array3710

__author__ = 'JoeSacher'

"""
This is a crude script to play with PC baud rates
while the load is set to a fixed baud rate.
"""

load_addr = 1  # This should match load
base_baud_rate = 9600

serial_conn = serial.Serial('COM4', base_baud_rate, timeout=1)
load = array3710.Load(load_addr, serial_conn)
load.remote_control = True
serial_conn.close()

# Set this to sufficient range to get possible valid connections
min_rate = 3500
max_rate = 20000

print("Walking from {} to {} for {}".format(min_rate, max_rate, base_baud_rate))
for baud_rate in xrange(min_rate, max_rate, 100):
    time.sleep(0.1)
    serial_conn = serial.Serial('COM4', baud_rate, timeout=0.5)
    try:
        load = array3710.Load(load_addr, serial_conn, print_errors=False)
    except IOError:
        print("Baud_Rate: {} Error: Can't create load".format(baud_rate))
    else:
        error_count = 0
        for i in range(10):
            try:
                load.set_load_resistance(baud_rate/1000)
                load.update_status(retry_count=1)
                # print(load.voltage)
            except IOError:
                error_count += 1
            try:
                load.load_on = True
                load.load_on = False
                time.sleep(0.05)
            except IOError:
                error_count += 1
        print("Baud_Rate: {} - Errors: {}".format(baud_rate, error_count))
    serial_conn.close()

serial_conn = serial.Serial('COM4', base_baud_rate, timeout=1)
load = array3710.Load(load_addr, serial_conn)
load.remote_control = False
serial_conn.close()

"""
Results for both of my loads.

I found the multiple baud responses when the load was set at 9600 very
interesting. When I get time, I want to scope the wild mismatches and see
what is going on.

4800(L1): 4700-5200 (All 0 errors)
4800(L2): 4700-5200 (All 0 errors)
9600(L1): 4000-4600, 5300-6400, 7600-12400, 15100-17400 (All 0 errors)
9600(L2): 4000-4600, 5300-6400, 7600-12400, 15100-17400 (All 0 errors)
19200(L1): 17500-24000 (~30% with 1 error, 1 with 2 errors)
19200(L2): 17500-24000 (~20% with 1 error, 5% with 2 errors)
38400(L1): 35900-44200 (Errors of 1-4 throughout range, pretty evenly spread, 20% with 0 errors)
38400(L2): 35900-44200 (same distribution of errors as L1 at this baud rate)
"""
mit
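A standard UART only samples reliably when the per-bit timing error stays within a few percent, which is what makes the wide windows in the notes above (7600-12400 responding to a 9600-baud load) so surprising. A quick calculation of the mismatch percentages the script was sweeping through:

```python
# Timing mismatch between the PC's tested baud rate and the load's nominal
# 9600 baud; classic UARTs tolerate roughly 2-5%, so the observed 7600-12400
# window (about -21% to +29%) is the interesting anomaly.
def mismatch_pct(tested, nominal=9600):
    return (tested - nominal) / float(nominal) * 100.0

for tested in (7600, 9600, 12400, 15100, 17400):
    print("{:>5} baud: {:+6.1f}% off nominal".format(tested, mismatch_pct(tested)))
```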
virgree/odoo
addons/base_report_designer/plugin/openerp_report_designer/bin/script/Repeatln.py
293
13228
######################################################################### # # Copyright (c) 2003-2004 Danny Brewer [email protected] # Copyright (C) 2004-2010 OpenERP SA (<http://openerp.com>). # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # # See: http://www.gnu.org/licenses/lgpl.html # ############################################################################# import uno import string import unohelper import xmlrpclib from com.sun.star.task import XJobExecutor if __name__<>"package": from lib.gui import * from lib.error import ErrorDialog from lib.functions import * from ServerParameter import * from lib.logreport import * from lib.rpc import * from LoginTest import * database="test_db1" uid = 3 #class RepeatIn: class RepeatIn( unohelper.Base, XJobExecutor ): def __init__(self, sObject="", sVariable="", sFields="", sDisplayName="", bFromModify=False): # Interface Design LoginTest() self.logobj=Logger() if not loginstatus and __name__=="package": exit(1) self.win = DBModalDialog(60, 50, 180, 250, "RepeatIn Builder") self.win.addFixedText("lblVariable", 2, 12, 60, 15, "Objects to loop on :") self.win.addComboBox("cmbVariable", 180-120-2, 10, 120, 15,True, itemListenerProc=self.cmbVariable_selected) self.insVariable = self.win.getControl( "cmbVariable" ) self.win.addFixedText("lblFields", 10, 32, 60, 15, "Field to loop on :") self.win.addComboListBox("lstFields", 180-120-2, 30, 120, 150, False,itemListenerProc=self.lstbox_selected) self.insField = self.win.getControl( "lstFields" ) self.win.addFixedText("lblName", 12, 187, 60, 15, "Variable name :") self.win.addEdit("txtName", 180-120-2, 185, 120, 15,) self.win.addFixedText("lblUName", 8, 207, 60, 15, "Displayed name :") self.win.addEdit("txtUName", 180-120-2, 205, 120, 15,) self.win.addButton('btnOK',-2 ,-10,45,15,'Ok', actionListenerProc = self.btnOk_clicked ) self.win.addButton('btnCancel',-2 - 45 - 5 ,-10,45,15,'Cancel', actionListenerProc = self.btnCancel_clicked ) global passwd self.password = passwd global url self.sock=RPCSession(url) # Variable Declaration self.sValue=None self.sObj=None self.aSectionList=[] self.sGVariable=sVariable self.sGDisplayName=sDisplayName self.aItemList=[] self.aComponentAdd=[] self.aObjectList=[] self.aListRepeatIn=[] self.aVariableList=[] # Call method to perform Enumration on Report Document EnumDocument(self.aItemList,self.aComponentAdd) # Perform checking that Field-1 and Field - 4 is available or not alos get Combobox # filled if condition is true desktop = getDesktop() doc = desktop.getCurrentComponent() docinfo = doc.getDocumentInfo() # Check weather Field-1 is available if not then exit from application self.sMyHost= "" if not docinfo.getUserFieldValue(3) == "" and not docinfo.getUserFieldValue(0)=="": self.sMyHost= docinfo.getUserFieldValue(0) self.count=0 oParEnum = doc.getTextFields().createEnumeration() while 
oParEnum.hasMoreElements(): oPar = oParEnum.nextElement() if oPar.supportsService("com.sun.star.text.TextField.DropDown"): self.count += 1 getList(self.aObjectList, self.sMyHost,self.count) cursor = doc.getCurrentController().getViewCursor() text = cursor.getText() tcur = text.createTextCursorByRange(cursor) self.aVariableList.extend( filter( lambda obj: obj[:obj.find(" ")] == "List", self.aObjectList ) ) for i in range(len(self.aItemList)): try: anItem = self.aItemList[i][1] component = self.aComponentAdd[i] if component == "Document": sLVal = anItem[anItem.find(",'") + 2:anItem.find("')")] self.aVariableList.extend( filter( lambda obj: obj[:obj.find("(")] == sLVal, self.aObjectList ) ) if tcur.TextSection: getRecersiveSection(tcur.TextSection,self.aSectionList) if component in self.aSectionList: sLVal = anItem[anItem.find(",'") + 2:anItem.find("')")] self.aVariableList.extend( filter( lambda obj: obj[:obj.find("(")] == sLVal, self.aObjectList ) ) if tcur.TextTable: if not component == "Document" and component[component.rfind(".") + 1:] == tcur.TextTable.Name: VariableScope( tcur, self.aVariableList, self.aObjectList, self.aComponentAdd, self.aItemList, component ) except : import traceback,sys info = reduce(lambda x, y: x+y, traceback.format_exception(sys.exc_type, sys.exc_value, sys.exc_traceback)) self.logobj.log_write('RepeatIn', LOG_ERROR, info) self.bModify=bFromModify if self.bModify==True: if sObject=="": self.insVariable.setText("List of "+docinfo.getUserFieldValue(3)) self.insField.addItem("objects",self.win.getListBoxItemCount("lstFields")) self.win.setEditText("txtName", sVariable) self.win.setEditText("txtUName",sDisplayName) self.sValue= "objects" else: sItem="" for anObject in self.aObjectList: if anObject[:anObject.find("(")] == sObject: sItem = anObject self.insVariable.setText( sItem ) genTree( sItem[sItem.find("(")+1:sItem.find(")")], self.aListRepeatIn, self.insField, self.sMyHost, 2, ending=['one2many','many2many'], recur=['one2many','many2many'] ) self.sValue= self.win.getListBoxItem("lstFields",self.aListRepeatIn.index(sFields)) for var in self.aVariableList: if var[:8] <> 'List of ': self.model_ids = self.sock.execute(database, uid, self.password, 'ir.model' , 'search', [('model','=',var[var.find("(")+1:var.find(")")])]) else: self.model_ids = self.sock.execute(database, uid, self.password, 'ir.model' , 'search', [('model','=',var[8:])]) fields=['name','model'] self.model_res = self.sock.execute(database, uid, self.password, 'ir.model', 'read', self.model_ids,fields) if self.model_res <> []: if var[:8]<>'List of ': self.insVariable.addItem(var[:var.find("(")+1] + self.model_res[0]['name'] + ")" ,self.insVariable.getItemCount()) else: self.insVariable.addItem('List of ' + self.model_res[0]['name'] ,self.insVariable.getItemCount()) else: self.insVariable.addItem(var ,self.insVariable.getItemCount()) self.win.doModalDialog("lstFields",self.sValue) else: ErrorDialog("Please Select Appropriate module" ,"Create new report from: \nOdoo -> Open a New Report") self.win.endExecute() def lstbox_selected(self, oItemEvent): sItem=self.win.getListBoxSelectedItem("lstFields") sMain=self.aListRepeatIn[self.win.getListBoxSelectedItemPos("lstFields")] if self.bModify==True: self.win.setEditText("txtName", self.sGVariable) self.win.setEditText("txtUName",self.sGDisplayName) else: self.win.setEditText("txtName",sMain[sMain.rfind("/")+1:]) self.win.setEditText("txtUName","|-."+sItem[sItem.rfind("/")+1:]+".-|") def cmbVariable_selected(self, oItemEvent): if self.count > 0 : 
desktop=getDesktop() doc =desktop.getCurrentComponent() docinfo=doc.getDocumentInfo() self.win.removeListBoxItems("lstFields", 0, self.win.getListBoxItemCount("lstFields")) sItem=self.win.getComboBoxText("cmbVariable") for var in self.aVariableList: if var[:8]=='List of ': if var[:8]==sItem[:8]: sItem = var elif var[:var.find("(")+1] == sItem[:sItem.find("(")+1]: sItem = var self.aListRepeatIn=[] data = ( sItem[sItem.rfind(" ") + 1:] == docinfo.getUserFieldValue(3) ) and docinfo.getUserFieldValue(3) or sItem[sItem.find("(")+1:sItem.find(")")] genTree( data, self.aListRepeatIn, self.insField, self.sMyHost, 2, ending=['one2many','many2many'], recur=['one2many','many2many'] ) self.win.selectListBoxItemPos("lstFields", 0, True ) else: sItem=self.win.getComboBoxText("cmbVariable") for var in self.aVariableList: if var[:8]=='List of ' and var[:8] == sItem[:8]: sItem = var if sItem.find(".")==-1: temp=sItem[sItem.rfind("x_"):] else: temp=sItem[sItem.rfind(".")+1:] self.win.setEditText("txtName",temp) self.win.setEditText("txtUName","|-."+temp+".-|") self.insField.addItem("objects",self.win.getListBoxItemCount("lstFields")) self.win.selectListBoxItemPos("lstFields", 0, True ) def btnOk_clicked(self, oActionEvent): desktop=getDesktop() doc = desktop.getCurrentComponent() cursor = doc.getCurrentController().getViewCursor() selectedItem = self.win.getListBoxSelectedItem( "lstFields" ) selectedItemPos = self.win.getListBoxSelectedItemPos( "lstFields" ) txtName = self.win.getEditText( "txtName" ) txtUName = self.win.getEditText( "txtUName" ) if selectedItem != "" and txtName != "" and txtUName != "": sKey=u""+ txtUName if selectedItem == "objects": sValue=u"[[ repeatIn(" + selectedItem + ",'" + txtName + "') ]]" else: sObjName=self.win.getComboBoxText("cmbVariable") sObjName=sObjName[:sObjName.find("(")] sValue=u"[[ repeatIn(" + sObjName + self.aListRepeatIn[selectedItemPos].replace("/",".") + ",'" + txtName +"') ]]" if self.bModify == True: oCurObj = cursor.TextField oCurObj.Items = (sKey,sValue) oCurObj.update() else: oInputList = doc.createInstance("com.sun.star.text.TextField.DropDown") if self.win.getListBoxSelectedItem("lstFields") == "objects": oInputList.Items = (sKey,sValue) doc.Text.insertTextContent(cursor,oInputList,False) else: sValue=u"[[ repeatIn(" + sObjName + self.aListRepeatIn[selectedItemPos].replace("/",".") + ",'" + txtName +"') ]]" if cursor.TextTable==None: oInputList.Items = (sKey,sValue) doc.Text.insertTextContent(cursor,oInputList,False) else: oInputList.Items = (sKey,sValue) widget = ( cursor.TextTable or selectedItem <> 'objects' ) and cursor.TextTable.getCellByName( cursor.Cell.CellName ) or doc.Text widget.insertTextContent(cursor,oInputList,False) self.win.endExecute() else: ErrorDialog("Please fill appropriate data in Object Field or Name field \nor select particular value from the list of fields.") def btnCancel_clicked(self, oActionEvent): self.win.endExecute() if __name__<>"package" and __name__=="__main__": RepeatIn() elif __name__=="package": g_ImplementationHelper = unohelper.ImplementationHelper() g_ImplementationHelper.addImplementation( RepeatIn, "org.openoffice.openerp.report.repeatln", ("com.sun.star.task.Job",),) # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
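The RepeatIn dialog above works by pairing two XML-RPC calls: a 'search' on ir.model to get matching ids, then a 'read' of selected fields on those ids. A sketch of that two-step pattern with bare xmlrpclib (Python 2, matching the plugin's imports); the server URL, database name and credentials are placeholders, and the real plugin wraps this in its RPCSession helper:

```python
# Sketch of the search/read RPC pattern used by RepeatIn above; endpoint and
# credentials are assumptions, not values from the plugin.
import xmlrpclib  # Python 2 stdlib, as in the plugin

url = "http://localhost:8069/xmlrpc/object"      # assumed endpoint
database, uid, password = "test_db1", 3, "admin"  # placeholders

sock = xmlrpclib.ServerProxy(url)
# Step 1: search returns ids of ir.model rows matching the domain.
model_ids = sock.execute(database, uid, password,
                         'ir.model', 'search',
                         [('model', '=', 'res.partner')])
# Step 2: read pulls only the fields the dialog needs.
records = sock.execute(database, uid, password,
                       'ir.model', 'read', model_ids, ['name', 'model'])
for rec in records:
    print("{model}: {name}".format(**rec))
```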
popazerty/blackhole-vuplus
lib/python/Components/MediaPlayer.py
32
3735
from MenuList import MenuList
from Tools.Directories import SCOPE_CURRENT_SKIN, resolveFilename
from os import path
from enigma import eListboxPythonMultiContent, RT_VALIGN_CENTER, gFont, eServiceCenter
from Tools.LoadPixmap import LoadPixmap
import skin

STATE_PLAY = 0
STATE_PAUSE = 1
STATE_STOP = 2
STATE_REWIND = 3
STATE_FORWARD = 4
STATE_NONE = 5


class PlayList(MenuList):
    def __init__(self, enableWrapAround=False):
        MenuList.__init__(self, [], enableWrapAround, eListboxPythonMultiContent)
        font = skin.fonts.get("PlayList", ("Regular", 18, 23))
        self.l.setFont(0, gFont(font[0], font[1]))
        self.l.setItemHeight(font[2])
        self.currPlaying = -1
        self.oldCurrPlaying = -1
        self.serviceHandler = eServiceCenter.getInstance()
        self.state = STATE_NONE
        self.icons = [
            LoadPixmap(resolveFilename(SCOPE_CURRENT_SKIN, "skin_default/icons/ico_mp_play.png")),
            LoadPixmap(resolveFilename(SCOPE_CURRENT_SKIN, "skin_default/icons/ico_mp_pause.png")),
            LoadPixmap(resolveFilename(SCOPE_CURRENT_SKIN, "skin_default/icons/ico_mp_stop.png")),
            LoadPixmap(resolveFilename(SCOPE_CURRENT_SKIN, "skin_default/icons/ico_mp_rewind.png")),
            LoadPixmap(path=resolveFilename(SCOPE_CURRENT_SKIN, "skin_default/icons/ico_mp_forward.png")),
        ]

    def PlaylistEntryComponent(self, serviceref, state):
        res = [serviceref]
        text = serviceref.getName()
        if text == "":
            text = path.split(serviceref.getPath().split('/')[-1])[1]
        x, y, w, h = skin.parameters.get("PlayListName", (25, 1, 470, 22))
        res.append((eListboxPythonMultiContent.TYPE_TEXT, x, y, w, h, 0, RT_VALIGN_CENTER, text))
        try:
            png = self.icons[state]
            x, y, w, h = skin.parameters.get("PlayListIcon", (5, 3, 16, 16))
            res.append((eListboxPythonMultiContent.TYPE_PIXMAP_ALPHATEST, x, y, w, h, png))
        except:
            pass
        return res

    def clear(self):
        del self.list[:]
        self.l.setList(self.list)
        self.currPlaying = -1
        self.oldCurrPlaying = -1

    def getSelection(self):
        return self.l.getCurrentSelection() and self.l.getCurrentSelection()[0]

    def addFile(self, serviceref):
        self.list.append(self.PlaylistEntryComponent(serviceref, STATE_NONE))

    def updateFile(self, index, newserviceref):
        if index < len(self.list):
            self.list[index] = self.PlaylistEntryComponent(newserviceref, STATE_NONE)

    def deleteFile(self, index):
        if self.currPlaying >= index:
            self.currPlaying -= 1
        del self.list[index]

    def setCurrentPlaying(self, index):
        self.oldCurrPlaying = self.currPlaying
        self.currPlaying = index
        self.moveToIndex(index)

    def updateState(self, state):
        self.state = state
        if len(self.list) > self.oldCurrPlaying and self.oldCurrPlaying != -1:
            self.list[self.oldCurrPlaying] = self.PlaylistEntryComponent(self.list[self.oldCurrPlaying][0], STATE_NONE)
        if self.currPlaying != -1 and self.currPlaying < len(self.list):
            self.list[self.currPlaying] = self.PlaylistEntryComponent(self.list[self.currPlaying][0], state)
        self.updateList()

    def isStopped(self):
        return self.state in (STATE_STOP, STATE_NONE)

    def playFile(self):
        self.updateState(STATE_PLAY)

    def pauseFile(self):
        self.updateState(STATE_PAUSE)

    def stopFile(self):
        self.updateState(STATE_STOP)

    def rewindFile(self):
        self.updateState(STATE_REWIND)

    def forwardFile(self):
        self.updateState(STATE_FORWARD)

    def updateList(self):
        self.l.setList(self.list)

    def getCurrentIndex(self):
        return self.currPlaying

    def getCurrentEvent(self):
        l = self.l.getCurrentSelection()
        return l and self.serviceHandler.info(l[0]).getEvent(l[0])

    def getCurrent(self):
        l = self.l.getCurrentSelection()
        return l and l[0]

    def getServiceRefList(self):
        return [x[0] for x in self.list]

    def __len__(self):
        return len(self.list)
gpl-2.0
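PlayList.updateState above re-renders only two rows -- the entry that was playing and the one that now is -- instead of rebuilding the whole list. The bookkeeping reduces to a few lines, sketched here with plain tuples standing in for Enigma2's multi-content entries:

```python
# Minimal model of the currPlaying/oldCurrPlaying bookkeeping used above;
# entries are (name, state) tuples instead of eListboxPythonMultiContent rows.
STATE_NONE, STATE_PLAY = 'none', 'play'

class TinyPlaylist(object):
    def __init__(self, names):
        self.entries = [(name, STATE_NONE) for name in names]
        self.curr = -1
        self.old = -1

    def set_current(self, index):
        self.old, self.curr = self.curr, index

    def update_state(self, state):
        # Touch at most two rows, exactly like updateState() above.
        if 0 <= self.old < len(self.entries):
            self.entries[self.old] = (self.entries[self.old][0], STATE_NONE)
        if 0 <= self.curr < len(self.entries):
            self.entries[self.curr] = (self.entries[self.curr][0], state)

pl = TinyPlaylist(['a.mp3', 'b.mp3', 'c.mp3'])
pl.set_current(1)
pl.update_state(STATE_PLAY)
print(pl.entries)  # [('a.mp3', 'none'), ('b.mp3', 'play'), ('c.mp3', 'none')]
```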
hogarthj/ansible
test/units/modules/network/edgeos/test_edgeos_command.py
39
4249
# (c) 2018 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

import json

from ansible.compat.tests.mock import patch
from ansible.modules.network.edgeos import edgeos_command
from units.modules.utils import set_module_args
from .edgeos_module import TestEdgeosModule, load_fixture


class TestEdgeosCommandModule(TestEdgeosModule):

    module = edgeos_command

    def setUp(self):
        super(TestEdgeosCommandModule, self).setUp()
        self.mock_run_commands = patch('ansible.modules.network.edgeos.edgeos_command.run_commands')
        self.run_commands = self.mock_run_commands.start()

    def tearDown(self):
        super(TestEdgeosCommandModule, self).tearDown()
        self.mock_run_commands.stop()

    def load_fixtures(self, commands=None):
        def load_from_file(*args, **kwargs):
            module, commands = args
            output = list()

            for item in commands:
                try:
                    obj = json.loads(item)
                    command = obj['command']
                except (ValueError, TypeError):
                    command = item['command']
                filename = str(command).replace(' ', '_')
                output.append(load_fixture(filename))
            return output

        self.run_commands.side_effect = load_from_file

    def test_edgeos_command_simple(self):
        set_module_args(dict(commands=['show version']))
        result = self.execute_module()
        self.assertEqual(len(result['stdout']), 1)
        self.assertTrue(result['stdout'][0].startswith('Version: v1.9.7'))

    def test_edgeos_command_multiple(self):
        set_module_args(dict(commands=['show version', 'show version']))
        result = self.execute_module()
        self.assertEqual(len(result['stdout']), 2)
        self.assertTrue(result['stdout'][0].startswith('Version: v1.9.7'))

    def test_edgeos_command_wait_for(self):
        wait_for = 'result[0] contains "Ubiquiti Networks"'
        set_module_args(dict(commands=['show version'], wait_for=wait_for))
        self.execute_module()

    def test_edgeos_command_wait_for_fails(self):
        wait_for = 'result[0] contains "bad string"'
        set_module_args(dict(commands=['show version'], wait_for=wait_for))
        self.execute_module(failed=True)
        self.assertEqual(self.run_commands.call_count, 10)

    def test_edgeos_command_retries(self):
        wait_for = 'result[0] contains "bad string"'
        set_module_args(dict(commands=['show version'], wait_for=wait_for, retries=2))
        self.execute_module(failed=True)
        self.assertEqual(self.run_commands.call_count, 2)

    def test_edgeos_command_match_any(self):
        wait_for = ['result[0] contains "Ubiquiti Networks"',
                    'result[0] contains "bad string"']
        set_module_args(dict(commands=['show version'], wait_for=wait_for, match='any'))
        self.execute_module()

    def test_edgeos_command_match_all(self):
        wait_for = ['result[0] contains "Ubiquiti Networks"',
                    'result[0] contains "EdgeRouter"']
        set_module_args(dict(commands=['show version'], wait_for=wait_for, match='all'))
        self.execute_module()

    def test_edgeos_command_match_all_failure(self):
        wait_for = ['result[0] contains "Ubiquiti Networks"',
                    'result[0] contains "bad string"']
        commands = ['show version', 'show version']
        set_module_args(dict(commands=commands, wait_for=wait_for, match='all'))
        self.execute_module(failed=True)
gpl-3.0
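The load_fixtures helper above answers each mocked run_commands call by turning the command string into a fixture filename ('show version' becomes 'show_version'). The same pattern works with the standard library's unittest.mock; a self-contained sketch where the Device class and the fixtures directory are assumptions:

```python
# Fixture-per-command mocking in the style of load_fixtures above, using the
# stdlib's unittest.mock; Device and the fixture files are assumptions.
import os
from unittest import mock

FIXTURE_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'fixtures')

class Device(object):
    def run_command(self, command):
        raise RuntimeError('would talk to real hardware')

def respond_from_fixture(command):
    # 'show version' is answered from fixtures/show_version, and so on.
    path = os.path.join(FIXTURE_DIR, command.replace(' ', '_'))
    with open(path) as handle:
        return handle.read()

device = Device()
with mock.patch.object(Device, 'run_command', side_effect=respond_from_fixture):
    # Inside the patch, no hardware is touched; output comes from the file.
    print(device.run_command('show version'))
```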
liberorbis/libernext
apps/frappe/frappe/__init__.py
3
19151
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors # MIT License. See license.txt """ globals attached to frappe module + some utility functions that should probably be moved """ from __future__ import unicode_literals from werkzeug.local import Local, release_local import os, importlib, inspect, logging, json # public from frappe.__version__ import __version__ from .exceptions import * from .utils.jinja import get_jenv, get_template, render_template local = Local() class _dict(dict): """dict like object that exposes keys as attributes""" def __getattr__(self, key): ret = self.get(key) if not ret and key.startswith("__"): raise AttributeError() return ret def __setattr__(self, key, value): self[key] = value def __getstate__(self): return self def __setstate__(self, d): self.update(d) def update(self, d): """update and return self -- the missing dict feature in python""" super(_dict, self).update(d) return self def copy(self): return _dict(dict(self).copy()) def _(msg): """translate object in current lang, if exists""" if local.lang == "en": return msg from frappe.translate import get_full_dict return get_full_dict(local.lang).get(msg, msg) def get_lang_dict(fortype, name=None): if local.lang=="en": return {} from frappe.translate import get_dict return get_dict(fortype, name) def set_user_lang(user, user_language=None): from frappe.translate import get_user_lang local.lang = get_user_lang(user) # local-globals db = local("db") conf = local("conf") form = form_dict = local("form_dict") request = local("request") request_method = local("request_method") response = local("response") session = local("session") user = local("user") flags = local("flags") error_log = local("error_log") debug_log = local("debug_log") message_log = local("message_log") lang = local("lang") def init(site, sites_path=None): if getattr(local, "initialised", None): return if not sites_path: sites_path = '.' 
local.error_log = [] local.message_log = [] local.debug_log = [] local.flags = _dict({}) local.rollback_observers = [] local.test_objects = {} local.site = site local.sites_path = sites_path local.site_path = os.path.join(sites_path, site) local.request_method = request.method if request else None local.request_ip = None local.response = _dict({"docs":[]}) local.conf = _dict(get_site_config()) local.lang = local.conf.lang or "en" local.module_app = None local.app_modules = None local.user = None local.role_permissions = {} local.jenv = None local.jloader =None local.cache = {} setup_module_map() local.initialised = True def connect(site=None, db_name=None): from database import Database if site: init(site) local.db = Database(user=db_name or local.conf.db_name) local.form_dict = _dict() local.session = _dict() set_user("Administrator") def get_site_config(sites_path=None, site_path=None): config = {} sites_path = sites_path or getattr(local, "sites_path", None) site_path = site_path or getattr(local, "site_path", None) if sites_path: common_site_config = os.path.join(sites_path, "common_site_config.json") if os.path.exists(common_site_config): config.update(get_file_json(common_site_config)) if site_path: site_config = os.path.join(site_path, "site_config.json") if os.path.exists(site_config): config.update(get_file_json(site_config)) return _dict(config) def destroy(): """closes connection and releases werkzeug local""" if db: db.close() release_local(local) _memc = None # memcache def cache(): global _memc if not _memc: from frappe.memc import MClient _memc = MClient(['localhost:11211']) return _memc def get_traceback(): import utils return utils.get_traceback() def errprint(msg): from utils import cstr if not request or (not "cmd" in local.form_dict): print cstr(msg) error_log.append(cstr(msg)) def log(msg): if not request: if conf.get("logging") or False: print repr(msg) from utils import cstr debug_log.append(cstr(msg)) def msgprint(msg, small=0, raise_exception=0, as_table=False): def _raise_exception(): if raise_exception: if flags.rollback_on_exception: db.rollback() import inspect if inspect.isclass(raise_exception) and issubclass(raise_exception, Exception): raise raise_exception, msg else: raise ValidationError, msg if flags.mute_messages: _raise_exception() return from utils import cstr if as_table and type(msg) in (list, tuple): msg = '<table border="1px" style="border-collapse: collapse" cellpadding="2px">' + ''.join(['<tr>'+''.join(['<td>%s</td>' % c for c in r])+'</tr>' for r in msg]) + '</table>' if flags.print_messages: print "Message: " + repr(msg) message_log.append((small and '__small:' or '')+cstr(msg or '')) _raise_exception() def throw(msg, exc=ValidationError): msgprint(msg, raise_exception=exc) def create_folder(path, with_init=False): from frappe.utils import touch_file if not os.path.exists(path): os.makedirs(path) if with_init: touch_file(os.path.join(path, "__init__.py")) def set_user(username): from frappe.utils.user import User local.session.user = username local.session.sid = username local.cache = {} local.form_dict = _dict() local.jenv = None local.session.data = {} local.user = User(username) local.role_permissions = {} def get_request_header(key, default=None): return request.headers.get(key, default) def sendmail(recipients=(), sender="", subject="No Subject", message="No Message", as_markdown=False, bulk=False, ref_doctype=None, ref_docname=None, add_unsubscribe_link=False, attachments=None): if bulk: import frappe.utils.email_lib.bulk 
frappe.utils.email_lib.bulk.send(recipients=recipients, sender=sender, subject=subject, message=message, ref_doctype = ref_doctype, ref_docname = ref_docname, add_unsubscribe_link=add_unsubscribe_link, attachments=attachments) else: import frappe.utils.email_lib if as_markdown: frappe.utils.email_lib.sendmail_md(recipients, sender=sender, subject=subject, msg=message, attachments=attachments) else: frappe.utils.email_lib.sendmail(recipients, sender=sender, subject=subject, msg=message, attachments=attachments) logger = None whitelisted = [] guest_methods = [] def whitelist(allow_guest=False): """ decorator for whitelisting a function Note: if the function is allowed to be accessed by a guest user, it must explicitly be marked as allow_guest=True for specific roles, set allow_roles = ['Administrator'] etc. """ def innerfn(fn): global whitelisted, guest_methods whitelisted.append(fn) if allow_guest: guest_methods.append(fn) return fn return innerfn def only_for(roles): if not isinstance(roles, (tuple, list)): roles = (roles,) roles = set(roles) myroles = set(get_roles()) if not roles.intersection(myroles): raise PermissionError def clear_cache(user=None, doctype=None): """clear cache""" import frappe.sessions if doctype: import frappe.model.meta frappe.model.meta.clear_cache(doctype) reset_metadata_version() elif user: frappe.sessions.clear_cache(user) else: # everything import translate frappe.sessions.clear_cache() translate.clear_cache() reset_metadata_version() for fn in frappe.get_hooks("clear_cache"): get_attr(fn)() frappe.local.role_permissions = {} def get_roles(username=None): if not local.session: return ["Guest"] return get_user(username).get_roles() def get_user(username): from frappe.utils.user import User if not username or username == local.session.user: return local.user else: return User(username) def has_permission(doctype, ptype="read", doc=None, user=None): import frappe.permissions return frappe.permissions.has_permission(doctype, ptype, doc, user=user) def is_table(doctype): tables = cache().get_value("is_table") if tables==None: tables = db.sql_list("select name from tabDocType where ifnull(istable,0)=1") cache().set_value("is_table", tables) return doctype in tables def clear_perms(doctype): db.sql("""delete from tabDocPerm where parent=%s""", doctype) def reset_perms(doctype): from frappe.core.doctype.notification_count.notification_count import delete_notification_count_for delete_notification_count_for(doctype) clear_perms(doctype) reload_doc(db.get_value("DocType", doctype, "module"), "DocType", doctype, force=True) def generate_hash(txt=None): """Generates random hash for session id""" import hashlib, time from .utils import random_string return hashlib.sha224((txt or "") + repr(time.time()) + repr(random_string(8))).hexdigest() def reset_metadata_version(): v = generate_hash() cache().set_value("metadata_version", v) return v def new_doc(doctype, parent_doc=None, parentfield=None): from frappe.model.create_new import get_new_doc return get_new_doc(doctype, parent_doc, parentfield) def set_value(doctype, docname, fieldname, value): import frappe.client return frappe.client.set_value(doctype, docname, fieldname, value) def get_doc(arg1, arg2=None): import frappe.model.document return frappe.model.document.get_doc(arg1, arg2) def get_meta(doctype, cached=True): import frappe.model.meta return frappe.model.meta.get_meta(doctype, cached=cached) def delete_doc(doctype=None, name=None, force=0, ignore_doctypes=None, for_reload=False, ignore_permissions=False): import 
frappe.model.delete_doc frappe.model.delete_doc.delete_doc(doctype, name, force, ignore_doctypes, for_reload, ignore_permissions) def delete_doc_if_exists(doctype, name): if db.exists(doctype, name): delete_doc(doctype, name) def reload_doc(module, dt=None, dn=None, force=False): import frappe.modules return frappe.modules.reload_doc(module, dt, dn, force=force) def rename_doc(doctype, old, new, debug=0, force=False, merge=False, ignore_permissions=False): from frappe.model.rename_doc import rename_doc return rename_doc(doctype, old, new, force=force, merge=merge, ignore_permissions=ignore_permissions) def insert(doclist): import frappe.model return frappe.model.insert(doclist) def get_module(modulename): return importlib.import_module(modulename) def scrub(txt): return txt.replace(' ','_').replace('-', '_').lower() def unscrub(txt): return txt.replace('_',' ').replace('-', ' ').title() def get_module_path(module, *joins): module = scrub(module) return get_pymodule_path(local.module_app[module] + "." + module, *joins) def get_app_path(app_name, *joins): return get_pymodule_path(app_name, *joins) def get_site_path(*joins): return os.path.join(local.site_path, *joins) def get_pymodule_path(modulename, *joins): joins = [scrub(part) for part in joins] return os.path.join(os.path.dirname(get_module(scrub(modulename)).__file__), *joins) def get_module_list(app_name): return get_file_items(os.path.join(os.path.dirname(get_module(app_name).__file__), "modules.txt")) def get_all_apps(with_frappe=False, with_internal_apps=True, sites_path=None): if not sites_path: sites_path = local.sites_path apps = get_file_items(os.path.join(sites_path, "apps.txt"), raise_not_found=True) if with_internal_apps: apps.extend(get_file_items(os.path.join(local.site_path, "apps.txt"))) if with_frappe: apps.insert(0, 'frappe') return apps def get_installed_apps(): if getattr(flags, "in_install_db", True): return [] installed = json.loads(db.get_global("installed_apps") or "[]") return installed @whitelist() def get_versions(): versions = {} for app in get_installed_apps(): versions[app] = { "title": get_hooks("app_title", app_name=app), "description": get_hooks("app_description", app_name=app) } try: versions[app]["version"] = get_attr(app + ".__version__") except AttributeError: versions[app]["version"] = '0.0.1' return versions def get_hooks(hook=None, default=None, app_name=None): def load_app_hooks(app_name=None): hooks = {} for app in [app_name] if app_name else get_installed_apps(): app = "frappe" if app=="webnotes" else app app_hooks = get_module(app + ".hooks") for key in dir(app_hooks): if not key.startswith("_"): append_hook(hooks, key, getattr(app_hooks, key)) return hooks def append_hook(target, key, value): if isinstance(value, dict): target.setdefault(key, {}) for inkey in value: append_hook(target[key], inkey, value[inkey]) else: append_to_list(target, key, value) def append_to_list(target, key, value): target.setdefault(key, []) if not isinstance(value, list): value = [value] target[key].extend(value) if app_name: hooks = _dict(load_app_hooks(app_name)) else: hooks = _dict(cache().get_value("app_hooks", load_app_hooks)) if hook: return hooks.get(hook) or (default if default is not None else []) else: return hooks def setup_module_map(): _cache = cache() if conf.db_name: local.app_modules = _cache.get_value("app_modules") local.module_app = _cache.get_value("module_app") if not local.app_modules: local.module_app, local.app_modules = {}, {} for app in get_all_apps(True): if app=="webnotes": app="frappe" 
local.app_modules.setdefault(app, []) for module in get_module_list(app): module = scrub(module) local.module_app[module] = app local.app_modules[app].append(module) if conf.db_name: _cache.set_value("app_modules", local.app_modules) _cache.set_value("module_app", local.module_app) def get_file_items(path, raise_not_found=False, ignore_empty_lines=True): import frappe.utils content = read_file(path, raise_not_found=raise_not_found) if content: content = frappe.utils.strip(content) return [p.strip() for p in content.splitlines() if (not ignore_empty_lines) or (p.strip() and not p.startswith("#"))] else: return [] def get_file_json(path): with open(path, 'r') as f: return json.load(f) def read_file(path, raise_not_found=False): from frappe.utils import cstr if os.path.exists(path): with open(path, "r") as f: return cstr(f.read()) elif raise_not_found: raise IOError("{} Not Found".format(path)) else: return None def get_attr(method_string): modulename = '.'.join(method_string.split('.')[:-1]) methodname = method_string.split('.')[-1] return getattr(get_module(modulename), methodname) def call(fn, *args, **kwargs): if hasattr(fn, 'fnargs'): fnargs = fn.fnargs else: fnargs, varargs, varkw, defaults = inspect.getargspec(fn) newargs = {} for a in fnargs: if a in kwargs: newargs[a] = kwargs.get(a) return fn(*args, **newargs) def make_property_setter(args, ignore_validate=False, validate_fields_for_doctype=True): args = _dict(args) ps = get_doc({ 'doctype': "Property Setter", 'doctype_or_field': args.doctype_or_field or "DocField", 'doc_type': args.doctype, 'field_name': args.fieldname, 'property': args.property, 'value': args.value, 'property_type': args.property_type or "Data", '__islocal': 1 }) ps.ignore_validate = ignore_validate ps.validate_fields_for_doctype = validate_fields_for_doctype ps.insert() def import_doc(path, ignore_links=False, ignore_insert=False, insert=False): from frappe.core.page.data_import_tool import data_import_tool data_import_tool.import_doc(path, ignore_links=ignore_links, ignore_insert=ignore_insert, insert=insert) def copy_doc(doc, ignore_no_copy=True): """ No_copy fields also get copied.""" import copy def remove_no_copy_fields(d): for df in d.meta.get("fields", {"no_copy": 1}): if hasattr(d, df.fieldname): d.set(df.fieldname, None) if not isinstance(doc, dict): d = doc.as_dict() else: d = doc newdoc = get_doc(copy.deepcopy(d)) newdoc.name = None newdoc.set("__islocal", 1) newdoc.owner = None newdoc.creation = None newdoc.amended_from = None newdoc.amendment_date = None if not ignore_no_copy: remove_no_copy_fields(newdoc) for d in newdoc.get_all_children(): d.name = None d.parent = None d.set("__islocal", 1) d.owner = None d.creation = None if not ignore_no_copy: remove_no_copy_fields(d) return newdoc def compare(val1, condition, val2): import frappe.utils return frappe.utils.compare(val1, condition, val2) def respond_as_web_page(title, html, success=None, http_status_code=None): local.message_title = title local.message = html local.message_success = success local.response['type'] = 'page' local.response['page_name'] = 'message' if http_status_code: local.response['http_status_code'] = http_status_code def build_match_conditions(doctype, as_condition=True): import frappe.widgets.reportview return frappe.widgets.reportview.build_match_conditions(doctype, as_condition) def get_list(doctype, filters=None, fields=None, or_filters=None, docstatus=None, group_by=None, order_by=None, limit_start=0, limit_page_length=None, as_list=False, debug=False, 
ignore_permissions=False, user=None): import frappe.model.db_query return frappe.model.db_query.DatabaseQuery(doctype).execute(filters=filters, fields=fields, docstatus=docstatus, or_filters=or_filters, group_by=group_by, order_by=order_by, limit_start=limit_start, limit_page_length=limit_page_length, as_list=as_list, debug=debug, ignore_permissions=ignore_permissions, user=user) def get_all(doctype, **args): args["ignore_permissions"] = True return get_list(doctype, **args) run_query = get_list def add_version(doc): get_doc({ "doctype": "Version", "ref_doctype": doc.doctype, "docname": doc.name, "doclist_json": json.dumps(doc.as_dict(), indent=1, sort_keys=True) }).insert(ignore_permissions=True) def get_test_records(doctype): from frappe.modules import get_doctype_module, get_module_path path = os.path.join(get_module_path(get_doctype_module(doctype)), "doctype", scrub(doctype), "test_records.json") if os.path.exists(path): with open(path, "r") as f: return json.loads(f.read()) else: return [] def format_value(value, df, doc=None, currency=None): import frappe.utils.formatters return frappe.utils.formatters.format_value(value, df, doc, currency=currency) def get_print_format(doctype, name, print_format=None, style=None, as_pdf=False): from frappe.website.render import build_page from frappe.utils.pdf import get_pdf local.form_dict.doctype = doctype local.form_dict.name = name local.form_dict.format = print_format local.form_dict.style = style html = build_page("print") if as_pdf: return get_pdf(html) else: return html def attach_print(doctype, name, file_name): from frappe.utils import scrub_urls print_settings = db.get_singles_dict("Print Settings") if int(print_settings.send_print_as_pdf or 0): return { "fname": file_name + ".pdf", "fcontent": get_print_format(doctype, name, as_pdf=True) } else: return { "fname": file_name + ".html", "fcontent": scrub_urls(get_print_format(doctype, name)).encode("utf-8") } logging_setup_complete = False def get_logger(module=None): from frappe.setup_logging import setup_logging global logging_setup_complete if not logging_setup_complete: setup_logging() logging_setup_complete = True return logging.getLogger(module or "frappe")
gpl-2.0
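Much of frappe's ergonomics above hangs off _dict, a dict whose keys double as attributes, combined with werkzeug's Local so globals like frappe.db are per-request. The attribute-dict half is small enough to show in isolation; this is a sketch of the pattern, not frappe's exact class:

```python
# Stripped-down version of the _dict pattern above: missing keys read as
# None, writes go straight into the mapping, and update() returns self so
# calls can be chained.
class attrdict(dict):
    def __getattr__(self, key):
        ret = self.get(key)
        if ret is None and key.startswith('__'):
            # Keep copy/pickle protocols working instead of returning None.
            raise AttributeError(key)
        return ret

    def __setattr__(self, key, value):
        self[key] = value

    def update(self, other):
        super(attrdict, self).update(other)
        return self

conf = attrdict({'db_name': 'site1'})
conf.lang = conf.lang or 'en'         # missing keys read as None, not KeyError
print(conf.db_name, conf.lang)        # site1 en
print(attrdict().update({'a': 1}).a)  # 1, thanks to the chaining update()
```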
bayusantoso/final-assignment-web-ontology
IMPLEMENTATION/Application/SourceCode/GOApps/flask/Lib/encodings/cp862.py
272
33370
""" Python Character Mapping Codec generated from 'VENDORS/MICSFT/PC/CP862.TXT' with gencodec.py. """#" import codecs ### Codec APIs class Codec(codecs.Codec): def encode(self,input,errors='strict'): return codecs.charmap_encode(input,errors,encoding_map) def decode(self,input,errors='strict'): return codecs.charmap_decode(input,errors,decoding_table) class IncrementalEncoder(codecs.IncrementalEncoder): def encode(self, input, final=False): return codecs.charmap_encode(input,self.errors,encoding_map)[0] class IncrementalDecoder(codecs.IncrementalDecoder): def decode(self, input, final=False): return codecs.charmap_decode(input,self.errors,decoding_table)[0] class StreamWriter(Codec,codecs.StreamWriter): pass class StreamReader(Codec,codecs.StreamReader): pass ### encodings module API def getregentry(): return codecs.CodecInfo( name='cp862', encode=Codec().encode, decode=Codec().decode, incrementalencoder=IncrementalEncoder, incrementaldecoder=IncrementalDecoder, streamreader=StreamReader, streamwriter=StreamWriter, ) ### Decoding Map decoding_map = codecs.make_identity_dict(range(256)) decoding_map.update({ 0x0080: 0x05d0, # HEBREW LETTER ALEF 0x0081: 0x05d1, # HEBREW LETTER BET 0x0082: 0x05d2, # HEBREW LETTER GIMEL 0x0083: 0x05d3, # HEBREW LETTER DALET 0x0084: 0x05d4, # HEBREW LETTER HE 0x0085: 0x05d5, # HEBREW LETTER VAV 0x0086: 0x05d6, # HEBREW LETTER ZAYIN 0x0087: 0x05d7, # HEBREW LETTER HET 0x0088: 0x05d8, # HEBREW LETTER TET 0x0089: 0x05d9, # HEBREW LETTER YOD 0x008a: 0x05da, # HEBREW LETTER FINAL KAF 0x008b: 0x05db, # HEBREW LETTER KAF 0x008c: 0x05dc, # HEBREW LETTER LAMED 0x008d: 0x05dd, # HEBREW LETTER FINAL MEM 0x008e: 0x05de, # HEBREW LETTER MEM 0x008f: 0x05df, # HEBREW LETTER FINAL NUN 0x0090: 0x05e0, # HEBREW LETTER NUN 0x0091: 0x05e1, # HEBREW LETTER SAMEKH 0x0092: 0x05e2, # HEBREW LETTER AYIN 0x0093: 0x05e3, # HEBREW LETTER FINAL PE 0x0094: 0x05e4, # HEBREW LETTER PE 0x0095: 0x05e5, # HEBREW LETTER FINAL TSADI 0x0096: 0x05e6, # HEBREW LETTER TSADI 0x0097: 0x05e7, # HEBREW LETTER QOF 0x0098: 0x05e8, # HEBREW LETTER RESH 0x0099: 0x05e9, # HEBREW LETTER SHIN 0x009a: 0x05ea, # HEBREW LETTER TAV 0x009b: 0x00a2, # CENT SIGN 0x009c: 0x00a3, # POUND SIGN 0x009d: 0x00a5, # YEN SIGN 0x009e: 0x20a7, # PESETA SIGN 0x009f: 0x0192, # LATIN SMALL LETTER F WITH HOOK 0x00a0: 0x00e1, # LATIN SMALL LETTER A WITH ACUTE 0x00a1: 0x00ed, # LATIN SMALL LETTER I WITH ACUTE 0x00a2: 0x00f3, # LATIN SMALL LETTER O WITH ACUTE 0x00a3: 0x00fa, # LATIN SMALL LETTER U WITH ACUTE 0x00a4: 0x00f1, # LATIN SMALL LETTER N WITH TILDE 0x00a5: 0x00d1, # LATIN CAPITAL LETTER N WITH TILDE 0x00a6: 0x00aa, # FEMININE ORDINAL INDICATOR 0x00a7: 0x00ba, # MASCULINE ORDINAL INDICATOR 0x00a8: 0x00bf, # INVERTED QUESTION MARK 0x00a9: 0x2310, # REVERSED NOT SIGN 0x00aa: 0x00ac, # NOT SIGN 0x00ab: 0x00bd, # VULGAR FRACTION ONE HALF 0x00ac: 0x00bc, # VULGAR FRACTION ONE QUARTER 0x00ad: 0x00a1, # INVERTED EXCLAMATION MARK 0x00ae: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK 0x00af: 0x00bb, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK 0x00b0: 0x2591, # LIGHT SHADE 0x00b1: 0x2592, # MEDIUM SHADE 0x00b2: 0x2593, # DARK SHADE 0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL 0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT 0x00b5: 0x2561, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE 0x00b6: 0x2562, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE 0x00b7: 0x2556, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE 0x00b8: 0x2555, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE 0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT 
0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL 0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT 0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT 0x00bd: 0x255c, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE 0x00be: 0x255b, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE 0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT 0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT 0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL 0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL 0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT 0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL 0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL 0x00c6: 0x255e, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE 0x00c7: 0x255f, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE 0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT 0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT 0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL 0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL 0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT 0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL 0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL 0x00cf: 0x2567, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE 0x00d0: 0x2568, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE 0x00d1: 0x2564, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE 0x00d2: 0x2565, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE 0x00d3: 0x2559, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE 0x00d4: 0x2558, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE 0x00d5: 0x2552, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE 0x00d6: 0x2553, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE 0x00d7: 0x256b, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE 0x00d8: 0x256a, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE 0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT 0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT 0x00db: 0x2588, # FULL BLOCK 0x00dc: 0x2584, # LOWER HALF BLOCK 0x00dd: 0x258c, # LEFT HALF BLOCK 0x00de: 0x2590, # RIGHT HALF BLOCK 0x00df: 0x2580, # UPPER HALF BLOCK 0x00e0: 0x03b1, # GREEK SMALL LETTER ALPHA 0x00e1: 0x00df, # LATIN SMALL LETTER SHARP S (GERMAN) 0x00e2: 0x0393, # GREEK CAPITAL LETTER GAMMA 0x00e3: 0x03c0, # GREEK SMALL LETTER PI 0x00e4: 0x03a3, # GREEK CAPITAL LETTER SIGMA 0x00e5: 0x03c3, # GREEK SMALL LETTER SIGMA 0x00e6: 0x00b5, # MICRO SIGN 0x00e7: 0x03c4, # GREEK SMALL LETTER TAU 0x00e8: 0x03a6, # GREEK CAPITAL LETTER PHI 0x00e9: 0x0398, # GREEK CAPITAL LETTER THETA 0x00ea: 0x03a9, # GREEK CAPITAL LETTER OMEGA 0x00eb: 0x03b4, # GREEK SMALL LETTER DELTA 0x00ec: 0x221e, # INFINITY 0x00ed: 0x03c6, # GREEK SMALL LETTER PHI 0x00ee: 0x03b5, # GREEK SMALL LETTER EPSILON 0x00ef: 0x2229, # INTERSECTION 0x00f0: 0x2261, # IDENTICAL TO 0x00f1: 0x00b1, # PLUS-MINUS SIGN 0x00f2: 0x2265, # GREATER-THAN OR EQUAL TO 0x00f3: 0x2264, # LESS-THAN OR EQUAL TO 0x00f4: 0x2320, # TOP HALF INTEGRAL 0x00f5: 0x2321, # BOTTOM HALF INTEGRAL 0x00f6: 0x00f7, # DIVISION SIGN 0x00f7: 0x2248, # ALMOST EQUAL TO 0x00f8: 0x00b0, # DEGREE SIGN 0x00f9: 0x2219, # BULLET OPERATOR 0x00fa: 0x00b7, # MIDDLE DOT 0x00fb: 0x221a, # SQUARE ROOT 0x00fc: 0x207f, # SUPERSCRIPT LATIN SMALL LETTER N 0x00fd: 0x00b2, # SUPERSCRIPT TWO 0x00fe: 0x25a0, # BLACK SQUARE 0x00ff: 0x00a0, # NO-BREAK SPACE }) ### Decoding Table decoding_table = ( '\x00' # 0x0000 -> NULL '\x01' # 0x0001 -> START OF HEADING '\x02' # 0x0002 -> START OF TEXT '\x03' # 0x0003 -> END OF TEXT '\x04' # 0x0004 -> END OF TRANSMISSION '\x05' # 0x0005 -> ENQUIRY '\x06' # 0x0006 -> ACKNOWLEDGE '\x07' # 0x0007 
-> BELL '\x08' # 0x0008 -> BACKSPACE '\t' # 0x0009 -> HORIZONTAL TABULATION '\n' # 0x000a -> LINE FEED '\x0b' # 0x000b -> VERTICAL TABULATION '\x0c' # 0x000c -> FORM FEED '\r' # 0x000d -> CARRIAGE RETURN '\x0e' # 0x000e -> SHIFT OUT '\x0f' # 0x000f -> SHIFT IN '\x10' # 0x0010 -> DATA LINK ESCAPE '\x11' # 0x0011 -> DEVICE CONTROL ONE '\x12' # 0x0012 -> DEVICE CONTROL TWO '\x13' # 0x0013 -> DEVICE CONTROL THREE '\x14' # 0x0014 -> DEVICE CONTROL FOUR '\x15' # 0x0015 -> NEGATIVE ACKNOWLEDGE '\x16' # 0x0016 -> SYNCHRONOUS IDLE '\x17' # 0x0017 -> END OF TRANSMISSION BLOCK '\x18' # 0x0018 -> CANCEL '\x19' # 0x0019 -> END OF MEDIUM '\x1a' # 0x001a -> SUBSTITUTE '\x1b' # 0x001b -> ESCAPE '\x1c' # 0x001c -> FILE SEPARATOR '\x1d' # 0x001d -> GROUP SEPARATOR '\x1e' # 0x001e -> RECORD SEPARATOR '\x1f' # 0x001f -> UNIT SEPARATOR ' ' # 0x0020 -> SPACE '!' # 0x0021 -> EXCLAMATION MARK '"' # 0x0022 -> QUOTATION MARK '#' # 0x0023 -> NUMBER SIGN '$' # 0x0024 -> DOLLAR SIGN '%' # 0x0025 -> PERCENT SIGN '&' # 0x0026 -> AMPERSAND "'" # 0x0027 -> APOSTROPHE '(' # 0x0028 -> LEFT PARENTHESIS ')' # 0x0029 -> RIGHT PARENTHESIS '*' # 0x002a -> ASTERISK '+' # 0x002b -> PLUS SIGN ',' # 0x002c -> COMMA '-' # 0x002d -> HYPHEN-MINUS '.' # 0x002e -> FULL STOP '/' # 0x002f -> SOLIDUS '0' # 0x0030 -> DIGIT ZERO '1' # 0x0031 -> DIGIT ONE '2' # 0x0032 -> DIGIT TWO '3' # 0x0033 -> DIGIT THREE '4' # 0x0034 -> DIGIT FOUR '5' # 0x0035 -> DIGIT FIVE '6' # 0x0036 -> DIGIT SIX '7' # 0x0037 -> DIGIT SEVEN '8' # 0x0038 -> DIGIT EIGHT '9' # 0x0039 -> DIGIT NINE ':' # 0x003a -> COLON ';' # 0x003b -> SEMICOLON '<' # 0x003c -> LESS-THAN SIGN '=' # 0x003d -> EQUALS SIGN '>' # 0x003e -> GREATER-THAN SIGN '?' # 0x003f -> QUESTION MARK '@' # 0x0040 -> COMMERCIAL AT 'A' # 0x0041 -> LATIN CAPITAL LETTER A 'B' # 0x0042 -> LATIN CAPITAL LETTER B 'C' # 0x0043 -> LATIN CAPITAL LETTER C 'D' # 0x0044 -> LATIN CAPITAL LETTER D 'E' # 0x0045 -> LATIN CAPITAL LETTER E 'F' # 0x0046 -> LATIN CAPITAL LETTER F 'G' # 0x0047 -> LATIN CAPITAL LETTER G 'H' # 0x0048 -> LATIN CAPITAL LETTER H 'I' # 0x0049 -> LATIN CAPITAL LETTER I 'J' # 0x004a -> LATIN CAPITAL LETTER J 'K' # 0x004b -> LATIN CAPITAL LETTER K 'L' # 0x004c -> LATIN CAPITAL LETTER L 'M' # 0x004d -> LATIN CAPITAL LETTER M 'N' # 0x004e -> LATIN CAPITAL LETTER N 'O' # 0x004f -> LATIN CAPITAL LETTER O 'P' # 0x0050 -> LATIN CAPITAL LETTER P 'Q' # 0x0051 -> LATIN CAPITAL LETTER Q 'R' # 0x0052 -> LATIN CAPITAL LETTER R 'S' # 0x0053 -> LATIN CAPITAL LETTER S 'T' # 0x0054 -> LATIN CAPITAL LETTER T 'U' # 0x0055 -> LATIN CAPITAL LETTER U 'V' # 0x0056 -> LATIN CAPITAL LETTER V 'W' # 0x0057 -> LATIN CAPITAL LETTER W 'X' # 0x0058 -> LATIN CAPITAL LETTER X 'Y' # 0x0059 -> LATIN CAPITAL LETTER Y 'Z' # 0x005a -> LATIN CAPITAL LETTER Z '[' # 0x005b -> LEFT SQUARE BRACKET '\\' # 0x005c -> REVERSE SOLIDUS ']' # 0x005d -> RIGHT SQUARE BRACKET '^' # 0x005e -> CIRCUMFLEX ACCENT '_' # 0x005f -> LOW LINE '`' # 0x0060 -> GRAVE ACCENT 'a' # 0x0061 -> LATIN SMALL LETTER A 'b' # 0x0062 -> LATIN SMALL LETTER B 'c' # 0x0063 -> LATIN SMALL LETTER C 'd' # 0x0064 -> LATIN SMALL LETTER D 'e' # 0x0065 -> LATIN SMALL LETTER E 'f' # 0x0066 -> LATIN SMALL LETTER F 'g' # 0x0067 -> LATIN SMALL LETTER G 'h' # 0x0068 -> LATIN SMALL LETTER H 'i' # 0x0069 -> LATIN SMALL LETTER I 'j' # 0x006a -> LATIN SMALL LETTER J 'k' # 0x006b -> LATIN SMALL LETTER K 'l' # 0x006c -> LATIN SMALL LETTER L 'm' # 0x006d -> LATIN SMALL LETTER M 'n' # 0x006e -> LATIN SMALL LETTER N 'o' # 0x006f -> LATIN SMALL LETTER O 'p' # 0x0070 -> LATIN SMALL LETTER P 'q' # 0x0071 
-> LATIN SMALL LETTER Q 'r' # 0x0072 -> LATIN SMALL LETTER R 's' # 0x0073 -> LATIN SMALL LETTER S 't' # 0x0074 -> LATIN SMALL LETTER T 'u' # 0x0075 -> LATIN SMALL LETTER U 'v' # 0x0076 -> LATIN SMALL LETTER V 'w' # 0x0077 -> LATIN SMALL LETTER W 'x' # 0x0078 -> LATIN SMALL LETTER X 'y' # 0x0079 -> LATIN SMALL LETTER Y 'z' # 0x007a -> LATIN SMALL LETTER Z '{' # 0x007b -> LEFT CURLY BRACKET '|' # 0x007c -> VERTICAL LINE '}' # 0x007d -> RIGHT CURLY BRACKET '~' # 0x007e -> TILDE '\x7f' # 0x007f -> DELETE '\u05d0' # 0x0080 -> HEBREW LETTER ALEF '\u05d1' # 0x0081 -> HEBREW LETTER BET '\u05d2' # 0x0082 -> HEBREW LETTER GIMEL '\u05d3' # 0x0083 -> HEBREW LETTER DALET '\u05d4' # 0x0084 -> HEBREW LETTER HE '\u05d5' # 0x0085 -> HEBREW LETTER VAV '\u05d6' # 0x0086 -> HEBREW LETTER ZAYIN '\u05d7' # 0x0087 -> HEBREW LETTER HET '\u05d8' # 0x0088 -> HEBREW LETTER TET '\u05d9' # 0x0089 -> HEBREW LETTER YOD '\u05da' # 0x008a -> HEBREW LETTER FINAL KAF '\u05db' # 0x008b -> HEBREW LETTER KAF '\u05dc' # 0x008c -> HEBREW LETTER LAMED '\u05dd' # 0x008d -> HEBREW LETTER FINAL MEM '\u05de' # 0x008e -> HEBREW LETTER MEM '\u05df' # 0x008f -> HEBREW LETTER FINAL NUN '\u05e0' # 0x0090 -> HEBREW LETTER NUN '\u05e1' # 0x0091 -> HEBREW LETTER SAMEKH '\u05e2' # 0x0092 -> HEBREW LETTER AYIN '\u05e3' # 0x0093 -> HEBREW LETTER FINAL PE '\u05e4' # 0x0094 -> HEBREW LETTER PE '\u05e5' # 0x0095 -> HEBREW LETTER FINAL TSADI '\u05e6' # 0x0096 -> HEBREW LETTER TSADI '\u05e7' # 0x0097 -> HEBREW LETTER QOF '\u05e8' # 0x0098 -> HEBREW LETTER RESH '\u05e9' # 0x0099 -> HEBREW LETTER SHIN '\u05ea' # 0x009a -> HEBREW LETTER TAV '\xa2' # 0x009b -> CENT SIGN '\xa3' # 0x009c -> POUND SIGN '\xa5' # 0x009d -> YEN SIGN '\u20a7' # 0x009e -> PESETA SIGN '\u0192' # 0x009f -> LATIN SMALL LETTER F WITH HOOK '\xe1' # 0x00a0 -> LATIN SMALL LETTER A WITH ACUTE '\xed' # 0x00a1 -> LATIN SMALL LETTER I WITH ACUTE '\xf3' # 0x00a2 -> LATIN SMALL LETTER O WITH ACUTE '\xfa' # 0x00a3 -> LATIN SMALL LETTER U WITH ACUTE '\xf1' # 0x00a4 -> LATIN SMALL LETTER N WITH TILDE '\xd1' # 0x00a5 -> LATIN CAPITAL LETTER N WITH TILDE '\xaa' # 0x00a6 -> FEMININE ORDINAL INDICATOR '\xba' # 0x00a7 -> MASCULINE ORDINAL INDICATOR '\xbf' # 0x00a8 -> INVERTED QUESTION MARK '\u2310' # 0x00a9 -> REVERSED NOT SIGN '\xac' # 0x00aa -> NOT SIGN '\xbd' # 0x00ab -> VULGAR FRACTION ONE HALF '\xbc' # 0x00ac -> VULGAR FRACTION ONE QUARTER '\xa1' # 0x00ad -> INVERTED EXCLAMATION MARK '\xab' # 0x00ae -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK '\xbb' # 0x00af -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK '\u2591' # 0x00b0 -> LIGHT SHADE '\u2592' # 0x00b1 -> MEDIUM SHADE '\u2593' # 0x00b2 -> DARK SHADE '\u2502' # 0x00b3 -> BOX DRAWINGS LIGHT VERTICAL '\u2524' # 0x00b4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT '\u2561' # 0x00b5 -> BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE '\u2562' # 0x00b6 -> BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE '\u2556' # 0x00b7 -> BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE '\u2555' # 0x00b8 -> BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE '\u2563' # 0x00b9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT '\u2551' # 0x00ba -> BOX DRAWINGS DOUBLE VERTICAL '\u2557' # 0x00bb -> BOX DRAWINGS DOUBLE DOWN AND LEFT '\u255d' # 0x00bc -> BOX DRAWINGS DOUBLE UP AND LEFT '\u255c' # 0x00bd -> BOX DRAWINGS UP DOUBLE AND LEFT SINGLE '\u255b' # 0x00be -> BOX DRAWINGS UP SINGLE AND LEFT DOUBLE '\u2510' # 0x00bf -> BOX DRAWINGS LIGHT DOWN AND LEFT '\u2514' # 0x00c0 -> BOX DRAWINGS LIGHT UP AND RIGHT '\u2534' # 0x00c1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL '\u252c' # 0x00c2 -> BOX DRAWINGS LIGHT DOWN 
AND HORIZONTAL '\u251c' # 0x00c3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT '\u2500' # 0x00c4 -> BOX DRAWINGS LIGHT HORIZONTAL '\u253c' # 0x00c5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL '\u255e' # 0x00c6 -> BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE '\u255f' # 0x00c7 -> BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE '\u255a' # 0x00c8 -> BOX DRAWINGS DOUBLE UP AND RIGHT '\u2554' # 0x00c9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT '\u2569' # 0x00ca -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL '\u2566' # 0x00cb -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL '\u2560' # 0x00cc -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT '\u2550' # 0x00cd -> BOX DRAWINGS DOUBLE HORIZONTAL '\u256c' # 0x00ce -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL '\u2567' # 0x00cf -> BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE '\u2568' # 0x00d0 -> BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE '\u2564' # 0x00d1 -> BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE '\u2565' # 0x00d2 -> BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE '\u2559' # 0x00d3 -> BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE '\u2558' # 0x00d4 -> BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE '\u2552' # 0x00d5 -> BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE '\u2553' # 0x00d6 -> BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE '\u256b' # 0x00d7 -> BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE '\u256a' # 0x00d8 -> BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE '\u2518' # 0x00d9 -> BOX DRAWINGS LIGHT UP AND LEFT '\u250c' # 0x00da -> BOX DRAWINGS LIGHT DOWN AND RIGHT '\u2588' # 0x00db -> FULL BLOCK '\u2584' # 0x00dc -> LOWER HALF BLOCK '\u258c' # 0x00dd -> LEFT HALF BLOCK '\u2590' # 0x00de -> RIGHT HALF BLOCK '\u2580' # 0x00df -> UPPER HALF BLOCK '\u03b1' # 0x00e0 -> GREEK SMALL LETTER ALPHA '\xdf' # 0x00e1 -> LATIN SMALL LETTER SHARP S (GERMAN) '\u0393' # 0x00e2 -> GREEK CAPITAL LETTER GAMMA '\u03c0' # 0x00e3 -> GREEK SMALL LETTER PI '\u03a3' # 0x00e4 -> GREEK CAPITAL LETTER SIGMA '\u03c3' # 0x00e5 -> GREEK SMALL LETTER SIGMA '\xb5' # 0x00e6 -> MICRO SIGN '\u03c4' # 0x00e7 -> GREEK SMALL LETTER TAU '\u03a6' # 0x00e8 -> GREEK CAPITAL LETTER PHI '\u0398' # 0x00e9 -> GREEK CAPITAL LETTER THETA '\u03a9' # 0x00ea -> GREEK CAPITAL LETTER OMEGA '\u03b4' # 0x00eb -> GREEK SMALL LETTER DELTA '\u221e' # 0x00ec -> INFINITY '\u03c6' # 0x00ed -> GREEK SMALL LETTER PHI '\u03b5' # 0x00ee -> GREEK SMALL LETTER EPSILON '\u2229' # 0x00ef -> INTERSECTION '\u2261' # 0x00f0 -> IDENTICAL TO '\xb1' # 0x00f1 -> PLUS-MINUS SIGN '\u2265' # 0x00f2 -> GREATER-THAN OR EQUAL TO '\u2264' # 0x00f3 -> LESS-THAN OR EQUAL TO '\u2320' # 0x00f4 -> TOP HALF INTEGRAL '\u2321' # 0x00f5 -> BOTTOM HALF INTEGRAL '\xf7' # 0x00f6 -> DIVISION SIGN '\u2248' # 0x00f7 -> ALMOST EQUAL TO '\xb0' # 0x00f8 -> DEGREE SIGN '\u2219' # 0x00f9 -> BULLET OPERATOR '\xb7' # 0x00fa -> MIDDLE DOT '\u221a' # 0x00fb -> SQUARE ROOT '\u207f' # 0x00fc -> SUPERSCRIPT LATIN SMALL LETTER N '\xb2' # 0x00fd -> SUPERSCRIPT TWO '\u25a0' # 0x00fe -> BLACK SQUARE '\xa0' # 0x00ff -> NO-BREAK SPACE ) ### Encoding Map encoding_map = { 0x0000: 0x0000, # NULL 0x0001: 0x0001, # START OF HEADING 0x0002: 0x0002, # START OF TEXT 0x0003: 0x0003, # END OF TEXT 0x0004: 0x0004, # END OF TRANSMISSION 0x0005: 0x0005, # ENQUIRY 0x0006: 0x0006, # ACKNOWLEDGE 0x0007: 0x0007, # BELL 0x0008: 0x0008, # BACKSPACE 0x0009: 0x0009, # HORIZONTAL TABULATION 0x000a: 0x000a, # LINE FEED 0x000b: 0x000b, # VERTICAL TABULATION 0x000c: 0x000c, # FORM FEED 0x000d: 0x000d, # CARRIAGE RETURN 0x000e: 0x000e, # SHIFT OUT 0x000f: 0x000f, # SHIFT IN 0x0010: 0x0010, # DATA LINK ESCAPE 0x0011: 
0x0011, # DEVICE CONTROL ONE 0x0012: 0x0012, # DEVICE CONTROL TWO 0x0013: 0x0013, # DEVICE CONTROL THREE 0x0014: 0x0014, # DEVICE CONTROL FOUR 0x0015: 0x0015, # NEGATIVE ACKNOWLEDGE 0x0016: 0x0016, # SYNCHRONOUS IDLE 0x0017: 0x0017, # END OF TRANSMISSION BLOCK 0x0018: 0x0018, # CANCEL 0x0019: 0x0019, # END OF MEDIUM 0x001a: 0x001a, # SUBSTITUTE 0x001b: 0x001b, # ESCAPE 0x001c: 0x001c, # FILE SEPARATOR 0x001d: 0x001d, # GROUP SEPARATOR 0x001e: 0x001e, # RECORD SEPARATOR 0x001f: 0x001f, # UNIT SEPARATOR 0x0020: 0x0020, # SPACE 0x0021: 0x0021, # EXCLAMATION MARK 0x0022: 0x0022, # QUOTATION MARK 0x0023: 0x0023, # NUMBER SIGN 0x0024: 0x0024, # DOLLAR SIGN 0x0025: 0x0025, # PERCENT SIGN 0x0026: 0x0026, # AMPERSAND 0x0027: 0x0027, # APOSTROPHE 0x0028: 0x0028, # LEFT PARENTHESIS 0x0029: 0x0029, # RIGHT PARENTHESIS 0x002a: 0x002a, # ASTERISK 0x002b: 0x002b, # PLUS SIGN 0x002c: 0x002c, # COMMA 0x002d: 0x002d, # HYPHEN-MINUS 0x002e: 0x002e, # FULL STOP 0x002f: 0x002f, # SOLIDUS 0x0030: 0x0030, # DIGIT ZERO 0x0031: 0x0031, # DIGIT ONE 0x0032: 0x0032, # DIGIT TWO 0x0033: 0x0033, # DIGIT THREE 0x0034: 0x0034, # DIGIT FOUR 0x0035: 0x0035, # DIGIT FIVE 0x0036: 0x0036, # DIGIT SIX 0x0037: 0x0037, # DIGIT SEVEN 0x0038: 0x0038, # DIGIT EIGHT 0x0039: 0x0039, # DIGIT NINE 0x003a: 0x003a, # COLON 0x003b: 0x003b, # SEMICOLON 0x003c: 0x003c, # LESS-THAN SIGN 0x003d: 0x003d, # EQUALS SIGN 0x003e: 0x003e, # GREATER-THAN SIGN 0x003f: 0x003f, # QUESTION MARK 0x0040: 0x0040, # COMMERCIAL AT 0x0041: 0x0041, # LATIN CAPITAL LETTER A 0x0042: 0x0042, # LATIN CAPITAL LETTER B 0x0043: 0x0043, # LATIN CAPITAL LETTER C 0x0044: 0x0044, # LATIN CAPITAL LETTER D 0x0045: 0x0045, # LATIN CAPITAL LETTER E 0x0046: 0x0046, # LATIN CAPITAL LETTER F 0x0047: 0x0047, # LATIN CAPITAL LETTER G 0x0048: 0x0048, # LATIN CAPITAL LETTER H 0x0049: 0x0049, # LATIN CAPITAL LETTER I 0x004a: 0x004a, # LATIN CAPITAL LETTER J 0x004b: 0x004b, # LATIN CAPITAL LETTER K 0x004c: 0x004c, # LATIN CAPITAL LETTER L 0x004d: 0x004d, # LATIN CAPITAL LETTER M 0x004e: 0x004e, # LATIN CAPITAL LETTER N 0x004f: 0x004f, # LATIN CAPITAL LETTER O 0x0050: 0x0050, # LATIN CAPITAL LETTER P 0x0051: 0x0051, # LATIN CAPITAL LETTER Q 0x0052: 0x0052, # LATIN CAPITAL LETTER R 0x0053: 0x0053, # LATIN CAPITAL LETTER S 0x0054: 0x0054, # LATIN CAPITAL LETTER T 0x0055: 0x0055, # LATIN CAPITAL LETTER U 0x0056: 0x0056, # LATIN CAPITAL LETTER V 0x0057: 0x0057, # LATIN CAPITAL LETTER W 0x0058: 0x0058, # LATIN CAPITAL LETTER X 0x0059: 0x0059, # LATIN CAPITAL LETTER Y 0x005a: 0x005a, # LATIN CAPITAL LETTER Z 0x005b: 0x005b, # LEFT SQUARE BRACKET 0x005c: 0x005c, # REVERSE SOLIDUS 0x005d: 0x005d, # RIGHT SQUARE BRACKET 0x005e: 0x005e, # CIRCUMFLEX ACCENT 0x005f: 0x005f, # LOW LINE 0x0060: 0x0060, # GRAVE ACCENT 0x0061: 0x0061, # LATIN SMALL LETTER A 0x0062: 0x0062, # LATIN SMALL LETTER B 0x0063: 0x0063, # LATIN SMALL LETTER C 0x0064: 0x0064, # LATIN SMALL LETTER D 0x0065: 0x0065, # LATIN SMALL LETTER E 0x0066: 0x0066, # LATIN SMALL LETTER F 0x0067: 0x0067, # LATIN SMALL LETTER G 0x0068: 0x0068, # LATIN SMALL LETTER H 0x0069: 0x0069, # LATIN SMALL LETTER I 0x006a: 0x006a, # LATIN SMALL LETTER J 0x006b: 0x006b, # LATIN SMALL LETTER K 0x006c: 0x006c, # LATIN SMALL LETTER L 0x006d: 0x006d, # LATIN SMALL LETTER M 0x006e: 0x006e, # LATIN SMALL LETTER N 0x006f: 0x006f, # LATIN SMALL LETTER O 0x0070: 0x0070, # LATIN SMALL LETTER P 0x0071: 0x0071, # LATIN SMALL LETTER Q 0x0072: 0x0072, # LATIN SMALL LETTER R 0x0073: 0x0073, # LATIN SMALL LETTER S 0x0074: 0x0074, # LATIN SMALL LETTER T 0x0075: 0x0075, 
# LATIN SMALL LETTER U 0x0076: 0x0076, # LATIN SMALL LETTER V 0x0077: 0x0077, # LATIN SMALL LETTER W 0x0078: 0x0078, # LATIN SMALL LETTER X 0x0079: 0x0079, # LATIN SMALL LETTER Y 0x007a: 0x007a, # LATIN SMALL LETTER Z 0x007b: 0x007b, # LEFT CURLY BRACKET 0x007c: 0x007c, # VERTICAL LINE 0x007d: 0x007d, # RIGHT CURLY BRACKET 0x007e: 0x007e, # TILDE 0x007f: 0x007f, # DELETE 0x00a0: 0x00ff, # NO-BREAK SPACE 0x00a1: 0x00ad, # INVERTED EXCLAMATION MARK 0x00a2: 0x009b, # CENT SIGN 0x00a3: 0x009c, # POUND SIGN 0x00a5: 0x009d, # YEN SIGN 0x00aa: 0x00a6, # FEMININE ORDINAL INDICATOR 0x00ab: 0x00ae, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK 0x00ac: 0x00aa, # NOT SIGN 0x00b0: 0x00f8, # DEGREE SIGN 0x00b1: 0x00f1, # PLUS-MINUS SIGN 0x00b2: 0x00fd, # SUPERSCRIPT TWO 0x00b5: 0x00e6, # MICRO SIGN 0x00b7: 0x00fa, # MIDDLE DOT 0x00ba: 0x00a7, # MASCULINE ORDINAL INDICATOR 0x00bb: 0x00af, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK 0x00bc: 0x00ac, # VULGAR FRACTION ONE QUARTER 0x00bd: 0x00ab, # VULGAR FRACTION ONE HALF 0x00bf: 0x00a8, # INVERTED QUESTION MARK 0x00d1: 0x00a5, # LATIN CAPITAL LETTER N WITH TILDE 0x00df: 0x00e1, # LATIN SMALL LETTER SHARP S (GERMAN) 0x00e1: 0x00a0, # LATIN SMALL LETTER A WITH ACUTE 0x00ed: 0x00a1, # LATIN SMALL LETTER I WITH ACUTE 0x00f1: 0x00a4, # LATIN SMALL LETTER N WITH TILDE 0x00f3: 0x00a2, # LATIN SMALL LETTER O WITH ACUTE 0x00f7: 0x00f6, # DIVISION SIGN 0x00fa: 0x00a3, # LATIN SMALL LETTER U WITH ACUTE 0x0192: 0x009f, # LATIN SMALL LETTER F WITH HOOK 0x0393: 0x00e2, # GREEK CAPITAL LETTER GAMMA 0x0398: 0x00e9, # GREEK CAPITAL LETTER THETA 0x03a3: 0x00e4, # GREEK CAPITAL LETTER SIGMA 0x03a6: 0x00e8, # GREEK CAPITAL LETTER PHI 0x03a9: 0x00ea, # GREEK CAPITAL LETTER OMEGA 0x03b1: 0x00e0, # GREEK SMALL LETTER ALPHA 0x03b4: 0x00eb, # GREEK SMALL LETTER DELTA 0x03b5: 0x00ee, # GREEK SMALL LETTER EPSILON 0x03c0: 0x00e3, # GREEK SMALL LETTER PI 0x03c3: 0x00e5, # GREEK SMALL LETTER SIGMA 0x03c4: 0x00e7, # GREEK SMALL LETTER TAU 0x03c6: 0x00ed, # GREEK SMALL LETTER PHI 0x05d0: 0x0080, # HEBREW LETTER ALEF 0x05d1: 0x0081, # HEBREW LETTER BET 0x05d2: 0x0082, # HEBREW LETTER GIMEL 0x05d3: 0x0083, # HEBREW LETTER DALET 0x05d4: 0x0084, # HEBREW LETTER HE 0x05d5: 0x0085, # HEBREW LETTER VAV 0x05d6: 0x0086, # HEBREW LETTER ZAYIN 0x05d7: 0x0087, # HEBREW LETTER HET 0x05d8: 0x0088, # HEBREW LETTER TET 0x05d9: 0x0089, # HEBREW LETTER YOD 0x05da: 0x008a, # HEBREW LETTER FINAL KAF 0x05db: 0x008b, # HEBREW LETTER KAF 0x05dc: 0x008c, # HEBREW LETTER LAMED 0x05dd: 0x008d, # HEBREW LETTER FINAL MEM 0x05de: 0x008e, # HEBREW LETTER MEM 0x05df: 0x008f, # HEBREW LETTER FINAL NUN 0x05e0: 0x0090, # HEBREW LETTER NUN 0x05e1: 0x0091, # HEBREW LETTER SAMEKH 0x05e2: 0x0092, # HEBREW LETTER AYIN 0x05e3: 0x0093, # HEBREW LETTER FINAL PE 0x05e4: 0x0094, # HEBREW LETTER PE 0x05e5: 0x0095, # HEBREW LETTER FINAL TSADI 0x05e6: 0x0096, # HEBREW LETTER TSADI 0x05e7: 0x0097, # HEBREW LETTER QOF 0x05e8: 0x0098, # HEBREW LETTER RESH 0x05e9: 0x0099, # HEBREW LETTER SHIN 0x05ea: 0x009a, # HEBREW LETTER TAV 0x207f: 0x00fc, # SUPERSCRIPT LATIN SMALL LETTER N 0x20a7: 0x009e, # PESETA SIGN 0x2219: 0x00f9, # BULLET OPERATOR 0x221a: 0x00fb, # SQUARE ROOT 0x221e: 0x00ec, # INFINITY 0x2229: 0x00ef, # INTERSECTION 0x2248: 0x00f7, # ALMOST EQUAL TO 0x2261: 0x00f0, # IDENTICAL TO 0x2264: 0x00f3, # LESS-THAN OR EQUAL TO 0x2265: 0x00f2, # GREATER-THAN OR EQUAL TO 0x2310: 0x00a9, # REVERSED NOT SIGN 0x2320: 0x00f4, # TOP HALF INTEGRAL 0x2321: 0x00f5, # BOTTOM HALF INTEGRAL 0x2500: 0x00c4, # BOX DRAWINGS LIGHT HORIZONTAL 0x2502: 
0x00b3, # BOX DRAWINGS LIGHT VERTICAL 0x250c: 0x00da, # BOX DRAWINGS LIGHT DOWN AND RIGHT 0x2510: 0x00bf, # BOX DRAWINGS LIGHT DOWN AND LEFT 0x2514: 0x00c0, # BOX DRAWINGS LIGHT UP AND RIGHT 0x2518: 0x00d9, # BOX DRAWINGS LIGHT UP AND LEFT 0x251c: 0x00c3, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT 0x2524: 0x00b4, # BOX DRAWINGS LIGHT VERTICAL AND LEFT 0x252c: 0x00c2, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL 0x2534: 0x00c1, # BOX DRAWINGS LIGHT UP AND HORIZONTAL 0x253c: 0x00c5, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL 0x2550: 0x00cd, # BOX DRAWINGS DOUBLE HORIZONTAL 0x2551: 0x00ba, # BOX DRAWINGS DOUBLE VERTICAL 0x2552: 0x00d5, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE 0x2553: 0x00d6, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE 0x2554: 0x00c9, # BOX DRAWINGS DOUBLE DOWN AND RIGHT 0x2555: 0x00b8, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE 0x2556: 0x00b7, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE 0x2557: 0x00bb, # BOX DRAWINGS DOUBLE DOWN AND LEFT 0x2558: 0x00d4, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE 0x2559: 0x00d3, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE 0x255a: 0x00c8, # BOX DRAWINGS DOUBLE UP AND RIGHT 0x255b: 0x00be, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE 0x255c: 0x00bd, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE 0x255d: 0x00bc, # BOX DRAWINGS DOUBLE UP AND LEFT 0x255e: 0x00c6, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE 0x255f: 0x00c7, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE 0x2560: 0x00cc, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT 0x2561: 0x00b5, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE 0x2562: 0x00b6, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE 0x2563: 0x00b9, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT 0x2564: 0x00d1, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE 0x2565: 0x00d2, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE 0x2566: 0x00cb, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL 0x2567: 0x00cf, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE 0x2568: 0x00d0, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE 0x2569: 0x00ca, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL 0x256a: 0x00d8, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE 0x256b: 0x00d7, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE 0x256c: 0x00ce, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL 0x2580: 0x00df, # UPPER HALF BLOCK 0x2584: 0x00dc, # LOWER HALF BLOCK 0x2588: 0x00db, # FULL BLOCK 0x258c: 0x00dd, # LEFT HALF BLOCK 0x2590: 0x00de, # RIGHT HALF BLOCK 0x2591: 0x00b0, # LIGHT SHADE 0x2592: 0x00b1, # MEDIUM SHADE 0x2593: 0x00b2, # DARK SHADE 0x25a0: 0x00fe, # BLACK SQUARE }
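The flattened block above is the tail of a charmap codec module in the style of Python's encodings package: a 256-entry decoding_table string indexed by byte value, followed by the inverse encoding_map. A minimal sketch of how such tables are consumed, assuming a two-entry stand-in table instead of the real 256-entry one (codecs.charmap_build is the undocumented helper the generated codec modules themselves use):

import codecs

# Two-entry stand-in for the 256-entry decoding table above.
decoding_table = (
    '\x00'    # 0x00 -> NULL
    '\u05d0'  # 0x01 -> HEBREW LETTER ALEF
)
# charmap_build() inverts a decoding table, playing the role of the
# hand-written encoding_map above.
encoding_map = codecs.charmap_build(decoding_table)

text, _ = codecs.charmap_decode(b'\x01\x00', 'strict', decoding_table)
assert text == '\u05d0\x00'
data, _ = codecs.charmap_encode(text, 'strict', encoding_map)
assert data == b'\x01\x00'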
gpl-3.0
2013Commons/HUE-SHARK
build/env/lib/python2.7/site-packages/Pygments-1.3.1-py2.7.egg/pygments/lexer.py
58
22989
# -*- coding: utf-8 -*- """ pygments.lexer ~~~~~~~~~~~~~~ Base lexer classes. :copyright: Copyright 2006-2010 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ import re from pygments.filter import apply_filters, Filter from pygments.filters import get_filter_by_name from pygments.token import Error, Text, Other, _TokenType from pygments.util import get_bool_opt, get_int_opt, get_list_opt, \ make_analysator __all__ = ['Lexer', 'RegexLexer', 'ExtendedRegexLexer', 'DelegatingLexer', 'LexerContext', 'include', 'bygroups', 'using', 'this'] _default_analyse = staticmethod(lambda x: 0.0) class LexerMeta(type): """ This metaclass automagically converts ``analyse_text`` methods into static methods which always return float values. """ def __new__(cls, name, bases, d): if 'analyse_text' in d: d['analyse_text'] = make_analysator(d['analyse_text']) return type.__new__(cls, name, bases, d) class Lexer(object): """ Lexer for a specific language. Basic options recognized: ``stripnl`` Strip leading and trailing newlines from the input (default: True). ``stripall`` Strip all leading and trailing whitespace from the input (default: False). ``ensurenl`` Make sure that the input ends with a newline (default: True). This is required for some lexers that consume input linewise. *New in Pygments 1.3.* ``tabsize`` If given and greater than 0, expand tabs in the input (default: 0). ``encoding`` If given, must be an encoding name. This encoding will be used to convert the input string to Unicode, if it is not already a Unicode string (default: ``'latin1'``). Can also be ``'guess'`` to use a simple UTF-8 / Latin1 detection, or ``'chardet'`` to use the chardet library, if it is installed. """ #: Name of the lexer name = None #: Shortcuts for the lexer aliases = [] #: fn match rules filenames = [] #: fn alias filenames alias_filenames = [] #: mime types mimetypes = [] __metaclass__ = LexerMeta def __init__(self, **options): self.options = options self.stripnl = get_bool_opt(options, 'stripnl', True) self.stripall = get_bool_opt(options, 'stripall', False) self.ensurenl = get_bool_opt(options, 'ensurenl', True) self.tabsize = get_int_opt(options, 'tabsize', 0) self.encoding = options.get('encoding', 'latin1') # self.encoding = options.get('inencoding', None) or self.encoding self.filters = [] for filter_ in get_list_opt(options, 'filters', ()): self.add_filter(filter_) def __repr__(self): if self.options: return '<pygments.lexers.%s with %r>' % (self.__class__.__name__, self.options) else: return '<pygments.lexers.%s>' % self.__class__.__name__ def add_filter(self, filter_, **options): """ Add a new stream filter to this lexer. """ if not isinstance(filter_, Filter): filter_ = get_filter_by_name(filter_, **options) self.filters.append(filter_) def analyse_text(text): """ Has to return a float between ``0`` and ``1`` that indicates if a lexer wants to highlight this text. Used by ``guess_lexer``. If this method returns ``0`` it won't highlight it in any case, if it returns ``1`` highlighting with this lexer is guaranteed. The `LexerMeta` metaclass automatically wraps this function so that it works like a static method (no ``self`` or ``cls`` parameter) and the return value is automatically converted to `float`. If the return value is an object that is boolean `False` it's the same as if the return values was ``0.0``. """ def get_tokens(self, text, unfiltered=False): """ Return an iterable of (tokentype, value) pairs generated from `text`. 
If `unfiltered` is set to `True`, the filtering mechanism is bypassed even if filters are defined. Also preprocess the text, i.e. expand tabs and strip it if wanted and applies registered filters. """ if not isinstance(text, unicode): if self.encoding == 'guess': try: text = text.decode('utf-8') if text.startswith(u'\ufeff'): text = text[len(u'\ufeff'):] except UnicodeDecodeError: text = text.decode('latin1') elif self.encoding == 'chardet': try: import chardet except ImportError: raise ImportError('To enable chardet encoding guessing, ' 'please install the chardet library ' 'from http://chardet.feedparser.org/') enc = chardet.detect(text) text = text.decode(enc['encoding']) else: text = text.decode(self.encoding) # text now *is* a unicode string text = text.replace('\r\n', '\n') text = text.replace('\r', '\n') if self.stripall: text = text.strip() elif self.stripnl: text = text.strip('\n') if self.tabsize > 0: text = text.expandtabs(self.tabsize) if self.ensurenl and not text.endswith('\n'): text += '\n' def streamer(): for i, t, v in self.get_tokens_unprocessed(text): yield t, v stream = streamer() if not unfiltered: stream = apply_filters(stream, self.filters, self) return stream def get_tokens_unprocessed(self, text): """ Return an iterable of (tokentype, value) pairs. In subclasses, implement this method as a generator to maximize effectiveness. """ raise NotImplementedError class DelegatingLexer(Lexer): """ This lexer takes two lexer as arguments. A root lexer and a language lexer. First everything is scanned using the language lexer, afterwards all ``Other`` tokens are lexed using the root lexer. The lexers from the ``template`` lexer package use this base lexer. """ def __init__(self, _root_lexer, _language_lexer, _needle=Other, **options): self.root_lexer = _root_lexer(**options) self.language_lexer = _language_lexer(**options) self.needle = _needle Lexer.__init__(self, **options) def get_tokens_unprocessed(self, text): buffered = '' insertions = [] lng_buffer = [] for i, t, v in self.language_lexer.get_tokens_unprocessed(text): if t is self.needle: if lng_buffer: insertions.append((len(buffered), lng_buffer)) lng_buffer = [] buffered += v else: lng_buffer.append((i, t, v)) if lng_buffer: insertions.append((len(buffered), lng_buffer)) return do_insertions(insertions, self.root_lexer.get_tokens_unprocessed(buffered)) #------------------------------------------------------------------------------- # RegexLexer and ExtendedRegexLexer # class include(str): """ Indicates that a state should include rules from another state. """ pass class combined(tuple): """ Indicates a state combined from multiple states. """ def __new__(cls, *args): return tuple.__new__(cls, args) def __init__(self, *args): # tuple.__init__ doesn't do anything pass class _PseudoMatch(object): """ A pseudo match object constructed from a string. """ def __init__(self, start, text): self._text = text self._start = start def start(self, arg=None): return self._start def end(self, arg=None): return self._start + len(self._text) def group(self, arg=None): if arg: raise IndexError('No such group') return self._text def groups(self): return (self._text,) def groupdict(self): return {} def bygroups(*args): """ Callback that yields multiple actions for each group in the match. 
""" def callback(lexer, match, ctx=None): for i, action in enumerate(args): if action is None: continue elif type(action) is _TokenType: data = match.group(i + 1) if data: yield match.start(i + 1), action, data else: if ctx: ctx.pos = match.start(i + 1) for item in action(lexer, _PseudoMatch(match.start(i + 1), match.group(i + 1)), ctx): if item: yield item if ctx: ctx.pos = match.end() return callback class _This(object): """ Special singleton used for indicating the caller class. Used by ``using``. """ this = _This() def using(_other, **kwargs): """ Callback that processes the match with a different lexer. The keyword arguments are forwarded to the lexer, except `state` which is handled separately. `state` specifies the state that the new lexer will start in, and can be an enumerable such as ('root', 'inline', 'string') or a simple string which is assumed to be on top of the root state. Note: For that to work, `_other` must not be an `ExtendedRegexLexer`. """ gt_kwargs = {} if 'state' in kwargs: s = kwargs.pop('state') if isinstance(s, (list, tuple)): gt_kwargs['stack'] = s else: gt_kwargs['stack'] = ('root', s) if _other is this: def callback(lexer, match, ctx=None): # if keyword arguments are given the callback # function has to create a new lexer instance if kwargs: # XXX: cache that somehow kwargs.update(lexer.options) lx = lexer.__class__(**kwargs) else: lx = lexer s = match.start() for i, t, v in lx.get_tokens_unprocessed(match.group(), **gt_kwargs): yield i + s, t, v if ctx: ctx.pos = match.end() else: def callback(lexer, match, ctx=None): # XXX: cache that somehow kwargs.update(lexer.options) lx = _other(**kwargs) s = match.start() for i, t, v in lx.get_tokens_unprocessed(match.group(), **gt_kwargs): yield i + s, t, v if ctx: ctx.pos = match.end() return callback class RegexLexerMeta(LexerMeta): """ Metaclass for RegexLexer, creates the self._tokens attribute from self.tokens on the first instantiation. 
""" def _process_state(cls, unprocessed, processed, state): assert type(state) is str, "wrong state name %r" % state assert state[0] != '#', "invalid state name %r" % state if state in processed: return processed[state] tokens = processed[state] = [] rflags = cls.flags for tdef in unprocessed[state]: if isinstance(tdef, include): # it's a state reference assert tdef != state, "circular state reference %r" % state tokens.extend(cls._process_state(unprocessed, processed, str(tdef))) continue assert type(tdef) is tuple, "wrong rule def %r" % tdef try: rex = re.compile(tdef[0], rflags).match except Exception, err: raise ValueError("uncompilable regex %r in state %r of %r: %s" % (tdef[0], state, cls, err)) assert type(tdef[1]) is _TokenType or callable(tdef[1]), \ 'token type must be simple type or callable, not %r' % (tdef[1],) if len(tdef) == 2: new_state = None else: tdef2 = tdef[2] if isinstance(tdef2, str): # an existing state if tdef2 == '#pop': new_state = -1 elif tdef2 in unprocessed: new_state = (tdef2,) elif tdef2 == '#push': new_state = tdef2 elif tdef2[:5] == '#pop:': new_state = -int(tdef2[5:]) else: assert False, 'unknown new state %r' % tdef2 elif isinstance(tdef2, combined): # combine a new state from existing ones new_state = '_tmp_%d' % cls._tmpname cls._tmpname += 1 itokens = [] for istate in tdef2: assert istate != state, 'circular state ref %r' % istate itokens.extend(cls._process_state(unprocessed, processed, istate)) processed[new_state] = itokens new_state = (new_state,) elif isinstance(tdef2, tuple): # push more than one state for state in tdef2: assert (state in unprocessed or state in ('#pop', '#push')), \ 'unknown new state ' + state new_state = tdef2 else: assert False, 'unknown new state def %r' % tdef2 tokens.append((rex, tdef[1], new_state)) return tokens def process_tokendef(cls, name, tokendefs=None): processed = cls._all_tokens[name] = {} tokendefs = tokendefs or cls.tokens[name] for state in tokendefs.keys(): cls._process_state(tokendefs, processed, state) return processed def __call__(cls, *args, **kwds): if not hasattr(cls, '_tokens'): cls._all_tokens = {} cls._tmpname = 0 if hasattr(cls, 'token_variants') and cls.token_variants: # don't process yet pass else: cls._tokens = cls.process_tokendef('', cls.tokens) return type.__call__(cls, *args, **kwds) class RegexLexer(Lexer): """ Base for simple stateful regular expression-based lexers. Simplifies the lexing process so that you need only provide a list of states and regular expressions. """ __metaclass__ = RegexLexerMeta #: Flags for compiling the regular expressions. #: Defaults to MULTILINE. flags = re.MULTILINE #: Dict of ``{'state': [(regex, tokentype, new_state), ...], ...}`` #: #: The initial state is 'root'. #: ``new_state`` can be omitted to signify no state transition. #: If it is a string, the state is pushed on the stack and changed. #: If it is a tuple of strings, all states are pushed on the stack and #: the current state will be the topmost. #: It can also be ``combined('state1', 'state2', ...)`` #: to signify a new, anonymous state combined from the rules of two #: or more existing ones. #: Furthermore, it can be '#pop' to signify going back one step in #: the state stack, or '#push' to push the current state on the stack #: again. #: #: The tuple can also be replaced with ``include('state')``, in which #: case the rules from the state named by the string are included in the #: current one. 
tokens = {} def get_tokens_unprocessed(self, text, stack=('root',)): """ Split ``text`` into (tokentype, text) pairs. ``stack`` is the inital stack (default: ``['root']``) """ pos = 0 tokendefs = self._tokens statestack = list(stack) statetokens = tokendefs[statestack[-1]] while 1: for rexmatch, action, new_state in statetokens: m = rexmatch(text, pos) if m: if type(action) is _TokenType: yield pos, action, m.group() else: for item in action(self, m): yield item pos = m.end() if new_state is not None: # state transition if isinstance(new_state, tuple): for state in new_state: if state == '#pop': statestack.pop() elif state == '#push': statestack.append(statestack[-1]) else: statestack.append(state) elif isinstance(new_state, int): # pop del statestack[new_state:] elif new_state == '#push': statestack.append(statestack[-1]) else: assert False, "wrong state def: %r" % new_state statetokens = tokendefs[statestack[-1]] break else: try: if text[pos] == '\n': # at EOL, reset state to "root" pos += 1 statestack = ['root'] statetokens = tokendefs['root'] yield pos, Text, u'\n' continue yield pos, Error, text[pos] pos += 1 except IndexError: break class LexerContext(object): """ A helper object that holds lexer position data. """ def __init__(self, text, pos, stack=None, end=None): self.text = text self.pos = pos self.end = end or len(text) # end=0 not supported ;-) self.stack = stack or ['root'] def __repr__(self): return 'LexerContext(%r, %r, %r)' % ( self.text, self.pos, self.stack) class ExtendedRegexLexer(RegexLexer): """ A RegexLexer that uses a context object to store its state. """ def get_tokens_unprocessed(self, text=None, context=None): """ Split ``text`` into (tokentype, text) pairs. If ``context`` is given, use this lexer context instead. """ tokendefs = self._tokens if not context: ctx = LexerContext(text, 0) statetokens = tokendefs['root'] else: ctx = context statetokens = tokendefs[ctx.stack[-1]] text = ctx.text while 1: for rexmatch, action, new_state in statetokens: m = rexmatch(text, ctx.pos, ctx.end) if m: if type(action) is _TokenType: yield ctx.pos, action, m.group() ctx.pos = m.end() else: for item in action(self, m, ctx): yield item if not new_state: # altered the state stack? statetokens = tokendefs[ctx.stack[-1]] # CAUTION: callback must set ctx.pos! if new_state is not None: # state transition if isinstance(new_state, tuple): ctx.stack.extend(new_state) elif isinstance(new_state, int): # pop del ctx.stack[new_state:] elif new_state == '#push': ctx.stack.append(ctx.stack[-1]) else: assert False, "wrong state def: %r" % new_state statetokens = tokendefs[ctx.stack[-1]] break else: try: if ctx.pos >= ctx.end: break if text[ctx.pos] == '\n': # at EOL, reset state to "root" ctx.pos += 1 ctx.stack = ['root'] statetokens = tokendefs['root'] yield ctx.pos, Text, u'\n' continue yield ctx.pos, Error, text[ctx.pos] ctx.pos += 1 except IndexError: break def do_insertions(insertions, tokens): """ Helper for lexers which must combine the results of several sublexers. ``insertions`` is a list of ``(index, itokens)`` pairs. Each ``itokens`` iterable should be inserted at position ``index`` into the token stream given by the ``tokens`` argument. The result is a combined token stream. TODO: clean up the code here. """ insertions = iter(insertions) try: index, itokens = insertions.next() except StopIteration: # no insertions for item in tokens: yield item return realpos = None insleft = True # iterate over the token stream where we want to insert # the tokens from the insertion list. 
for i, t, v in tokens: # first iteration. store the postition of first item if realpos is None: realpos = i oldi = 0 while insleft and i + len(v) >= index: tmpval = v[oldi:index - i] yield realpos, t, tmpval realpos += len(tmpval) for it_index, it_token, it_value in itokens: yield realpos, it_token, it_value realpos += len(it_value) oldi = index - i try: index, itokens = insertions.next() except StopIteration: insleft = False break # not strictly necessary yield realpos, t, v[oldi:] realpos += len(v) - oldi # leftover tokens while insleft: # no normal tokens, set realpos to zero realpos = realpos or 0 for p, t, v in itokens: yield realpos, t, v realpos += len(v) try: index, itokens = insertions.next() except StopIteration: insleft = False break # not strictly necessary
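The docstrings above describe include() (splice rules in from another state) and bygroups() (give each regex group its own token type). A minimal sketch of a RegexLexer subclass that exercises both; the mini-language and every name in it are made up for illustration:

import re

from pygments.lexer import RegexLexer, bygroups, include
from pygments.token import Comment, Name, String, Text


class IniishLexer(RegexLexer):
    """Hypothetical lexer for 'key = value' lines with ';' comments."""
    name = 'Ini-ish'
    aliases = ['iniish']
    flags = re.MULTILINE

    tokens = {
        'root': [
            include('whitespace'),  # splice in the rules of another state
            (r';.*?$', Comment),
            # bygroups(): group 1 -> Name.Attribute, 2 -> Text, 3 -> String.
            (r'(\w+)(\s*=\s*)(.*?)$', bygroups(Name.Attribute, Text, String)),
        ],
        'whitespace': [
            (r'\s+', Text),
        ],
    }


# for tokentype, value in IniishLexer().get_tokens('answer = 42\n'):
#     print(tokentype, repr(value))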
apache-2.0
derekjchow/models
official/keras_application_models/benchmark_main.py
1
8439
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Benchmark on the keras built-in application models.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function # pylint: disable=g-bad-import-order import numpy as np from absl import app as absl_app from absl import flags import tensorflow as tf # pylint: enable=g-bad-import-order from official.keras_application_models import dataset from official.keras_application_models import model_callbacks from official.utils.flags import core as flags_core from official.utils.logs import logger from official.utils.misc import distribution_utils # Define a dictionary that maps model names to their model classes inside Keras MODELS = { "vgg16": tf.keras.applications.VGG16, "vgg19": tf.keras.applications.VGG19, "inceptionv3": tf.keras.applications.InceptionV3, "xception": tf.keras.applications.Xception, "resnet50": tf.keras.applications.ResNet50, "inceptionresnetv2": tf.keras.applications.InceptionResNetV2, "mobilenet": tf.keras.applications.MobileNet, "densenet121": tf.keras.applications.DenseNet121, "densenet169": tf.keras.applications.DenseNet169, "densenet201": tf.keras.applications.DenseNet201, "nasnetlarge": tf.keras.applications.NASNetLarge, "nasnetmobile": tf.keras.applications.NASNetMobile, } def run_keras_model_benchmark(_): """Run the benchmark on keras model.""" # Ensure a valid model name was supplied via command line argument if FLAGS.model not in MODELS.keys(): raise AssertionError("The --model command line argument should " "be a key in the `MODELS` dictionary.") # Check if eager execution is enabled if FLAGS.eager: tf.logging.info("Eager execution is enabled...") tf.enable_eager_execution() # Load the model tf.logging.info("Benchmark on {} model...".format(FLAGS.model)) keras_model = MODELS[FLAGS.model] # Get dataset dataset_name = "ImageNet" if FLAGS.use_synthetic_data: tf.logging.info("Using synthetic dataset...") dataset_name += "_Synthetic" train_dataset = dataset.generate_synthetic_input_dataset( FLAGS.model, FLAGS.batch_size) val_dataset = dataset.generate_synthetic_input_dataset( FLAGS.model, FLAGS.batch_size) model = keras_model(weights=None) else: tf.logging.info("Using CIFAR-10 dataset...") dataset_name = "CIFAR-10" ds = dataset.Cifar10Dataset(FLAGS.batch_size) train_dataset = ds.train_dataset val_dataset = ds.test_dataset model = keras_model( weights=None, input_shape=ds.input_shape, classes=ds.num_classes) num_gpus = flags_core.get_num_gpus(FLAGS) distribution = None # Use distribution strategy if FLAGS.dist_strat: distribution = distribution_utils.get_distribution_strategy( distribution_strategy=FLAGS.distribution_strategy, num_gpus=num_gpus) elif num_gpus > 1: # Run with multi_gpu_model # If eager execution is enabled, only one GPU is utilized even if multiple # GPUs are provided. 
if FLAGS.eager: tf.logging.warning( "{} GPUs are provided, but only one GPU is utilized as " "eager execution is enabled.".format(num_gpus)) model = tf.keras.utils.multi_gpu_model(model, gpus=num_gpus) # Adam optimizer and some other optimizers doesn't work well with # distribution strategy (b/113076709) # Use GradientDescentOptimizer here optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.001) model.compile(loss="categorical_crossentropy", optimizer=optimizer, metrics=["accuracy"], distribute=distribution) # Create benchmark logger for benchmark logging run_params = { "batch_size": FLAGS.batch_size, "synthetic_data": FLAGS.use_synthetic_data, "train_epochs": FLAGS.train_epochs, "num_train_images": FLAGS.num_train_images, "num_eval_images": FLAGS.num_eval_images, } benchmark_logger = logger.get_benchmark_logger() benchmark_logger.log_run_info( model_name=FLAGS.model, dataset_name=dataset_name, run_params=run_params, test_id=FLAGS.benchmark_test_id) # Create callbacks that log metric values about the training and evaluation callbacks = model_callbacks.get_model_callbacks( FLAGS.callbacks, batch_size=FLAGS.batch_size, metric_logger=benchmark_logger) # Train and evaluate the model history = model.fit( train_dataset, epochs=FLAGS.train_epochs, callbacks=callbacks, validation_data=val_dataset, steps_per_epoch=int(np.ceil(FLAGS.num_train_images / FLAGS.batch_size)), validation_steps=int(np.ceil(FLAGS.num_eval_images / FLAGS.batch_size)) ) tf.logging.info("Logging the evaluation results...") for epoch in range(FLAGS.train_epochs): eval_results = { "accuracy": history.history["val_acc"][epoch], "loss": history.history["val_loss"][epoch], tf.GraphKeys.GLOBAL_STEP: (epoch + 1) * np.ceil( FLAGS.num_eval_images/FLAGS.batch_size) } benchmark_logger.log_evaluation_result(eval_results) # Clear the session explicitly to avoid session delete error tf.keras.backend.clear_session() def define_keras_benchmark_flags(): """Add flags for keras built-in application models.""" flags_core.define_base(hooks=False) flags_core.define_performance() flags_core.define_image() flags_core.define_benchmark() flags.adopt_module_key_flags(flags_core) flags_core.set_defaults( data_format="channels_last", use_synthetic_data=True, batch_size=32, train_epochs=2) flags.DEFINE_enum( name="model", default=None, enum_values=MODELS.keys(), case_sensitive=False, help=flags_core.help_wrap( "Model to be benchmarked.")) flags.DEFINE_integer( name="num_train_images", default=1000, help=flags_core.help_wrap( "The number of synthetic images for training. The default value is " "1000.")) flags.DEFINE_integer( name="num_eval_images", default=50, help=flags_core.help_wrap( "The number of synthetic images for evaluation. The default value is " "50.")) flags.DEFINE_boolean( name="eager", default=False, help=flags_core.help_wrap( "To enable eager execution. Note that if eager execution is enabled, " "only one GPU is utilized even if multiple GPUs are provided and " "multi_gpu_model is used.")) flags.DEFINE_boolean( name="dist_strat", default=False, help=flags_core.help_wrap( "To enable distribution strategy for model training and evaluation. " "Number of GPUs used for distribution strategy can be set by the " "argument --num_gpus.")) flags.DEFINE_list( name="callbacks", default=["ExamplesPerSecondCallback", "LoggingMetricCallback"], help=flags_core.help_wrap( "A list of (case insensitive) strings to specify the names of " "callbacks. 
For example: `--callbacks ExamplesPerSecondCallback," "LoggingMetricCallback`")) @flags.multi_flags_validator( ["eager", "dist_strat"], message="Both --eager and --dist_strat were set. Only one can be " "defined, as DistributionStrategy is not supported in Eager " "execution currently.") # pylint: disable=unused-variable def _check_eager_dist_strat(flag_dict): return not(flag_dict["eager"] and flag_dict["dist_strat"]) def main(_): with logger.benchmark_context(FLAGS): run_keras_model_benchmark(FLAGS) if __name__ == "__main__": tf.logging.set_verbosity(tf.logging.INFO) define_keras_benchmark_flags() FLAGS = flags.FLAGS absl_app.run(main)
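The fit() call above derives its step counts from the image counts and the batch size. A worked check of that arithmetic under the script's default flag values (1000 training images, 50 eval images, batch size 32), plus a purely illustrative invocation:

import numpy as np

num_train_images, num_eval_images, batch_size = 1000, 50, 32

steps_per_epoch = int(np.ceil(num_train_images / batch_size))
validation_steps = int(np.ceil(num_eval_images / batch_size))
assert steps_per_epoch == 32   # 31 full batches plus one partial batch
assert validation_steps == 2

# Hypothetical command line (flag values are illustrative only):
#   python benchmark_main.py --model resnet50 --batch_size 32 --train_epochs 2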
apache-2.0
RevolutionTech/hummingbird
hummingbird_django/settings.py
1
2852
""" Django settings for hummingbird_django project. For more information on this file, see https://docs.djangoproject.com/en/1.7/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/1.7/ref/settings/ """ # Build paths inside the project like this: os.path.join(BASE_DIR, ...) import os BASE_DIR = os.path.dirname(os.path.dirname(__file__)) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = 'ujdo90h)l_xw$^6rj@tnmjg+^0e6zfs1cs$!vnlngjqibpg1$q' # SECURITY WARNING: don't run with debug turned on in production! DEBUG = True TEMPLATE_DEBUG = True ALLOWED_HOSTS = [] # Application definition INSTALLED_APPS = ( 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'hummingbird', ) MIDDLEWARE_CLASSES = ( 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.auth.middleware.SessionAuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ) ROOT_URLCONF = 'hummingbird_django.urls' WSGI_APPLICATION = 'hummingbird_django.wsgi.application' # Database # https://docs.djangoproject.com/en/1.7/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'), } } MEDIA_URL = '/' MEDIA_ROOT = os.path.join(BASE_DIR) TEMPLATE_DIRS = ( os.path.join(BASE_DIR,'templates'), ) # Internationalization # https://docs.djangoproject.com/en/1.7/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'America/Los_Angeles' USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/1.7/howto/static-files/ STATIC_URL = '/static/' # Main Settings PLAY_UNKNOWNS = False # Time Settings import datetime TIME_RESET_TIME = datetime.time(hour=4, minute=0) # 4:00am TIME_WAIT_TO_PLAY = 60 * 5 # 5 minutes TIME_DELAY_TO_PLAY_SONG = 3 #time_check_queue = 0.25 TIME_CHECK_QUEUE = 1 TIME_INPUT_TIMEOUT = 30 TIME_MAX_SONG_LENGTH = 20 TIME_FADEOUT_SONG = 3000 # in milliseconds # User Files DATA_FILE = "songs.csv" AUDIO_DIR = "audio/" RANDOM_SUBDIR = "random/" SOUND_SUBDIR = "sound/" TCPDUMP_DID_NOT_MATCH_LOG = "tcpdump_dnm.log" # Data Settings DO_NOT_PLAY = "DNP" NEED_TO_ASSIGN = "NTA" UNKNOWN_USER_PREFIX = "Unknown #" UNKNOWN_USER_SUFFIX_LENGTH = 5
isc
mlassnig/pilot
NordugridATLASSiteInformation.py
3
1933
# Class definition:
#   NordugridATLASSiteInformation
#   This class is the Nordugrid-ATLAS site information class inheriting from ATLASSiteInformation
#   Instances are generated with SiteInformationFactory via pUtil::getSiteInformation()
#   Implemented as a singleton class
#   http://stackoverflow.com/questions/42558/python-and-the-singleton-pattern

# import relevant python/pilot modules
import os
import commands
import SiteMover

from SiteInformation import SiteInformation            # Main site information class
from ATLASSiteInformation import ATLASSiteInformation  # Main site information class
from pUtil import tolog                                # Logging method that sends text to the pilot log
from pUtil import readpar                              # Used to read values from the schedconfig DB (queuedata)
from PilotErrors import PilotErrors                    # Error codes


class NordugridATLASSiteInformation(ATLASSiteInformation):

    # private data members
    __experiment = "Nordugrid-ATLAS"
    __instance = None

    # Required methods

    def __init__(self):
        """ Default initialization """
        pass

    def __new__(cls, *args, **kwargs):
        """ Override the __new__ method to make the class a singleton """
        if not cls.__instance:
            cls.__instance = super(ATLASSiteInformation, cls).__new__(cls, *args, **kwargs)
        return cls.__instance

    def getExperiment(self):
        """ Return a string with the experiment name """
        return self.__experiment


if __name__ == "__main__":
    os.environ['PilotHomeDir'] = os.getcwd()

    si = NordugridATLASSiteInformation()
    tolog("Experiment: %s" % (si.getExperiment()))

    cloud = "CERN"
    queuename = si.getTier1Queue(cloud)
    if queuename != "":
        tolog("Cloud %s has Tier-1 queue %s" % (cloud, queuename))
    else:
        tolog("Failed to find a Tier-1 queue name for cloud %s" % (cloud))
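The __new__ override above is the classic Python singleton idiom: the first call creates the one instance and every later call returns it. A dependency-free sketch of the same pattern (the class name is hypothetical):

class _Singleton(object):
    __instance = None

    def __new__(cls, *args, **kwargs):
        # Create the single instance lazily; subsequent calls reuse it.
        if not cls.__instance:
            cls.__instance = super(_Singleton, cls).__new__(cls)
        return cls.__instance


a = _Singleton()
b = _Singleton()
assert a is b  # both names refer to the same object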
apache-2.0
Chuban/moose
python/peacock/tests/postprocessor_tab/test_AxesSettingsPlugin.py
6
4831
#!/usr/bin/env python
import sys
import unittest
from PyQt5 import QtCore, QtWidgets
from peacock.PostprocessorViewer.plugins.AxesSettingsPlugin import main
from peacock.utils import Testing


class TestAxesSettingsPlugin(Testing.PeacockImageTestCase):
    """
    Test class for the ArtistToggleWidget which toggles postprocessor lines.
    """

    #: QApplication: The main App for QT, this must be static to work correctly.
    qapp = QtWidgets.QApplication(sys.argv)

    def setUp(self):
        """
        Creates the GUI.
        """
        self._control, self._widget, self._window = main(['../input/white_elephant_jan_2016.csv'])

    def plot(self):
        # Create plot
        select = self._widget.currentWidget().PostprocessorSelectPlugin
        var = 'air_temp_set_1'
        select._groups[0]._toggles[var].CheckBox.setCheckState(QtCore.Qt.Checked)
        select._groups[0]._toggles[var].CheckBox.clicked.emit(True)

        var = 'snow_depth_set_1'
        select._groups[0]._toggles[var].CheckBox.setCheckState(QtCore.Qt.Checked)
        select._groups[0]._toggles[var].PlotAxis.setCurrentIndex(1)
        select._groups[0]._toggles[var].CheckBox.clicked.emit(True)

    def testEmpty(self):
        """
        Test that an empty plot is possible.
        """
        ax0, ax1 = self._window.axes()
        self.assertEqual(ax0.get_xlabel(), '')
        self.assertEqual(ax0.get_ylabel(), '')
        self.assertEqual(ax1.get_ylabel(), '')
        self.assertFalse(self._control.LegendLocation.isEnabled())
        self.assertFalse(self._control.Legend2Location.isEnabled())
        self.assertImage('testEmpty.png')

    def testTitle(self):
        """
        Test that a title may be added.
        """
        self._control.Title.setText("Testing Title")
        self._control.Title.editingFinished.emit()
        ax0, ax1 = self._window.axes()
        self.assertEqual(ax0.get_title(), "Testing Title")

    def testLegend(self):
        """
        Test that legend toggle operates.
        """
        self.plot()
        self._control.Legend.setCheckState(QtCore.Qt.Checked)
        self._control.Legend.clicked.emit(True)
        self._control.Legend2.setCheckState(QtCore.Qt.Checked)
        self._control.Legend2.clicked.emit(True)
        self.assertTrue(self._control.LegendLocation.isEnabled())
        self.assertTrue(self._control.Legend2Location.isEnabled())
        self.assertImage('testLegend.png')

    def testLegendLocation(self):
        """
        Test legend location selection.
        """
        self.plot()
        self._control.Legend.setCheckState(QtCore.Qt.Checked)
        self._control.Legend.clicked.emit(True)
        self._control.LegendLocation.setCurrentIndex(3)
        self._control.Legend2.setCheckState(QtCore.Qt.Checked)
        self._control.Legend2.clicked.emit(True)
        self._control.Legend2Location.setCurrentIndex(1)
        self.assertImage('testLegendLocation.png')

    def testEmptyLegend(self):
        """
        Test that a legend created with no data produces warning.
        """
        # Enable legends
        self.plot()
        self._control.Legend.setCheckState(QtCore.Qt.Checked)
        self._control.Legend.clicked.emit(True)
        self._control.Legend2.setCheckState(QtCore.Qt.Checked)
        self._control.Legend2.clicked.emit(True)

        # Remove lines
        select = self._widget.currentWidget().PostprocessorSelectPlugin
        var = 'air_temp_set_1'
        select._groups[0]._toggles[var].CheckBox.setCheckState(QtCore.Qt.Unchecked)
        select._groups[0]._toggles[var].CheckBox.clicked.emit(False)

        var = 'snow_depth_set_1'
        select._groups[0]._toggles[var].CheckBox.setCheckState(QtCore.Qt.Unchecked)
        select._groups[0]._toggles[var].PlotAxis.setCurrentIndex(1)
        select._groups[0]._toggles[var].CheckBox.clicked.emit(False)

        self.assertImage('testEmpty.png')

    def testRepr(self):
        """
        Test python scripting.
        """
        self.plot()
        output, imports = self._control.repr()
        self.assertNotIn("axes0.legend(loc='best')", output)
        self.assertNotIn("axes1.legend(loc='best')", output)
        self.assertNotIn("axes0.set_title('Title')", output)

        self._control.Title.setText("Title")
        self._control.Legend.setCheckState(QtCore.Qt.Checked)
        self._control.Legend.clicked.emit(True)
        self._control.Legend2.setCheckState(QtCore.Qt.Checked)
        self._control.Legend2.clicked.emit(True)
        output, imports = self._control.repr()
        self.assertIn("axes0.legend(loc='best')", output)
        self.assertIn("axes1.legend(loc='best')", output)
        self.assertIn("axes0.set_title('Title')", output)


if __name__ == '__main__':
    unittest.main(module=__name__, verbosity=2, buffer=True)
lgpl-2.1
denys-duchier/django
tests/gis_tests/layermap/models.py
21
2409
from django.contrib.gis.db import models


class NamedModel(models.Model):
    name = models.CharField(max_length=25)

    class Meta:
        abstract = True
        required_db_features = ['gis_enabled']

    def __str__(self):
        return self.name


class State(NamedModel):
    pass


class County(NamedModel):
    state = models.ForeignKey(State, models.CASCADE)
    mpoly = models.MultiPolygonField(srid=4269)  # Multipolygon in NAD83


class CountyFeat(NamedModel):
    poly = models.PolygonField(srid=4269)


class City(NamedModel):
    name_txt = models.TextField(default='')
    name_short = models.CharField(max_length=5)
    population = models.IntegerField()
    density = models.DecimalField(max_digits=7, decimal_places=1)
    dt = models.DateField()
    point = models.PointField()

    class Meta:
        app_label = 'layermap'
        required_db_features = ['gis_enabled']


class Interstate(NamedModel):
    length = models.DecimalField(max_digits=6, decimal_places=2)
    path = models.LineStringField()

    class Meta:
        app_label = 'layermap'
        required_db_features = ['gis_enabled']


# Same as `City` above, but for testing model inheritance.
class CityBase(NamedModel):
    population = models.IntegerField()
    density = models.DecimalField(max_digits=7, decimal_places=1)
    point = models.PointField()


class ICity1(CityBase):
    dt = models.DateField()

    class Meta(CityBase.Meta):
        pass


class ICity2(ICity1):
    dt_time = models.DateTimeField(auto_now=True)

    class Meta(ICity1.Meta):
        pass


class Invalid(models.Model):
    point = models.PointField()

    class Meta:
        required_db_features = ['gis_enabled']


# Mapping dictionaries for the models above.
co_mapping = {
    'name': 'Name',
    # ForeignKey's use another mapping dictionary for the _related_ Model (State in this case).
    'state': {'name': 'State'},
    'mpoly': 'MULTIPOLYGON',  # Will convert POLYGON features into MULTIPOLYGONS.
}

cofeat_mapping = {
    'name': 'Name',
    'poly': 'POLYGON',
}

city_mapping = {
    'name': 'Name',
    'population': 'Population',
    'density': 'Density',
    'dt': 'Created',
    'point': 'POINT',
}

inter_mapping = {
    'name': 'Name',
    'length': 'Length',
    'path': 'LINESTRING',
}
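The mapping dictionaries above follow the format expected by django.contrib.gis.utils.LayerMapping. A sketch of importing a shapefile with co_mapping; the shapefile path is a placeholder, and the import assumes the models live in a layermap app:

from django.contrib.gis.utils import LayerMapping

from layermap.models import County, co_mapping

# 'counties.shp' is a placeholder path. transform=True reprojects features
# into the field SRID (4269 for County.mpoly); the nested {'name': 'State'}
# entry resolves the State ForeignKey by name.
lm = LayerMapping(County, 'counties.shp', co_mapping, transform=True)
lm.save(strict=True, verbose=True)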
bsd-3-clause
Addepar/buck
python-dsl/buck_parser/glob_internal.py
5
2442
# Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Glob implementation in python."""

from .util import is_special


def path_component_starts_with_dot(relative_path):
    for p in relative_path.parts:
        if p.startswith("."):
            return True
    return False


def glob_internal(
    includes,
    excludes,
    project_root_relative_excludes,
    include_dotfiles,
    search_base,
    project_root,
):
    def includes_iterator():
        for pattern in includes:
            for path in search_base.glob(pattern):
                # TODO(beng): Handle hidden files on Windows.
                if path.is_file() and (
                    include_dotfiles
                    or not path_component_starts_with_dot(path.relative_to(search_base))
                ):
                    yield path

    non_special_excludes = set()
    match_excludes = set()
    for pattern in excludes:
        if is_special(pattern):
            match_excludes.add(pattern)
        else:
            non_special_excludes.add(pattern)

    def exclusion(path):
        relative_to_search_base = path.relative_to(search_base)
        if relative_to_search_base.as_posix() in non_special_excludes:
            return True
        for pattern in match_excludes:
            result = relative_to_search_base.match(pattern, match_entire=True)
            if result:
                return True
        relative_to_project_root = path.relative_to(project_root)
        for pattern in project_root_relative_excludes:
            result = relative_to_project_root.match(pattern, match_entire=True)
            if result:
                return True
        return False

    return sorted(
        set(
            [
                str(p.relative_to(search_base))
                for p in includes_iterator()
                if not exclusion(p)
            ]
        )
    )


# Export the function by name; the original listed the bare function object,
# which breaks `from ... import *` (__all__ entries must be strings).
__all__ = ["glob_internal"]
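Note that relative_to_search_base.match(pattern, match_entire=True) relies on Buck's bundled pathlib variant; the standard library's PurePath.match() takes no such keyword. A standard-library-only sketch of the same include/exclude idea, simplified so that excludes are exact relative paths:

import pathlib


def simple_glob(includes, excludes, search_base):
    """Illustrative only: glob includes under search_base, drop exact excludes."""
    root = pathlib.Path(search_base)
    excluded = set(excludes)
    hits = set()
    for pattern in includes:
        for path in root.glob(pattern):
            rel = path.relative_to(root).as_posix()
            if path.is_file() and rel not in excluded:
                hits.add(rel)
    return sorted(hits)


# simple_glob(['**/*.py'], ['setup.py'], '.')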
apache-2.0
flightcoin/flightcoin
contrib/pyminer/pyminer.py
1257
6438
#!/usr/bin/python # # Copyright (c) 2011 The Bitcoin developers # Distributed under the MIT/X11 software license, see the accompanying # file license.txt or http://www.opensource.org/licenses/mit-license.php. # import time import json import pprint import hashlib import struct import re import base64 import httplib import sys from multiprocessing import Process ERR_SLEEP = 15 MAX_NONCE = 1000000L settings = {} pp = pprint.PrettyPrinter(indent=4) class BitcoinRPC: OBJID = 1 def __init__(self, host, port, username, password): authpair = "%s:%s" % (username, password) self.authhdr = "Basic %s" % (base64.b64encode(authpair)) self.conn = httplib.HTTPConnection(host, port, False, 30) def rpc(self, method, params=None): self.OBJID += 1 obj = { 'version' : '1.1', 'method' : method, 'id' : self.OBJID } if params is None: obj['params'] = [] else: obj['params'] = params self.conn.request('POST', '/', json.dumps(obj), { 'Authorization' : self.authhdr, 'Content-type' : 'application/json' }) resp = self.conn.getresponse() if resp is None: print "JSON-RPC: no response" return None body = resp.read() resp_obj = json.loads(body) if resp_obj is None: print "JSON-RPC: cannot JSON-decode body" return None if 'error' in resp_obj and resp_obj['error'] != None: return resp_obj['error'] if 'result' not in resp_obj: print "JSON-RPC: no result in object" return None return resp_obj['result'] def getblockcount(self): return self.rpc('getblockcount') def getwork(self, data=None): return self.rpc('getwork', data) def uint32(x): return x & 0xffffffffL def bytereverse(x): return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) | (((x) >> 8) & 0x0000ff00) | ((x) >> 24) )) def bufreverse(in_buf): out_words = [] for i in range(0, len(in_buf), 4): word = struct.unpack('@I', in_buf[i:i+4])[0] out_words.append(struct.pack('@I', bytereverse(word))) return ''.join(out_words) def wordreverse(in_buf): out_words = [] for i in range(0, len(in_buf), 4): out_words.append(in_buf[i:i+4]) out_words.reverse() return ''.join(out_words) class Miner: def __init__(self, id): self.id = id self.max_nonce = MAX_NONCE def work(self, datastr, targetstr): # decode work data hex string to binary static_data = datastr.decode('hex') static_data = bufreverse(static_data) # the first 76b of 80b do not change blk_hdr = static_data[:76] # decode 256-bit target value targetbin = targetstr.decode('hex') targetbin = targetbin[::-1] # byte-swap and dword-swap targetbin_str = targetbin.encode('hex') target = long(targetbin_str, 16) # pre-hash first 76b of block header static_hash = hashlib.sha256() static_hash.update(blk_hdr) for nonce in xrange(self.max_nonce): # encode 32-bit nonce value nonce_bin = struct.pack("<I", nonce) # hash final 4b, the nonce value hash1_o = static_hash.copy() hash1_o.update(nonce_bin) hash1 = hash1_o.digest() # sha256 hash of sha256 hash hash_o = hashlib.sha256() hash_o.update(hash1) hash = hash_o.digest() # quick test for winning solution: high 32 bits zero? 
if hash[-4:] != '\0\0\0\0': continue # convert binary hash to 256-bit Python long hash = bufreverse(hash) hash = wordreverse(hash) hash_str = hash.encode('hex') l = long(hash_str, 16) # proof-of-work test: hash < target if l < target: print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,) return (nonce + 1, nonce_bin) else: print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,) # return (nonce + 1, nonce_bin) return (nonce + 1, None) def submit_work(self, rpc, original_data, nonce_bin): nonce_bin = bufreverse(nonce_bin) nonce = nonce_bin.encode('hex') solution = original_data[:152] + nonce + original_data[160:256] param_arr = [ solution ] result = rpc.getwork(param_arr) print time.asctime(), "--> Upstream RPC result:", result def iterate(self, rpc): work = rpc.getwork() if work is None: time.sleep(ERR_SLEEP) return if 'data' not in work or 'target' not in work: time.sleep(ERR_SLEEP) return time_start = time.time() (hashes_done, nonce_bin) = self.work(work['data'], work['target']) time_end = time.time() time_diff = time_end - time_start self.max_nonce = long( (hashes_done * settings['scantime']) / time_diff) if self.max_nonce > 0xfffffffaL: self.max_nonce = 0xfffffffaL if settings['hashmeter']: print "HashMeter(%d): %d hashes, %.2f Khash/sec" % ( self.id, hashes_done, (hashes_done / 1000.0) / time_diff) if nonce_bin is not None: self.submit_work(rpc, work['data'], nonce_bin) def loop(self): rpc = BitcoinRPC(settings['host'], settings['port'], settings['rpcuser'], settings['rpcpass']) if rpc is None: return while True: self.iterate(rpc) def miner_thread(id): miner = Miner(id) miner.loop() if __name__ == '__main__': if len(sys.argv) != 2: print "Usage: pyminer.py CONFIG-FILE" sys.exit(1) f = open(sys.argv[1]) for line in f: # skip comment lines m = re.search('^\s*#', line) if m: continue # parse key=value lines m = re.search('^(\w+)\s*=\s*(\S.*)$', line) if m is None: continue settings[m.group(1)] = m.group(2) f.close() if 'host' not in settings: settings['host'] = '127.0.0.1' if 'port' not in settings: settings['port'] = 8332 if 'threads' not in settings: settings['threads'] = 1 if 'hashmeter' not in settings: settings['hashmeter'] = 0 if 'scantime' not in settings: settings['scantime'] = 30L if 'rpcuser' not in settings or 'rpcpass' not in settings: print "Missing username and/or password in cfg file" sys.exit(1) settings['port'] = int(settings['port']) settings['threads'] = int(settings['threads']) settings['hashmeter'] = int(settings['hashmeter']) settings['scantime'] = long(settings['scantime']) thr_list = [] for thr_id in range(settings['threads']): p = Process(target=miner_thread, args=(thr_id,)) p.start() thr_list.append(p) time.sleep(1) # stagger threads print settings['threads'], "mining threads started" print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port']) try: for thr_proc in thr_list: thr_proc.join() except KeyboardInterrupt: pass print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
mit
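A standalone sketch of the double-SHA256 inner loop in Miner.work() above, run on a made-up all-zero 76-byte header rather than real getwork data. It mirrors the pre-hash/copy/nonce pattern and the hash[-4:] quick test (the last four digest bytes must be zero, i.e. the top 32 bits after byte reversal); with only 8 trial nonces nothing is expected to pass, whereas the real loop scans up to MAX_NONCE.

import hashlib
import struct

blk_hdr = b"\x00" * 76             # hypothetical static 76-byte header prefix
static_hash = hashlib.sha256()
static_hash.update(blk_hdr)        # pre-hash the bytes that never change

for nonce in range(8):
    trial = static_hash.copy()              # reuse the midstate for each nonce
    trial.update(struct.pack("<I", nonce))  # append the 4-byte nonce
    digest = hashlib.sha256(trial.digest()).digest()  # sha256(sha256(header))
    if digest[-4:] == b"\x00\x00\x00\x00":  # quick test, as in work()
        print("candidate nonce: %d" % nonce)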
HKUST-SING/tensorflow
tensorflow/python/ops/cloud/bigquery_reader_ops_test.py
12
9585
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for BigQueryReader Op.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import json import os import re import threading from six.moves import SimpleHTTPServer from six.moves import socketserver from tensorflow.core.example import example_pb2 from tensorflow.core.framework import types_pb2 from tensorflow.python.framework import dtypes from tensorflow.python.ops import array_ops from tensorflow.python.ops import data_flow_ops from tensorflow.python.ops import parsing_ops from tensorflow.python.ops.cloud import cloud from tensorflow.python.platform import test from tensorflow.python.platform import tf_logging as logging from tensorflow.python.util import compat _PROJECT = "test-project" _DATASET = "test-dataset" _TABLE = "test-table" # List representation of the test rows in the 'test-table' in BigQuery. # The schema for each row is: [int64, string, float]. # The values for rows are generated such that some columns have null values. The # general formula here is: # - The int64 column is present in every row. # - The string column is only available in even rows. # - The float column is only available in every third row. _ROWS = [[0, "s_0", 0.1], [1, None, None], [2, "s_2", None], [3, None, 3.1], [4, "s_4", None], [5, None, None], [6, "s_6", 6.1], [7, None, None], [8, "s_8", None], [9, None, 9.1]] # Schema for 'test-table'. # The schema currently has three columns: int64, string, and float _SCHEMA = { "kind": "bigquery#table", "id": "test-project:test-dataset.test-table", "schema": { "fields": [{ "name": "int64_col", "type": "INTEGER", "mode": "NULLABLE" }, { "name": "string_col", "type": "STRING", "mode": "NULLABLE" }, { "name": "float_col", "type": "FLOAT", "mode": "NULLABLE" }] } } def _ConvertRowToExampleProto(row): """Converts the input row to an Example proto. Args: row: Input Row instance. Returns: An Example proto initialized with row values. """ example = example_pb2.Example() example.features.feature["int64_col"].int64_list.value.append(row[0]) if row[1] is not None: example.features.feature["string_col"].bytes_list.value.append( compat.as_bytes(row[1])) if row[2] is not None: example.features.feature["float_col"].float_list.value.append(row[2]) return example class FakeBigQueryServer(threading.Thread): """Fake http server to return schema and data for sample table.""" def __init__(self, address, port): """Creates a FakeBigQueryServer. Args: address: Server address. port: Server port. Pass 0 to automatically pick an unused port.
""" threading.Thread.__init__(self) self.handler = BigQueryRequestHandler self.httpd = socketserver.TCPServer((address, port), self.handler) def run(self): self.httpd.serve_forever() def shutdown(self): self.httpd.shutdown() self.httpd.socket.close() class BigQueryRequestHandler(SimpleHTTPServer.SimpleHTTPRequestHandler): """Responds to BigQuery HTTP requests. Attributes: num_rows: num_rows in the underlying table served by this class. """ num_rows = 0 def do_GET(self): if "data?maxResults=" not in self.path: # This is a schema request. _SCHEMA["numRows"] = self.num_rows response = json.dumps(_SCHEMA) else: # This is a data request. # # Extract max results and start index. max_results = int(re.findall(r"maxResults=(\d+)", self.path)[0]) start_index = int(re.findall(r"startIndex=(\d+)", self.path)[0]) # Send the rows as JSON. rows = [] for row in _ROWS[start_index:start_index + max_results]: row_json = { "f": [{ "v": str(row[0]) }, { "v": str(row[1]) if row[1] is not None else None }, { "v": str(row[2]) if row[2] is not None else None }] } rows.append(row_json) response = json.dumps({ "kind": "bigquery#table", "id": "test-project:test-dataset.test-table", "rows": rows }) self.send_response(200) self.end_headers() self.wfile.write(compat.as_bytes(response)) def _SetUpQueue(reader): """Sets up a queue for a reader.""" queue = data_flow_ops.FIFOQueue(8, [types_pb2.DT_STRING], shapes=()) key, value = reader.read(queue) queue.enqueue_many(reader.partitions()).run() queue.close().run() return key, value class BigQueryReaderOpsTest(test.TestCase): def setUp(self): super(BigQueryReaderOpsTest, self).setUp() self.server = FakeBigQueryServer("127.0.0.1", 0) self.server.start() logging.info("server address is %s:%s", self.server.httpd.server_address[0], self.server.httpd.server_address[1]) # An override to bypass the GCP auth token retrieval logic # in google_auth_provider.cc. os.environ["GOOGLE_AUTH_TOKEN_FOR_TESTING"] = "not-used" def tearDown(self): self.server.shutdown() super(BigQueryReaderOpsTest, self).tearDown() def _ReadAndCheckRowsUsingFeatures(self, num_rows): self.server.handler.num_rows = num_rows with self.test_session() as sess: feature_configs = { "int64_col": parsing_ops.FixedLenFeature( [1], dtype=dtypes.int64), "string_col": parsing_ops.FixedLenFeature( [1], dtype=dtypes.string, default_value="s_default"), } reader = cloud.BigQueryReader( project_id=_PROJECT, dataset_id=_DATASET, table_id=_TABLE, num_partitions=4, features=feature_configs, timestamp_millis=1, test_end_point=("%s:%s" % (self.server.httpd.server_address[0], self.server.httpd.server_address[1]))) key, value = _SetUpQueue(reader) seen_rows = [] features = parsing_ops.parse_example( array_ops.reshape(value, [1]), feature_configs) for _ in range(num_rows): int_value, str_value = sess.run( [features["int64_col"], features["string_col"]]) # Parse values returned from the session. self.assertEqual(int_value.shape, (1, 1)) self.assertEqual(str_value.shape, (1, 1)) int64_col = int_value[0][0] string_col = str_value[0][0] seen_rows.append(int64_col) # Compare. 
expected_row = _ROWS[int64_col] self.assertEqual(int64_col, expected_row[0]) self.assertEqual( compat.as_str(string_col), ("s_%d" % int64_col) if expected_row[1] else "s_default") self.assertItemsEqual(seen_rows, range(num_rows)) with self.assertRaisesOpError("is closed and has insufficient elements " "\\(requested 1, current size 0\\)"): sess.run([key, value]) def testReadingSingleRowUsingFeatures(self): self._ReadAndCheckRowsUsingFeatures(1) def testReadingMultipleRowsUsingFeatures(self): self._ReadAndCheckRowsUsingFeatures(10) def testReadingMultipleRowsUsingColumns(self): num_rows = 10 self.server.handler.num_rows = num_rows with self.test_session() as sess: reader = cloud.BigQueryReader( project_id=_PROJECT, dataset_id=_DATASET, table_id=_TABLE, num_partitions=4, columns=["int64_col", "float_col", "string_col"], timestamp_millis=1, test_end_point=("%s:%s" % (self.server.httpd.server_address[0], self.server.httpd.server_address[1]))) key, value = _SetUpQueue(reader) seen_rows = [] for row_index in range(num_rows): returned_row_id, example_proto = sess.run([key, value]) example = example_pb2.Example() example.ParseFromString(example_proto) self.assertIn("int64_col", example.features.feature) feature = example.features.feature["int64_col"] self.assertEqual(len(feature.int64_list.value), 1) int64_col = feature.int64_list.value[0] seen_rows.append(int64_col) # Create our expected Example. expected_example = example_pb2.Example() expected_example = _ConvertRowToExampleProto(_ROWS[int64_col]) # Compare. self.assertProtoEquals(example, expected_example) self.assertEqual(row_index, int(returned_row_id)) self.assertItemsEqual(seen_rows, range(num_rows)) with self.assertRaisesOpError("is closed and has insufficient elements " "\\(requested 1, current size 0\\)"): sess.run([key, value]) if __name__ == "__main__": test.main()
apache-2.0
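A reduced sketch of the FakeBigQueryServer pattern above: bind a TCPServer to port 0 so the OS picks a free port, serve it from a background thread, then read the chosen address back from server_address. The trivial OkHandler here is hypothetical; the real handler serves BigQuery schema and row JSON.

import threading
from six.moves import SimpleHTTPServer, socketserver

class OkHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
    def do_GET(self):
        self.send_response(200)
        self.end_headers()
        self.wfile.write(b"ok")

httpd = socketserver.TCPServer(("127.0.0.1", 0), OkHandler)
worker = threading.Thread(target=httpd.serve_forever)
worker.daemon = True
worker.start()
print("serving on %s:%s" % httpd.server_address)  # port chosen by the OS
httpd.shutdown()
httpd.socket.close()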
kenorb-contrib/BitTorrent
launchmany-console.py
2
6333
#!/usr/bin/env python # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # Written by John Hoffman # Updated to 4.20 by David Harrison app_name = "BitTorrent" if __name__ == '__main__': from BTL.translation import _ import sys import os from BitTorrent import platform from BitTorrent.launchmanycore import LaunchMany from BitTorrent.defaultargs import get_defaults from BitTorrent.parseargs import parseargs, printHelp from BitTorrent.prefs import Preferences from BitTorrent import configfile from BitTorrent import version from BTL.platform import encode_for_filesystem, decode_from_filesystem from BitTorrent import BTFailure from BitTorrent import bt_log_fmt from BTL.log import injectLogger import logging from logging import ERROR, WARNING, INFO from BitTorrent import console, old_stderr, STDERR exceptions = [] log = logging.getLogger('launchmany-console') class HeadlessDisplayer: def display(self, data): # formats the data and dumps it to the root logger. if not data: log.info( _("no torrents")) elif type(data) == str: log.info(data) else: for x in data: ( name, status, progress, peers, seeds, seedsmsg, uprate, dnrate, upamt, dnamt, size, t, msg ) = x logging.getLogger('launchmany-console').info( '"%s": "%s" (%s) - %sP%s%s u%0.1fK/s-d%0.1fK/s u%dK-d%dK "%s"' % ( name, status, progress, peers, seeds, seedsmsg, uprate/1000, dnrate/1000, upamt/1024, dnamt/1024, msg)) return False def modify_default( defaults_tuplelist, key, newvalue ): name,value,doc = [(n,v,d) for (n,v,d) in defaults_tuplelist if n == key][0] defaults_tuplelist = [(n,v,d) for (n,v,d) in defaults_tuplelist if not n == key] defaults_tuplelist.append( (key,newvalue,doc) ) return defaults_tuplelist if __name__ == '__main__': uiname = 'launchmany-console' defaults = get_defaults(uiname) try: if len(sys.argv) < 2: printHelp(uiname, defaults) sys.exit(1) # Modifying default values from get_defaults is annoying... # Implementing specific default values for each uiname in # defaultargs.py is even more annoying. --Dave ddir = os.path.join( platform.get_dot_dir(), "launchmany-console" ) ddir = decode_from_filesystem(ddir) defaults = modify_default(defaults, 'data_dir', ddir) config, args = configfile.parse_configuration_and_args(defaults, uiname, sys.argv[1:], 0, 1) # returned from here config['save_in'] is /home/dave/Desktop/... if args: torrent_dir = args[0] config['torrent_dir'] = decode_from_filesystem(torrent_dir) else: torrent_dir = config['torrent_dir'] torrent_dir,bad = encode_for_filesystem(torrent_dir) if bad: raise BTFailure(_("Warning: ")+config['torrent_dir']+ _(" is not a directory")) if not os.path.isdir(torrent_dir): raise BTFailure(_("Warning: ")+torrent_dir+ _(" is not a directory")) # the default behavior is to save_in files to the platform # get_save_dir. For launchmany, if no command-line argument # changed the save directory then use the torrent directory.
#if config['save_in'] == platform.get_save_dir(): # config['save_in'] = config['torrent_dir'] if '--save_in' in sys.argv: print "Don't use --save_in for launchmany-console. Saving files from " \ "many torrents in the same directory can result in filename collisions." sys.exit(1) # The default 'save_in' is likely to be something like /home/myname/BitTorrent Downloads # but we would prefer that the 'save_in' be an empty string so that # LaunchMany will save the file in the same location as the destination for the file. # When downloading or seeding a large number of files we want to be sure there are # no file name collisions which can occur when the same save_in directory is used for # all torrents. When 'save in' is empty, the destination of the filename is used, which # for save_as style 4 (the default) will be in the same directory as the .torrent file. # If each .torrent file is in its own directory then filename collisions cannot occur. config['save_in'] = u"" except BTFailure, e: print _("error: ") + unicode(e.args[0]) + \ _("\nrun with no args for parameter explanations") sys.exit(1) except KeyboardInterrupt: sys.exit(1) d = HeadlessDisplayer() config = Preferences().initWithDict(config) injectLogger(use_syslog = False, capture_output = True, verbose = True, log_level = logging.INFO, log_twisted = False ) logging.getLogger('').removeHandler(console) # remove handler installed by BitTorrent.__init__. LaunchMany(config, d.display, 'launchmany-console') logging.getLogger("").critical( "After return from LaunchMany" ) # Uncomment the following if it looks like threads are hanging around. # monitor_thread can be found in cdv://cdv.bittorrent.com:6602/python-scripts #import threading #nondaemons = [d for d in threading.enumerate() if not d.isDaemon()] #if len(nondaemons) > 1: # import time # from monitor_thread import print_stacks # time.sleep(4) # nondaemons = [d for d in threading.enumerate() if not d.isDaemon()] # if len(nondaemons) > 1: # print_stacks()
gpl-3.0
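A minimal sketch of the key=value config parsing loop above, applied to inline sample text rather than a config file passed on the command line; the sample keys are made up but follow the same comment-skipping and parsing regexes as the source.

import re

sample_cfg = "# comment lines are skipped\nhost = 127.0.0.1\nthreads=4\n"
settings = {}
for line in sample_cfg.splitlines():
    if re.search(r'^\s*#', line):                 # skip comment lines
        continue
    m = re.search(r'^(\w+)\s*=\s*(\S.*)$', line)  # parse key=value lines
    if m is None:
        continue
    settings[m.group(1)] = m.group(2)
print(settings == {'host': '127.0.0.1', 'threads': '4'})  # True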
flyher/pymo
android/pgs4a-0.9.6/python-install/lib/python2.7/test/test_xml_etree.py
16
53139
# xml.etree test. This file contains enough tests to make sure that # all included components work as they should. # Large parts are extracted from the upstream test suite. # IMPORTANT: the same doctests are run from "test_xml_etree_c" in # order to ensure consistency between the C implementation and the # Python implementation. # # For this purpose, the module-level "ET" symbol is temporarily # monkey-patched when running the "test_xml_etree_c" test suite. # Don't re-import "xml.etree.ElementTree" module in the docstring, # except if the test is specific to the Python implementation. import sys import cgi from test import test_support from test.test_support import findfile from xml.etree import ElementTree as ET SIMPLE_XMLFILE = findfile("simple.xml", subdir="xmltestdata") SIMPLE_NS_XMLFILE = findfile("simple-ns.xml", subdir="xmltestdata") SAMPLE_XML = """\ <body> <tag class='a'>text</tag> <tag class='b' /> <section> <tag class='b' id='inner'>subtext</tag> </section> </body> """ SAMPLE_SECTION = """\ <section> <tag class='b' id='inner'>subtext</tag> <nexttag /> <nextsection> <tag /> </nextsection> </section> """ SAMPLE_XML_NS = """ <body xmlns="http://effbot.org/ns"> <tag>text</tag> <tag /> <section> <tag>subtext</tag> </section> </body> """ def sanity(): """ Import sanity. >>> from xml.etree import ElementTree >>> from xml.etree import ElementInclude >>> from xml.etree import ElementPath """ def check_method(method): if not hasattr(method, '__call__'): print method, "not callable" def serialize(elem, to_string=True, **options): import StringIO file = StringIO.StringIO() tree = ET.ElementTree(elem) tree.write(file, **options) if to_string: return file.getvalue() else: file.seek(0) return file def summarize(elem): if elem.tag == ET.Comment: return "<Comment>" return elem.tag def summarize_list(seq): return [summarize(elem) for elem in seq] def normalize_crlf(tree): for elem in tree.iter(): if elem.text: elem.text = elem.text.replace("\r\n", "\n") if elem.tail: elem.tail = elem.tail.replace("\r\n", "\n") def check_string(string): len(string) for char in string: if len(char) != 1: print "expected one-character string, got %r" % char new_string = string + "" new_string = string + " " string[:0] def check_mapping(mapping): len(mapping) keys = mapping.keys() items = mapping.items() for key in keys: item = mapping[key] mapping["key"] = "value" if mapping["key"] != "value": print "expected value string, got %r" % mapping["key"] def check_element(element): if not ET.iselement(element): print "not an element" if not hasattr(element, "tag"): print "no tag member" if not hasattr(element, "attrib"): print "no attrib member" if not hasattr(element, "text"): print "no text member" if not hasattr(element, "tail"): print "no tail member" check_string(element.tag) check_mapping(element.attrib) if element.text is not None: check_string(element.text) if element.tail is not None: check_string(element.tail) for elem in element: check_element(elem) # -------------------------------------------------------------------- # element tree tests def interface(): r""" Test element tree interface. >>> element = ET.Element("tag") >>> check_element(element) >>> tree = ET.ElementTree(element) >>> check_element(tree.getroot()) >>> element = ET.Element("t\xe4g", key="value") >>> tree = ET.ElementTree(element) >>> repr(element) # doctest: +ELLIPSIS "<Element 't\\xe4g' at 0x...>" >>> element = ET.Element("tag", key="value") Make sure all standard element methods exist. 
>>> check_method(element.append) >>> check_method(element.extend) >>> check_method(element.insert) >>> check_method(element.remove) >>> check_method(element.getchildren) >>> check_method(element.find) >>> check_method(element.iterfind) >>> check_method(element.findall) >>> check_method(element.findtext) >>> check_method(element.clear) >>> check_method(element.get) >>> check_method(element.set) >>> check_method(element.keys) >>> check_method(element.items) >>> check_method(element.iter) >>> check_method(element.itertext) >>> check_method(element.getiterator) These methods return an iterable. See bug 6472. >>> check_method(element.iter("tag").next) >>> check_method(element.iterfind("tag").next) >>> check_method(element.iterfind("*").next) >>> check_method(tree.iter("tag").next) >>> check_method(tree.iterfind("tag").next) >>> check_method(tree.iterfind("*").next) These aliases are provided: >>> assert ET.XML == ET.fromstring >>> assert ET.PI == ET.ProcessingInstruction >>> assert ET.XMLParser == ET.XMLTreeBuilder """ def simpleops(): """ Basic method sanity checks. >>> elem = ET.XML("<body><tag/></body>") >>> serialize(elem) '<body><tag /></body>' >>> e = ET.Element("tag2") >>> elem.append(e) >>> serialize(elem) '<body><tag /><tag2 /></body>' >>> elem.remove(e) >>> serialize(elem) '<body><tag /></body>' >>> elem.insert(0, e) >>> serialize(elem) '<body><tag2 /><tag /></body>' >>> elem.remove(e) >>> elem.extend([e]) >>> serialize(elem) '<body><tag /><tag2 /></body>' >>> elem.remove(e) >>> element = ET.Element("tag", key="value") >>> serialize(element) # 1 '<tag key="value" />' >>> subelement = ET.Element("subtag") >>> element.append(subelement) >>> serialize(element) # 2 '<tag key="value"><subtag /></tag>' >>> element.insert(0, subelement) >>> serialize(element) # 3 '<tag key="value"><subtag /><subtag /></tag>' >>> element.remove(subelement) >>> serialize(element) # 4 '<tag key="value"><subtag /></tag>' >>> element.remove(subelement) >>> serialize(element) # 5 '<tag key="value" />' >>> element.remove(subelement) Traceback (most recent call last): ValueError: list.remove(x): x not in list >>> serialize(element) # 6 '<tag key="value" />' >>> element[0:0] = [subelement, subelement, subelement] >>> serialize(element[1]) '<subtag />' >>> element[1:9] == [element[1], element[2]] True >>> element[:9:2] == [element[0], element[2]] True >>> del element[1:2] >>> serialize(element) '<tag key="value"><subtag /><subtag /></tag>' """ def cdata(): """ Test CDATA handling (etc). >>> serialize(ET.XML("<tag>hello</tag>")) '<tag>hello</tag>' >>> serialize(ET.XML("<tag>&#104;&#101;&#108;&#108;&#111;</tag>")) '<tag>hello</tag>' >>> serialize(ET.XML("<tag><![CDATA[hello]]></tag>")) '<tag>hello</tag>' """ # Only with Python implementation def simplefind(): """ Test find methods using the elementpath fallback. >>> from xml.etree import ElementTree >>> CurrentElementPath = ElementTree.ElementPath >>> ElementTree.ElementPath = ElementTree._SimpleElementPath() >>> elem = ElementTree.XML(SAMPLE_XML) >>> elem.find("tag").tag 'tag' >>> ElementTree.ElementTree(elem).find("tag").tag 'tag' >>> elem.findtext("tag") 'text' >>> elem.findtext("tog") >>> elem.findtext("tog", "default") 'default' >>> ElementTree.ElementTree(elem).findtext("tag") 'text' >>> summarize_list(elem.findall("tag")) ['tag', 'tag'] >>> summarize_list(elem.findall(".//tag")) ['tag', 'tag', 'tag'] Path syntax doesn't work in this case. 
>>> elem.find("section/tag") >>> elem.findtext("section/tag") >>> summarize_list(elem.findall("section/tag")) [] >>> ElementTree.ElementPath = CurrentElementPath """ def find(): """ Test find methods (including xpath syntax). >>> elem = ET.XML(SAMPLE_XML) >>> elem.find("tag").tag 'tag' >>> ET.ElementTree(elem).find("tag").tag 'tag' >>> elem.find("section/tag").tag 'tag' >>> elem.find("./tag").tag 'tag' >>> ET.ElementTree(elem).find("./tag").tag 'tag' >>> ET.ElementTree(elem).find("/tag").tag 'tag' >>> elem[2] = ET.XML(SAMPLE_SECTION) >>> elem.find("section/nexttag").tag 'nexttag' >>> ET.ElementTree(elem).find("section/tag").tag 'tag' >>> ET.ElementTree(elem).find("tog") >>> ET.ElementTree(elem).find("tog/foo") >>> elem.findtext("tag") 'text' >>> elem.findtext("section/nexttag") '' >>> elem.findtext("section/nexttag", "default") '' >>> elem.findtext("tog") >>> elem.findtext("tog", "default") 'default' >>> ET.ElementTree(elem).findtext("tag") 'text' >>> ET.ElementTree(elem).findtext("tog/foo") >>> ET.ElementTree(elem).findtext("tog/foo", "default") 'default' >>> ET.ElementTree(elem).findtext("./tag") 'text' >>> ET.ElementTree(elem).findtext("/tag") 'text' >>> elem.findtext("section/tag") 'subtext' >>> ET.ElementTree(elem).findtext("section/tag") 'subtext' >>> summarize_list(elem.findall(".")) ['body'] >>> summarize_list(elem.findall("tag")) ['tag', 'tag'] >>> summarize_list(elem.findall("tog")) [] >>> summarize_list(elem.findall("tog/foo")) [] >>> summarize_list(elem.findall("*")) ['tag', 'tag', 'section'] >>> summarize_list(elem.findall(".//tag")) ['tag', 'tag', 'tag', 'tag'] >>> summarize_list(elem.findall("section/tag")) ['tag'] >>> summarize_list(elem.findall("section//tag")) ['tag', 'tag'] >>> summarize_list(elem.findall("section/*")) ['tag', 'nexttag', 'nextsection'] >>> summarize_list(elem.findall("section//*")) ['tag', 'nexttag', 'nextsection', 'tag'] >>> summarize_list(elem.findall("section/.//*")) ['tag', 'nexttag', 'nextsection', 'tag'] >>> summarize_list(elem.findall("*/*")) ['tag', 'nexttag', 'nextsection'] >>> summarize_list(elem.findall("*//*")) ['tag', 'nexttag', 'nextsection', 'tag'] >>> summarize_list(elem.findall("*/tag")) ['tag'] >>> summarize_list(elem.findall("*/./tag")) ['tag'] >>> summarize_list(elem.findall("./tag")) ['tag', 'tag'] >>> summarize_list(elem.findall(".//tag")) ['tag', 'tag', 'tag', 'tag'] >>> summarize_list(elem.findall("././tag")) ['tag', 'tag'] >>> summarize_list(elem.findall(".//tag[@class]")) ['tag', 'tag', 'tag'] >>> summarize_list(elem.findall(".//tag[@class='a']")) ['tag'] >>> summarize_list(elem.findall(".//tag[@class='b']")) ['tag', 'tag'] >>> summarize_list(elem.findall(".//tag[@id]")) ['tag'] >>> summarize_list(elem.findall(".//section[tag]")) ['section'] >>> summarize_list(elem.findall(".//section[element]")) [] >>> summarize_list(elem.findall("../tag")) [] >>> summarize_list(elem.findall("section/../tag")) ['tag', 'tag'] >>> summarize_list(ET.ElementTree(elem).findall("./tag")) ['tag', 'tag'] Following example is invalid in 1.2. A leading '*' is assumed in 1.3. >>> elem.findall("section//") == elem.findall("section//*") True ET's Path module handles this case incorrectly; this gives a warning in 1.3, and the behaviour will be modified in 1.4. 
>>> summarize_list(ET.ElementTree(elem).findall("/tag")) ['tag', 'tag'] >>> elem = ET.XML(SAMPLE_XML_NS) >>> summarize_list(elem.findall("tag")) [] >>> summarize_list(elem.findall("{http://effbot.org/ns}tag")) ['{http://effbot.org/ns}tag', '{http://effbot.org/ns}tag'] >>> summarize_list(elem.findall(".//{http://effbot.org/ns}tag")) ['{http://effbot.org/ns}tag', '{http://effbot.org/ns}tag', '{http://effbot.org/ns}tag'] """ def file_init(): """ >>> import StringIO >>> stringfile = StringIO.StringIO(SAMPLE_XML) >>> tree = ET.ElementTree(file=stringfile) >>> tree.find("tag").tag 'tag' >>> tree.find("section/tag").tag 'tag' >>> tree = ET.ElementTree(file=SIMPLE_XMLFILE) >>> tree.find("element").tag 'element' >>> tree.find("element/../empty-element").tag 'empty-element' """ def bad_find(): """ Check bad or unsupported path expressions. >>> elem = ET.XML(SAMPLE_XML) >>> elem.findall("/tag") Traceback (most recent call last): SyntaxError: cannot use absolute path on element """ def path_cache(): """ Check that the path cache behaves sanely. >>> elem = ET.XML(SAMPLE_XML) >>> for i in range(10): ET.ElementTree(elem).find('./'+str(i)) >>> cache_len_10 = len(ET.ElementPath._cache) >>> for i in range(10): ET.ElementTree(elem).find('./'+str(i)) >>> len(ET.ElementPath._cache) == cache_len_10 True >>> for i in range(20): ET.ElementTree(elem).find('./'+str(i)) >>> len(ET.ElementPath._cache) > cache_len_10 True >>> for i in range(600): ET.ElementTree(elem).find('./'+str(i)) >>> len(ET.ElementPath._cache) < 500 True """ def copy(): """ Test copy handling (etc). >>> import copy >>> e1 = ET.XML("<tag>hello<foo/></tag>") >>> e2 = copy.copy(e1) >>> e3 = copy.deepcopy(e1) >>> e1.find("foo").tag = "bar" >>> serialize(e1) '<tag>hello<bar /></tag>' >>> serialize(e2) '<tag>hello<bar /></tag>' >>> serialize(e3) '<tag>hello<foo /></tag>' """ def attrib(): """ Test attribute handling. >>> elem = ET.Element("tag") >>> elem.get("key") # 1.1 >>> elem.get("key", "default") # 1.2 'default' >>> elem.set("key", "value") >>> elem.get("key") # 1.3 'value' >>> elem = ET.Element("tag", key="value") >>> elem.get("key") # 2.1 'value' >>> elem.attrib # 2.2 {'key': 'value'} >>> attrib = {"key": "value"} >>> elem = ET.Element("tag", attrib) >>> attrib.clear() # check for aliasing issues >>> elem.get("key") # 3.1 'value' >>> elem.attrib # 3.2 {'key': 'value'} >>> attrib = {"key": "value"} >>> elem = ET.Element("tag", **attrib) >>> attrib.clear() # check for aliasing issues >>> elem.get("key") # 4.1 'value' >>> elem.attrib # 4.2 {'key': 'value'} >>> elem = ET.Element("tag", {"key": "other"}, key="value") >>> elem.get("key") # 5.1 'value' >>> elem.attrib # 5.2 {'key': 'value'} >>> elem = ET.Element('test') >>> elem.text = "aa" >>> elem.set('testa', 'testval') >>> elem.set('testb', 'test2') >>> ET.tostring(elem) '<test testa="testval" testb="test2">aa</test>' >>> sorted(elem.keys()) ['testa', 'testb'] >>> sorted(elem.items()) [('testa', 'testval'), ('testb', 'test2')] >>> elem.attrib['testb'] 'test2' >>> elem.attrib['testb'] = 'test1' >>> elem.attrib['testc'] = 'test2' >>> ET.tostring(elem) '<test testa="testval" testb="test1" testc="test2">aa</test>' """ def makeelement(): """ Test makeelement handling. >>> elem = ET.Element("tag") >>> attrib = {"key": "value"} >>> subelem = elem.makeelement("subtag", attrib) >>> if subelem.attrib is attrib: ... 
print "attrib aliasing" >>> elem.append(subelem) >>> serialize(elem) '<tag><subtag key="value" /></tag>' >>> elem.clear() >>> serialize(elem) '<tag />' >>> elem.append(subelem) >>> serialize(elem) '<tag><subtag key="value" /></tag>' >>> elem.extend([subelem, subelem]) >>> serialize(elem) '<tag><subtag key="value" /><subtag key="value" /><subtag key="value" /></tag>' >>> elem[:] = [subelem] >>> serialize(elem) '<tag><subtag key="value" /></tag>' >>> elem[:] = tuple([subelem]) >>> serialize(elem) '<tag><subtag key="value" /></tag>' """ def parsefile(): """ Test parsing from file. >>> tree = ET.parse(SIMPLE_XMLFILE) >>> normalize_crlf(tree) >>> tree.write(sys.stdout) <root> <element key="value">text</element> <element>text</element>tail <empty-element /> </root> >>> tree = ET.parse(SIMPLE_NS_XMLFILE) >>> normalize_crlf(tree) >>> tree.write(sys.stdout) <ns0:root xmlns:ns0="namespace"> <ns0:element key="value">text</ns0:element> <ns0:element>text</ns0:element>tail <ns0:empty-element /> </ns0:root> >>> with open(SIMPLE_XMLFILE) as f: ... data = f.read() >>> parser = ET.XMLParser() >>> parser.version # doctest: +ELLIPSIS 'Expat ...' >>> parser.feed(data) >>> print serialize(parser.close()) <root> <element key="value">text</element> <element>text</element>tail <empty-element /> </root> >>> parser = ET.XMLTreeBuilder() # 1.2 compatibility >>> parser.feed(data) >>> print serialize(parser.close()) <root> <element key="value">text</element> <element>text</element>tail <empty-element /> </root> >>> target = ET.TreeBuilder() >>> parser = ET.XMLParser(target=target) >>> parser.feed(data) >>> print serialize(parser.close()) <root> <element key="value">text</element> <element>text</element>tail <empty-element /> </root> """ def parseliteral(): """ >>> element = ET.XML("<html><body>text</body></html>") >>> ET.ElementTree(element).write(sys.stdout) <html><body>text</body></html> >>> element = ET.fromstring("<html><body>text</body></html>") >>> ET.ElementTree(element).write(sys.stdout) <html><body>text</body></html> >>> sequence = ["<html><body>", "text</bo", "dy></html>"] >>> element = ET.fromstringlist(sequence) >>> print ET.tostring(element) <html><body>text</body></html> >>> print "".join(ET.tostringlist(element)) <html><body>text</body></html> >>> ET.tostring(element, "ascii") "<?xml version='1.0' encoding='ascii'?>\\n<html><body>text</body></html>" >>> _, ids = ET.XMLID("<html><body>text</body></html>") >>> len(ids) 0 >>> _, ids = ET.XMLID("<html><body id='body'>text</body></html>") >>> len(ids) 1 >>> ids["body"].tag 'body' """ def iterparse(): """ Test iterparse interface. >>> iterparse = ET.iterparse >>> context = iterparse(SIMPLE_XMLFILE) >>> action, elem = next(context) >>> print action, elem.tag end element >>> for action, elem in context: ... print action, elem.tag end element end empty-element end root >>> context.root.tag 'root' >>> context = iterparse(SIMPLE_NS_XMLFILE) >>> for action, elem in context: ... print action, elem.tag end {namespace}element end {namespace}element end {namespace}empty-element end {namespace}root >>> events = () >>> context = iterparse(SIMPLE_XMLFILE, events) >>> for action, elem in context: ... print action, elem.tag >>> events = () >>> context = iterparse(SIMPLE_XMLFILE, events=events) >>> for action, elem in context: ... print action, elem.tag >>> events = ("start", "end") >>> context = iterparse(SIMPLE_XMLFILE, events) >>> for action, elem in context: ... 
print action, elem.tag start root start element end element start element end element start empty-element end empty-element end root >>> events = ("start", "end", "start-ns", "end-ns") >>> context = iterparse(SIMPLE_NS_XMLFILE, events) >>> for action, elem in context: ... if action in ("start", "end"): ... print action, elem.tag ... else: ... print action, elem start-ns ('', 'namespace') start {namespace}root start {namespace}element end {namespace}element start {namespace}element end {namespace}element start {namespace}empty-element end {namespace}empty-element end {namespace}root end-ns None >>> events = ("start", "end", "bogus") >>> with open(SIMPLE_XMLFILE, "rb") as f: ... iterparse(f, events) Traceback (most recent call last): ValueError: unknown event 'bogus' >>> import StringIO >>> source = StringIO.StringIO( ... "<?xml version='1.0' encoding='iso-8859-1'?>\\n" ... "<body xmlns='http://&#233;ffbot.org/ns'\\n" ... " xmlns:cl\\xe9='http://effbot.org/ns'>text</body>\\n") >>> events = ("start-ns",) >>> context = iterparse(source, events) >>> for action, elem in context: ... print action, elem start-ns ('', u'http://\\xe9ffbot.org/ns') start-ns (u'cl\\xe9', 'http://effbot.org/ns') >>> source = StringIO.StringIO("<document />junk") >>> try: ... for action, elem in iterparse(source): ... print action, elem.tag ... except ET.ParseError, v: ... print v junk after document element: line 1, column 12 """ def writefile(): """ >>> elem = ET.Element("tag") >>> elem.text = "text" >>> serialize(elem) '<tag>text</tag>' >>> ET.SubElement(elem, "subtag").text = "subtext" >>> serialize(elem) '<tag>text<subtag>subtext</subtag></tag>' Test tag suppression >>> elem.tag = None >>> serialize(elem) 'text<subtag>subtext</subtag>' >>> elem.insert(0, ET.Comment("comment")) >>> serialize(elem) # assumes 1.3 'text<!--comment--><subtag>subtext</subtag>' >>> elem[0] = ET.PI("key", "value") >>> serialize(elem) 'text<?key value?><subtag>subtext</subtag>' """ def custom_builder(): """ Test parser w. custom builder. >>> with open(SIMPLE_XMLFILE) as f: ... data = f.read() >>> class Builder: ... def start(self, tag, attrib): ... print "start", tag ... def end(self, tag): ... print "end", tag ... def data(self, text): ... pass >>> builder = Builder() >>> parser = ET.XMLParser(target=builder) >>> parser.feed(data) start root start element end element start element end element start empty-element end empty-element end root >>> with open(SIMPLE_NS_XMLFILE) as f: ... data = f.read() >>> class Builder: ... def start(self, tag, attrib): ... print "start", tag ... def end(self, tag): ... print "end", tag ... def data(self, text): ... pass ... def pi(self, target, data): ... print "pi", target, repr(data) ... def comment(self, data): ... print "comment", repr(data) >>> builder = Builder() >>> parser = ET.XMLParser(target=builder) >>> parser.feed(data) pi pi 'data' comment ' comment ' start {namespace}root start {namespace}element end {namespace}element start {namespace}element end {namespace}element start {namespace}empty-element end {namespace}empty-element end {namespace}root """ def getchildren(): """ Test Element.getchildren() >>> with open(SIMPLE_XMLFILE, "r") as f: ... tree = ET.parse(f) >>> for elem in tree.getroot().iter(): ... summarize_list(elem.getchildren()) ['element', 'element', 'empty-element'] [] [] [] >>> for elem in tree.getiterator(): ... 
summarize_list(elem.getchildren()) ['element', 'element', 'empty-element'] [] [] [] >>> elem = ET.XML(SAMPLE_XML) >>> len(elem.getchildren()) 3 >>> len(elem[2].getchildren()) 1 >>> elem[:] == elem.getchildren() True >>> child1 = elem[0] >>> child2 = elem[2] >>> del elem[1:2] >>> len(elem.getchildren()) 2 >>> child1 == elem[0] True >>> child2 == elem[1] True >>> elem[0:2] = [child2, child1] >>> child2 == elem[0] True >>> child1 == elem[1] True >>> child1 == elem[0] False >>> elem.clear() >>> elem.getchildren() [] """ def writestring(): """ >>> elem = ET.XML("<html><body>text</body></html>") >>> ET.tostring(elem) '<html><body>text</body></html>' >>> elem = ET.fromstring("<html><body>text</body></html>") >>> ET.tostring(elem) '<html><body>text</body></html>' """ def check_encoding(encoding): """ >>> check_encoding("ascii") >>> check_encoding("us-ascii") >>> check_encoding("iso-8859-1") >>> check_encoding("iso-8859-15") >>> check_encoding("cp437") >>> check_encoding("mac-roman") """ ET.XML("<?xml version='1.0' encoding='%s'?><xml />" % encoding) def encoding(): r""" Test encoding issues. >>> elem = ET.Element("tag") >>> elem.text = u"abc" >>> serialize(elem) '<tag>abc</tag>' >>> serialize(elem, encoding="utf-8") '<tag>abc</tag>' >>> serialize(elem, encoding="us-ascii") '<tag>abc</tag>' >>> serialize(elem, encoding="iso-8859-1") "<?xml version='1.0' encoding='iso-8859-1'?>\n<tag>abc</tag>" >>> elem.text = "<&\"\'>" >>> serialize(elem) '<tag>&lt;&amp;"\'&gt;</tag>' >>> serialize(elem, encoding="utf-8") '<tag>&lt;&amp;"\'&gt;</tag>' >>> serialize(elem, encoding="us-ascii") # cdata characters '<tag>&lt;&amp;"\'&gt;</tag>' >>> serialize(elem, encoding="iso-8859-1") '<?xml version=\'1.0\' encoding=\'iso-8859-1\'?>\n<tag>&lt;&amp;"\'&gt;</tag>' >>> elem.attrib["key"] = "<&\"\'>" >>> elem.text = None >>> serialize(elem) '<tag key="&lt;&amp;&quot;\'&gt;" />' >>> serialize(elem, encoding="utf-8") '<tag key="&lt;&amp;&quot;\'&gt;" />' >>> serialize(elem, encoding="us-ascii") '<tag key="&lt;&amp;&quot;\'&gt;" />' >>> serialize(elem, encoding="iso-8859-1") '<?xml version=\'1.0\' encoding=\'iso-8859-1\'?>\n<tag key="&lt;&amp;&quot;\'&gt;" />' >>> elem.text = u'\xe5\xf6\xf6<>' >>> elem.attrib.clear() >>> serialize(elem) '<tag>&#229;&#246;&#246;&lt;&gt;</tag>' >>> serialize(elem, encoding="utf-8") '<tag>\xc3\xa5\xc3\xb6\xc3\xb6&lt;&gt;</tag>' >>> serialize(elem, encoding="us-ascii") '<tag>&#229;&#246;&#246;&lt;&gt;</tag>' >>> serialize(elem, encoding="iso-8859-1") "<?xml version='1.0' encoding='iso-8859-1'?>\n<tag>\xe5\xf6\xf6&lt;&gt;</tag>" >>> elem.attrib["key"] = u'\xe5\xf6\xf6<>' >>> elem.text = None >>> serialize(elem) '<tag key="&#229;&#246;&#246;&lt;&gt;" />' >>> serialize(elem, encoding="utf-8") '<tag key="\xc3\xa5\xc3\xb6\xc3\xb6&lt;&gt;" />' >>> serialize(elem, encoding="us-ascii") '<tag key="&#229;&#246;&#246;&lt;&gt;" />' >>> serialize(elem, encoding="iso-8859-1") '<?xml version=\'1.0\' encoding=\'iso-8859-1\'?>\n<tag key="\xe5\xf6\xf6&lt;&gt;" />' """ def methods(): r""" Test serialization methods. >>> e = ET.XML("<html><link/><script>1 &lt; 2</script></html>") >>> e.tail = "\n" >>> serialize(e) '<html><link /><script>1 &lt; 2</script></html>\n' >>> serialize(e, method=None) '<html><link /><script>1 &lt; 2</script></html>\n' >>> serialize(e, method="xml") '<html><link /><script>1 &lt; 2</script></html>\n' >>> serialize(e, method="html") '<html><link><script>1 < 2</script></html>\n' >>> serialize(e, method="text") '1 < 2\n' """ def iterators(): """ Test iterators. 
>>> e = ET.XML("<html><body>this is a <i>paragraph</i>.</body>..</html>") >>> summarize_list(e.iter()) ['html', 'body', 'i'] >>> summarize_list(e.find("body").iter()) ['body', 'i'] >>> summarize(next(e.iter())) 'html' >>> "".join(e.itertext()) 'this is a paragraph...' >>> "".join(e.find("body").itertext()) 'this is a paragraph.' >>> next(e.itertext()) 'this is a ' Method iterparse should return an iterator. See bug 6472. >>> sourcefile = serialize(e, to_string=False) >>> next(ET.iterparse(sourcefile)) # doctest: +ELLIPSIS ('end', <Element 'i' at 0x...>) >>> tree = ET.ElementTree(None) >>> tree.iter() Traceback (most recent call last): AttributeError: 'NoneType' object has no attribute 'iter' """ ENTITY_XML = """\ <!DOCTYPE points [ <!ENTITY % user-entities SYSTEM 'user-entities.xml'> %user-entities; ]> <document>&entity;</document> """ def entity(): """ Test entity handling. 1) good entities >>> e = ET.XML("<document title='&#x8230;'>test</document>") >>> serialize(e) '<document title="&#33328;">test</document>' 2) bad entities >>> ET.XML("<document>&entity;</document>") Traceback (most recent call last): ParseError: undefined entity: line 1, column 10 >>> ET.XML(ENTITY_XML) Traceback (most recent call last): ParseError: undefined entity &entity;: line 5, column 10 3) custom entity >>> parser = ET.XMLParser() >>> parser.entity["entity"] = "text" >>> parser.feed(ENTITY_XML) >>> root = parser.close() >>> serialize(root) '<document>text</document>' """ def error(xml): """ Test error handling. >>> issubclass(ET.ParseError, SyntaxError) True >>> error("foo").position (1, 0) >>> error("<tag>&foo;</tag>").position (1, 5) >>> error("foobar<").position (1, 6) """ try: ET.XML(xml) except ET.ParseError: return sys.exc_value def namespace(): """ Test namespace issues. 1) xml namespace >>> elem = ET.XML("<tag xml:lang='en' />") >>> serialize(elem) # 1.1 '<tag xml:lang="en" />' 2) other "well-known" namespaces >>> elem = ET.XML("<rdf:RDF xmlns:rdf='http://www.w3.org/1999/02/22-rdf-syntax-ns#' />") >>> serialize(elem) # 2.1 '<rdf:RDF xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" />' >>> elem = ET.XML("<html:html xmlns:html='http://www.w3.org/1999/xhtml' />") >>> serialize(elem) # 2.2 '<html:html xmlns:html="http://www.w3.org/1999/xhtml" />' >>> elem = ET.XML("<soap:Envelope xmlns:soap='http://schemas.xmlsoap.org/soap/envelope' />") >>> serialize(elem) # 2.3 '<ns0:Envelope xmlns:ns0="http://schemas.xmlsoap.org/soap/envelope" />' 3) unknown namespaces >>> elem = ET.XML(SAMPLE_XML_NS) >>> print serialize(elem) <ns0:body xmlns:ns0="http://effbot.org/ns"> <ns0:tag>text</ns0:tag> <ns0:tag /> <ns0:section> <ns0:tag>subtext</ns0:tag> </ns0:section> </ns0:body> """ def qname(): """ Test QName handling. 
1) decorated tags >>> elem = ET.Element("{uri}tag") >>> serialize(elem) # 1.1 '<ns0:tag xmlns:ns0="uri" />' >>> elem = ET.Element(ET.QName("{uri}tag")) >>> serialize(elem) # 1.2 '<ns0:tag xmlns:ns0="uri" />' >>> elem = ET.Element(ET.QName("uri", "tag")) >>> serialize(elem) # 1.3 '<ns0:tag xmlns:ns0="uri" />' >>> elem = ET.Element(ET.QName("uri", "tag")) >>> subelem = ET.SubElement(elem, ET.QName("uri", "tag1")) >>> subelem = ET.SubElement(elem, ET.QName("uri", "tag2")) >>> serialize(elem) # 1.4 '<ns0:tag xmlns:ns0="uri"><ns0:tag1 /><ns0:tag2 /></ns0:tag>' 2) decorated attributes >>> elem.clear() >>> elem.attrib["{uri}key"] = "value" >>> serialize(elem) # 2.1 '<ns0:tag xmlns:ns0="uri" ns0:key="value" />' >>> elem.clear() >>> elem.attrib[ET.QName("{uri}key")] = "value" >>> serialize(elem) # 2.2 '<ns0:tag xmlns:ns0="uri" ns0:key="value" />' 3) decorated values are not converted by default, but the QName wrapper can be used for values >>> elem.clear() >>> elem.attrib["{uri}key"] = "{uri}value" >>> serialize(elem) # 3.1 '<ns0:tag xmlns:ns0="uri" ns0:key="{uri}value" />' >>> elem.clear() >>> elem.attrib["{uri}key"] = ET.QName("{uri}value") >>> serialize(elem) # 3.2 '<ns0:tag xmlns:ns0="uri" ns0:key="ns0:value" />' >>> elem.clear() >>> subelem = ET.Element("tag") >>> subelem.attrib["{uri1}key"] = ET.QName("{uri2}value") >>> elem.append(subelem) >>> elem.append(subelem) >>> serialize(elem) # 3.3 '<ns0:tag xmlns:ns0="uri" xmlns:ns1="uri1" xmlns:ns2="uri2"><tag ns1:key="ns2:value" /><tag ns1:key="ns2:value" /></ns0:tag>' 4) Direct QName tests >>> str(ET.QName('ns', 'tag')) '{ns}tag' >>> str(ET.QName('{ns}tag')) '{ns}tag' >>> q1 = ET.QName('ns', 'tag') >>> q2 = ET.QName('ns', 'tag') >>> q1 == q2 True >>> q2 = ET.QName('ns', 'other-tag') >>> q1 == q2 False >>> q1 == 'ns:tag' False >>> q1 == '{ns}tag' True """ def doctype_public(): """ Test PUBLIC doctype. >>> elem = ET.XML('<!DOCTYPE html PUBLIC' ... ' "-//W3C//DTD XHTML 1.0 Transitional//EN"' ... ' "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">' ... '<html>text</html>') """ def xpath_tokenizer(p): """ Test the XPath tokenizer. 
>>> # tests from the xml specification >>> xpath_tokenizer("*") ['*'] >>> xpath_tokenizer("text()") ['text', '()'] >>> xpath_tokenizer("@name") ['@', 'name'] >>> xpath_tokenizer("@*") ['@', '*'] >>> xpath_tokenizer("para[1]") ['para', '[', '1', ']'] >>> xpath_tokenizer("para[last()]") ['para', '[', 'last', '()', ']'] >>> xpath_tokenizer("*/para") ['*', '/', 'para'] >>> xpath_tokenizer("/doc/chapter[5]/section[2]") ['/', 'doc', '/', 'chapter', '[', '5', ']', '/', 'section', '[', '2', ']'] >>> xpath_tokenizer("chapter//para") ['chapter', '//', 'para'] >>> xpath_tokenizer("//para") ['//', 'para'] >>> xpath_tokenizer("//olist/item") ['//', 'olist', '/', 'item'] >>> xpath_tokenizer(".") ['.'] >>> xpath_tokenizer(".//para") ['.', '//', 'para'] >>> xpath_tokenizer("..") ['..'] >>> xpath_tokenizer("../@lang") ['..', '/', '@', 'lang'] >>> xpath_tokenizer("chapter[title]") ['chapter', '[', 'title', ']'] >>> xpath_tokenizer("employee[@secretary and @assistant]") ['employee', '[', '@', 'secretary', '', 'and', '', '@', 'assistant', ']'] >>> # additional tests >>> xpath_tokenizer("{http://spam}egg") ['{http://spam}egg'] >>> xpath_tokenizer("./spam.egg") ['.', '/', 'spam.egg'] >>> xpath_tokenizer(".//{http://spam}egg") ['.', '//', '{http://spam}egg'] """ from xml.etree import ElementPath out = [] for op, tag in ElementPath.xpath_tokenizer(p): out.append(op or tag) return out def processinginstruction(): """ Test ProcessingInstruction directly >>> ET.tostring(ET.ProcessingInstruction('test', 'instruction')) '<?test instruction?>' >>> ET.tostring(ET.PI('test', 'instruction')) '<?test instruction?>' Issue #2746 >>> ET.tostring(ET.PI('test', '<testing&>')) '<?test <testing&>?>' >>> ET.tostring(ET.PI('test', u'<testing&>\xe3'), 'latin1') "<?xml version='1.0' encoding='latin1'?>\\n<?test <testing&>\\xe3?>" """ # # xinclude tests (samples from appendix C of the xinclude specification) XINCLUDE = {} XINCLUDE["C1.xml"] = """\ <?xml version='1.0'?> <document xmlns:xi="http://www.w3.org/2001/XInclude"> <p>120 Mz is adequate for an average home user.</p> <xi:include href="disclaimer.xml"/> </document> """ XINCLUDE["disclaimer.xml"] = """\ <?xml version='1.0'?> <disclaimer> <p>The opinions represented herein represent those of the individual and should not be interpreted as official policy endorsed by this organization.</p> </disclaimer> """ XINCLUDE["C2.xml"] = """\ <?xml version='1.0'?> <document xmlns:xi="http://www.w3.org/2001/XInclude"> <p>This document has been accessed <xi:include href="count.txt" parse="text"/> times.</p> </document> """ XINCLUDE["count.txt"] = "324387" XINCLUDE["C2b.xml"] = """\ <?xml version='1.0'?> <document xmlns:xi="http://www.w3.org/2001/XInclude"> <p>This document has been <em>accessed</em> <xi:include href="count.txt" parse="text"/> times.</p> </document> """ XINCLUDE["C3.xml"] = """\ <?xml version='1.0'?> <document xmlns:xi="http://www.w3.org/2001/XInclude"> <p>The following is the source of the "data.xml" resource:</p> <example><xi:include href="data.xml" parse="text"/></example> </document> """ XINCLUDE["data.xml"] = """\ <?xml version='1.0'?> <data> <item><![CDATA[Brooks & Shields]]></item> </data> """ XINCLUDE["C5.xml"] = """\ <?xml version='1.0'?> <div xmlns:xi="http://www.w3.org/2001/XInclude"> <xi:include href="example.txt" parse="text"> <xi:fallback> <xi:include href="fallback-example.txt" parse="text"> <xi:fallback><a href="mailto:[email protected]">Report error</a></xi:fallback> </xi:include> </xi:fallback> </xi:include> </div> """ XINCLUDE["default.xml"] = """\ <?xml 
version='1.0'?> <document xmlns:xi="http://www.w3.org/2001/XInclude"> <p>Example.</p> <xi:include href="{}"/> </document> """.format(cgi.escape(SIMPLE_XMLFILE, True)) def xinclude_loader(href, parse="xml", encoding=None): try: data = XINCLUDE[href] except KeyError: raise IOError("resource not found") if parse == "xml": from xml.etree.ElementTree import XML return XML(data) return data def xinclude(): r""" Basic inclusion example (XInclude C.1) >>> from xml.etree import ElementTree as ET >>> from xml.etree import ElementInclude >>> document = xinclude_loader("C1.xml") >>> ElementInclude.include(document, xinclude_loader) >>> print serialize(document) # C1 <document> <p>120 Mz is adequate for an average home user.</p> <disclaimer> <p>The opinions represented herein represent those of the individual and should not be interpreted as official policy endorsed by this organization.</p> </disclaimer> </document> Textual inclusion example (XInclude C.2) >>> document = xinclude_loader("C2.xml") >>> ElementInclude.include(document, xinclude_loader) >>> print serialize(document) # C2 <document> <p>This document has been accessed 324387 times.</p> </document> Textual inclusion after sibling element (based on modified XInclude C.2) >>> document = xinclude_loader("C2b.xml") >>> ElementInclude.include(document, xinclude_loader) >>> print(serialize(document)) # C2b <document> <p>This document has been <em>accessed</em> 324387 times.</p> </document> Textual inclusion of XML example (XInclude C.3) >>> document = xinclude_loader("C3.xml") >>> ElementInclude.include(document, xinclude_loader) >>> print serialize(document) # C3 <document> <p>The following is the source of the "data.xml" resource:</p> <example>&lt;?xml version='1.0'?&gt; &lt;data&gt; &lt;item&gt;&lt;![CDATA[Brooks &amp; Shields]]&gt;&lt;/item&gt; &lt;/data&gt; </example> </document> Fallback example (XInclude C.5) Note! Fallback support is not yet implemented >>> document = xinclude_loader("C5.xml") >>> ElementInclude.include(document, xinclude_loader) Traceback (most recent call last): IOError: resource not found >>> # print serialize(document) # C5 """ def xinclude_default(): """ >>> from xml.etree import ElementInclude >>> document = xinclude_loader("default.xml") >>> ElementInclude.include(document) >>> print serialize(document) # default <document> <p>Example.</p> <root> <element key="value">text</element> <element>text</element>tail <empty-element /> </root> </document> """ # # badly formatted xi:include tags XINCLUDE_BAD = {} XINCLUDE_BAD["B1.xml"] = """\ <?xml version='1.0'?> <document xmlns:xi="http://www.w3.org/2001/XInclude"> <p>120 Mz is adequate for an average home user.</p> <xi:include href="disclaimer.xml" parse="BAD_TYPE"/> </document> """ XINCLUDE_BAD["B2.xml"] = """\ <?xml version='1.0'?> <div xmlns:xi="http://www.w3.org/2001/XInclude"> <xi:fallback></xi:fallback> </div> """ def xinclude_failures(): r""" Test failure to locate included XML file. >>> from xml.etree import ElementInclude >>> def none_loader(href, parser, encoding=None): ... return None >>> document = ET.XML(XINCLUDE["C1.xml"]) >>> ElementInclude.include(document, loader=none_loader) Traceback (most recent call last): FatalIncludeError: cannot load 'disclaimer.xml' as 'xml' Test failure to locate included text file. >>> document = ET.XML(XINCLUDE["C2.xml"]) >>> ElementInclude.include(document, loader=none_loader) Traceback (most recent call last): FatalIncludeError: cannot load 'count.txt' as 'text' Test bad parse type. 
>>> document = ET.XML(XINCLUDE_BAD["B1.xml"]) >>> ElementInclude.include(document, loader=none_loader) Traceback (most recent call last): FatalIncludeError: unknown parse type in xi:include tag ('BAD_TYPE') Test xi:fallback outside xi:include. >>> document = ET.XML(XINCLUDE_BAD["B2.xml"]) >>> ElementInclude.include(document, loader=none_loader) Traceback (most recent call last): FatalIncludeError: xi:fallback tag must be child of xi:include ('{http://www.w3.org/2001/XInclude}fallback') """ # -------------------------------------------------------------------- # reported bugs def bug_xmltoolkit21(): """ marshaller gives obscure errors for non-string values >>> elem = ET.Element(123) >>> serialize(elem) # tag Traceback (most recent call last): TypeError: cannot serialize 123 (type int) >>> elem = ET.Element("elem") >>> elem.text = 123 >>> serialize(elem) # text Traceback (most recent call last): TypeError: cannot serialize 123 (type int) >>> elem = ET.Element("elem") >>> elem.tail = 123 >>> serialize(elem) # tail Traceback (most recent call last): TypeError: cannot serialize 123 (type int) >>> elem = ET.Element("elem") >>> elem.set(123, "123") >>> serialize(elem) # attribute key Traceback (most recent call last): TypeError: cannot serialize 123 (type int) >>> elem = ET.Element("elem") >>> elem.set("123", 123) >>> serialize(elem) # attribute value Traceback (most recent call last): TypeError: cannot serialize 123 (type int) """ def bug_xmltoolkit25(): """ typo in ElementTree.findtext >>> elem = ET.XML(SAMPLE_XML) >>> tree = ET.ElementTree(elem) >>> tree.findtext("tag") 'text' >>> tree.findtext("section/tag") 'subtext' """ def bug_xmltoolkit28(): """ .//tag causes exceptions >>> tree = ET.XML("<doc><table><tbody/></table></doc>") >>> summarize_list(tree.findall(".//thead")) [] >>> summarize_list(tree.findall(".//tbody")) ['tbody'] """ def bug_xmltoolkitX1(): """ dump() doesn't flush the output buffer >>> tree = ET.XML("<doc><table><tbody/></table></doc>") >>> ET.dump(tree); sys.stdout.write("tail") <doc><table><tbody /></table></doc> tail """ def bug_xmltoolkit39(): """ non-ascii element and attribute names don't work >>> tree = ET.XML("<?xml version='1.0' encoding='iso-8859-1'?><t\xe4g />") >>> ET.tostring(tree, "utf-8") '<t\\xc3\\xa4g />' >>> tree = ET.XML("<?xml version='1.0' encoding='iso-8859-1'?><tag \xe4ttr='v&#228;lue' />") >>> tree.attrib {u'\\xe4ttr': u'v\\xe4lue'} >>> ET.tostring(tree, "utf-8") '<tag \\xc3\\xa4ttr="v\\xc3\\xa4lue" />' >>> tree = ET.XML("<?xml version='1.0' encoding='iso-8859-1'?><t\xe4g>text</t\xe4g>") >>> ET.tostring(tree, "utf-8") '<t\\xc3\\xa4g>text</t\\xc3\\xa4g>' >>> tree = ET.Element(u"t\u00e4g") >>> ET.tostring(tree, "utf-8") '<t\\xc3\\xa4g />' >>> tree = ET.Element("tag") >>> tree.set(u"\u00e4ttr", u"v\u00e4lue") >>> ET.tostring(tree, "utf-8") '<tag \\xc3\\xa4ttr="v\\xc3\\xa4lue" />' """ def bug_xmltoolkit54(): """ problems handling internally defined entities >>> e = ET.XML("<!DOCTYPE doc [<!ENTITY ldots '&#x8230;'>]><doc>&ldots;</doc>") >>> serialize(e) '<doc>&#33328;</doc>' """ def bug_xmltoolkit55(): """ make sure we're reporting the first error, not the last >>> e = ET.XML("<!DOCTYPE doc SYSTEM 'doc.dtd'><doc>&ldots;&ndots;&rdots;</doc>") Traceback (most recent call last): ParseError: undefined entity &ldots;: line 1, column 36 """ class ExceptionFile: def read(self, x): raise IOError def xmltoolkit60(): """ Handle crash in stream source.
>>> tree = ET.parse(ExceptionFile()) Traceback (most recent call last): IOError """ XMLTOOLKIT62_DOC = """<?xml version="1.0" encoding="UTF-8"?> <!DOCTYPE patent-application-publication SYSTEM "pap-v15-2001-01-31.dtd" []> <patent-application-publication> <subdoc-abstract> <paragraph id="A-0001" lvl="0">A new cultivar of Begonia plant named &lsquo;BCT9801BEG&rsquo;.</paragraph> </subdoc-abstract> </patent-application-publication>""" def xmltoolkit62(): """ Don't crash when using custom entities. >>> xmltoolkit62() u'A new cultivar of Begonia plant named \u2018BCT9801BEG\u2019.' """ ENTITIES = {u'rsquo': u'\u2019', u'lsquo': u'\u2018'} parser = ET.XMLTreeBuilder() parser.entity.update(ENTITIES) parser.feed(XMLTOOLKIT62_DOC) t = parser.close() return t.find('.//paragraph').text def xmltoolkit63(): """ Check reference leak. >>> xmltoolkit63() >>> count = sys.getrefcount(None) >>> for i in range(1000): ... xmltoolkit63() >>> sys.getrefcount(None) - count 0 """ tree = ET.TreeBuilder() tree.start("tag", {}) tree.data("text") tree.end("tag") # -------------------------------------------------------------------- def bug_200708_newline(): r""" Preserve newlines in attributes. >>> e = ET.Element('SomeTag', text="def _f():\n return 3\n") >>> ET.tostring(e) '<SomeTag text="def _f():&#10; return 3&#10;" />' >>> ET.XML(ET.tostring(e)).get("text") 'def _f():\n return 3\n' >>> ET.tostring(ET.XML(ET.tostring(e))) '<SomeTag text="def _f():&#10; return 3&#10;" />' """ def bug_200708_close(): """ Test default builder. >>> parser = ET.XMLParser() # default >>> parser.feed("<element>some text</element>") >>> summarize(parser.close()) 'element' Test custom builder. >>> class EchoTarget: ... def close(self): ... return ET.Element("element") # simulate root >>> parser = ET.XMLParser(EchoTarget()) >>> parser.feed("<element>some text</element>") >>> summarize(parser.close()) 'element' """ def bug_200709_default_namespace(): """ >>> e = ET.Element("{default}elem") >>> s = ET.SubElement(e, "{default}elem") >>> serialize(e, default_namespace="default") # 1 '<elem xmlns="default"><elem /></elem>' >>> e = ET.Element("{default}elem") >>> s = ET.SubElement(e, "{default}elem") >>> s = ET.SubElement(e, "{not-default}elem") >>> serialize(e, default_namespace="default") # 2 '<elem xmlns="default" xmlns:ns1="not-default"><elem /><ns1:elem /></elem>' >>> e = ET.Element("{default}elem") >>> s = ET.SubElement(e, "{default}elem") >>> s = ET.SubElement(e, "elem") # unprefixed name >>> serialize(e, default_namespace="default") # 3 Traceback (most recent call last): ValueError: cannot use non-qualified names with default_namespace option """ def bug_200709_register_namespace(): """ >>> ET.tostring(ET.Element("{http://namespace.invalid/does/not/exist/}title")) '<ns0:title xmlns:ns0="http://namespace.invalid/does/not/exist/" />' >>> ET.register_namespace("foo", "http://namespace.invalid/does/not/exist/") >>> ET.tostring(ET.Element("{http://namespace.invalid/does/not/exist/}title")) '<foo:title xmlns:foo="http://namespace.invalid/does/not/exist/" />' And the Dublin Core namespace is in the default list: >>> ET.tostring(ET.Element("{http://purl.org/dc/elements/1.1/}title")) '<dc:title xmlns:dc="http://purl.org/dc/elements/1.1/" />' """ def bug_200709_element_comment(): """ Not sure if this can be fixed, really (since the serializer needs ET.Comment, not cET.comment). 
>>> a = ET.Element('a') >>> a.append(ET.Comment('foo')) >>> a[0].tag == ET.Comment True >>> a = ET.Element('a') >>> a.append(ET.PI('foo')) >>> a[0].tag == ET.PI True """ def bug_200709_element_insert(): """ >>> a = ET.Element('a') >>> b = ET.SubElement(a, 'b') >>> c = ET.SubElement(a, 'c') >>> d = ET.Element('d') >>> a.insert(0, d) >>> summarize_list(a) ['d', 'b', 'c'] >>> a.insert(-1, d) >>> summarize_list(a) ['d', 'b', 'd', 'c'] """ def bug_200709_iter_comment(): """ >>> a = ET.Element('a') >>> b = ET.SubElement(a, 'b') >>> comment_b = ET.Comment("TEST-b") >>> b.append(comment_b) >>> summarize_list(a.iter(ET.Comment)) ['<Comment>'] """ # -------------------------------------------------------------------- # reported on bugs.python.org def bug_1534630(): """ >>> bob = ET.TreeBuilder() >>> e = bob.data("data") >>> e = bob.start("tag", {}) >>> e = bob.end("tag") >>> e = bob.close() >>> serialize(e) '<tag />' """ def check_issue6233(): """ >>> e = ET.XML("<?xml version='1.0' encoding='utf-8'?><body>t\\xc3\\xa3g</body>") >>> ET.tostring(e, 'ascii') "<?xml version='1.0' encoding='ascii'?>\\n<body>t&#227;g</body>" >>> e = ET.XML("<?xml version='1.0' encoding='iso-8859-1'?><body>t\\xe3g</body>") >>> ET.tostring(e, 'ascii') "<?xml version='1.0' encoding='ascii'?>\\n<body>t&#227;g</body>" """ def check_issue3151(): """ >>> e = ET.XML('<prefix:localname xmlns:prefix="${stuff}"/>') >>> e.tag '{${stuff}}localname' >>> t = ET.ElementTree(e) >>> ET.tostring(e) '<ns0:localname xmlns:ns0="${stuff}" />' """ def check_issue6565(): """ >>> elem = ET.XML("<body><tag/></body>") >>> summarize_list(elem) ['tag'] >>> newelem = ET.XML(SAMPLE_XML) >>> elem[:] = newelem[:] >>> summarize_list(elem) ['tag', 'tag', 'section'] """ # -------------------------------------------------------------------- class CleanContext(object): """Provide default namespace mapping and path cache.""" checkwarnings = None def __init__(self, quiet=False): if sys.flags.optimize >= 2: # under -OO, doctests cannot be run and therefore not all warnings # will be emitted quiet = True deprecations = ( # Search behaviour is broken if search path starts with "/". ("This search is broken in 1.3 and earlier, and will be fixed " "in a future version. If you rely on the current behaviour, " "change it to '.+'", FutureWarning), # Element.getchildren() and Element.getiterator() are deprecated. ("This method will be removed in future versions. " "Use .+ instead.", DeprecationWarning), ("This method will be removed in future versions. " "Use .+ instead.", PendingDeprecationWarning), # XMLParser.doctype() is deprecated. ("This method of XMLParser is deprecated. Define doctype.. 
" "method on the TreeBuilder target.", DeprecationWarning)) self.checkwarnings = test_support.check_warnings(*deprecations, quiet=quiet) def __enter__(self): from xml.etree import ElementTree self._nsmap = ElementTree._namespace_map self._path_cache = ElementTree.ElementPath._cache # Copy the default namespace mapping ElementTree._namespace_map = self._nsmap.copy() # Copy the path cache (should be empty) ElementTree.ElementPath._cache = self._path_cache.copy() self.checkwarnings.__enter__() def __exit__(self, *args): from xml.etree import ElementTree # Restore mapping and path cache ElementTree._namespace_map = self._nsmap ElementTree.ElementPath._cache = self._path_cache self.checkwarnings.__exit__(*args) def test_main(module_name='xml.etree.ElementTree'): from test import test_xml_etree use_py_module = (module_name == 'xml.etree.ElementTree') # The same doctests are used for both the Python and the C implementations assert test_xml_etree.ET.__name__ == module_name # XXX the C module should give the same warnings as the Python module with CleanContext(quiet=not use_py_module): test_support.run_doctest(test_xml_etree, verbosity=True) # The module should not be changed by the tests assert test_xml_etree.ET.__name__ == module_name if __name__ == '__main__': test_main()
mit
tim-janik/beast
yapps2_deb/yapps/grammar.py
42
8821
# grammar.py, part of Yapps 2 - yet another python parser system
# Copyright 1999-2003 by Amit J. Patel <[email protected]>
#
# This version of the Yapps 2 grammar can be distributed under the
# terms of the MIT open source license, either found in the LICENSE
# file included with the Yapps distribution
# <http://theory.stanford.edu/~amitp/yapps/> or at
# <http://www.opensource.org/licenses/mit-license.php>
#

"""Parser for Yapps grammars.

This file defines the grammar of Yapps grammars.  Naturally, it is
implemented in Yapps.  The grammar.py module needed by Yapps is built
by running Yapps on yapps_grammar.g.  (Holy circularity, Batman!)
"""

import sys, re
from yapps import parsetree

######################################################################

def cleanup_choice(rule, lst):
    if len(lst) == 0: return parsetree.Sequence(rule, [])
    if len(lst) == 1: return lst[0]
    return parsetree.Choice(rule, *tuple(lst))

def cleanup_sequence(rule, lst):
    if len(lst) == 1: return lst[0]
    return parsetree.Sequence(rule, *tuple(lst))

def resolve_name(rule, tokens, id, args):
    if id in [x[0] for x in tokens]:
        # It's a token
        if args:
            print 'Warning: ignoring parameters on TOKEN %s<<%s>>' % (id, args)
        return parsetree.Terminal(rule, id)
    else:
        # It's a name, so assume it's a nonterminal
        return parsetree.NonTerminal(rule, id, args)


# Begin -- grammar generated by Yapps
import sys, re
from yapps import runtime

class ParserDescriptionScanner(runtime.Scanner):
    patterns = [
        ('"rule"', re.compile('rule')),
        ('"ignore"', re.compile('ignore')),
        ('"token"', re.compile('token')),
        ('"option"', re.compile('option')),
        ('":"', re.compile(':')),
        ('"parser"', re.compile('parser')),
        ('[ \t\r\n]+', re.compile('[ \t\r\n]+')),
        ('#.*?\r?\n', re.compile('#.*?\r?\n')),
        ('EOF', re.compile('$')),
        ('ATTR', re.compile('<<.+?>>')),
        ('STMT', re.compile('{{.+?}}')),
        ('ID', re.compile('[a-zA-Z_][a-zA-Z_0-9]*')),
        ('STR', re.compile('[rR]?\'([^\\n\'\\\\]|\\\\.)*\'|[rR]?"([^\\n"\\\\]|\\\\.)*"')),
        ('LP', re.compile('\\(')),
        ('RP', re.compile('\\)')),
        ('LB', re.compile('\\[')),
        ('RB', re.compile('\\]')),
        ('OR', re.compile('[|]')),
        ('STAR', re.compile('[*]')),
        ('PLUS', re.compile('[+]')),
        ('QUEST', re.compile('[?]')),
        ('COLON', re.compile(':')),
    ]
    def __init__(self, str,*args,**kw):
        runtime.Scanner.__init__(self,None,{'[ \t\r\n]+':None,'#.*?\r?\n':None,},str,*args,**kw)

class ParserDescription(runtime.Parser):
    Context = runtime.Context
    def Parser(self, _parent=None):
        _context = self.Context(_parent, self._scanner, 'Parser', [])
        self._scan('"parser"', context=_context)
        ID = self._scan('ID', context=_context)
        self._scan('":"', context=_context)
        Options = self.Options(_context)
        Tokens = self.Tokens(_context)
        Rules = self.Rules(Tokens, _context)
        EOF = self._scan('EOF', context=_context)
        return parsetree.Generator(ID,Options,Tokens,Rules)

    def Options(self, _parent=None):
        _context = self.Context(_parent, self._scanner, 'Options', [])
        opt = {}
        while self._peek('"option"', '"token"', '"ignore"', 'EOF', '"rule"', context=_context) == '"option"':
            self._scan('"option"', context=_context)
            self._scan('":"', context=_context)
            Str = self.Str(_context)
            opt[Str] = 1
        return opt

    def Tokens(self, _parent=None):
        _context = self.Context(_parent, self._scanner, 'Tokens', [])
        tok = []
        while self._peek('"token"', '"ignore"', 'EOF', '"rule"', context=_context) in ['"token"', '"ignore"']:
            _token = self._peek('"token"', '"ignore"', context=_context)
            if _token == '"token"':
                self._scan('"token"', context=_context)
                ID = self._scan('ID', context=_context)
                self._scan('":"', context=_context)
Str = self.Str(_context) tok.append( (ID,Str) ) else: # == '"ignore"' self._scan('"ignore"', context=_context) self._scan('":"', context=_context) Str = self.Str(_context) ign = ('#ignore',Str) if self._peek('STMT', '"token"', '"ignore"', 'EOF', '"rule"', context=_context) == 'STMT': STMT = self._scan('STMT', context=_context) ign = ign + (STMT[2:-2],) tok.append( ign ) return tok def Rules(self, tokens, _parent=None): _context = self.Context(_parent, self._scanner, 'Rules', [tokens]) rul = [] while self._peek('"rule"', 'EOF', context=_context) == '"rule"': self._scan('"rule"', context=_context) ID = self._scan('ID', context=_context) OptParam = self.OptParam(_context) self._scan('":"', context=_context) ClauseA = self.ClauseA(ID, tokens, _context) rul.append( (ID, OptParam, ClauseA) ) return rul def ClauseA(self, rule, tokens, _parent=None): _context = self.Context(_parent, self._scanner, 'ClauseA', [rule, tokens]) ClauseB = self.ClauseB(rule,tokens, _context) v = [ClauseB] while self._peek('OR', 'RP', 'RB', '"rule"', 'EOF', context=_context) == 'OR': OR = self._scan('OR', context=_context) ClauseB = self.ClauseB(rule,tokens, _context) v.append(ClauseB) return cleanup_choice(rule,v) def ClauseB(self, rule,tokens, _parent=None): _context = self.Context(_parent, self._scanner, 'ClauseB', [rule,tokens]) v = [] while self._peek('STR', 'ID', 'LP', 'LB', 'STMT', 'OR', 'RP', 'RB', '"rule"', 'EOF', context=_context) in ['STR', 'ID', 'LP', 'LB', 'STMT']: ClauseC = self.ClauseC(rule,tokens, _context) v.append(ClauseC) return cleanup_sequence(rule, v) def ClauseC(self, rule,tokens, _parent=None): _context = self.Context(_parent, self._scanner, 'ClauseC', [rule,tokens]) ClauseD = self.ClauseD(rule,tokens, _context) _token = self._peek('PLUS', 'STAR', 'QUEST', 'STR', 'ID', 'LP', 'LB', 'STMT', 'OR', 'RP', 'RB', '"rule"', 'EOF', context=_context) if _token == 'PLUS': PLUS = self._scan('PLUS', context=_context) return parsetree.Plus(rule, ClauseD) elif _token == 'STAR': STAR = self._scan('STAR', context=_context) return parsetree.Star(rule, ClauseD) elif _token == 'QUEST': QUEST = self._scan('QUEST', context=_context) return parsetree.Option(rule, ClauseD) else: return ClauseD def ClauseD(self, rule,tokens, _parent=None): _context = self.Context(_parent, self._scanner, 'ClauseD', [rule,tokens]) _token = self._peek('STR', 'ID', 'LP', 'LB', 'STMT', context=_context) if _token == 'STR': STR = self._scan('STR', context=_context) t = (STR, eval(STR,{},{})) if t not in tokens: tokens.insert( 0, t ) return parsetree.Terminal(rule, STR) elif _token == 'ID': ID = self._scan('ID', context=_context) OptParam = self.OptParam(_context) return resolve_name(rule,tokens, ID, OptParam) elif _token == 'LP': LP = self._scan('LP', context=_context) ClauseA = self.ClauseA(rule,tokens, _context) RP = self._scan('RP', context=_context) return ClauseA elif _token == 'LB': LB = self._scan('LB', context=_context) ClauseA = self.ClauseA(rule,tokens, _context) RB = self._scan('RB', context=_context) return parsetree.Option(rule, ClauseA) else: # == 'STMT' STMT = self._scan('STMT', context=_context) return parsetree.Eval(rule, STMT[2:-2]) def OptParam(self, _parent=None): _context = self.Context(_parent, self._scanner, 'OptParam', []) if self._peek('ATTR', '":"', 'PLUS', 'STAR', 'QUEST', 'STR', 'ID', 'LP', 'LB', 'STMT', 'OR', 'RP', 'RB', '"rule"', 'EOF', context=_context) == 'ATTR': ATTR = self._scan('ATTR', context=_context) return ATTR[2:-2] return '' def Str(self, _parent=None): _context = self.Context(_parent, self._scanner, 
'Str', []) STR = self._scan('STR', context=_context) return eval(STR,{},{}) def parse(rule, text): P = ParserDescription(ParserDescriptionScanner(text)) return runtime.wrap_error_reporter(P, rule) # End -- grammar generated by Yapps
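
# --------------------------------------------------------------------
# Hedged usage sketch, not part of the generated parser above: how the
# parse() entry point might be driven by hand.  The tiny grammar text
# is an assumption invented for illustration; only the token/rule
# syntax that this module's own grammar accepts is relied upon.
if __name__ == '__main__':
    demo_grammar = (
        "parser Demo:\n"
        "    token NUM: '[0-9]+'\n"
        "    rule goal: NUM {{ return NUM }}\n"
    )
    # On success parse() returns a parsetree.Generator built from the
    # declarations; on a syntax error it reports the problem through
    # runtime.wrap_error_reporter and returns None.
    print parse('Parser', demo_grammar)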
lgpl-2.1
nhicher/ansible
lib/ansible/modules/cloud/ovirt/ovirt_storage_template_facts.py
16
4457
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017 Red Hat, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.
#

ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}

DOCUMENTATION = '''
---
module: ovirt_storage_template_facts
short_description: Retrieve facts about one or more oVirt/RHV templates related to a storage domain.
author: "Maor Lipchuk"
version_added: "2.4"
description:
    - "Retrieve facts about one or more oVirt/RHV templates related to a storage domain."
notes:
    - "This module creates a new top-level C(ovirt_storage_templates) fact, which
       contains a list of templates."
options:
    unregistered:
        description:
            - "Flag which indicates whether to get unregistered templates that contain
               one or more disks residing on the storage domain, or diskless templates."
        type: bool
        default: false
    max:
        description:
            - "Sets the maximum number of templates to return. If not specified, all the templates are returned."
    storage_domain:
        description:
            - "The storage domain name where the templates should be listed."
extends_documentation_fragment: ovirt_facts
'''

EXAMPLES = '''
# Examples don't contain the auth parameter for simplicity,
# look at the ovirt_auth module to see how to reuse authentication:

# Gather facts about all templates which are related to a storage domain and
# are unregistered:
- ovirt_storage_template_facts:
    unregistered: true
- debug:
    var: ovirt_storage_templates
'''

RETURN = '''
ovirt_storage_templates:
    description: "List of dictionaries describing the templates. Template attributes are mapped to
                  dictionary keys; all template attributes can be found at the following URL:
                  http://ovirt.github.io/ovirt-engine-api-model/master/#types/template."
    returned: On success.
    type: list
'''

import traceback

from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ovirt import (
    check_sdk,
    create_connection,
    get_dict_of_struct,
    ovirt_facts_full_argument_spec,
    get_id_by_name
)


def main():
    argument_spec = ovirt_facts_full_argument_spec(
        storage_domain=dict(default=None),
        max=dict(default=None, type='int'),
        unregistered=dict(default=False, type='bool'),
    )
    module = AnsibleModule(argument_spec)
    check_sdk(module)

    try:
        auth = module.params.pop('auth')
        connection = create_connection(auth)
        storage_domains_service = connection.system_service().storage_domains_service()
        sd_id = get_id_by_name(storage_domains_service, module.params['storage_domain'])
        storage_domain_service = storage_domains_service.storage_domain_service(sd_id)
        templates_service = storage_domain_service.templates_service()

        # Find the unregistered templates we want to list:
        if module.params.get('unregistered'):
            templates = templates_service.list(unregistered=True)
        else:
            templates = templates_service.list(max=module.params['max'])
        module.exit_json(
            changed=False,
            ansible_facts=dict(
                ovirt_storage_templates=[
                    get_dict_of_struct(
                        struct=c,
                        connection=connection,
                        fetch_nested=module.params.get('fetch_nested'),
                        attributes=module.params.get('nested_attributes'),
                    ) for c in templates
                ],
            ),
        )
    except Exception as e:
        module.fail_json(msg=str(e), exception=traceback.format_exc())
    finally:
        connection.close(logout=auth.get('token') is None)


if __name__ == '__main__':
    main()
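
# Hedged example (an assumption, mirroring the EXAMPLES block above): a
# play snippet listing at most 10 templates from a named storage domain.
# The domain name "mydata" is made up for illustration.
#
# - ovirt_storage_template_facts:
#     storage_domain: mydata
#     max: 10
# - debug:
#     var: ovirt_storage_templates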
gpl-3.0
bakhtout/odoo-educ
addons/product_expiry/__init__.py
442
1053
##############################################################################
#
#    OpenERP, Open Source Management Solution
#    Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################

import product_expiry

# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
thanhacun/odoo
addons/report_webkit/__init__.py
382
1593
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2010 Camptocamp SA (http://www.camptocamp.com)
# All Rights Reserved
#
# Author : Nicolas Bessi (Camptocamp)
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsibility of assessing all potential
# consequences resulting from its eventual inadequacies and bugs.
# End users who are looking for a ready-to-use solution with commercial
# guarantees and support are strongly advised to contract a Free Software
# Service Company.
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
##############################################################################

import header
import company
import report_helper
import webkit_report
import ir_report
import wizard
import convert
import report

# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
kartikgupta0909/gaia
test/unittest/test_parser.py
3
5700
#!/usr/bin/env python # Copyright (C) 2006-2013 Music Technology Group - Universitat Pompeu Fabra # # This file is part of Gaia # # Gaia is free software: you can redistribute it and/or modify it under # the terms of the GNU Affero General Public License as published by the Free # Software Foundation (FSF), either version 3 of the License, or (at your # option) any later version. # # This program is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS # FOR A PARTICULAR PURPOSE. See the GNU General Public License for more # details. # # You should have received a copy of the Affero GNU General Public License # version 3 along with this program. If not, see http://www.gnu.org/licenses/ from gaia2 import * import unittest import testdata def testValidPoint(dataset, clause, fromList = None): # search the point using the clause: # if we have a result, the clause was true # if we have no result, the clause was false v = View(dataset) dist = MetricFactory.create('null', dataset.layout()) filtr = 'WHERE ' + clause if fromList: filtr = 'FROM ' + fromList + ' ' + filtr result = v.nnSearch(dataset.samplePoint(), dist, filtr).get(1) if len(result) == 1: return True return False def testClause(clause): ds = testdata.createSimpleDataSet() return testValidPoint(ds, clause) class TestParser(unittest.TestCase): def setUp(self): cvar.verbose = False def tearDown(self): testdata.resetSettings() def testSimpleExpression(self): listTests = { '10 < 20': True, ' 23.34 == 45': False, ' 23.34 = 45': False, '12 ==12.0': True, '23>23': False, '23\t > 23 ': False, '1<2 and 2<3': True, '1<2 and 2>3': False, 'NOT (True)': False, '1<2 and not (2>3)': True, '12 in ( 1, 2, 3, 4, 5 )': False, '23 in (1, 23, 37, 42, 5)': True, '"ABC" IN ("FG")': False, '"acid" in ("this", "is", "an", "acid", "test")': True, '"smarties" NOT IN ("my pocket", "your pocket")': True, '2.3 BETWEEN 1e-4 AND 2e7': True, '2 between 2 and 3': True, '2 between 3 and 2': True, '4 between 3 and 2': False, 'true': True } for clause in listTests: result = listTests[clause] self.assertEqual(testClause(clause), result, clause) # also test mix of these (1 OR 2, 1 AND 2) for clauseA in listTests: for clauseB in listTests: self.assertEqual(testClause('(' + clauseA + ') AND (' + clauseB + ')'), listTests[clauseA] and listTests[clauseB]) self.assertEqual(testClause('(' + clauseA + ') OR (' + clauseB + ')'), listTests[clauseA] or listTests[clauseB]) def testParserStillInValidStateAfterParserError(self): '''ticket #20: parser is in invalid state after parser error''' ds = testdata.createSimpleDataSet() dist = MetricFactory.create('null', ds.layout()) v = View(ds) result = v.nnSearch(ds.samplePoint(), dist, 'WHERE true').get(1) clause = 'WHERE label.tonal_key_mode.value = \\"major"' try: result = v.nnSearch(ds.samplePoint(), dist, clause).get(1) except: pass # filter correctly failed to compile result = v.nnSearch(ds.samplePoint(), dist, 'WHERE true').get(1) def testVariables(self): d = testdata.createSimpleDataSet() def test(clause, expected): self.assertEqual(testValidPoint(d, clause), expected) d.point('p')['d'] = 'Hello' test('label.d = "Hello"', True) test('label.d = "goodbye"', False) d.point('p')['b'] = 23. 
test('value.b < 23', False) test('value.b <= 23', True) test('value.b != 23', False) test('value.b > 23', False) test('value.b == 23', True) test('value.b = 23', True) test('value.b <= 23', True) d.point('p')['e'] = [23.0, 24.0, 25.0] test('value.e[0] < 23', False) test('value.e[1] > 23', True) test('value.e[2] > 24.3 and value.e[2] <= 25', True) test('value.b = 23.0 and label.d = "Hello"', True) test('value.b = 23.0 or label.d = "Ho ho"', True) test('value.b < 23.0 and label.d = "Hello"', False) test('value.b = 23.0 and label.d = "Hell"', False) d.point('p')['a.1'] = 17 test('value.a.1 == 17', True) test('value.a.1 < 20 and value.b > 20 and label.d != "ooh yeah"', True) test('point.id IN ("c", "a", "t")', False) test('point.id NOT IN ("a", "p", "u")', False) test('point.id NOT IN ("s", "u", "n")', True) test('point.id == "p"', True) test('point.id != "rock\'n\'roll"', True) def testFixLength(self): testdata.useFixedLength = True self.testSimpleExpression() self.testParserStillInValidStateAfterParserError() self.testVariables() def testEnumerate(self): testdata.useEnumerate = True self.testSimpleExpression() self.testParserStillInValidStateAfterParserError() self.testVariables() def testEnumerateFixLength(self): testdata.useEnumerate = True self.testFixLength() suite = unittest.TestLoader().loadTestsFromTestCase(TestParser) if __name__ == '__main__': unittest.TextTestRunner(verbosity=2).run(suite)
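
# --------------------------------------------------------------------
# Hedged usage sketch, not part of the test suite: the same WHERE-clause
# composition that testValidPoint() performs, written out directly.  It
# assumes the simple dataset's layout has the 'b' and 'd' descriptors
# that the tests above assign to on point 'p'.
def demoFilter():
    ds = testdata.createSimpleDataSet()
    ds.point('p')['b'] = 23.
    ds.point('p')['d'] = 'Hello'
    v = View(ds)
    dist = MetricFactory.create('null', ds.layout())
    filtr = 'WHERE value.b = 23 AND label.d = "Hello"'
    # Returns the single nearest neighbour that satisfies the filter.
    return v.nnSearch(ds.samplePoint(), dist, filtr).get(1)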
agpl-3.0
backmari/moose
python/peacock/tests/input_tab/InputFileEditorWithMesh/test_InputFileEditorWithMesh.py
1
9015
#!/usr/bin/env python from peacock.Input.InputFileEditorWithMesh import InputFileEditorWithMesh from PyQt5.QtWidgets import QMainWindow, QMessageBox from peacock.Input.ExecutableInfo import ExecutableInfo from peacock.utils import Testing import argparse, os from mock import patch class BaseTests(Testing.PeacockTester): def setUp(self): super(BaseTests, self).setUp() self.input_file = "../../common/transient.i" self.highlight_left = "meshrender_highlight_left.png" self.highlight_right = "meshrender_highlight_right.png" self.highlight_all = "meshrender_highlight_all.png" self.basic_mesh = "meshrender_basic.png" #Testing.remove_file(self.highlight_all) #Testing.remove_file(self.highlight_right) #Testing.remove_file(self.highlight_left) #Testing.remove_file(self.basic_mesh) self.num_time_steps = None self.time_step_changed_count = 0 def newWidget(self): main_win = QMainWindow() w = InputFileEditorWithMesh(size=[640,640]) main_win.setCentralWidget(w) app_info = ExecutableInfo() app_info.setPath(Testing.find_moose_test_exe()) self.assertTrue(app_info.valid()) w.onExecutableInfoChanged(app_info) menubar = main_win.menuBar() menubar.setNativeMenuBar(False) w.addToMainMenu(menubar) w.initialize() w.setEnabled(True) main_win.show() self.assertEqual(w.vtkwin.isVisible(), False) w.numTimeStepsChanged.connect(self.timeStepChanged) return main_win, w def timeStepChanged(self, num_steps): self.num_time_steps = num_steps self.time_step_changed_count += 1 def compareGold(self, w, filename): w.input_filename = filename Testing.remove_file(filename) w.InputFileEditorPlugin.writeInputFile(filename) self.compareFiles(filename) def compareFiles(self, test_file): gold_file = "gold/%s" % test_file test_data = "" gold_data = "" with open(test_file, "r") as f: test_data = f.read() with open(gold_file, "r") as f: gold_data = f.read() self.assertEqual(test_data, gold_data) class Tests(BaseTests): def testBasic(self): main_win, w = self.newWidget() self.assertEqual(w.vtkwin.isVisible(), False) w.setInputFile(self.input_file) self.assertEqual(w.vtkwin.isVisible(), True) Testing.set_window_size(w.vtkwin) w.vtkwin.onWrite(self.basic_mesh) self.assertFalse(Testing.gold_diff(self.basic_mesh)) def testHighlight(self): main_win, w = self.newWidget() w.setInputFile(self.input_file) self.assertEqual(w.vtkwin.isVisible(), True) tree = w.InputFileEditorPlugin.tree b = tree.getBlockInfo("/BCs/left") self.assertNotEqual(b, None) self.assertEqual(b.getParamInfo("boundary").value, '3') w.blockChanged(b) Testing.set_window_size(w.vtkwin) w.vtkwin.onWrite(self.highlight_left) self.assertFalse(Testing.gold_diff(self.highlight_left)) b = tree.getBlockInfo("/BCs/right") self.assertNotEqual(b, None) self.assertEqual(b.getParamInfo("boundary").value, '1') w.blockChanged(b) Testing.set_window_size(w.vtkwin) w.vtkwin.onWrite(self.highlight_right) self.assertFalse(Testing.gold_diff(self.highlight_right)) b = tree.getBlockInfo("/BCs/all") self.assertNotEqual(b, None) self.assertEqual(b.getParamInfo("boundary").value, '0 1 2 3') w.blockChanged(b) Testing.set_window_size(w.vtkwin) w.vtkwin.onWrite(self.highlight_all) self.assertFalse(Testing.gold_diff(self.highlight_all)) b= tree.getBlockInfo("/Executioner") self.assertNotEqual(b, None) w.highlightChanged(b) Testing.set_window_size(w.vtkwin) w.vtkwin.onWrite(self.basic_mesh) self.assertFalse(Testing.gold_diff(self.basic_mesh)) def testHighlightDiffusion(self): self.input_file = "../../common/simple_diffusion.i" main_win, w = self.newWidget() w.setInputFile(self.input_file) 
self.assertEqual(w.vtkwin.isVisible(), True) tree = w.InputFileEditorPlugin.tree b = tree.getBlockInfo("/BCs/left") self.assertNotEqual(b, None) self.assertEqual(b.getParamInfo("boundary").value, 'left') w.highlightChanged(b) Testing.set_window_size(w.vtkwin) w.vtkwin.onWrite(self.highlight_left) self.assertFalse(Testing.gold_diff(self.highlight_left)) b = tree.getBlockInfo("/BCs/right") self.assertNotEqual(b, None) self.assertEqual(b.getParamInfo("boundary").value, 'right') w.highlightChanged(b) Testing.set_window_size(w.vtkwin) w.vtkwin.onWrite(self.highlight_right) self.assertFalse(Testing.gold_diff(self.highlight_right)) def testFromOptions(self): parser = argparse.ArgumentParser() parser.add_argument('arguments', type=str, metavar="N", nargs="*", ) InputFileEditorWithMesh.commandLineArgs(parser) main_win, w = self.newWidget() opts = {"cmd_line_options": parser.parse_args([])} self.assertEqual(w.vtkwin.isVisible(), False) abs_input_file = os.path.abspath(self.input_file) opts = {"cmd_line_options": parser.parse_args(['-i', self.input_file])} w.initialize(**opts) self.assertEqual(w.vtkwin.isVisible(), True) self.assertEqual(w.InputFileEditorPlugin.tree.input_filename, abs_input_file) w.InputFileEditorPlugin._clearInputFile() self.assertEqual(w.vtkwin.isVisible(), False) self.assertEqual(w.InputFileEditorPlugin.tree.input_filename, None) opts = {"cmd_line_options": parser.parse_args([self.input_file])} w.initialize(**opts) self.assertEqual(w.vtkwin.isVisible(), True) self.assertEqual(w.InputFileEditorPlugin.tree.input_filename, abs_input_file) def testBlockChanged(self): main_win, w = self.newWidget() w.setInputFile(self.input_file) tree = w.InputFileEditorPlugin.tree b = tree.getBlockInfo("/Mesh") self.assertNotEqual(b, None) w.blockChanged(b) b = tree.getBlockInfo("/Executioner") self.assertNotEqual(b, None) self.assertEqual(self.time_step_changed_count, 1) self.assertEqual(self.num_time_steps, 8) w.blockChanged(b) self.assertEqual(self.time_step_changed_count, 2) self.assertEqual(self.num_time_steps, 8) num_steps_param = b.getParamInfo("num_steps") self.assertNotEqual(num_steps_param, None) # was 5, add 5 more num_steps_param.value = 10 w.blockChanged(b) self.assertEqual(self.time_step_changed_count, 3) self.assertEqual(self.num_time_steps, 13) b.included = False w.blockChanged(b) self.assertEqual(self.time_step_changed_count, 4) self.assertEqual(self.num_time_steps, 0) @patch.object(QMessageBox, "question") def testCanClose(self, mock_q): mock_q.return_value = QMessageBox.No main_win, w = self.newWidget() self.assertEqual(w.canClose(), True) tree = w.InputFileEditorPlugin.tree w.setInputFile(self.input_file) self.assertEqual(w.canClose(), True) b = tree.getBlockInfo("/Mesh") self.assertNotEqual(b, None) w.InputFileEditorPlugin.blockChanged.emit(b, w.InputFileEditorPlugin.tree) self.assertEqual(w.InputFileEditorPlugin.has_changed, True) self.assertEqual(w.canClose(), False) w.setInputFile(self.input_file) self.assertEqual(w.canClose(), True) @patch.object(QMessageBox, "question") def testAddBlock(self, mock_q): """ Tests to make sure adding a block to the InputFileEditor actually updates the input file. 
""" mock_q.return_value = QMessageBox.No # The user doesn't want to ignore changes main_win, w = self.newWidget() tree = w.InputFileEditorPlugin.tree self.assertEqual(w.canClose(), True) b = tree.getBlockInfo("/AuxVariables") self.assertNotEqual(b, None) w.InputFileEditorPlugin.block_tree.copyBlock(b) self.assertEqual(w.InputFileEditorPlugin.has_changed, True) self.assertEqual(w.canClose(), False) mock_q.return_value = QMessageBox.Yes # The user wants to ignore changes self.assertEqual(w.canClose(), True) s = tree.getInputFileString() self.assertEqual("", s) # AuxVariables isn't included b.included = True s = tree.getInputFileString() self.assertEqual("[AuxVariables]\n [./New_0]\n [../]\n[]\n\n", s) if __name__ == '__main__': Testing.run_tests()
lgpl-2.1
petewarden/tensorflow
tensorflow/python/debug/wrappers/disk_usage_test.py
11
4453
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the disk-usage limit on the tfdbg dumping session wrappers and hooks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os
import tempfile

from tensorflow.python.client import session
from tensorflow.python.debug.wrappers import dumping_wrapper
from tensorflow.python.debug.wrappers import hooks
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
from tensorflow.python.training import monitored_session


@test_util.run_v1_only("Sessions are not available in TF 2.x")
class DumpingDebugWrapperDiskUsageLimitTest(test_util.TensorFlowTestCase):

  @classmethod
  def setUpClass(cls):
    # For efficient testing, set the disk usage bytes limit to a small
    # number (10).
    os.environ["TFDBG_DISK_BYTES_LIMIT"] = "10"

  def setUp(self):
    self.session_root = tempfile.mkdtemp()

    self.v = variables.Variable(10.0, dtype=dtypes.float32, name="v")
    self.delta = constant_op.constant(1.0, dtype=dtypes.float32, name="delta")
    self.eta = constant_op.constant(-1.4, dtype=dtypes.float32, name="eta")
    self.inc_v = state_ops.assign_add(self.v, self.delta, name="inc_v")
    self.dec_v = state_ops.assign_add(self.v, self.eta, name="dec_v")

    self.sess = session.Session()
    self.sess.run(self.v.initializer)

  def testWrapperSessionNotExceedingLimit(self):
    def _watch_fn(fetches, feeds):
      del fetches, feeds
      return "DebugIdentity", r"(.*delta.*|.*inc_v.*)", r".*"
    sess = dumping_wrapper.DumpingDebugWrapperSession(
        self.sess, session_root=self.session_root,
        watch_fn=_watch_fn, log_usage=False)
    sess.run(self.inc_v)

  def testWrapperSessionExceedingLimit(self):
    def _watch_fn(fetches, feeds):
      del fetches, feeds
      return "DebugIdentity", r".*delta.*", r".*"
    sess = dumping_wrapper.DumpingDebugWrapperSession(
        self.sess, session_root=self.session_root,
        watch_fn=_watch_fn, log_usage=False)
    # Due to the watch function, each run should dump only 1 tensor,
    # which has a size of 4 bytes, which corresponds to the dumped 'delta:0'
    # tensor of scalar shape and float32 dtype.
    # 1st run should pass, after which the disk usage is at 4 bytes.
    sess.run(self.inc_v)
    # 2nd run should also pass, after which 8 bytes are used.
sess.run(self.inc_v) # 3rd run should fail, because the total byte count (12) exceeds the # limit (10) with self.assertRaises(ValueError): sess.run(self.inc_v) def testHookNotExceedingLimit(self): def _watch_fn(fetches, feeds): del fetches, feeds return "DebugIdentity", r".*delta.*", r".*" dumping_hook = hooks.DumpingDebugHook( self.session_root, watch_fn=_watch_fn, log_usage=False) mon_sess = monitored_session._HookedSession(self.sess, [dumping_hook]) mon_sess.run(self.inc_v) def testHookExceedingLimit(self): def _watch_fn(fetches, feeds): del fetches, feeds return "DebugIdentity", r".*delta.*", r".*" dumping_hook = hooks.DumpingDebugHook( self.session_root, watch_fn=_watch_fn, log_usage=False) mon_sess = monitored_session._HookedSession(self.sess, [dumping_hook]) # Like in `testWrapperSessionExceedingLimit`, the first two calls # should be within the byte limit, but the third one should error # out due to exceeding the limit. mon_sess.run(self.inc_v) mon_sess.run(self.inc_v) with self.assertRaises(ValueError): mon_sess.run(self.inc_v) if __name__ == "__main__": googletest.main()
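
# Hedged note (an assumption, not part of the test): in a user program the
# same cap would be set through the environment variable exercised by
# setUpClass above, before the first debug-wrapped run.  The 1 GB figure
# is purely illustrative.
#
# import os
# os.environ["TFDBG_DISK_BYTES_LIMIT"] = str(1024 ** 3)  # 1 GB cap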
apache-2.0
danlrobertson/servo
tests/wpt/web-platform-tests/tools/third_party/py/py/_code/_assertionnew.py
60
11450
""" Find intermediate evalutation results in assert statements through builtin AST. This should replace _assertionold.py eventually. """ import sys import ast import py from py._code.assertion import _format_explanation, BuiltinAssertionError def _is_ast_expr(node): return isinstance(node, ast.expr) def _is_ast_stmt(node): return isinstance(node, ast.stmt) class Failure(Exception): """Error found while interpreting AST.""" def __init__(self, explanation=""): self.cause = sys.exc_info() self.explanation = explanation def interpret(source, frame, should_fail=False): mod = ast.parse(source) visitor = DebugInterpreter(frame) try: visitor.visit(mod) except Failure: failure = sys.exc_info()[1] return getfailure(failure) if should_fail: return ("(assertion failed, but when it was re-run for " "printing intermediate values, it did not fail. Suggestions: " "compute assert expression before the assert or use --no-assert)") def run(offending_line, frame=None): if frame is None: frame = py.code.Frame(sys._getframe(1)) return interpret(offending_line, frame) def getfailure(failure): explanation = _format_explanation(failure.explanation) value = failure.cause[1] if str(value): lines = explanation.splitlines() if not lines: lines.append("") lines[0] += " << %s" % (value,) explanation = "\n".join(lines) text = "%s: %s" % (failure.cause[0].__name__, explanation) if text.startswith("AssertionError: assert "): text = text[16:] return text operator_map = { ast.BitOr : "|", ast.BitXor : "^", ast.BitAnd : "&", ast.LShift : "<<", ast.RShift : ">>", ast.Add : "+", ast.Sub : "-", ast.Mult : "*", ast.Div : "/", ast.FloorDiv : "//", ast.Mod : "%", ast.Eq : "==", ast.NotEq : "!=", ast.Lt : "<", ast.LtE : "<=", ast.Gt : ">", ast.GtE : ">=", ast.Pow : "**", ast.Is : "is", ast.IsNot : "is not", ast.In : "in", ast.NotIn : "not in" } unary_map = { ast.Not : "not %s", ast.Invert : "~%s", ast.USub : "-%s", ast.UAdd : "+%s" } class DebugInterpreter(ast.NodeVisitor): """Interpret AST nodes to gleam useful debugging information. """ def __init__(self, frame): self.frame = frame def generic_visit(self, node): # Fallback when we don't have a special implementation. if _is_ast_expr(node): mod = ast.Expression(node) co = self._compile(mod) try: result = self.frame.eval(co) except Exception: raise Failure() explanation = self.frame.repr(result) return explanation, result elif _is_ast_stmt(node): mod = ast.Module([node]) co = self._compile(mod, "exec") try: self.frame.exec_(co) except Exception: raise Failure() return None, None else: raise AssertionError("can't handle %s" %(node,)) def _compile(self, source, mode="eval"): return compile(source, "<assertion interpretation>", mode) def visit_Expr(self, expr): return self.visit(expr.value) def visit_Module(self, mod): for stmt in mod.body: self.visit(stmt) def visit_Name(self, name): explanation, result = self.generic_visit(name) # See if the name is local. 
source = "%r in locals() is not globals()" % (name.id,) co = self._compile(source) try: local = self.frame.eval(co) except Exception: # have to assume it isn't local = False if not local: return name.id, result return explanation, result def visit_Compare(self, comp): left = comp.left left_explanation, left_result = self.visit(left) for op, next_op in zip(comp.ops, comp.comparators): next_explanation, next_result = self.visit(next_op) op_symbol = operator_map[op.__class__] explanation = "%s %s %s" % (left_explanation, op_symbol, next_explanation) source = "__exprinfo_left %s __exprinfo_right" % (op_symbol,) co = self._compile(source) try: result = self.frame.eval(co, __exprinfo_left=left_result, __exprinfo_right=next_result) except Exception: raise Failure(explanation) try: if not result: break except KeyboardInterrupt: raise except: break left_explanation, left_result = next_explanation, next_result rcomp = py.code._reprcompare if rcomp: res = rcomp(op_symbol, left_result, next_result) if res: explanation = res return explanation, result def visit_BoolOp(self, boolop): is_or = isinstance(boolop.op, ast.Or) explanations = [] for operand in boolop.values: explanation, result = self.visit(operand) explanations.append(explanation) if result == is_or: break name = is_or and " or " or " and " explanation = "(" + name.join(explanations) + ")" return explanation, result def visit_UnaryOp(self, unary): pattern = unary_map[unary.op.__class__] operand_explanation, operand_result = self.visit(unary.operand) explanation = pattern % (operand_explanation,) co = self._compile(pattern % ("__exprinfo_expr",)) try: result = self.frame.eval(co, __exprinfo_expr=operand_result) except Exception: raise Failure(explanation) return explanation, result def visit_BinOp(self, binop): left_explanation, left_result = self.visit(binop.left) right_explanation, right_result = self.visit(binop.right) symbol = operator_map[binop.op.__class__] explanation = "(%s %s %s)" % (left_explanation, symbol, right_explanation) source = "__exprinfo_left %s __exprinfo_right" % (symbol,) co = self._compile(source) try: result = self.frame.eval(co, __exprinfo_left=left_result, __exprinfo_right=right_result) except Exception: raise Failure(explanation) return explanation, result def visit_Call(self, call): func_explanation, func = self.visit(call.func) arg_explanations = [] ns = {"__exprinfo_func" : func} arguments = [] for arg in call.args: arg_explanation, arg_result = self.visit(arg) arg_name = "__exprinfo_%s" % (len(ns),) ns[arg_name] = arg_result arguments.append(arg_name) arg_explanations.append(arg_explanation) for keyword in call.keywords: arg_explanation, arg_result = self.visit(keyword.value) arg_name = "__exprinfo_%s" % (len(ns),) ns[arg_name] = arg_result keyword_source = "%s=%%s" % (keyword.arg) arguments.append(keyword_source % (arg_name,)) arg_explanations.append(keyword_source % (arg_explanation,)) if call.starargs: arg_explanation, arg_result = self.visit(call.starargs) arg_name = "__exprinfo_star" ns[arg_name] = arg_result arguments.append("*%s" % (arg_name,)) arg_explanations.append("*%s" % (arg_explanation,)) if call.kwargs: arg_explanation, arg_result = self.visit(call.kwargs) arg_name = "__exprinfo_kwds" ns[arg_name] = arg_result arguments.append("**%s" % (arg_name,)) arg_explanations.append("**%s" % (arg_explanation,)) args_explained = ", ".join(arg_explanations) explanation = "%s(%s)" % (func_explanation, args_explained) args = ", ".join(arguments) source = "__exprinfo_func(%s)" % (args,) co = self._compile(source) 
try: result = self.frame.eval(co, **ns) except Exception: raise Failure(explanation) pattern = "%s\n{%s = %s\n}" rep = self.frame.repr(result) explanation = pattern % (rep, rep, explanation) return explanation, result def _is_builtin_name(self, name): pattern = "%r not in globals() and %r not in locals()" source = pattern % (name.id, name.id) co = self._compile(source) try: return self.frame.eval(co) except Exception: return False def visit_Attribute(self, attr): if not isinstance(attr.ctx, ast.Load): return self.generic_visit(attr) source_explanation, source_result = self.visit(attr.value) explanation = "%s.%s" % (source_explanation, attr.attr) source = "__exprinfo_expr.%s" % (attr.attr,) co = self._compile(source) try: result = self.frame.eval(co, __exprinfo_expr=source_result) except Exception: raise Failure(explanation) explanation = "%s\n{%s = %s.%s\n}" % (self.frame.repr(result), self.frame.repr(result), source_explanation, attr.attr) # Check if the attr is from an instance. source = "%r in getattr(__exprinfo_expr, '__dict__', {})" source = source % (attr.attr,) co = self._compile(source) try: from_instance = self.frame.eval(co, __exprinfo_expr=source_result) except Exception: from_instance = True if from_instance: rep = self.frame.repr(result) pattern = "%s\n{%s = %s\n}" explanation = pattern % (rep, rep, explanation) return explanation, result def visit_Assert(self, assrt): test_explanation, test_result = self.visit(assrt.test) if test_explanation.startswith("False\n{False =") and \ test_explanation.endswith("\n"): test_explanation = test_explanation[15:-2] explanation = "assert %s" % (test_explanation,) if not test_result: try: raise BuiltinAssertionError except Exception: raise Failure(explanation) return explanation, test_result def visit_Assign(self, assign): value_explanation, value_result = self.visit(assign.value) explanation = "... = %s" % (value_explanation,) name = ast.Name("__exprinfo_expr", ast.Load(), lineno=assign.value.lineno, col_offset=assign.value.col_offset) new_assign = ast.Assign(assign.targets, name, lineno=assign.lineno, col_offset=assign.col_offset) mod = ast.Module([new_assign]) co = self._compile(mod, "exec") try: self.frame.exec_(co, __exprinfo_expr=value_result) except Exception: raise Failure(explanation) return explanation, value_result
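
# --------------------------------------------------------------------
# Hedged usage sketch, not part of the module proper: driving run()
# above by hand to re-evaluate a failing assert line.  The names x and
# y are made up for illustration; run() picks up the caller's frame.
def _demo_run():
    x, y = 1, 2
    # Re-interprets the statement in this frame and returns a textual
    # explanation along the lines of "assert 1 == 2".
    return run("assert x == y")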
mpl-2.0
marc-sensenich/ansible
test/units/modules/network/netvisor/test_pn_user.py
9
3099
# Copyright: (c) 2018, Pluribus Networks # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import (absolute_import, division, print_function) __metaclass__ = type import json from units.compat.mock import patch from ansible.modules.network.netvisor import pn_user from units.modules.utils import set_module_args from .nvos_module import TestNvosModule, load_fixture class TestUserModule(TestNvosModule): module = pn_user def setUp(self): self.mock_run_nvos_commands = patch('ansible.modules.network.netvisor.pn_user.run_cli') self.run_nvos_commands = self.mock_run_nvos_commands.start() self.mock_run_check_cli = patch('ansible.modules.network.netvisor.pn_user.check_cli') self.run_check_cli = self.mock_run_check_cli.start() def tearDown(self): self.mock_run_nvos_commands.stop() self.run_check_cli.stop() def run_cli_patch(self, module, cli, state_map): if state_map['present'] == 'user-create': results = dict( changed=True, cli_cmd=cli ) elif state_map['absent'] == 'user-delete': results = dict( changed=True, cli_cmd=cli ) elif state_map['update'] == 'user-modify': results = dict( changed=True, cli_cmd=cli ) module.exit_json(**results) def load_fixtures(self, commands=None, state=None, transport='cli'): self.run_nvos_commands.side_effect = self.run_cli_patch if state == 'present': self.run_check_cli.return_value = False if state == 'absent': self.run_check_cli.return_value = True if state == 'update': self.run_check_cli.return_value = True def test_user_create(self): set_module_args({'pn_cliswitch': 'sw01', 'pn_name': 'foo', 'pn_scope': 'local', 'pn_password': 'test123', 'state': 'present'}) result = self.execute_module(changed=True, state='present') expected_cmd = '/usr/bin/cli --quiet -e --no-login-prompt switch sw01 user-create name foo scope local password test123' self.assertEqual(result['cli_cmd'], expected_cmd) def test_user_delete(self): set_module_args({'pn_cliswitch': 'sw01', 'pn_name': 'foo', 'state': 'absent'}) result = self.execute_module(changed=True, state='absent') expected_cmd = '/usr/bin/cli --quiet -e --no-login-prompt switch sw01 user-delete name foo ' self.assertEqual(result['cli_cmd'], expected_cmd) def test_user_modify(self): set_module_args({'pn_cliswitch': 'sw01', 'pn_name': 'foo', 'pn_password': 'test1234', 'state': 'update'}) result = self.execute_module(changed=True, state='update') expected_cmd = '/usr/bin/cli --quiet -e --no-login-prompt switch sw01 user-modify name foo password test1234' self.assertEqual(result['cli_cmd'], expected_cmd)
gpl-3.0
nwjs/chromium.src
third_party/blink/web_tests/external/wpt/tools/third_party/pytest/testing/test_parseopt.py
30
13227
from __future__ import absolute_import, division, print_function import argparse import sys import os import py import pytest from _pytest.config import argparsing as parseopt @pytest.fixture def parser(): return parseopt.Parser() class TestParser(object): def test_no_help_by_default(self, capsys): parser = parseopt.Parser(usage="xyz") pytest.raises(SystemExit, lambda: parser.parse(["-h"])) out, err = capsys.readouterr() assert err.find("error: unrecognized arguments") != -1 def test_argument(self): with pytest.raises(parseopt.ArgumentError): # need a short or long option argument = parseopt.Argument() argument = parseopt.Argument("-t") assert argument._short_opts == ["-t"] assert argument._long_opts == [] assert argument.dest == "t" argument = parseopt.Argument("-t", "--test") assert argument._short_opts == ["-t"] assert argument._long_opts == ["--test"] assert argument.dest == "test" argument = parseopt.Argument("-t", "--test", dest="abc") assert argument.dest == "abc" assert ( str(argument) == ("Argument(_short_opts: ['-t'], _long_opts: ['--test'], dest: 'abc')") ) def test_argument_type(self): argument = parseopt.Argument("-t", dest="abc", type=int) assert argument.type is int argument = parseopt.Argument("-t", dest="abc", type=str) assert argument.type is str argument = parseopt.Argument("-t", dest="abc", type=float) assert argument.type is float with pytest.warns(DeprecationWarning): with pytest.raises(KeyError): argument = parseopt.Argument("-t", dest="abc", type="choice") argument = parseopt.Argument( "-t", dest="abc", type=str, choices=["red", "blue"] ) assert argument.type is str def test_argument_processopt(self): argument = parseopt.Argument("-t", type=int) argument.default = 42 argument.dest = "abc" res = argument.attrs() assert res["default"] == 42 assert res["dest"] == "abc" def test_group_add_and_get(self, parser): group = parser.getgroup("hello", description="desc") assert group.name == "hello" assert group.description == "desc" def test_getgroup_simple(self, parser): group = parser.getgroup("hello", description="desc") assert group.name == "hello" assert group.description == "desc" group2 = parser.getgroup("hello") assert group2 is group def test_group_ordering(self, parser): parser.getgroup("1") parser.getgroup("2") parser.getgroup("3", after="1") groups = parser._groups groups_names = [x.name for x in groups] assert groups_names == list("132") def test_group_addoption(self): group = parseopt.OptionGroup("hello") group.addoption("--option1", action="store_true") assert len(group.options) == 1 assert isinstance(group.options[0], parseopt.Argument) def test_group_addoption_conflict(self): group = parseopt.OptionGroup("hello again") group.addoption("--option1", "--option-1", action="store_true") with pytest.raises(ValueError) as err: group.addoption("--option1", "--option-one", action="store_true") assert str({"--option1"}) in str(err.value) def test_group_shortopt_lowercase(self, parser): group = parser.getgroup("hello") pytest.raises( ValueError, """ group.addoption("-x", action="store_true") """, ) assert len(group.options) == 0 group._addoption("-x", action="store_true") assert len(group.options) == 1 def test_parser_addoption(self, parser): group = parser.getgroup("custom options") assert len(group.options) == 0 group.addoption("--option1", action="store_true") assert len(group.options) == 1 def test_parse(self, parser): parser.addoption("--hello", dest="hello", action="store") args = parser.parse(["--hello", "world"]) assert args.hello == "world" assert not 
getattr(args, parseopt.FILE_OR_DIR) def test_parse2(self, parser): args = parser.parse([py.path.local()]) assert getattr(args, parseopt.FILE_OR_DIR)[0] == py.path.local() def test_parse_known_args(self, parser): parser.parse_known_args([py.path.local()]) parser.addoption("--hello", action="store_true") ns = parser.parse_known_args(["x", "--y", "--hello", "this"]) assert ns.hello assert ns.file_or_dir == ["x"] def test_parse_known_and_unknown_args(self, parser): parser.addoption("--hello", action="store_true") ns, unknown = parser.parse_known_and_unknown_args( ["x", "--y", "--hello", "this"] ) assert ns.hello assert ns.file_or_dir == ["x"] assert unknown == ["--y", "this"] def test_parse_will_set_default(self, parser): parser.addoption("--hello", dest="hello", default="x", action="store") option = parser.parse([]) assert option.hello == "x" del option.hello parser.parse_setoption([], option) assert option.hello == "x" def test_parse_setoption(self, parser): parser.addoption("--hello", dest="hello", action="store") parser.addoption("--world", dest="world", default=42) class A(object): pass option = A() args = parser.parse_setoption(["--hello", "world"], option) assert option.hello == "world" assert option.world == 42 assert not args def test_parse_special_destination(self, parser): parser.addoption("--ultimate-answer", type=int) args = parser.parse(["--ultimate-answer", "42"]) assert args.ultimate_answer == 42 def test_parse_split_positional_arguments(self, parser): parser.addoption("-R", action="store_true") parser.addoption("-S", action="store_false") args = parser.parse(["-R", "4", "2", "-S"]) assert getattr(args, parseopt.FILE_OR_DIR) == ["4", "2"] args = parser.parse(["-R", "-S", "4", "2", "-R"]) assert getattr(args, parseopt.FILE_OR_DIR) == ["4", "2"] assert args.R is True assert args.S is False args = parser.parse(["-R", "4", "-S", "2"]) assert getattr(args, parseopt.FILE_OR_DIR) == ["4", "2"] assert args.R is True assert args.S is False def test_parse_defaultgetter(self): def defaultget(option): if not hasattr(option, "type"): return if option.type is int: option.default = 42 elif option.type is str: option.default = "world" parser = parseopt.Parser(processopt=defaultget) parser.addoption("--this", dest="this", type=int, action="store") parser.addoption("--hello", dest="hello", type=str, action="store") parser.addoption("--no", dest="no", action="store_true") option = parser.parse([]) assert option.hello == "world" assert option.this == 42 assert option.no is False def test_drop_short_helper(self): parser = argparse.ArgumentParser( formatter_class=parseopt.DropShorterLongHelpFormatter ) parser.add_argument( "-t", "--twoword", "--duo", "--two-word", "--two", help="foo" ).map_long_option = { "two": "two-word" } # throws error on --deux only! 
parser.add_argument( "-d", "--deuxmots", "--deux-mots", action="store_true", help="foo" ).map_long_option = { "deux": "deux-mots" } parser.add_argument("-s", action="store_true", help="single short") parser.add_argument("--abc", "-a", action="store_true", help="bar") parser.add_argument("--klm", "-k", "--kl-m", action="store_true", help="bar") parser.add_argument( "-P", "--pq-r", "-p", "--pqr", action="store_true", help="bar" ) parser.add_argument( "--zwei-wort", "--zweiwort", "--zweiwort", action="store_true", help="bar" ) parser.add_argument( "-x", "--exit-on-first", "--exitfirst", action="store_true", help="spam" ).map_long_option = { "exitfirst": "exit-on-first" } parser.add_argument("files_and_dirs", nargs="*") args = parser.parse_args(["-k", "--duo", "hallo", "--exitfirst"]) assert args.twoword == "hallo" assert args.klm is True assert args.zwei_wort is False assert args.exit_on_first is True assert args.s is False args = parser.parse_args(["--deux-mots"]) with pytest.raises(AttributeError): assert args.deux_mots is True assert args.deuxmots is True args = parser.parse_args(["file", "dir"]) assert "|".join(args.files_and_dirs) == "file|dir" def test_drop_short_0(self, parser): parser.addoption("--funcarg", "--func-arg", action="store_true") parser.addoption("--abc-def", "--abc-def", action="store_true") parser.addoption("--klm-hij", action="store_true") args = parser.parse(["--funcarg", "--k"]) assert args.funcarg is True assert args.abc_def is False assert args.klm_hij is True def test_drop_short_2(self, parser): parser.addoption("--func-arg", "--doit", action="store_true") args = parser.parse(["--doit"]) assert args.func_arg is True def test_drop_short_3(self, parser): parser.addoption("--func-arg", "--funcarg", "--doit", action="store_true") args = parser.parse(["abcd"]) assert args.func_arg is False assert args.file_or_dir == ["abcd"] def test_drop_short_help0(self, parser, capsys): parser.addoption("--func-args", "--doit", help="foo", action="store_true") parser.parse([]) help = parser.optparser.format_help() assert "--func-args, --doit foo" in help # testing would be more helpful with all help generated def test_drop_short_help1(self, parser, capsys): group = parser.getgroup("general") group.addoption("--doit", "--func-args", action="store_true", help="foo") group._addoption( "-h", "--help", action="store_true", dest="help", help="show help message and configuration info", ) parser.parse(["-h"]) help = parser.optparser.format_help() assert "-doit, --func-args foo" in help def test_multiple_metavar_help(self, parser): """ Help text for options with a metavar tuple should display help in the form "--preferences=value1 value2 value3" (#2004). 
""" group = parser.getgroup("general") group.addoption( "--preferences", metavar=("value1", "value2", "value3"), nargs=3 ) group._addoption("-h", "--help", action="store_true", dest="help") parser.parse(["-h"]) help = parser.optparser.format_help() assert "--preferences=value1 value2 value3" in help def test_argcomplete(testdir, monkeypatch): if not py.path.local.sysfind("bash"): pytest.skip("bash not available") script = str(testdir.tmpdir.join("test_argcomplete")) pytest_bin = sys.argv[0] if "pytest" not in os.path.basename(pytest_bin): pytest.skip("need to be run with pytest executable, not %s" % (pytest_bin,)) with open(str(script), "w") as fp: # redirect output from argcomplete to stdin and stderr is not trivial # http://stackoverflow.com/q/12589419/1307905 # so we use bash fp.write('COMP_WORDBREAKS="$COMP_WORDBREAKS" %s 8>&1 9>&2' % pytest_bin) # alternative would be exteneded Testdir.{run(),_run(),popen()} to be able # to handle a keyword argument env that replaces os.environ in popen or # extends the copy, advantage: could not forget to restore monkeypatch.setenv("_ARGCOMPLETE", "1") monkeypatch.setenv("_ARGCOMPLETE_IFS", "\x0b") monkeypatch.setenv("COMP_WORDBREAKS", " \\t\\n\"\\'><=;|&(:") arg = "--fu" monkeypatch.setenv("COMP_LINE", "pytest " + arg) monkeypatch.setenv("COMP_POINT", str(len("pytest " + arg))) result = testdir.run("bash", str(script), arg) if result.ret == 255: # argcomplete not found pytest.skip("argcomplete not available") elif not result.stdout.str(): pytest.skip("bash provided no output, argcomplete not available?") else: result.stdout.fnmatch_lines(["--funcargs", "--fulltrace"]) os.mkdir("test_argcomplete.d") arg = "test_argc" monkeypatch.setenv("COMP_LINE", "pytest " + arg) monkeypatch.setenv("COMP_POINT", str(len("pytest " + arg))) result = testdir.run("bash", str(script), arg) result.stdout.fnmatch_lines(["test_argcomplete", "test_argcomplete.d/"])
bsd-3-clause