from functools import wraps

def started(generator_function):
    @wraps(generator_function)
    def wrapper(*args, **kwargs):
        g = generator_function(*args, **kwargs)
        next(g)  # advance to the first yield so the generator is ready to receive
        return g
    return wrapper
Decorator that starts (primes) a generator as soon as it is created
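A minimal usage sketch (the accumulator generator below is hypothetical); because the decorator already advances the generator to its first yield, it is immediately ready to receive values via send():

@started
def accumulator():
    total = 0
    while True:
        total += yield total  # receives values sent by the caller

acc = accumulator()   # no explicit next(acc) priming call needed
acc.send(10)          # -> 10
acc.send(5)           # -> 15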
def add_log_error(self, x, flag_also_show=False, E=None):
    if len(x) == 0:
        x = "(empty error)"
        tb.print_stack()
    x_ = x
    if E is not None:
        a99.get_python_logger().exception(x_)
    else:
        a99.get_python_logger().info("ERROR: {}".format(x_))
    x = '<span style="color: {0!s}">{1!s}</span>'.format(a99.COLOR_ERROR, x)
    self._add_log_no_logger(x, False)
    if flag_also_show:
        a99.show_error(x_)
Sets text of labelError.
def add_log(self, x, flag_also_show=False):
    self._add_log_no_logger(x, flag_also_show)
    a99.get_python_logger().info(x)
Logs to up to 4 different outputs: conditionally to 3 of them, and always to get_python_logger()
def config(name='CACHE_URL', default='locmem://'):
    config = {}
    s = env(name, default)
    if s:
        config = parse_cache_url(s)
    return config
Returns configured CACHES dictionary from CACHE_URL
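A usage sketch, assuming the env() helper reads environment variables and parse_cache_url() follows the django-cache-url convention, as in the function above:

import os

os.environ['CACHE_URL'] = 'redis://127.0.0.1:6379/1'
CACHES = {'default': config()}  # parsed from CACHE_URL; falls back to 'locmem://'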
def create_country(cls, country, **kwargs):
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async'):
        return cls._create_country_with_http_info(country, **kwargs)
    else:
        data = cls._create_country_with_http_info(country, **kwargs)
        return data
Create Country

Create a new Country. This method makes a synchronous HTTP request by
default. To make an asynchronous HTTP request, please pass async=True

>>> thread = api.create_country(country, async=True)
>>> result = thread.get()

:param async bool
:param Country country: Attributes of country to create (required)
:return: Country
         If the method is called asynchronously, returns the request thread.
def delete_country_by_id(cls, country_id, **kwargs):
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async'):
        return cls._delete_country_by_id_with_http_info(country_id, **kwargs)
    else:
        data = cls._delete_country_by_id_with_http_info(country_id, **kwargs)
        return data
Delete Country

Delete an instance of Country by its ID. This method makes a synchronous
HTTP request by default. To make an asynchronous HTTP request, please
pass async=True

>>> thread = api.delete_country_by_id(country_id, async=True)
>>> result = thread.get()

:param async bool
:param str country_id: ID of country to delete. (required)
:return: None
         If the method is called asynchronously, returns the request thread.
def get_country_by_id(cls, country_id, **kwargs):
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async'):
        return cls._get_country_by_id_with_http_info(country_id, **kwargs)
    else:
        data = cls._get_country_by_id_with_http_info(country_id, **kwargs)
        return data
Find Country

Return a single instance of Country by its ID. This method makes a
synchronous HTTP request by default. To make an asynchronous HTTP
request, please pass async=True

>>> thread = api.get_country_by_id(country_id, async=True)
>>> result = thread.get()

:param async bool
:param str country_id: ID of country to return (required)
:return: Country
         If the method is called asynchronously, returns the request thread.
def list_all_countries(cls, **kwargs):
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async'):
        return cls._list_all_countries_with_http_info(**kwargs)
    else:
        data = cls._list_all_countries_with_http_info(**kwargs)
        return data
List Countries

Return a list of Countries. This method makes a synchronous HTTP request
by default. To make an asynchronous HTTP request, please pass async=True

>>> thread = api.list_all_countries(async=True)
>>> result = thread.get()

:param async bool
:param int page: page number
:param int size: page size
:param str sort: page order
:return: page[Country]
         If the method is called asynchronously, returns the request thread.
def replace_country_by_id(cls, country_id, country, **kwargs):
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async'):
        return cls._replace_country_by_id_with_http_info(country_id, country, **kwargs)
    else:
        data = cls._replace_country_by_id_with_http_info(country_id, country, **kwargs)
        return data
Replace Country

Replace all attributes of Country. This method makes a synchronous HTTP
request by default. To make an asynchronous HTTP request, please pass
async=True

>>> thread = api.replace_country_by_id(country_id, country, async=True)
>>> result = thread.get()

:param async bool
:param str country_id: ID of country to replace (required)
:param Country country: Attributes of country to replace (required)
:return: Country
         If the method is called asynchronously, returns the request thread.
def update_country_by_id(cls, country_id, country, **kwargs):
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async'):
        return cls._update_country_by_id_with_http_info(country_id, country, **kwargs)
    else:
        data = cls._update_country_by_id_with_http_info(country_id, country, **kwargs)
        return data
Update Country

Update attributes of Country. This method makes a synchronous HTTP
request by default. To make an asynchronous HTTP request, please pass
async=True

>>> thread = api.update_country_by_id(country_id, country, async=True)
>>> result = thread.get()

:param async bool
:param str country_id: ID of country to update. (required)
:param Country country: Attributes of country to update. (required)
:return: Country
         If the method is called asynchronously, returns the request thread.
def check_user(user, password):
    return ((user == attowiki.user or attowiki.user is None) and
            (password == attowiki.password or attowiki.password is None))
check the auth for user and password.
def view_meta_index():
    rst_files = [filename[2:-4] for filename in sorted(glob.glob("./*.rst"))]
    rst_files.reverse()
    return template('index',
                    type="view",
                    filelist=rst_files,
                    name="__index__",
                    extended_name=None,
                    history=[],
                    gitref=None,
                    is_repo=check_repo())
List all the available .rst files in the directory.

view_meta_index is called by the 'meta' url: /__index__
def view_cancel_edit(name=None):
    if name is None:
        return redirect('/')
    else:
        files = glob.glob("{0}.rst".format(name))
        if len(files) > 0:
            reset_to_last_commit()
            return redirect('/' + name)
        else:
            return abort(404)
Cancel the editing of an existing page, then render the last modification status.

.. note:: this is a bottle view

If no page name is given, do nothing (it may leave some .tmp. files in
the directory).

Keyword Arguments:
    :name: (str) -- name of the page (OPTIONAL)

Returns:
    bottle response object
def view_edit(name=None):
    response.set_header('Cache-control', 'no-cache')
    response.set_header('Pragma', 'no-cache')
    if name is None:
        # new page
        return template('edit',
                        type="edit",
                        name=name,
                        extended_name=None,
                        is_repo=check_repo(),
                        history=[],
                        gitref=None,
                        today=datetime.datetime.now().strftime("%Y%m%d"),
                        content="")
    else:
        files = glob.glob("{0}.rst".format(name))
        if len(files) > 0:
            file_handle = open(files[0], 'r')
            return template('edit',
                            type="edit",
                            name=name,
                            extended_name=None,
                            is_repo=check_repo(),
                            history=[],
                            gitref=None,
                            today=datetime.datetime.now().strftime("%Y%m%d"),
                            content=file_handle.read())
        else:
            return abort(404)
Edit or create a new page.

.. note:: this is a bottle view

If no page name is given, creates a new page.

Keyword Arguments:
    :name: (str) -- name of the page (OPTIONAL)

Returns:
    bottle response object
def view_pdf(name=None):
    if name is None:
        return view_meta_index()
    files = glob.glob("{0}.rst".format(name))
    if len(files) > 0:
        file_handle = open(files[0], 'r')
        dest_filename = name + '.pdf'
        doctree = publish_doctree(file_handle.read())
        try:
            produce_pdf(doctree_content=doctree, filename=dest_filename)
        except:
            raise
        else:
            return static_file(dest_filename, root='', download=True)
    else:
        return abort(404)
Render a pdf file based on the given page.

.. note:: this is a bottle view

Keyword Arguments:
    :name: (str) -- name of the rest file (without the .rst extension) MANDATORY
def view_history(name, gitref):
    response.set_header('Cache-control', 'no-cache')
    response.set_header('Pragma', 'no-cache')
    content = read_committed_file(gitref, name + '.rst')
    if content:
        html_body = publish_parts(content,
                                  writer=AttowikiWriter(),
                                  settings=None,
                                  settings_overrides=None)['html_body']
        history = commit_history(name + '.rst')
        return template('page',
                        type="history",
                        name=name,
                        extended_name=None,
                        is_repo=check_repo(),
                        history=history,
                        gitref=gitref,
                        content=html_body)
    else:
        return abort(404)
Serve a page name from the git repo (an old version of a page).

.. note:: this is a bottle view

* this is a GET only method: you can not change a committed page

Keyword Arguments:
    :name: (str) -- name of the rest file (without the .rst extension)
    :gitref: (str) -- hexsha of the git commit to look into

Returns:
    bottle response object or 404 error page
def view_history_source(name, gitref=None):
    response.set_header('Cache-control', 'no-cache')
    response.set_header('Pragma', 'no-cache')
    response.set_header('Content-Type', 'text/html; charset=utf-8')
    if gitref is None:
        files = glob.glob("{0}.rst".format(name))
        if len(files) > 0:
            file_handle = open(files[0], 'r')
            content = file_handle.read()
        else:
            return abort(404)
    else:
        content = read_committed_file(gitref, name + '.rst')
    if content:
        return template('source_view',
                        type="history",
                        name=name,
                        extended_name='__source__',
                        is_repo=check_repo(),
                        history=commit_history("{0}.rst".format(name)),
                        gitref=gitref,
                        content=content.decode('utf-8'))
    else:
        return abort(404)
Serve a page name from the git repo (an old version of a page), returning
the reST source code.

This function does not use any template; it returns only plain text.

.. note:: this is a bottle view

* this is a GET only method: you can not change a committed page

Keyword Arguments:
    :name: (str) -- name of the rest file (without the .rst extension)
    :gitref: (str) -- hexsha of the git commit to look into

Returns:
    bottle response object or 404 error page
def view_history_diff(name, gitref):
    response.set_header('Cache-control', 'no-cache')
    response.set_header('Pragma', 'no-cache')
    response.set_header('Content-Type', 'text/html; charset=utf-8')
    old_content = read_committed_file(gitref, name + '.rst')
    if old_content:
        old_content = old_content.decode('utf-8')
        files = glob.glob("{0}.rst".format(name))
        if len(files) > 0:
            file_handle = open(files[0], 'r')
            current_content = file_handle.read().decode('utf-8')
            differ = difflib.Differ()
            result = list(differ.compare(old_content.splitlines(),
                                         current_content.splitlines()))
            return template('diff_view',
                            type="history",
                            name=name,
                            extended_name='__diff__',
                            is_repo=check_repo(),
                            history=commit_history("{0}.rst".format(name)),
                            gitref=gitref,
                            content=result)
        else:
            return abort(404)
    else:
        return abort(404)
Serve a page name from the git repo (an old version of a page), returning
the diff between the current source and the old committed source.

This function does not use any template; it returns only plain text.

.. note:: this is a bottle view

* this is a GET only method: you can not change a committed page

Keyword Arguments:
    :name: (str) -- name of the rest file (without the .rst extension)
    :gitref: (str) -- hexsha of the git commit to look into

Returns:
    bottle response object or 404 error page
def view_quick_save_page(name=None):
    response.set_header('Cache-control', 'no-cache')
    response.set_header('Pragma', 'no-cache')
    if request.method == 'PUT':
        if name is None:
            # new file
            if len(request.forms.filename) > 0:
                name = request.forms.filename
        if name is not None:
            filename = "{0}.rst".format(name)
            file_handle = open(filename, 'w')
            content = request.body.read()
            content = content.decode('utf-8')
            file_handle.write(content.encode('utf-8'))
            file_handle.close()
            return "OK"
    else:
        return abort(404)
Quick save a page.

.. note:: this is a bottle view

* this view must be called with the PUT method

Writes the new page content to the file; it does not commit or redirect.

Keyword Arguments:
    :name: (str) -- name of the rest file (without the .rst extension)

Returns:
    bottle response object (200 OK)
def describeTable(TableName):
    print('-----------------------------------------')
    print(TableName + ' summary:')
    try:
        print('-----------------------------------------')
        print('Comment: \n' + LOCAL_TABLE_CACHE[TableName]['header']['comment'])
    except:
        pass
    print('Number of rows: ' + str(LOCAL_TABLE_CACHE[TableName]['header']['number_of_rows']))
    print('Table type: ' + str(LOCAL_TABLE_CACHE[TableName]['header']['table_type']))
    print('-----------------------------------------')
    print('            PAR_NAME           PAR_FORMAT')
    print('')
    for par_name in LOCAL_TABLE_CACHE[TableName]['header']['order']:
        par_format = LOCAL_TABLE_CACHE[TableName]['header']['format'][par_name]
        print('%20s %20s' % (par_name, par_format))
    print('-----------------------------------------')
INPUT PARAMETERS:
    TableName: name of the table to describe
OUTPUT PARAMETERS:
    none
---
DESCRIPTION:
    Print information about table, including parameter names,
    formats and wavenumber range.
---
EXAMPLE OF USAGE:
    describeTable('sampletab')
---
def getColumns(TableName, ParameterNames):
    Columns = []
    for par_name in ParameterNames:
        Columns.append(LOCAL_TABLE_CACHE[TableName]['data'][par_name])
    return Columns
INPUT PARAMETERS:
    TableName: source table name (required)
    ParameterNames: list of column names to get (required)
OUTPUT PARAMETERS:
    ListColumnData: tuple of lists of values from the specified columns
---
DESCRIPTION:
    Returns columns with names given in ParameterNames from table
    TableName. Columns are returned as a tuple of lists.
---
EXAMPLE OF USAGE:
    p1,p2,p3 = getColumns('sampletab',('p1','p2','p3'))
---
def select(TableName, DestinationTableName=QUERY_BUFFER, ParameterNames=None,
           Conditions=None, Output=True, File=None):
    # TODO: Variables defined in ParameterNames ('LET') MUST BE VISIBLE IN Conditions !!
    # check if table exists
    if TableName not in LOCAL_TABLE_CACHE.keys():
        raise Exception('%s: no such table. Check tableList() for more info.' % TableName)
    if not ParameterNames:
        ParameterNames = LOCAL_TABLE_CACHE[TableName]['header']['order']
    LOCAL_TABLE_CACHE[DestinationTableName] = {}  # clear QUERY_BUFFER for the new result
    RowObjectDefault = getDefaultRowObject(TableName)
    VarDictionary = getVarDictionary(RowObjectDefault)
    ContextFormat = getContextFormat(RowObjectDefault)
    RowObjectDefaultNew = newRowObject(ParameterNames, RowObjectDefault,
                                       VarDictionary, ContextFormat)
    dropTable(DestinationTableName)  # redundant
    createTable(DestinationTableName, RowObjectDefaultNew)
    selectInto(DestinationTableName, TableName, ParameterNames, Conditions)
    if DestinationTableName != QUERY_BUFFER:
        if File:
            outputTable(DestinationTableName, File=File)
    elif Output:
        outputTable(DestinationTableName, File=File)
INPUT PARAMETERS:
    TableName: name of source table (required)
    DestinationTableName: name of resulting table (optional)
    ParameterNames: list of parameters or expressions (optional)
    Conditions: list of logical expressions (optional)
    Output: enable (True) or suppress (False) text output (optional)
    File: enable (True) or suppress (False) file output (optional)
OUTPUT PARAMETERS:
    none
---
DESCRIPTION:
    Select or filter the data in some table either to standard
    output or to file (if specified).
---
EXAMPLE OF USAGE:
    select('sampletab',DestinationTableName='outtab',ParameterNames=(p1,p2),
           Conditions=(('and',('>=','p1',1),('<',('*','p1','p2'),20))))
    Conditions means (p1>=1 and p1*p2<20)
---
def sort(TableName, DestinationTableName=None, ParameterNames=None,
         Accending=True, Output=False, File=None):
    number_of_rows = LOCAL_TABLE_CACHE[TableName]['header']['number_of_rows']
    index = range(0, number_of_rows)
    if not DestinationTableName:
        DestinationTableName = TableName
    # if names are not provided use all parameters in sorting
    if not ParameterNames:
        ParameterNames = LOCAL_TABLE_CACHE[TableName]['header']['order']
    elif type(ParameterNames) not in set([list, tuple]):
        ParameterNames = [ParameterNames]  # fix of stupid bug where ('p1',) != ('p1')
    index_sorted = quickSort(index, TableName, ParameterNames, Accending)
    arrangeTable(TableName, DestinationTableName, index_sorted)
    if Output:
        outputTable(DestinationTableName, File=File)
INPUT PARAMETERS:
    TableName: name of source table (required)
    DestinationTableName: name of resulting table (optional)
    ParameterNames: list of parameters or expressions to sort by (optional)
    Accending: sort in ascending (True) or descending (False) order (optional)
    Output: enable (True) or suppress (False) text output (optional)
    File: enable (True) or suppress (False) file output (optional)
OUTPUT PARAMETERS:
    none
---
DESCRIPTION:
    Sort a table by a list of its parameters or expressions.
    The sorted table is saved in DestinationTableName (if specified).
---
EXAMPLE OF USAGE:
    sort('sampletab',ParameterNames=(p1,('+',p1,p2)))
---
def fetch_by_ids(TableName, iso_id_list, numin, numax, ParameterGroups=[], Parameters=[]):
    if type(iso_id_list) not in set([list, tuple]):
        iso_id_list = [iso_id_list]
    queryHITRAN(TableName, iso_id_list, numin, numax,
                pargroups=ParameterGroups, params=Parameters)
    iso_names = [ISO_ID[i][ISO_ID_INDEX['iso_name']] for i in iso_id_list]
    Comment = 'Contains lines for ' + ','.join(iso_names)
    Comment += ('\n in %.3f-%.3f wavenumber range' % (numin, numax))
    comment(TableName, Comment)
INPUT PARAMETERS:
    TableName: local table name to fetch in (required)
    iso_id_list: list of isotopologue id's (required)
    numin: lower wavenumber bound (required)
    numax: upper wavenumber bound (required)
OUTPUT PARAMETERS:
    none
---
DESCRIPTION:
    Download line-by-line data from the HITRANonline server and save it to
    a local table. The input parameter iso_id_list contains a list of
    "global" isotopologue Ids (see help on ISO_ID).
    Note: this function is required if the user wants to download multiple
    species into a single table.
---
EXAMPLE OF USAGE:
    fetch_by_ids('water',[1,2,3,4],4000,4100)
---
def fetch(TableName, M, I, numin, numax, ParameterGroups=[], Parameters=[]):
    queryHITRAN(TableName, [ISO[(M, I)][ISO_INDEX['id']]], numin, numax,
                pargroups=ParameterGroups, params=Parameters)
    iso_name = ISO[(M, I)][ISO_INDEX['iso_name']]
    Comment = 'Contains lines for ' + iso_name
    Comment += ('\n in %.3f-%.3f wavenumber range' % (numin, numax))
    comment(TableName, Comment)
INPUT PARAMETERS:
    TableName: local table name to fetch in (required)
    M: HITRAN molecule number (required)
    I: HITRAN isotopologue number (required)
    numin: lower wavenumber bound (required)
    numax: upper wavenumber bound (required)
OUTPUT PARAMETERS:
    none
---
DESCRIPTION:
    Download line-by-line data from the HITRANonline server and save it to
    a local table. The input parameters M and I are the HITRAN molecule
    and isotopologue numbers. This function results in a table containing
    a single isotopologue species. To have multiple species in a single
    table use fetch_by_ids instead.
---
EXAMPLE OF USAGE:
    fetch('HOH',1,1,4000,4100)
---
def partitionSum(M, I, T, step=None):
    # partitionSum
    if not step:
        if type(T) not in set([list, tuple]):
            return BD_TIPS_2011_PYTHON(M, I, T)[1]
        else:
            return [BD_TIPS_2011_PYTHON(M, I, temp)[1] for temp in T]
    else:
        #n = (T[1]-T[0])/step
        #TT = linspace(T[0],T[1],n)
        TT = arange(T[0], T[1], step)
        return TT, array([BD_TIPS_2011_PYTHON(M, I, temp)[1] for temp in TT])
INPUT PARAMETERS:
    M: HITRAN molecule number (required)
    I: HITRAN isotopologue number (required)
    T: temperature conditions (required)
    step: step to calculate temperatures (optional)
OUTPUT PARAMETERS:
    TT: list of temperatures (present only if T is a list)
    PartSum: partition sums calculated on a list of temperatures
---
DESCRIPTION:
    Calculate range of partition sums at different temperatures.
    This function uses a python implementation of TIPS-2011 code:

    Reference:
        A. L. Laraia, R. R. Gamache, J. Lamouroux, I. E. Gordon, L. S. Rothman.
        Total internal partition sums to support planetary remote sensing.
        Icarus, Volume 215, Issue 1, September 2011, Pages 391-400
        http://dx.doi.org/10.1016/j.icarus.2011.06.004

    Output depends on the structure of the input parameter T so that:
        1) If T is a scalar/list and step IS NOT provided, then calculate
           partition sums over each value of T.
        2) If T is a list and step parameter IS provided, then calculate
           partition sums between T[0] and T[1] with the given step.
---
EXAMPLE OF USAGE:
    PartSum = partitionSum(1,1,[296,1000])
    TT,PartSum = partitionSum(1,1,[296,1000],step=0.1)
---
def hum1_wei(x, y, n=24):
    t = y - 1.0j*x
    cerf = 1/sqrt(pi)*t/(0.5 + t**2)
    mask = abs(x) + y < 15.0
    if any(mask):
        w24 = weideman(x[mask], y[mask], n)
        place(cerf, mask, w24)
    return cerf.real, cerf.imag
z = x + 1j*y
cerf = 1j*z/sqrt(pi)/(z**2 - 0.5)
def PROFILE_SDRAUTIAN(sg0, GamD, Gam0, Gam2, Shift0, Shift2, anuVC, sg):
    return pcqsdhc(sg0, GamD, Gam0, Gam2, Shift0, Shift2, anuVC, cZero, sg)
# Speed dependent Rautian profile based on HTP.
# Input parameters:
#    sg0    : Unperturbed line position in cm-1 (Input).
#    GamD   : Doppler HWHM in cm-1 (Input)
#    Gam0   : Speed-averaged line-width in cm-1 (Input).
#    Gam2   : Speed dependence of the line-width in cm-1 (Input).
#    anuVC  : Velocity-changing frequency in cm-1 (Input).
#    Shift0 : Speed-averaged line-shift in cm-1 (Input).
#    Shift2 : Speed dependence of the line-shift in cm-1 (Input)
#    sg     : Current WaveNumber of the Computation in cm-1 (Input).
def PROFILE_RAUTIAN(sg0, GamD, Gam0, Shift0, anuVC, eta, sg):
    return pcqsdhc(sg0, GamD, Gam0, cZero, Shift0, cZero, anuVC, cZero, sg)
# Rautian profile based on HTP.
# Input parameters:
#    sg0    : Unperturbed line position in cm-1 (Input).
#    GamD   : Doppler HWHM in cm-1 (Input)
#    Gam0   : Speed-averaged line-width in cm-1 (Input).
#    anuVC  : Velocity-changing frequency in cm-1 (Input).
#    Shift0 : Speed-averaged line-shift in cm-1 (Input).
#    sg     : Current WaveNumber of the Computation in cm-1 (Input).
def PROFILE_SDVOIGT(sg0, GamD, Gam0, Gam2, Shift0, Shift2, sg):
    return pcqsdhc(sg0, GamD, Gam0, Gam2, Shift0, Shift2, cZero, cZero, sg)
# Speed dependent Voigt profile based on HTP.
# Input parameters:
#    sg0    : Unperturbed line position in cm-1 (Input).
#    GamD   : Doppler HWHM in cm-1 (Input)
#    Gam0   : Speed-averaged line-width in cm-1 (Input).
#    Gam2   : Speed dependence of the line-width in cm-1 (Input).
#    Shift0 : Speed-averaged line-shift in cm-1 (Input).
#    Shift2 : Speed dependence of the line-shift in cm-1 (Input)
#    sg     : Current WaveNumber of the Computation in cm-1 (Input).
def PROFILE_VOIGT(sg0, GamD, Gam0, sg):
    return PROFILE_HTP(sg0, GamD, Gam0, cZero, cZero, cZero, cZero, cZero, sg)
# Voigt profile based on HTP.
# Input parameters:
#    sg0  : Unperturbed line position in cm-1 (Input).
#    GamD : Doppler HWHM in cm-1 (Input)
#    Gam0 : Speed-averaged line-width in cm-1 (Input).
#    sg   : Current WaveNumber of the Computation in cm-1 (Input).
def PROFILE_DOPPLER(sg0, GamD, sg):
    return cSqrtLn2divSqrtPi*exp(-cLn2*((sg-sg0)/GamD)**2)/GamD
# Doppler profile.
# Input parameters:
#    sg0  : Unperturbed line position in cm-1 (Input).
#    GamD : Doppler HWHM in cm-1 (Input)
#    sg   : Current WaveNumber of the Computation in cm-1 (Input).
def transmittanceSpectrum(Omegas, AbsorptionCoefficient, Environment={'l': 100.},
                          File=None, Format='%e %e', Wavenumber=None):
    # compatibility with older versions
    if Wavenumber:
        Omegas = Wavenumber
    l = Environment['l']
    Xsect = exp(-AbsorptionCoefficient*l)
    if File:
        save_to_file(File, Format, Omegas, Xsect)
    return Omegas, Xsect
INPUT PARAMETERS:
    Wavenumber/Omegas: wavenumber grid (required)
    AbsorptionCoefficient: absorption coefficient on grid (required)
    Environment: dictionary containing path length in cm.
                 Default={'l':100.}
    File: name of the output file (optional)
    Format: c format used in file output, default '%e %e' (optional)
OUTPUT PARAMETERS:
    Wavenum: wavenumber grid
    Xsect: transmittance spectrum calculated on the grid
---
DESCRIPTION:
    Calculate a transmittance spectrum (dimensionless) based on a
    previously calculated absorption coefficient. The transmittance
    spectrum is calculated at an arbitrary optical path length 'l'
    (1 m by default).
---
EXAMPLE OF USAGE:
    nu,trans = transmittanceSpectrum(nu,coef)
---
def radianceSpectrum(Omegas, AbsorptionCoefficient, Environment={'l': 100., 'T': 296.},
                     File=None, Format='%e %e', Wavenumber=None):
    # compatibility with older versions
    if Wavenumber:
        Omegas = Wavenumber
    l = Environment['l']
    T = Environment['T']
    Alw = 1 - exp(-AbsorptionCoefficient*l)
    LBBTw = 2*hh*cc**2*Omegas**3 / (exp(hh*cc*Omegas/(cBolts*T)) - 1) * 1.0E-7
    Xsect = Alw*LBBTw  # W/sr/cm**2/cm**-1
    if File:
        save_to_file(File, Format, Omegas, Xsect)
    return Omegas, Xsect
INPUT PARAMETERS:
    Wavenumber/Omegas: wavenumber grid (required)
    AbsorptionCoefficient: absorption coefficient on grid (required)
    Environment: dictionary containing path length in cm and
                 temperature in Kelvin.
                 Default={'l':100.,'T':296.}
    File: name of the output file (optional)
    Format: c format used in file output, default '%e %e' (optional)
OUTPUT PARAMETERS:
    Wavenum: wavenumber grid
    Xsect: radiance spectrum calculated on the grid
---
DESCRIPTION:
    Calculate a radiance spectrum (in W/sr/cm^2/cm-1) based on a
    previously calculated absorption coefficient. The radiance spectrum
    is calculated at an arbitrary optical path length 'l' (1 m by default)
    and temperature 'T' (296 K by default). For a physically meaningful
    result, 'T' must be the same temperature as was used for the
    absorption coefficient.
---
EXAMPLE OF USAGE:
    nu,radi = radianceSpectrum(nu,coef)
---
def getStickXY(TableName):
    cent, intens = getColumns(TableName, ('nu', 'sw'))
    n = len(cent)
    cent_ = zeros(n*3)
    intens_ = zeros(n*3)
    for i in range(n):
        intens_[3*i] = 0
        intens_[3*i+1] = intens[i]
        intens_[3*i+2] = 0
        cent_[(3*i):(3*i+3)] = cent[i]
    return cent_, intens_
Get X and Y for fine plotting of a stick spectrum. Usage: X,Y = getStickXY(TableName).
def read_hotw(filename):
    import sys
    f = open(filename, 'r')
    nu = []
    coef = []
    for line in f:
        pars = line.split()
        try:
            nu.append(float(pars[0]))
            coef.append(float(pars[1]))
        except:
            if False:
                print(sys.exc_info())
            else:
                pass
    return array(nu), array(coef)
Read cross-section file fetched from HITRAN-on-the-Web. The format of the file line must be as follows: nu, coef Other lines are omitted.
def SLIT_RECTANGULAR(x, g):
    index_inner = abs(x) <= g/2
    index_outer = ~index_inner
    y = zeros(len(x))
    y[index_inner] = 1/g
    y[index_outer] = 0
    return y
Instrumental (slit) function.

B(x) = 1/γ, if |x| ≤ γ/2
B(x) = 0,   if |x| > γ/2

where γ is the slit width or the instrumental resolution.
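An illustrative call with hypothetical values (assumes a numpy-style arange is in scope, as in the rest of this module):

x = arange(-1.0, 1.0, 0.01)    # wavenumber offsets, cm-1
y = SLIT_RECTANGULAR(x, 0.5)   # 1/0.5 = 2.0 where |x| <= 0.25, else 0.0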
def SLIT_GAUSSIAN(x, g):
    g /= 2
    return sqrt(log(2))/(sqrt(pi)*g)*exp(-log(2)*(x/g)**2)
Instrumental (slit) function.

B(x) = sqrt(ln(2)/pi)/γ*exp(-ln(2)*(x/γ)**2)

where γ/2 is the gaussian half-width at half-maximum.
def SLIT_DIFFRACTION(x, g):
    y = zeros(len(x))
    index_zero = x == 0
    index_nonzero = ~index_zero
    dk_ = pi/g
    x_ = dk_*x[index_nonzero]
    w_ = sin(x_)
    r_ = w_**2/x_**2
    y[index_zero] = 1
    y[index_nonzero] = r_/g
    return y
Instrumental (slit) function.
def SLIT_MICHELSON(x, g):
    y = zeros(len(x))
    index_zero = x == 0
    index_nonzero = ~index_zero
    dk_ = 2*pi/g
    x_ = dk_*x[index_nonzero]
    y[index_zero] = 1
    y[index_nonzero] = 2/g*sin(x_)/x_
    return y
Instrumental (slit) function.

B(x) = 2/γ*sin(2π*x/γ)/(2π*x/γ) if x != 0 else 1

where 1/γ is the maximum optical path difference.
def convolveSpectrumSame(Omega, CrossSection, Resolution=0.1, AF_wing=10.,
                         SlitFunction=SLIT_RECTANGULAR):
    step = Omega[1] - Omega[0]
    x = arange(-AF_wing, AF_wing+step, step)
    slit = SlitFunction(x, Resolution)
    print('step=')
    print(step)
    print('x=')
    print(x)
    print('slitfunc=')
    print(SlitFunction)
    CrossSectionLowRes = convolve(CrossSection, slit, mode='same')*step
    return Omega, CrossSectionLowRes, None, None, slit
Convolves cross section with a slit function with given parameters.
def setup_db(self, couch, dbname):
    # Avoid race condition of two creating db
    my_db = None
    self.log.debug('Setting up DB: %s' % dbname)
    if dbname not in couch:
        self.log.info("DB doesn't exist so creating DB: %s", dbname)
        try:
            my_db = couch.create(dbname)
        except:
            self.log.critical("Race condition caught")
            raise RuntimeError("Race condition caught when creating DB")
        try:
            auth_doc = {}
            auth_doc['_id'] = '_design/auth'
            auth_doc['language'] = 'javascript'
            auth_doc['validate_doc_update'] = """
            function(newDoc, oldDoc, userCtx) {
                if (userCtx.roles.indexOf('_admin') !== -1) {
                    return;
                } else {
                    throw({forbidden: 'Only admins may edit the database'});
                }
            }
            """
            my_db.save(auth_doc)
        except:
            self.log.error('Could not set permissions of %s' % dbname)
    else:
        my_db = couch[dbname]
    return my_db
Setup and configure DB
def commit(self, force=False):
    self.log.debug('Bulk commit requested')
    size = sys.getsizeof(self.docs)
    self.log.debug('Size of docs in KB: %d', size)
    if size > self.commit_threshold or force:
        self.log.info('Committing %d KB to CouchDB' % size)
        self.my_db.update(self.docs)
        self.docs = []
Commit data to CouchDB. The cached size is compared to the threshold (unless forced), and the data is then sent to couch
def save(self, doc):
    self.log.debug('save()')
    self.docs.append(doc)
    self.commit()
Save a doc to cache
def apply_to_last(stream, fn):
    ''' applies a given function to the last item in a generator/stream '''
    assert iterable(stream), 'apply_to_last needs stream to be iterable'
    assert callable(fn), 'apply_to_last needs fn to be callable'
    stream = iter(stream)
    previous = next(stream)
    for current in stream:
        yield previous
        previous = current
    yield fn(previous)
applies a given function to the last item in a generator/stream
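A usage sketch; it assumes the library's iterable() predicate accepts plain lists:

marked = apply_to_last([1, 2, 3, 4], lambda x: x * 10)
list(marked)  # -> [1, 2, 3, 40]; only the final item passes through fn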
def start(address, channel, key, loop=None):
    if loop is None:
        loop = asyncio.get_event_loop()
    socket = yield from websockets.connect(address + "/robot", loop=loop)
    conn = Connection(socket, loop)
    yield from conn.send(_create_handshake(channel, key))
    return conn
Starts a new Interactive client. Takes the remote address of the Tetris robot, as well as the channel number and auth key to use. Additionally, it takes handlers: a dict mapping protobuf wire IDs to handler functions (from the .proto package).
def _create_handshake(channel, key):
    hsk = Handshake()
    hsk.channel = channel
    hsk.streamKey = key
    return hsk
Creates and returns a Handshake packet that authenticates on the channel with the given stream key.
def import_module(filename):
    module_name = "xyz"
    module_spec = importlib.util.spec_from_file_location(module_name, filename)
    if module_spec is None:
        raise RuntimeError("Python cannot import file '{}'".format(filename))
    module = importlib.util.module_from_spec(module_spec)
    module_spec.loader.exec_module(module)
    # print(dir(module))
    #
    # msg = 'The {module_name} module has the following methods: {methods}'
    # print(msg.format(module_name=module_name, methods=dir(module)))
    return module
Returns module object

Source: https://www.blog.pythonlibrary.org/2016/05/27/python-201-an-intro-to-importlib/
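A usage sketch with a hypothetical script path:

module = import_module("/tmp/my_script.py")  # hypothetical file
print(module.__doc__)  # attributes of the loaded module are now accessible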
def get_exe_info(dir_, flag_protected=False):
    ret = []
    # gets all scripts in script directory
    ff = glob.glob(os.path.join(dir_, "*.py"))
    # discards scripts whose file name starts with a "_"
    ff = [f for f in ff if flag_protected or not os.path.basename(f).startswith("_")]
    ff.sort()
    for f in ff:
        _, filename = os.path.split(f)
        flag_error = False
        flag_gui = None
        descr = "(no doc)"
        try:
            # Checks if it is a graphical application
            with open(f, "r") as h:
                flag_gui = "QApplication" in h.read()
            try:
                script_ = None
                script_ = import_module(f)  # module object
            except SystemExit:
                descr = "? (called sys.exit())"
            else:
                if script_.__doc__ is not None:
                    descr = script_.__doc__.strip().split("\n")[0]  # first line of docstring
        except Exception as e:
            flag_error = True
            descr = "*{0!s}*: {1!s}".format(e.__class__.__name__, str(e))
        if len(descr) == 0:
            descr = "(no doc)"
        ret.append(ExeInfo(filename, descr, flag_error, flag_gui))
    # Sorts command-line and graphical applications by name separately
    sisi_gra = [si for si in ret if si.flag_gui]
    sisi_cmd = [si for si in ret if not si.flag_gui]
    sisi_gra = sorted(sisi_gra, key=lambda x: x.filename)
    sisi_cmd = sorted(sisi_cmd, key=lambda x: x.filename)
    ret = sisi_cmd + sisi_gra
    return ret
Returns a list of ExeInfo objects, which represent the ".py" scripts within dir_

Args:
    dir_: string, path to directory
    flag_protected: whether or not to include files starting with a '_'

Returns:
    list of ExeInfo objects
def collect_doc(module, base_class=None, prefix="", flag_exclude_prefix=False):
    ret = []
    for attrname in module.__all__:
        if prefix and not attrname.startswith(prefix):
            continue
        attr = module.__getattribute__(attrname)
        if base_class is not None and not issubclass(attr, base_class):
            continue
        spec = inspect.signature(attr)
        ret.append((attrname if not flag_exclude_prefix else attrname[len(prefix):],
                    spec, attr.__doc__))
    return ret
Collects class names and docstrings in module for classes starting with prefix

Arguments:
    module -- Python module
    prefix -- argument for str.startswith(); if not passed, does not filter
    base_class -- filters only descendants of this class
    flag_exclude_prefix -- whether or not to exclude prefix from class name in result

Returns: [(classname0, signature, docstring0), ...]
def get_classes_in_module(module, superclass=object):
    ret = []
    for classname in dir(module):
        attr = module.__getattribute__(classname)
        try:
            if issubclass(attr, superclass) and (attr != superclass):
                ret.append(attr)
        except TypeError:
            # "issubclass() arg 1 must be a class"
            pass
        except RuntimeError:
            # a99.get_python_logger().exception("Failed probing attribute '{}'".format(classname))
            # raise
            pass
    return ret
Returns a list with all classes in module that descend from superclass

Args:
    module: builtins.module
    superclass: a class

Returns: list
def get_obj_doc0(obj, alt="(no doc)"):
    ret = obj.__doc__.strip().split("\n")[0] if obj.__doc__ is not None else alt
    return ret
Returns first line of obj.__doc__, or alternative text
def get_subpackages_names(dir_):
    def is_package(d):
        d = os.path.join(dir_, d)
        return os.path.isdir(d) and glob.glob(os.path.join(d, '__init__.py*'))
    ret = list(filter(is_package, os.listdir(dir_)))
    ret.sort()
    return ret
Figures out the names of the subpackages of a package

Args:
    dir_: (str) path to package directory

Source: http://stackoverflow.com/questions/832004/python-finding-all-packages-inside-a-package
def load_params(filepath):
    # Read the file
    with open(filepath) as file:
        content = file.read()
    # Detect all environment variables referenced (using %EXAMPLE%; use windows
    # style since it is easier to match)
    q = [m.start() for m in re.finditer("%", content)]
    env_vars = []
    for i in range(0, len(q), 2):
        env_var = content[q[i]+1:q[i+1]]
        if env_var not in env_vars:
            if env_var in os.environ:
                env_vars.append(env_var)
            else:
                print("WARNING: Detected an environment variable which is not set.")
    # Fill in environment variables
    for env_var in env_vars:
        s = "%" + env_var + "%"
        # Use unix style path linebreaks, since windows style might break stuff
        # (and linux is more common anyways.)
        content = content.replace(s, os.environ[env_var].replace("\\", "/"))
    # Try to match linux path style with anything that matches
    for env_var in list(os.environ.keys()):
        s = "$" + env_var
        content = content.replace(s, os.environ[env_var].replace("\\", "/"))
    # Finally load hyperparams
    return HyperParams(json.loads(jsmin(content)))
Load your hyper parameters from a json file.

:param filepath: Path to the json file.
:return: A hyper parameters object.
def get(self, key, default=_sentinel):
    if default is _sentinel:
        default = HyperParams({})
    return self.__dict__[key] if key in self.__dict__ else default
Get the value specified in the dictionary or a default.

:param key: The key which should be retrieved.
:param default: The default that is returned if the key is not set.
:return: The value from the dict or the default.
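A usage sketch (the key name and default value are hypothetical):

params = HyperParams({})              # empty container
params.get("learning_rate", 0.001)    # -> 0.001, since the key is not set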
def shell_source(script):
    pipe = subprocess.Popen(". %s; env" % script,
                            stdout=subprocess.PIPE, shell=True)
    output = pipe.communicate()[0].decode()
    env = {}
    for line in output.splitlines():
        try:
            keyval = line.split("=", 1)
            env[keyval[0]] = keyval[1]
        except:
            pass
    os.environ.update(env)
Sometimes you want to emulate the action of "source" in bash, setting some environment variables. Here is a way to do it.
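A usage sketch with a hypothetical shell script:

shell_source("/etc/profile")    # run the script and capture its `env` output
print(os.environ.get("PATH"))   # now reflects any variables the script set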
def nll(data, model):
    try:
        log_lik_vals = model.logpmf(data)
    except:
        log_lik_vals = model.logpdf(data)
    return -np.sum(log_lik_vals)
Negative log likelihood given data and a model

Parameters
----------
{0}
{1}

Returns
-------
float
    Negative log likelihood

Examples
--------
>>> import macroeco.models as md
>>> import macroeco.compare as comp
>>> # Generate random data
>>> rand_samp = md.logser.rvs(p=0.9, size=100)
>>> # Get nll for p = 0.9
>>> comp.nll(rand_samp, md.logser(p=0.9))
237.6871819262054
>>> # Get the nll for the MLE for p
>>> mle_p = md.logser.fit_mle(rand_samp)
>>> comp.nll(rand_samp, md.logser(*mle_p))
235.2841347820297
def AIC_compare(aic_list):
    aic_values = np.array(aic_list)
    minimum = np.min(aic_values)
    delta = aic_values - minimum
    values = np.exp(-delta / 2)
    weights = values / np.sum(values)
    return delta, weights
Calculates delta AIC and AIC weights from a list of AIC values

Parameters
----------
aic_list : iterable
    AIC values from a set of candidate models

Returns
-------
tuple
    First element contains the delta AIC values, second element contains
    the relative AIC weights.

Notes
-----
AIC weights can be interpreted as the probability that a given model is
the best model in the set.

Examples
--------
>>> # Generate random data
>>> rand_samp = md.nbinom_ztrunc.rvs(20, 0.5, size=100)
>>> # Fit Zero-truncated NBD (Full model)
>>> mle_nbd = md.nbinom_ztrunc.fit_mle(rand_samp)
>>> # Fit a logseries (limiting case of Zero-truncated NBD, reduced model)
>>> mle_logser = md.logser.fit_mle(rand_samp)
>>> # Get AIC for ztrunc_nbinom
>>> nbd_aic = comp.AIC(rand_samp, md.nbinom_ztrunc(*mle_nbd))
>>> # Get AIC for logser
>>> logser_aic = comp.AIC(rand_samp, md.logser(*mle_logser))
>>> # Make AIC list and get weights
>>> aic_list = [nbd_aic, logser_aic]
>>> comp.AIC_compare(aic_list)
(array([  0.        ,  19.11806518]),
 array([  9.99929444e-01,   7.05560486e-05]))
>>> # Zero-truncated NBD is a far superior model based on AIC weights
def sum_of_squares(obs, pred):
    return np.sum((np.array(obs) - np.array(pred)) ** 2)
Sum of squares between observed and predicted data

Parameters
----------
obs : iterable
    Observed data
pred : iterable
    Predicted data

Returns
-------
float
    Sum of squares

Notes
-----
The length of observed and predicted data must match.
def preston_bin(data, max_num):
    log_ub = np.ceil(np.log2(max_num))
    # Make an exclusive lower bound in keeping with Preston
    if log_ub == 0:
        boundaries = np.array([0, 1])
    elif log_ub == 1:
        boundaries = np.arange(1, 4)
    else:
        boundaries = 2 ** np.arange(0, log_ub + 1)
        boundaries = np.insert(boundaries, 2, 3)
        boundaries[3:] = boundaries[3:] + 1
    hist_data = np.histogram(data, bins=boundaries)
    return hist_data
Bins data on base 2 using Preston's method

Parameters
----------
data : array-like
    Data to be binned
max_num : float
    The maximum upper value of the data

Returns
-------
tuple
    (binned_data, bin_edges)

Notes
-----
Uses Preston's method of binning, which has exclusive lower boundaries
and inclusive upper boundaries. Densities are not split between bins.

Examples
--------
>>> import macroeco.compare as comp
>>> import numpy as np
>>> # Load some data and get Preston bins
>>> data = np.array([1, 1, 1, 1, 4, 5, 6, 7, 12, 34, 56])
>>> comp.preston_bin(data, np.max(data))
(array([4, 0, 1, 3, 1, 0, 2]),
 array([  1.,   2.,   3.,   5.,   9.,  17.,  33.,  65.]))

References
----------
.. [#] Preston, F. (1962). The canonical distribution of commonness and
   rarity. Ecology, 43, 185-215
def pueyo_bins(data):
    log_ub = np.ceil(np.log2(np.max(data)))
    bins = 2**np.arange(log_ub + 1)
    binned_data = np.histogram(data, bins=bins)[0]
    epdf = (1 / bins[:-1]) * binned_data / len(data)
    return binned_data, epdf
Binning method based on Pueyo (2006)

Parameters
----------
data : array-like
    Data to be binned

Returns
-------
tuple of arrays
    binned data, empirical probability density

Notes
-----
Bins the data into bins of length 2**i, i=0, 1, 2 ...
The empirical probability densities will sum to 1 if multiplied by the
respective 2**i.
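An illustrative call (the data values are hypothetical):

import numpy as np

data = np.array([1, 1, 2, 3, 5, 8, 13])
counts, epdf = pueyo_bins(data)
# bins are [1, 2), [2, 4), [4, 8), [8, 16]; counts -> [2, 2, 1, 2]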
def get_base_url(url, include_path=False):
    if not url:
        return None
    parts = _urlsplit(url)
    base_url = _urlunsplit((
        parts.scheme,
        parts.netloc,
        (parts.path if include_path else ''),
        None, None
    ))
    return base_url if base_url.endswith('/') else base_url + '/'
:return: the url without the query or fragment segments
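For illustration:

get_base_url("https://example.com/api/v1?q=1#top")
# -> 'https://example.com/'
get_base_url("https://example.com/api/v1?q=1", include_path=True)
# -> 'https://example.com/api/v1/'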
def update_url_params(url, replace_all=False, **url_params):
    # Ensure 'replace_all' can be sent as a url param
    if not (replace_all is True or replace_all is False):
        url_params['replace_all'] = replace_all
    if not url or not url_params:
        return url or None
    scheme, netloc, url_path, url_query, fragment = _urlsplit(url)
    if replace_all is True:
        url_query = url_params
    else:
        url_query = _parse_qs(url_query)
        url_query.update(url_params)
    return _urlunsplit((scheme, netloc, url_path,
                        _unquote(_urlencode(url_query, doseq=True)), fragment))
:return: url with its query updated from url_params (non-matching params are retained)
def url_to_parts(url):
    if not url:
        return None
    scheme, netloc, path, query, fragment = _urlsplit(url)
    if not path or path == '/':
        path = []
    else:
        path = path.strip('/').split('/')
    if not query:
        query = {}
    else:
        query = _parse_qs(query)
    return _urllib_parse.SplitResult(scheme, netloc, path, query, fragment)
Split url urlsplit style, but return path as a list and query as a dict
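For illustration:

parts = url_to_parts("https://example.com/a/b?x=1&x=2")
parts.path   # -> ['a', 'b']
parts.query  # -> {'x': ['1', '2']}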
def parts_to_url(parts=None, scheme=None, netloc=None, path=None, query=None, fragment=None):
    if isinstance(parts, _urllib_parse.SplitResult):
        scheme, netloc, path, query, fragment = parts
    elif parts and isinstance(parts, dict):
        scheme = parts.get('scheme', 'http')
        netloc = parts.get('netloc', '')
        path = parts.get('path', [])
        query = parts.get('query', {})
        fragment = parts.get('fragment', '')
    if isinstance(path, (list, tuple)):
        path = '/' + '/'.join(path).strip('/')
    if isinstance(query, (dict, tuple)):
        query = _unquote(_urlencode(query, doseq=True))
    return _urlunsplit((scheme, netloc, path, query, fragment)) or None
Build url urlunsplit style, but optionally handle path as a list and/or query as a dict
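For illustration, the inverse of the url_to_parts example above:

parts_to_url(scheme='https', netloc='example.com', path=['a', 'b'], query={'x': 1})
# -> 'https://example.com/a/b?x=1'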
def search(self, path, values=None, unique=False, raise_absent=False, vfunc=lambda x: x):
    path_and_value_list = iterutils.search(self.data, path=path, required_values=values)
    # print 'search found ', [x[0] for x in path_and_value_list]
    return self.__return_value(path_and_value_list, unique, raise_absent, vfunc)
Return entries from data/config matching given criteria

:param path: tuple or dpath expression representing the hierarchy/chain of parent keys
:param values: single value or list of values to match. If exact is False then .contains method is used as filter
:param raise_absent: if True then raise exception if no match is found
:return: list of matching objects directly from data/config in the form of ((k1, k2, .., kn), value)
def obj(self, path=None, model=None, values=None, raise_absent=False):
    return self.search(path=path, unique=True, raise_absent=raise_absent, values=values,
                       vfunc=lambda x: self.path_index[x[0]].instance(model=model)
                       if x[0] in self.path_index else None)
Return single model object instance matching given criteria

:param path: tuple or dpath expression representing the hierarchy/chain of parent keys
:param values: single value or list of values to match. If exact is False then .contains method is used as filter
:param raise_absent: if True then raise exception if no match is found
:return: matching object from cache if already created or new if not
def objs(self, path=None, model=None, values=None, raise_absent=False):
    return self.search(path=path, unique=False, raise_absent=raise_absent, values=values,
                       vfunc=lambda x: self.path_index[x[0]].instance(model=model, reraise=False)
                       if x[0] in self.path_index else None)
Return list of model object instances matching given criteria

:param path: tuple or dpath expression representing the hierarchy/chain of parent keys
:param values: single value or list of values to match. If exact is False then .contains method is used as filter
:param raise_absent: if True then raise exception if no match is found
:return: list of matching objects
def fobj(self, path=None, values=None, unique=True, raise_absent=False):
    return self.path_index[self.search(path=path, unique=unique, values=values,
                                       raise_absent=raise_absent)[0]]
Return model instance/registration object matching given criteria

:param path: tuple or dpath expression representing the hierarchy/chain of parent keys
:param values: single value or list of values to match. If exact is False then .contains method is used as filter
:param raise_absent: if True then raise exception if no match is found
:return: single model instance/registration object
def __visit_index_model_instance(self, models, p, k, v):
    # print 'model visit {} on {}'.format(model, v)
    cp = p + (k,)
    for model in models:
        try:
            if model.validator(v):
                if cp in self.path_index:
                    # if self.path_index[cp].val != v:
                    #     raise ValueError('unexpected value change at path_index[{}]'.format(cp))
                    self.path_index[cp].add_model(model, v)
                else:
                    # The object should already be in the index but don't complain for now.
                    self.path_index[cp] = PathCacheObject(val=v, path=cp, regs=[model])
        except:
            pass
Called during model search on merged data
def compute_edge_reduction(self) -> float:
    nb_init_edge = self.init_edge_number()
    nb_poweredge = self.edge_number()
    return (nb_init_edge - nb_poweredge) / nb_init_edge
Compute the edge reduction. Costly computation
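For example, if compression replaces 6 initial edges with a single poweredge, the reduction is (6 - 1) / 6 ≈ 0.83.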
def init_edge_number(self) -> int:
    return len(frozenset(frozenset(edge) for edge in self.initial_edges()))
Return the number of edges present in the non-compressed graph
def initial_edges(self) -> iter:
    nodes_in = lambda n: ([n] if self.is_node(n) else self.nodes_in(n))
    for node, succs in self.edges.items():
        twos = tuple(two for succ in succs for two in nodes_in(succ))
        for one in nodes_in(node):
            for two in twos:
                yield one, two
Yield edges in the initial (uncompressed) graph. Duplicates are possible.
def connected_components(self) -> (dict, dict):
    inclusions = utils.completed_graph(self.inclusions)  # allow bottom-up movement
    edges = utils.completed_graph(self.edges) if self.oriented else self.edges
    cc = {}  # maps cc root with nodes in the cc
    subroots = defaultdict(set)  # maps cc root with other roots of the cc
    walked_roots = set()  # all roots that have been walked already
    for root in self.roots:
        if root in walked_roots:
            continue  # this cc have been done already
        # walk in the graph starting at root
        cc[root] = set([root])
        walked = cc[root]
        stack = list(edges.get(root, ())) + list(inclusions.get(root, ()))
        while len(stack) > 0:
            *stack, curr = stack
            walked.add(curr)
            if curr in self.roots:
                # don't do the walk for already found roots
                walked_roots.add(curr)
                subroots[root].add(curr)
            for succ in it.chain(edges.get(curr, ()), inclusions.get(curr, ())):
                if succ not in walked:
                    stack.append(succ)
    return cc, dict(subroots)
Return for one root of each connected component all the linked objects, and the mapping linking a connected component root with the roots that it contains.
def assert_powernode(self, name: str) -> None or ValueError:
    if name not in self.inclusions:
        raise ValueError("Powernode '{}' does not exist.".format(name))
    if self.is_node(name):
        raise ValueError("Given name '{}' is a node.".format(name))
Do nothing if given name refers to a powernode in given graph. Raise a ValueError in any other case.
def powernode_data(self, name: str) -> Powernode:
    self.assert_powernode(name)
    contained_nodes = frozenset(self.nodes_in(name))
    return Powernode(
        size=len(contained_nodes),
        contained=frozenset(self.all_in(name)),
        contained_pnodes=frozenset(self.powernodes_in(name)),
        contained_nodes=contained_nodes,
    )
Return a Powernode object describing the given powernode
def node_number(self, *, count_pnode=True) -> int:
    return (sum(1 for n in self.nodes())
            + (sum(1 for n in self.powernodes()) if count_pnode else 0))
Return the number of nodes
def edge_number(self) -> int:
    edges = set()
    for node, succs in self.edges.items():
        for succ in succs:
            edges.add(frozenset((node, succ)))
    return len(edges)
Return the number of (power) edges
def nodes(self) -> iter:
    yield from (elem for elem, subs in self.inclusions.items() if subs == ())
Yield all nodes in the graph (not the powernodes)
def powernodes(self) -> iter:
    yield from (elem for elem, subs in self.inclusions.items() if subs != ())
Yield all powernodes in the graph (not the nodes)
def nodes_in(self, name) -> iter:
    yield from (node for node in self.all_in(name) if self.is_node(node))
Yield all nodes contained in given (power) node
def powernodes_in(self, name) -> iter:
    yield from (node for node in self.all_in(name) if self.is_powernode(node))
Yield all power nodes contained in given (power) node
def all_in(self, name) -> iter:
    for elem in self.inclusions[name]:
        yield elem
        yield from self.all_in(elem)
Yield all (power) nodes contained in given (power) node
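A sketch of the recursion on a hypothetical inclusions mapping, where powernode p1 contains powernode p2 and node n1, and p2 contains node n2:

tree.inclusions = {'p1': ('p2', 'n1'), 'p2': ('n2',), 'n1': (), 'n2': ()}
list(tree.all_in('p1'))  # -> ['p2', 'n2', 'n1'] (depth-first order)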
def powernodes_containing(self, name, directly=False) -> iter:
    if directly:
        yield from (node for node in self.all_in(name)
                    if name in self.inclusions[node])
    else:
        # This algorithm is very bad. Inverting the inclusion dict could
        # be far better.
        @functools.lru_cache(maxsize=self.node_number(count_pnode=True))
        def contains_target(node, target):
            succs = self.inclusions[node]
            if target in succs:
                return True
            else:
                return any(contains_target(succ, target) for succ in succs)
        # populate the cache
        for root in self.roots:
            contains_target(root, name)
        # output all that contains target at some level
        yield from (node for node in self.inclusions.keys()
                    if contains_target(node, name))
Yield all power nodes containing (power) node of given *name*. If *directly* is True, will only yield the direct parent of given name.
def write_bubble(self, filename: str):
    from bubbletools import converter
    converter.tree_to_bubble(self, filename)
Write the bubble lines describing this instance to the given filename
def from_bubble_file(bblfile: str, oriented: bool = False,
                     symmetric_edges: bool = True) -> 'BubbleTree':
    return BubbleTree.from_bubble_data(utils.data_from_bubble(bblfile),
                                       oriented=bool(oriented),
                                       symmetric_edges=symmetric_edges)
Extract data from given bubble file, then call from_bubble_data method
def from_bubble_lines(bbllines: iter, oriented: bool = False,
                      symmetric_edges: bool = True) -> 'BubbleTree':
    return BubbleTree.from_bubble_data((utils.line_data(line) for line in bbllines),
                                       oriented=bool(oriented),
                                       symmetric_edges=symmetric_edges)
Return a BubbleTree instance.

bbllines -- iterable of raw lines, bubble-formatted
oriented -- True: returned BubbleTree is oriented
def same_network(atree, btree) -> bool:
    return same_hierarchy(atree, btree) and same_topology(atree, btree)
True if given trees share the same structure of powernodes, independently of (power)node names, and same edge topology between (power)nodes.
def set_from_tree(root: str, graph: dict) -> frozenset:
    Node = namedtuple('Node', 'id succs')  # note: currently unused
    succs = graph[root]
    if succs:
        return (len(succs), sorted(tuple(set_from_tree(succ, graph) for succ in succs)))
    else:
        return 0, ()
Return a recursive structure describing given tree
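For illustration, on a hypothetical tree rooted at 'r' with children 'a' and 'b', where 'b' contains 'c':

graph = {'r': ('a', 'b'), 'a': (), 'b': ('c',), 'c': ()}
set_from_tree('r', graph)
# -> (2, [(0, ()), (1, [(0, ())])]) -- names are erased, only the shape remains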
def get_suitable_vis_classes(obj):
    ret = []
    for class_ in classes_vis():
        if isinstance(obj, class_.input_classes):
            ret.append(class_)
    return ret
Returns a list of Vis classes that can handle obj.
def get_suitable_vis_list_classes(objs):
    from f311 import explorer as ex
    ret = []
    for class_ in classes_vis():
        if isinstance(class_, ex.VisList):
            flag_can = True
            for obj in objs:
                if not isinstance(obj, class_.item_input_classes):
                    flag_can = False
                    break
            if flag_can:
                ret.append(class_)
    return ret
Returns a list of VisList classes that can handle a list of objects.
def classes_file(flag_leaf=False):
    if __flag_first:
        __setup()
    if not flag_leaf:
        return _classes_file
    return [cls for cls in _classes_file if cls not in _classes_file_superclass]
All known File* classes

Args:
    flag_leaf: returns only classes that do not have subclasses
               ("leaf" nodes as in a class tree graph)
def _collect_classes(m):
    from f311 import filetypes as ft
    from f311 import explorer as ex

    def _extend(classes, newclasses):
        """Filters out classes already present in list.

        This shouldn't be necessary, but collaborators may accidentally import
        already loaded classes into the datatypes namespace"""
        classes.extend([class_ for class_ in newclasses if class_ not in classes])
        # classes.extend(newclasses)

    file_classes = [class_ for class_ in a99.get_classes_in_module(m, ft.DataFile)
                    if class_.flag_collect]
    # Classes to consider when attempting to load a text file (see load_any_file())
    _extend(_classes_txt, [class_ for class_ in file_classes if class_.flag_txt])
    # Classes to consider when attempting to load a binary file (see load_any_file())
    _extend(_classes_bin, [class_ for class_ in file_classes if not class_.flag_txt])
    # Classes to consider when attempting to load a spectrum file (see load_spectrum())
    _extend(_classes_sp, [class_ for class_ in file_classes
                          if issubclass(class_, ft.FileSpectrum)])
    # All known File* classes
    _extend(_classes_file, file_classes)
    # All known Vis* classes
    _extend(_classes_vis, a99.get_classes_in_module(m, ex.Vis))
    global _classes_file_superclass
    _classes_file_superclass = [cls.__bases__[0] for cls in _classes_file]
Adds entries to _classes_*

Args:
    m: module object that must contain the following sub-modules: datatypes, vis
def __setup():
    global __collaborators, __flag_first
    import f311
    __flag_first = False
    for pkgname in f311.COLLABORATORS_C:
        try:
            pkg = importlib.import_module(pkgname)
            a99.get_python_logger().info("Imported collaborator package '{}'".format(pkgname))
            try:
                if hasattr(pkg, "_setup_filetypes"):
                    pkg._setup_filetypes()
                else:
                    _collect_classes(pkg)
                __collaborators[pkgname] = pkg
            except:
                a99.get_python_logger().exception(
                    "Actually, package '{}' gave error".format(pkgname))
                raise
        except:
            a99.get_python_logger().warning("Failed to import package '{}'".format(pkgname))
Will be executed the first time someone calls classes_*()
def _get_programs_dict():
    global __programs_dict
    if __programs_dict is not None:
        return __programs_dict
    d = __programs_dict = OrderedDict()
    for pkgname in COLLABORATORS_S:
        try:
            package = importlib.import_module(pkgname)
        except ImportError:
            # I think it is better to be silent when a collaborator package is not installed
            continue
        path_ = os.path.join(os.path.split(package.__file__)[0], "scripts")
        bulk = a99.get_exe_info(path_, flag_protected=True)
        d[pkgname] = {"description": a99.get_obj_doc0(package), "exeinfo": bulk}
    return __programs_dict
Builds and returns programs dictionary

This will have to import the packages in COLLABORATORS_S in order to get
their absolute path.

Returns:
    dictionary: {"packagename": [ExeInfo0, ...], ...}

"packagename" examples: "f311.explorer", "numpy"
def get_programs_dict(pkgname_only=None, flag_protected=False):
    ___ret = _get_programs_dict()
    __ret = ___ret if pkgname_only is None else OrderedDict(((pkgname_only, ___ret[pkgname_only]),))
    if flag_protected:
        _ret = __ret
    else:
        _ret = copy.deepcopy(__ret)
        for value in _ret.values():
            value["exeinfo"] = [exeinfo for exeinfo in value["exeinfo"]
                                if not exeinfo.filename.startswith("_")]
    # Removes packages that may have gone out of scripts after filtering
    ret = _ret if pkgname_only is None and flag_protected is None else \
        OrderedDict(((key, value) for key, value in _ret.items() if len(value["exeinfo"]) > 0))
    return ret
Scans COLLABORATORS_S packages for scripts, eventually filtering if arguments passed

Args:
    pkgname_only: name of single package within COLLABORATORS_S
    flag_protected: include scripts starting with "_"?

Returns:
    dictionary: {"packagename0": {"exeinfo": [ExeInfo00, ...], "description": description0}, ...}
def get(self, term):
    # type: (Any) -> Type[ChomskyTermNonterminal]
    if self._items[term].used is False:
        cont = self._items[term]
        self._grammar.nonterminals.add(cont.nonterminal)
        self._grammar.rules.add(cont.rule)
        cont.used = True
    return self._items[term].nonterminal
Get nonterminal rewritable to term.

If the rule is not in the grammar, a nonterminal and a rule rewritable
to the terminal are added into the grammar.

:param term: Term for which to get the nonterminal.
:return: ChomskyTermNonterminal class for the terminal.
def _build_url(*args, **kwargs) -> str:
    resource_url = API_RESOURCES_URLS
    for key in args:
        resource_url = resource_url[key]
    if kwargs:
        resource_url = resource_url.format(**kwargs)
    return urljoin(URL, resource_url)
Return a valid url.
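A sketch of how the helper composes a url, with a hypothetical resource table (the real API_RESOURCES_URLS and URL are module globals):

API_RESOURCES_URLS = {'documents': {'detail': 'documents/{doc_id}'}}  # hypothetical
URL = 'https://www.googleapis.com/'                                   # hypothetical
_build_url('documents', 'detail', doc_id='42')
# -> 'https://www.googleapis.com/documents/42'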
def _get(url: str, headers: dict) -> dict:
    response = requests.get(url, headers=headers)
    data = response.json()
    if response.status_code != 200:
        raise GoogleApiError({"status_code": response.status_code,
                              "error": data.get("error", "")})
    return data
Make a GET call.
def _post(url: str, params: dict, headers: dict) -> dict:
    response = requests.post(url, params=params, headers=headers)
    data = response.json()
    if response.status_code != 200 or "error" in data:
        raise GoogleApiError({"status_code": response.status_code,
                              "error": data.get("error", "")})
    return data
Make a POST call.