Dataset columns:
  repo              string    (length 7 to 55)
  path              string    (length 4 to 223)
  url               string    (length 87 to 315)
  code              string    (length 75 to 104k)
  code_tokens       sequence
  docstring         string    (length 1 to 46.9k)
  docstring_tokens  sequence
  language          string    (1 distinct value)
  partition         string    (3 distinct values)
  avg_line_len      float64   (7.91 to 980)
econ-ark/HARK
HARK/cAndCwithStickyE/StickyEtools.py
https://github.com/econ-ark/HARK/blob/3d184153a189e618a87c9540df1cd12044039cc5/HARK/cAndCwithStickyE/StickyEtools.py#L46-L196
def makeStickyEdataFile(Economy,ignore_periods,description='',filename=None,save_data=False,calc_micro_stats=True,meas_err_base=None): ''' Makes descriptive statistics and macroeconomic data file. Behaves slightly differently for heterogeneous agents vs representative agent models. Parameters ---------- Economy : Market or AgentType A representation of the model economy. For heterogeneous agents specifications, this will be an instance of a subclass of Market. For representative agent specifications, this will be an instance of an AgentType subclass. ignore_periods : int Number of periods at the start of the simulation to throw out. description : str Description of the economy that is prepended on the output string. filename : str Name of the output log file, if any; .txt will be appended automatically. save_data : bool When True, save simulation data to filename + 'Data.txt' for use in Stata. calc_micro_stats : bool When True, calculate microeconomic statistics like in Table 2 of the paper draft. meas_err_base : float or None Base value of measurement error standard deviation, which will be adjusted. When None (default), value is calculated as stdev(DeltaLogC). Returns ------- None ''' # Extract time series data from the economy if hasattr(Economy,'agents'): # If this is a heterogeneous agent specification... if len(Economy.agents) > 1: pLvlAll_hist = np.concatenate([this_type.pLvlTrue_hist for this_type in Economy.agents],axis=1) aLvlAll_hist = np.concatenate([this_type.aLvlNow_hist for this_type in Economy.agents],axis=1) cLvlAll_hist = np.concatenate([this_type.cLvlNow_hist for this_type in Economy.agents],axis=1) yLvlAll_hist = np.concatenate([this_type.yLvlNow_hist for this_type in Economy.agents],axis=1) else: # Don't duplicate the data unless necessary (with one type, concatenating is useless) pLvlAll_hist = Economy.agents[0].pLvlTrue_hist aLvlAll_hist = Economy.agents[0].aLvlNow_hist cLvlAll_hist = Economy.agents[0].cLvlNow_hist yLvlAll_hist = Economy.agents[0].yLvlNow_hist # PermShkAggHist needs to be shifted one period forward PlvlAgg_hist = np.cumprod(np.concatenate(([1.0],Economy.PermShkAggHist[:-1]),axis=0)) AlvlAgg_hist = np.mean(aLvlAll_hist,axis=1) # Level of aggregate assets AnrmAgg_hist = AlvlAgg_hist/PlvlAgg_hist # Normalized level of aggregate assets ClvlAgg_hist = np.mean(cLvlAll_hist,axis=1) # Level of aggregate consumption CnrmAgg_hist = ClvlAgg_hist/PlvlAgg_hist # Normalized level of aggregate consumption YlvlAgg_hist = np.mean(yLvlAll_hist,axis=1) # Level of aggregate income YnrmAgg_hist = YlvlAgg_hist/PlvlAgg_hist # Normalized level of aggregate income if calc_micro_stats: # Only calculate stats if requested. 
This is a memory hog with many simulated periods micro_stat_periods = int((Economy.agents[0].T_sim-ignore_periods)*0.1) not_newborns = (np.concatenate([this_type.t_age_hist[(ignore_periods+1):(ignore_periods+micro_stat_periods),:] for this_type in Economy.agents],axis=1) > 1).flatten() Logc = np.log(cLvlAll_hist[ignore_periods:(ignore_periods+micro_stat_periods),:]) DeltaLogc = (Logc[1:] - Logc[0:-1]).flatten() DeltaLogc_trimmed = DeltaLogc[not_newborns] Loga = np.log(aLvlAll_hist[ignore_periods:(ignore_periods+micro_stat_periods),:]) DeltaLoga = (Loga[1:] - Loga[0:-1]).flatten() DeltaLoga_trimmed = DeltaLoga[not_newborns] Logp = np.log(pLvlAll_hist[ignore_periods:(ignore_periods+micro_stat_periods),:]) DeltaLogp = (Logp[1:] - Logp[0:-1]).flatten() DeltaLogp_trimmed = DeltaLogp[not_newborns] Logy = np.log(yLvlAll_hist[ignore_periods:(ignore_periods+micro_stat_periods),:]) Logy_trimmed = Logy Logy_trimmed[np.isinf(Logy)] = np.nan birth_events = np.concatenate([this_type.t_age_hist == 1 for this_type in Economy.agents],axis=1) vBirth = calcValueAtBirth(cLvlAll_hist[ignore_periods:,:],birth_events[ignore_periods:,:],PlvlAgg_hist[ignore_periods:],Economy.MrkvNow_hist[ignore_periods:],Economy.agents[0].DiscFac,Economy.agents[0].CRRA) BigTheta_hist = Economy.TranShkAggHist if hasattr(Economy,'MrkvNow'): Mrkv_hist = Economy.MrkvNow_hist if ~hasattr(Economy,'Rfree'): # If this is a markov DSGE specification... # Find the expected interest rate - approximate by assuming growth = expected growth ExpectedGrowth_hist = Economy.PermGroFacAgg[Mrkv_hist] ExpectedKLRatio_hist = AnrmAgg_hist/ExpectedGrowth_hist ExpectedR_hist = Economy.Rfunc(ExpectedKLRatio_hist) else: # If this is a representative agent specification... PlvlAgg_hist = Economy.pLvlTrue_hist.flatten() ClvlAgg_hist = Economy.cLvlNow_hist.flatten() CnrmAgg_hist = ClvlAgg_hist/PlvlAgg_hist.flatten() YnrmAgg_hist = Economy.yNrmTrue_hist.flatten() YlvlAgg_hist = YnrmAgg_hist*PlvlAgg_hist.flatten() AlvlAgg_hist = Economy.aLvlNow_hist.flatten() AnrmAgg_hist = AlvlAgg_hist/PlvlAgg_hist.flatten() BigTheta_hist = Economy.TranShkNow_hist.flatten() if hasattr(Economy,'MrkvNow'): Mrkv_hist = Economy.MrkvNow_hist # Process aggregate data into forms used by regressions LogC = np.log(ClvlAgg_hist[ignore_periods:]) LogA = np.log(AlvlAgg_hist[ignore_periods:]) LogY = np.log(YlvlAgg_hist[ignore_periods:]) DeltaLogC = LogC[1:] - LogC[0:-1] DeltaLogA = LogA[1:] - LogA[0:-1] DeltaLogY = LogY[1:] - LogY[0:-1] A = AnrmAgg_hist[(ignore_periods+1):] # This is a relabeling for the regression code BigTheta = BigTheta_hist[(ignore_periods+1):] if hasattr(Economy,'MrkvNow'): Mrkv = Mrkv_hist[(ignore_periods+1):] # This is a relabeling for the regression code if ~hasattr(Economy,'Rfree') and hasattr(Economy,'agents'): # If this is a markov DSGE specification... R = ExpectedR_hist[(ignore_periods+1):] Delta8LogC = (np.log(ClvlAgg_hist[8:]) - np.log(ClvlAgg_hist[:-8]))[(ignore_periods-7):] Delta8LogY = (np.log(YlvlAgg_hist[8:]) - np.log(YlvlAgg_hist[:-8]))[(ignore_periods-7):] # Add measurement error to LogC if meas_err_base is None: meas_err_base = np.std(DeltaLogC) sigma_meas_err = meas_err_base*0.375 # This approximately matches the change in IV vs OLS in U.S. 
empirical coefficients np.random.seed(10) Measurement_Error = sigma_meas_err*np.random.normal(0.,1.,LogC.size) LogC_me = LogC + Measurement_Error DeltaLogC_me = LogC_me[1:] - LogC_me[0:-1] # Apply measurement error to long delta LogC LogC_long = np.log(ClvlAgg_hist) LogC_long_me = LogC_long + sigma_meas_err*np.random.normal(0.,1.,LogC_long.size) Delta8LogC_me = (LogC_long_me[8:] - LogC_long_me[:-8])[(ignore_periods-7):] # Make summary statistics for the results file csv_output_string = str(np.mean(AnrmAgg_hist[ignore_periods:])) +","+ str(np.mean(CnrmAgg_hist[ignore_periods:]))+ ","+str(np.std(np.log(AnrmAgg_hist[ignore_periods:])))+ ","+str(np.std(DeltaLogC))+ ","+str(np.std(DeltaLogY)) +","+ str(np.std(DeltaLogA)) if hasattr(Economy,'agents') and calc_micro_stats: # This block only runs for heterogeneous agents specifications csv_output_string += ","+str(np.mean(np.std(Loga,axis=1)))+ ","+str(np.mean(np.std(Logc,axis=1))) + ","+str(np.mean(np.std(Logp,axis=1))) +","+ str(np.mean(np.nanstd(Logy_trimmed,axis=1))) +","+ str(np.std(DeltaLoga_trimmed))+","+ str(np.std(DeltaLogc_trimmed))+ ","+str(np.std(DeltaLogp_trimmed)) # Save the results to a logfile if requested if filename is not None: with open(results_dir + filename + 'Results.csv','w') as f: f.write(csv_output_string) f.close() if calc_micro_stats and hasattr(Economy,'agents'): with open(results_dir + filename + 'BirthValue.csv','w') as f: my_writer = csv.writer(f, delimiter = ',') my_writer.writerow(vBirth) f.close() if save_data: DataArray = (np.vstack((np.arange(DeltaLogC.size),DeltaLogC_me,DeltaLogC,DeltaLogY,A,BigTheta,Delta8LogC,Delta8LogY,Delta8LogC_me,Measurement_Error[1:]))).transpose() VarNames = ['time_period','DeltaLogC_me','DeltaLogC','DeltaLogY','A','BigTheta','Delta8LogC','Delta8LogY','Delta8LogC_me','Measurement_Error'] if hasattr(Economy,'MrkvNow'): DataArray = np.hstack((DataArray,np.reshape(Mrkv,(Mrkv.size,1)))) VarNames.append('MrkvState') if hasattr(Economy,'MrkvNow') & ~hasattr(Economy,'Rfree') and hasattr(Economy,'agents'): DataArray = np.hstack((DataArray,np.reshape(R,(R.size,1)))) VarNames.append('R') with open(results_dir + filename + 'Data.txt','w') as f: my_writer = csv.writer(f, delimiter = '\t') my_writer.writerow(VarNames) for i in range(DataArray.shape[0]): my_writer.writerow(DataArray[i,:]) f.close()
[ "def", "makeStickyEdataFile", "(", "Economy", ",", "ignore_periods", ",", "description", "=", "''", ",", "filename", "=", "None", ",", "save_data", "=", "False", ",", "calc_micro_stats", "=", "True", ",", "meas_err_base", "=", "None", ")", ":", "# Extract time series data from the economy", "if", "hasattr", "(", "Economy", ",", "'agents'", ")", ":", "# If this is a heterogeneous agent specification...", "if", "len", "(", "Economy", ".", "agents", ")", ">", "1", ":", "pLvlAll_hist", "=", "np", ".", "concatenate", "(", "[", "this_type", ".", "pLvlTrue_hist", "for", "this_type", "in", "Economy", ".", "agents", "]", ",", "axis", "=", "1", ")", "aLvlAll_hist", "=", "np", ".", "concatenate", "(", "[", "this_type", ".", "aLvlNow_hist", "for", "this_type", "in", "Economy", ".", "agents", "]", ",", "axis", "=", "1", ")", "cLvlAll_hist", "=", "np", ".", "concatenate", "(", "[", "this_type", ".", "cLvlNow_hist", "for", "this_type", "in", "Economy", ".", "agents", "]", ",", "axis", "=", "1", ")", "yLvlAll_hist", "=", "np", ".", "concatenate", "(", "[", "this_type", ".", "yLvlNow_hist", "for", "this_type", "in", "Economy", ".", "agents", "]", ",", "axis", "=", "1", ")", "else", ":", "# Don't duplicate the data unless necessary (with one type, concatenating is useless)", "pLvlAll_hist", "=", "Economy", ".", "agents", "[", "0", "]", ".", "pLvlTrue_hist", "aLvlAll_hist", "=", "Economy", ".", "agents", "[", "0", "]", ".", "aLvlNow_hist", "cLvlAll_hist", "=", "Economy", ".", "agents", "[", "0", "]", ".", "cLvlNow_hist", "yLvlAll_hist", "=", "Economy", ".", "agents", "[", "0", "]", ".", "yLvlNow_hist", "# PermShkAggHist needs to be shifted one period forward", "PlvlAgg_hist", "=", "np", ".", "cumprod", "(", "np", ".", "concatenate", "(", "(", "[", "1.0", "]", ",", "Economy", ".", "PermShkAggHist", "[", ":", "-", "1", "]", ")", ",", "axis", "=", "0", ")", ")", "AlvlAgg_hist", "=", "np", ".", "mean", "(", "aLvlAll_hist", ",", "axis", "=", "1", ")", "# Level of aggregate assets", "AnrmAgg_hist", "=", "AlvlAgg_hist", "/", "PlvlAgg_hist", "# Normalized level of aggregate assets", "ClvlAgg_hist", "=", "np", ".", "mean", "(", "cLvlAll_hist", ",", "axis", "=", "1", ")", "# Level of aggregate consumption", "CnrmAgg_hist", "=", "ClvlAgg_hist", "/", "PlvlAgg_hist", "# Normalized level of aggregate consumption", "YlvlAgg_hist", "=", "np", ".", "mean", "(", "yLvlAll_hist", ",", "axis", "=", "1", ")", "# Level of aggregate income", "YnrmAgg_hist", "=", "YlvlAgg_hist", "/", "PlvlAgg_hist", "# Normalized level of aggregate income", "if", "calc_micro_stats", ":", "# Only calculate stats if requested. 
This is a memory hog with many simulated periods", "micro_stat_periods", "=", "int", "(", "(", "Economy", ".", "agents", "[", "0", "]", ".", "T_sim", "-", "ignore_periods", ")", "*", "0.1", ")", "not_newborns", "=", "(", "np", ".", "concatenate", "(", "[", "this_type", ".", "t_age_hist", "[", "(", "ignore_periods", "+", "1", ")", ":", "(", "ignore_periods", "+", "micro_stat_periods", ")", ",", ":", "]", "for", "this_type", "in", "Economy", ".", "agents", "]", ",", "axis", "=", "1", ")", ">", "1", ")", ".", "flatten", "(", ")", "Logc", "=", "np", ".", "log", "(", "cLvlAll_hist", "[", "ignore_periods", ":", "(", "ignore_periods", "+", "micro_stat_periods", ")", ",", ":", "]", ")", "DeltaLogc", "=", "(", "Logc", "[", "1", ":", "]", "-", "Logc", "[", "0", ":", "-", "1", "]", ")", ".", "flatten", "(", ")", "DeltaLogc_trimmed", "=", "DeltaLogc", "[", "not_newborns", "]", "Loga", "=", "np", ".", "log", "(", "aLvlAll_hist", "[", "ignore_periods", ":", "(", "ignore_periods", "+", "micro_stat_periods", ")", ",", ":", "]", ")", "DeltaLoga", "=", "(", "Loga", "[", "1", ":", "]", "-", "Loga", "[", "0", ":", "-", "1", "]", ")", ".", "flatten", "(", ")", "DeltaLoga_trimmed", "=", "DeltaLoga", "[", "not_newborns", "]", "Logp", "=", "np", ".", "log", "(", "pLvlAll_hist", "[", "ignore_periods", ":", "(", "ignore_periods", "+", "micro_stat_periods", ")", ",", ":", "]", ")", "DeltaLogp", "=", "(", "Logp", "[", "1", ":", "]", "-", "Logp", "[", "0", ":", "-", "1", "]", ")", ".", "flatten", "(", ")", "DeltaLogp_trimmed", "=", "DeltaLogp", "[", "not_newborns", "]", "Logy", "=", "np", ".", "log", "(", "yLvlAll_hist", "[", "ignore_periods", ":", "(", "ignore_periods", "+", "micro_stat_periods", ")", ",", ":", "]", ")", "Logy_trimmed", "=", "Logy", "Logy_trimmed", "[", "np", ".", "isinf", "(", "Logy", ")", "]", "=", "np", ".", "nan", "birth_events", "=", "np", ".", "concatenate", "(", "[", "this_type", ".", "t_age_hist", "==", "1", "for", "this_type", "in", "Economy", ".", "agents", "]", ",", "axis", "=", "1", ")", "vBirth", "=", "calcValueAtBirth", "(", "cLvlAll_hist", "[", "ignore_periods", ":", ",", ":", "]", ",", "birth_events", "[", "ignore_periods", ":", ",", ":", "]", ",", "PlvlAgg_hist", "[", "ignore_periods", ":", "]", ",", "Economy", ".", "MrkvNow_hist", "[", "ignore_periods", ":", "]", ",", "Economy", ".", "agents", "[", "0", "]", ".", "DiscFac", ",", "Economy", ".", "agents", "[", "0", "]", ".", "CRRA", ")", "BigTheta_hist", "=", "Economy", ".", "TranShkAggHist", "if", "hasattr", "(", "Economy", ",", "'MrkvNow'", ")", ":", "Mrkv_hist", "=", "Economy", ".", "MrkvNow_hist", "if", "~", "hasattr", "(", "Economy", ",", "'Rfree'", ")", ":", "# If this is a markov DSGE specification...", "# Find the expected interest rate - approximate by assuming growth = expected growth", "ExpectedGrowth_hist", "=", "Economy", ".", "PermGroFacAgg", "[", "Mrkv_hist", "]", "ExpectedKLRatio_hist", "=", "AnrmAgg_hist", "/", "ExpectedGrowth_hist", "ExpectedR_hist", "=", "Economy", ".", "Rfunc", "(", "ExpectedKLRatio_hist", ")", "else", ":", "# If this is a representative agent specification...", "PlvlAgg_hist", "=", "Economy", ".", "pLvlTrue_hist", ".", "flatten", "(", ")", "ClvlAgg_hist", "=", "Economy", ".", "cLvlNow_hist", ".", "flatten", "(", ")", "CnrmAgg_hist", "=", "ClvlAgg_hist", "/", "PlvlAgg_hist", ".", "flatten", "(", ")", "YnrmAgg_hist", "=", "Economy", ".", "yNrmTrue_hist", ".", "flatten", "(", ")", "YlvlAgg_hist", "=", "YnrmAgg_hist", "*", "PlvlAgg_hist", ".", "flatten", "(", ")", "AlvlAgg_hist", "=", "Economy", ".", 
"aLvlNow_hist", ".", "flatten", "(", ")", "AnrmAgg_hist", "=", "AlvlAgg_hist", "/", "PlvlAgg_hist", ".", "flatten", "(", ")", "BigTheta_hist", "=", "Economy", ".", "TranShkNow_hist", ".", "flatten", "(", ")", "if", "hasattr", "(", "Economy", ",", "'MrkvNow'", ")", ":", "Mrkv_hist", "=", "Economy", ".", "MrkvNow_hist", "# Process aggregate data into forms used by regressions", "LogC", "=", "np", ".", "log", "(", "ClvlAgg_hist", "[", "ignore_periods", ":", "]", ")", "LogA", "=", "np", ".", "log", "(", "AlvlAgg_hist", "[", "ignore_periods", ":", "]", ")", "LogY", "=", "np", ".", "log", "(", "YlvlAgg_hist", "[", "ignore_periods", ":", "]", ")", "DeltaLogC", "=", "LogC", "[", "1", ":", "]", "-", "LogC", "[", "0", ":", "-", "1", "]", "DeltaLogA", "=", "LogA", "[", "1", ":", "]", "-", "LogA", "[", "0", ":", "-", "1", "]", "DeltaLogY", "=", "LogY", "[", "1", ":", "]", "-", "LogY", "[", "0", ":", "-", "1", "]", "A", "=", "AnrmAgg_hist", "[", "(", "ignore_periods", "+", "1", ")", ":", "]", "# This is a relabeling for the regression code", "BigTheta", "=", "BigTheta_hist", "[", "(", "ignore_periods", "+", "1", ")", ":", "]", "if", "hasattr", "(", "Economy", ",", "'MrkvNow'", ")", ":", "Mrkv", "=", "Mrkv_hist", "[", "(", "ignore_periods", "+", "1", ")", ":", "]", "# This is a relabeling for the regression code", "if", "~", "hasattr", "(", "Economy", ",", "'Rfree'", ")", "and", "hasattr", "(", "Economy", ",", "'agents'", ")", ":", "# If this is a markov DSGE specification...", "R", "=", "ExpectedR_hist", "[", "(", "ignore_periods", "+", "1", ")", ":", "]", "Delta8LogC", "=", "(", "np", ".", "log", "(", "ClvlAgg_hist", "[", "8", ":", "]", ")", "-", "np", ".", "log", "(", "ClvlAgg_hist", "[", ":", "-", "8", "]", ")", ")", "[", "(", "ignore_periods", "-", "7", ")", ":", "]", "Delta8LogY", "=", "(", "np", ".", "log", "(", "YlvlAgg_hist", "[", "8", ":", "]", ")", "-", "np", ".", "log", "(", "YlvlAgg_hist", "[", ":", "-", "8", "]", ")", ")", "[", "(", "ignore_periods", "-", "7", ")", ":", "]", "# Add measurement error to LogC", "if", "meas_err_base", "is", "None", ":", "meas_err_base", "=", "np", ".", "std", "(", "DeltaLogC", ")", "sigma_meas_err", "=", "meas_err_base", "*", "0.375", "# This approximately matches the change in IV vs OLS in U.S. 
empirical coefficients", "np", ".", "random", ".", "seed", "(", "10", ")", "Measurement_Error", "=", "sigma_meas_err", "*", "np", ".", "random", ".", "normal", "(", "0.", ",", "1.", ",", "LogC", ".", "size", ")", "LogC_me", "=", "LogC", "+", "Measurement_Error", "DeltaLogC_me", "=", "LogC_me", "[", "1", ":", "]", "-", "LogC_me", "[", "0", ":", "-", "1", "]", "# Apply measurement error to long delta LogC", "LogC_long", "=", "np", ".", "log", "(", "ClvlAgg_hist", ")", "LogC_long_me", "=", "LogC_long", "+", "sigma_meas_err", "*", "np", ".", "random", ".", "normal", "(", "0.", ",", "1.", ",", "LogC_long", ".", "size", ")", "Delta8LogC_me", "=", "(", "LogC_long_me", "[", "8", ":", "]", "-", "LogC_long_me", "[", ":", "-", "8", "]", ")", "[", "(", "ignore_periods", "-", "7", ")", ":", "]", "# Make summary statistics for the results file", "csv_output_string", "=", "str", "(", "np", ".", "mean", "(", "AnrmAgg_hist", "[", "ignore_periods", ":", "]", ")", ")", "+", "\",\"", "+", "str", "(", "np", ".", "mean", "(", "CnrmAgg_hist", "[", "ignore_periods", ":", "]", ")", ")", "+", "\",\"", "+", "str", "(", "np", ".", "std", "(", "np", ".", "log", "(", "AnrmAgg_hist", "[", "ignore_periods", ":", "]", ")", ")", ")", "+", "\",\"", "+", "str", "(", "np", ".", "std", "(", "DeltaLogC", ")", ")", "+", "\",\"", "+", "str", "(", "np", ".", "std", "(", "DeltaLogY", ")", ")", "+", "\",\"", "+", "str", "(", "np", ".", "std", "(", "DeltaLogA", ")", ")", "if", "hasattr", "(", "Economy", ",", "'agents'", ")", "and", "calc_micro_stats", ":", "# This block only runs for heterogeneous agents specifications", "csv_output_string", "+=", "\",\"", "+", "str", "(", "np", ".", "mean", "(", "np", ".", "std", "(", "Loga", ",", "axis", "=", "1", ")", ")", ")", "+", "\",\"", "+", "str", "(", "np", ".", "mean", "(", "np", ".", "std", "(", "Logc", ",", "axis", "=", "1", ")", ")", ")", "+", "\",\"", "+", "str", "(", "np", ".", "mean", "(", "np", ".", "std", "(", "Logp", ",", "axis", "=", "1", ")", ")", ")", "+", "\",\"", "+", "str", "(", "np", ".", "mean", "(", "np", ".", "nanstd", "(", "Logy_trimmed", ",", "axis", "=", "1", ")", ")", ")", "+", "\",\"", "+", "str", "(", "np", ".", "std", "(", "DeltaLoga_trimmed", ")", ")", "+", "\",\"", "+", "str", "(", "np", ".", "std", "(", "DeltaLogc_trimmed", ")", ")", "+", "\",\"", "+", "str", "(", "np", ".", "std", "(", "DeltaLogp_trimmed", ")", ")", "# Save the results to a logfile if requested", "if", "filename", "is", "not", "None", ":", "with", "open", "(", "results_dir", "+", "filename", "+", "'Results.csv'", ",", "'w'", ")", "as", "f", ":", "f", ".", "write", "(", "csv_output_string", ")", "f", ".", "close", "(", ")", "if", "calc_micro_stats", "and", "hasattr", "(", "Economy", ",", "'agents'", ")", ":", "with", "open", "(", "results_dir", "+", "filename", "+", "'BirthValue.csv'", ",", "'w'", ")", "as", "f", ":", "my_writer", "=", "csv", ".", "writer", "(", "f", ",", "delimiter", "=", "','", ")", "my_writer", ".", "writerow", "(", "vBirth", ")", "f", ".", "close", "(", ")", "if", "save_data", ":", "DataArray", "=", "(", "np", ".", "vstack", "(", "(", "np", ".", "arange", "(", "DeltaLogC", ".", "size", ")", ",", "DeltaLogC_me", ",", "DeltaLogC", ",", "DeltaLogY", ",", "A", ",", "BigTheta", ",", "Delta8LogC", ",", "Delta8LogY", ",", "Delta8LogC_me", ",", "Measurement_Error", "[", "1", ":", "]", ")", ")", ")", ".", "transpose", "(", ")", "VarNames", "=", "[", "'time_period'", ",", "'DeltaLogC_me'", ",", "'DeltaLogC'", ",", "'DeltaLogY'", ",", "'A'", ",", "'BigTheta'", ",", "'Delta8LogC'", 
",", "'Delta8LogY'", ",", "'Delta8LogC_me'", ",", "'Measurement_Error'", "]", "if", "hasattr", "(", "Economy", ",", "'MrkvNow'", ")", ":", "DataArray", "=", "np", ".", "hstack", "(", "(", "DataArray", ",", "np", ".", "reshape", "(", "Mrkv", ",", "(", "Mrkv", ".", "size", ",", "1", ")", ")", ")", ")", "VarNames", ".", "append", "(", "'MrkvState'", ")", "if", "hasattr", "(", "Economy", ",", "'MrkvNow'", ")", "&", "~", "hasattr", "(", "Economy", ",", "'Rfree'", ")", "and", "hasattr", "(", "Economy", ",", "'agents'", ")", ":", "DataArray", "=", "np", ".", "hstack", "(", "(", "DataArray", ",", "np", ".", "reshape", "(", "R", ",", "(", "R", ".", "size", ",", "1", ")", ")", ")", ")", "VarNames", ".", "append", "(", "'R'", ")", "with", "open", "(", "results_dir", "+", "filename", "+", "'Data.txt'", ",", "'w'", ")", "as", "f", ":", "my_writer", "=", "csv", ".", "writer", "(", "f", ",", "delimiter", "=", "'\\t'", ")", "my_writer", ".", "writerow", "(", "VarNames", ")", "for", "i", "in", "range", "(", "DataArray", ".", "shape", "[", "0", "]", ")", ":", "my_writer", ".", "writerow", "(", "DataArray", "[", "i", ",", ":", "]", ")", "f", ".", "close", "(", ")" ]
Makes descriptive statistics and macroeconomic data file. Behaves slightly differently for heterogeneous agents vs representative agent models. Parameters ---------- Economy : Market or AgentType A representation of the model economy. For heterogeneous agents specifications, this will be an instance of a subclass of Market. For representative agent specifications, this will be an instance of an AgentType subclass. ignore_periods : int Number of periods at the start of the simulation to throw out. description : str Description of the economy that is prepended on the output string. filename : str Name of the output log file, if any; .txt will be appended automatically. save_data : bool When True, save simulation data to filename + 'Data.txt' for use in Stata. calc_micro_stats : bool When True, calculate microeconomic statistics like in Table 2 of the paper draft. meas_err_base : float or None Base value of measurement error standard deviation, which will be adjusted. When None (default), value is calculated as stdev(DeltaLogC). Returns ------- None
[ "Makes", "descriptive", "statistics", "and", "macroeconomic", "data", "file", ".", "Behaves", "slightly", "differently", "for", "heterogeneous", "agents", "vs", "representative", "agent", "models", "." ]
python
train
61.695364
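A minimal usage sketch for the record above, assuming the import path mirrors the file location in the repository; `my_economy` is a hypothetical StickyE Market instance that has already been solved and simulated:

from HARK.cAndCwithStickyE.StickyEtools import makeStickyEdataFile

# Drop the first 200 simulated periods as burn-in, then write summary
# statistics plus a Stata-ready data file for this economy.
makeStickyEdataFile(my_economy, ignore_periods=200,
                    description='Baseline sticky expectations economy',
                    filename='Baseline', save_data=True,
                    calc_micro_stats=True)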
portfors-lab/sparkle
sparkle/gui/stim/dynamic_stacker.py
https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/gui/stim/dynamic_stacker.py#L7-L16
def widgetForName(self, name): """Gets a widget with *name* :param name: the widgets in this container should all have a name() method. This is the string to match to that result :type name: str """ for iwidget in range(len(self)): if self.widget(iwidget).name() == name: return self.widget(iwidget)
[ "def", "widgetForName", "(", "self", ",", "name", ")", ":", "for", "iwidget", "in", "range", "(", "len", "(", "self", ")", ")", ":", "if", "self", ".", "widget", "(", "iwidget", ")", ".", "name", "(", ")", "==", "name", ":", "return", "self", ".", "widget", "(", "iwidget", ")" ]
Gets a widget with *name* :param name: the widgets in this container should all have a name() method. This is the string to match to that result :type name: str
[ "Gets", "a", "widget", "with", "*", "name", "*" ]
python
train
36.8
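The name-lookup pattern in the record above can be reproduced with a minimal stand-in container; this is a plain-Python analogue for illustration, not the Qt-based sparkle class:

class Stack:
    def __init__(self, widgets):
        self._widgets = widgets
    def __len__(self):
        return len(self._widgets)
    def widget(self, index):
        return self._widgets[index]
    def widgetForName(self, name):
        # Same loop as the record: scan by index, match on name().
        for iwidget in range(len(self)):
            if self.widget(iwidget).name() == name:
                return self.widget(iwidget)

class Named:
    def __init__(self, name):
        self._name = name
    def name(self):
        return self._name

stack = Stack([Named('vocalization'), Named('silence')])
print(stack.widgetForName('silence').name())   # silence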
newville/wxmplot
examples/floatcontrol.py
https://github.com/newville/wxmplot/blob/8e0dc037453e5cdf18c968dc5a3d29efd761edee/examples/floatcontrol.py#L135-L149
def SetValue(self, value=None, act=True): " main method to set value " if value is None: value = wx.TextCtrl.GetValue(self).strip() self.__CheckValid(value) self.__GetMark() if value is not None: wx.TextCtrl.SetValue(self, self.format % set_float(value)) if self.is_valid and hasattr(self.__action, '__call__') and act: self.__action(value=self.__val) elif not self.is_valid and self.bell_on_invalid: wx.Bell() self.__SetMark()
[ "def", "SetValue", "(", "self", ",", "value", "=", "None", ",", "act", "=", "True", ")", ":", "if", "value", "is", "None", ":", "value", "=", "wx", ".", "TextCtrl", ".", "GetValue", "(", "self", ")", ".", "strip", "(", ")", "self", ".", "__CheckValid", "(", "value", ")", "self", ".", "__GetMark", "(", ")", "if", "value", "is", "not", "None", ":", "wx", ".", "TextCtrl", ".", "SetValue", "(", "self", ",", "self", ".", "format", "%", "set_float", "(", "value", ")", ")", "if", "self", ".", "is_valid", "and", "hasattr", "(", "self", ".", "__action", ",", "'__call__'", ")", "and", "act", ":", "self", ".", "__action", "(", "value", "=", "self", ".", "__val", ")", "elif", "not", "self", ".", "is_valid", "and", "self", ".", "bell_on_invalid", ":", "wx", ".", "Bell", "(", ")", "self", ".", "__SetMark", "(", ")" ]
main method to set value
[ "main", "method", "to", "set", "value" ]
python
train
35.133333
juju/charm-helpers
charmhelpers/contrib/hahelpers/cluster.py
https://github.com/juju/charm-helpers/blob/aa785c40c3b7a8c69dbfbc7921d6b9f30142e171/charmhelpers/contrib/hahelpers/cluster.py#L189-L195
def peer_ips(peer_relation='cluster', addr_key='private-address'): '''Return a dict of peers and their private-address''' peers = {} for r_id in relation_ids(peer_relation): for unit in relation_list(r_id): peers[unit] = relation_get(addr_key, rid=r_id, unit=unit) return peers
[ "def", "peer_ips", "(", "peer_relation", "=", "'cluster'", ",", "addr_key", "=", "'private-address'", ")", ":", "peers", "=", "{", "}", "for", "r_id", "in", "relation_ids", "(", "peer_relation", ")", ":", "for", "unit", "in", "relation_list", "(", "r_id", ")", ":", "peers", "[", "unit", "]", "=", "relation_get", "(", "addr_key", ",", "rid", "=", "r_id", ",", "unit", "=", "unit", ")", "return", "peers" ]
Return a dict of peers and their private-address
[ "Return", "a", "dict", "of", "peers", "and", "their", "private", "-", "address" ]
python
train
43.857143
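An illustrative call for the record above; it only makes sense inside a running Juju charm hook, where relation data is available:

from charmhelpers.contrib.hahelpers.cluster import peer_ips

# Map every peer unit on the 'cluster' relation (the default)
# to its private-address.
peers = peer_ips()
for unit, address in peers.items():
    print(unit, address)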
ScienceLogic/amiuploader
amiimporter/amiupload.py
https://github.com/ScienceLogic/amiuploader/blob/c36c247b2226107b38571cbc6119118b1fe07182/amiimporter/amiupload.py#L69-L77
def vmdk_to_ami(args): """ Calls methods to perform vmdk import :param args: :return: """ aws_importer = AWSUtilities.AWSUtils(args.directory, args.aws_profile, args.s3_bucket, args.aws_regions, args.ami_name, args.vmdk_upload_file) aws_importer.import_vmdk()
[ "def", "vmdk_to_ami", "(", "args", ")", ":", "aws_importer", "=", "AWSUtilities", ".", "AWSUtils", "(", "args", ".", "directory", ",", "args", ".", "aws_profile", ",", "args", ".", "s3_bucket", ",", "args", ".", "aws_regions", ",", "args", ".", "ami_name", ",", "args", ".", "vmdk_upload_file", ")", "aws_importer", ".", "import_vmdk", "(", ")" ]
Calls methods to perform vmdk import :param args: :return:
[ "Calls", "methods", "to", "perform", "vmdk", "import", ":", "param", "args", ":", ":", "return", ":" ]
python
train
35.555556
NORDUnet/python-norduniclient
norduniclient/core.py
https://github.com/NORDUnet/python-norduniclient/blob/ee5084a6f45caac614b4fda4a023749ca52f786c/norduniclient/core.py#L655-L671
def create_relationship(manager, handle_id, other_handle_id, rel_type): """ Makes a relationship from node to other_node depending on which meta_type the nodes are. Returns the relationship or raises NoRelationshipPossible exception. """ meta_type = get_node_meta_type(manager, handle_id) if meta_type == 'Location': return create_location_relationship(manager, handle_id, other_handle_id, rel_type) elif meta_type == 'Logical': return create_logical_relationship(manager, handle_id, other_handle_id, rel_type) elif meta_type == 'Relation': return create_relation_relationship(manager, handle_id, other_handle_id, rel_type) elif meta_type == 'Physical': return create_physical_relationship(manager, handle_id, other_handle_id, rel_type) other_meta_type = get_node_meta_type(manager, other_handle_id) raise exceptions.NoRelationshipPossible(handle_id, meta_type, other_handle_id, other_meta_type, rel_type)
[ "def", "create_relationship", "(", "manager", ",", "handle_id", ",", "other_handle_id", ",", "rel_type", ")", ":", "meta_type", "=", "get_node_meta_type", "(", "manager", ",", "handle_id", ")", "if", "meta_type", "==", "'Location'", ":", "return", "create_location_relationship", "(", "manager", ",", "handle_id", ",", "other_handle_id", ",", "rel_type", ")", "elif", "meta_type", "==", "'Logical'", ":", "return", "create_logical_relationship", "(", "manager", ",", "handle_id", ",", "other_handle_id", ",", "rel_type", ")", "elif", "meta_type", "==", "'Relation'", ":", "return", "create_relation_relationship", "(", "manager", ",", "handle_id", ",", "other_handle_id", ",", "rel_type", ")", "elif", "meta_type", "==", "'Physical'", ":", "return", "create_physical_relationship", "(", "manager", ",", "handle_id", ",", "other_handle_id", ",", "rel_type", ")", "other_meta_type", "=", "get_node_meta_type", "(", "manager", ",", "other_handle_id", ")", "raise", "exceptions", ".", "NoRelationshipPossible", "(", "handle_id", ",", "meta_type", ",", "other_handle_id", ",", "other_meta_type", ",", "rel_type", ")" ]
Makes a relationship from node to other_node depending on which meta_type the nodes are. Returns the relationship or raises NoRelationshipPossible exception.
[ "Makes", "a", "relationship", "from", "node", "to", "other_node", "depending", "on", "which", "meta_type", "the", "nodes", "are", ".", "Returns", "the", "relationship", "or", "raises", "NoRelationshipPossible", "exception", "." ]
python
train
57
mfcloud/python-zvm-sdk
smtLayer/vmUtils.py
https://github.com/mfcloud/python-zvm-sdk/blob/de9994ceca764f5460ce51bd74237986341d8e3c/smtLayer/vmUtils.py#L724-L779
def isLoggedOn(rh, userid): """ Determine whether a virtual machine is logged on. Input: Request Handle: userid being queried Output: Dictionary containing the following: overallRC - overall return code, 0: success, non-zero: failure rc - 0: if we got status. Otherwise, it is the error return code from the commands issued. rs - Based on rc value. For rc==0, rs is: 0: if we determined it is logged on. 1: if we determined it is logged off. """ rh.printSysLog("Enter vmUtils.isLoggedOn, userid: " + userid) results = { 'overallRC': 0, 'rc': 0, 'rs': 0, } cmd = ["sudo", "/sbin/vmcp", "query", "user", userid] strCmd = ' '.join(cmd) rh.printSysLog("Invoking: " + strCmd) try: subprocess.check_output( cmd, close_fds=True, stderr=subprocess.STDOUT) except CalledProcessError as e: search_pattern = '(^HCP\w\w\w045E|^HCP\w\w\w361E)'.encode() match = re.search(search_pattern, e.output) if match: # Not logged on results['rs'] = 1 else: # Abnormal failure rh.printLn("ES", msgs.msg['0415'][1] % (modId, strCmd, e.returncode, e.output)) results = msgs.msg['0415'][0] results['rs'] = e.returncode except Exception as e: # All other exceptions. results = msgs.msg['0421'][0] rh.printLn("ES", msgs.msg['0421'][1] % (modId, strCmd, type(e).__name__, str(e))) rh.printSysLog("Exit vmUtils.isLoggedOn, overallRC: " + str(results['overallRC']) + " rc: " + str(results['rc']) + " rs: " + str(results['rs'])) return results
[ "def", "isLoggedOn", "(", "rh", ",", "userid", ")", ":", "rh", ".", "printSysLog", "(", "\"Enter vmUtils.isLoggedOn, userid: \"", "+", "userid", ")", "results", "=", "{", "'overallRC'", ":", "0", ",", "'rc'", ":", "0", ",", "'rs'", ":", "0", ",", "}", "cmd", "=", "[", "\"sudo\"", ",", "\"/sbin/vmcp\"", ",", "\"query\"", ",", "\"user\"", ",", "userid", "]", "strCmd", "=", "' '", ".", "join", "(", "cmd", ")", "rh", ".", "printSysLog", "(", "\"Invoking: \"", "+", "strCmd", ")", "try", ":", "subprocess", ".", "check_output", "(", "cmd", ",", "close_fds", "=", "True", ",", "stderr", "=", "subprocess", ".", "STDOUT", ")", "except", "CalledProcessError", "as", "e", ":", "search_pattern", "=", "'(^HCP\\w\\w\\w045E|^HCP\\w\\w\\w361E)'", ".", "encode", "(", ")", "match", "=", "re", ".", "search", "(", "search_pattern", ",", "e", ".", "output", ")", "if", "match", ":", "# Not logged on", "results", "[", "'rs'", "]", "=", "1", "else", ":", "# Abnormal failure", "rh", ".", "printLn", "(", "\"ES\"", ",", "msgs", ".", "msg", "[", "'0415'", "]", "[", "1", "]", "%", "(", "modId", ",", "strCmd", ",", "e", ".", "returncode", ",", "e", ".", "output", ")", ")", "results", "=", "msgs", ".", "msg", "[", "'0415'", "]", "[", "0", "]", "results", "[", "'rs'", "]", "=", "e", ".", "returncode", "except", "Exception", "as", "e", ":", "# All other exceptions.", "results", "=", "msgs", ".", "msg", "[", "'0421'", "]", "[", "0", "]", "rh", ".", "printLn", "(", "\"ES\"", ",", "msgs", ".", "msg", "[", "'0421'", "]", "[", "1", "]", "%", "(", "modId", ",", "strCmd", ",", "type", "(", "e", ")", ".", "__name__", ",", "str", "(", "e", ")", ")", ")", "rh", ".", "printSysLog", "(", "\"Exit vmUtils.isLoggedOn, overallRC: \"", "+", "str", "(", "results", "[", "'overallRC'", "]", ")", "+", "\" rc: \"", "+", "str", "(", "results", "[", "'rc'", "]", ")", "+", "\" rs: \"", "+", "str", "(", "results", "[", "'rs'", "]", ")", ")", "return", "results" ]
Determine whether a virtual machine is logged on. Input: Request Handle: userid being queried Output: Dictionary containing the following: overallRC - overall return code, 0: success, non-zero: failure rc - 0: if we got status. Otherwise, it is the error return code from the commands issued. rs - Based on rc value. For rc==0, rs is: 0: if we determined it is logged on. 1: if we determined it is logged off.
[ "Determine", "whether", "a", "virtual", "machine", "is", "logged", "on", "." ]
python
train
32.678571
cltl/KafNafParserPy
KafNafParserPy/KafNafParserMod.py
https://github.com/cltl/KafNafParserPy/blob/9bc32e803c176404b255ba317479b8780ed5f569/KafNafParserPy/KafNafParserMod.py#L621-L629
def get_properties(self): """ Returns all the properties of the features layer (iterator) @rtype: L{Cproperty} @return: list of properties """ if self.features_layer is not None: for property in self.features_layer.get_properties(): yield property
[ "def", "get_properties", "(", "self", ")", ":", "if", "self", ".", "features_layer", "is", "not", "None", ":", "for", "property", "in", "self", ".", "features_layer", ".", "get_properties", "(", ")", ":", "yield", "propertyfound_entities", ".", "get", "(", "mention", ".", "string", ")" ]
Returns all the properties of the features layer (iterator) @rtype: L{Cproperty} @return: list of properties
[ "Returns", "all", "the", "properties", "of", "the", "features", "layer", "(", "iterator", ")" ]
python
train
38.777778
tobiasfeistmantl/python-actioncable-zwei
actioncable/connection.py
https://github.com/tobiasfeistmantl/python-actioncable-zwei/blob/04876b4425a295485af8976acceb0b46d2ef1c8d/actioncable/connection.py#L149-L157
def _on_close(self, socket): """ Called when the connection was closed. """ self.logger.debug('Connection closed.') for subscription in self.subscriptions.values(): if subscription.state == 'subscribed': subscription.state = 'connection_pending'
[ "def", "_on_close", "(", "self", ",", "socket", ")", ":", "self", ".", "logger", ".", "debug", "(", "'Connection closed.'", ")", "for", "subscription", "in", "self", ".", "subscriptions", ".", "values", "(", ")", ":", "if", "subscription", ".", "state", "==", "'subscribed'", ":", "subscription", ".", "state", "=", "'connection_pending'" ]
Called when the connection was closed.
[ "Called", "when", "the", "connection", "was", "closed", "." ]
python
train
34
Capitains/MyCapytain
MyCapytain/common/utils/xml.py
https://github.com/Capitains/MyCapytain/blob/b11bbf6b6ae141fc02be70471e3fbf6907be6593/MyCapytain/common/utils/xml.py#L82-L107
def xmlparser(xml, objectify=True): """ Parse xml :param xml: XML element :type xml: Union[text_type, lxml.etree._Element] :rtype: lxml.etree._Element :returns: An element object :raises: TypeError if element is not in accepted type """ doclose = None if isinstance(xml, (etree._Element, ObjectifiedElement, etree._ElementTree)): return xml elif isinstance(xml, text_type): xml = StringIO(xml) doclose = True elif not isinstance(xml, IOBase): raise TypeError("Unsupported type of resource {}".format(type(xml))) if objectify is False: parsed = etree.parse(xml).getroot() else: parsed = parse(xml).getroot() if doclose: xml.close() return parsed
[ "def", "xmlparser", "(", "xml", ",", "objectify", "=", "True", ")", ":", "doclose", "=", "None", "if", "isinstance", "(", "xml", ",", "(", "etree", ".", "_Element", ",", "ObjectifiedElement", ",", "etree", ".", "_ElementTree", ")", ")", ":", "return", "xml", "elif", "isinstance", "(", "xml", ",", "text_type", ")", ":", "xml", "=", "StringIO", "(", "xml", ")", "doclose", "=", "True", "elif", "not", "isinstance", "(", "xml", ",", "IOBase", ")", ":", "raise", "TypeError", "(", "\"Unsupported type of resource {}\"", ".", "format", "(", "type", "(", "xml", ")", ")", ")", "if", "objectify", "is", "False", ":", "parsed", "=", "etree", ".", "parse", "(", "xml", ")", ".", "getroot", "(", ")", "else", ":", "parsed", "=", "parse", "(", "xml", ")", ".", "getroot", "(", ")", "if", "doclose", ":", "xml", ".", "close", "(", ")", "return", "parsed" ]
Parse xml :param xml: XML element :type xml: Union[text_type, lxml.etree._Element] :rtype: lxml.etree._Element :returns: An element object :raises: TypeError if element is not in accepted type
[ "Parse", "xml" ]
python
train
28.384615
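A small usage sketch for the record above, assuming the import path follows the file location in the repository:

from MyCapytain.common.utils.xml import xmlparser

# A string is wrapped in StringIO and parsed; an already-parsed
# element is returned unchanged.
root = xmlparser("<TEI><text>Hello</text></TEI>")
print(root.tag)
assert xmlparser(root) is root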
square/pylink
pylink/jlink.py
https://github.com/square/pylink/blob/81dda0a191d923a8b2627c52cb778aba24d279d7/pylink/jlink.py#L3545-L3589
def software_breakpoint_set(self, addr, thumb=False, arm=False, flash=False, ram=False): """Sets a software breakpoint at the specified address. If ``thumb`` is ``True``, the breakpoint is set in THUMB-mode, while if ``arm`` is ``True``, the breakpoint is set in ARM-mode, otherwise a normal breakpoint is set. If ``flash`` is ``True``, the breakpoint is set in flash, otherwise if ``ram`` is ``True``, the breakpoint is set in RAM. If both are ``True`` or both are ``False``, then the best option is chosen for setting the breakpoint in software. Args: self (JLink): the ``JLink`` instance addr (int): the address where the breakpoint will be set thumb (bool): boolean indicating to set the breakpoint in THUMB mode arm (bool): boolean indicating to set the breakpoint in ARM mode flash (bool): boolean indicating to set the breakpoint in flash ram (bool): boolean indicating to set the breakpoint in RAM Returns: An integer specifying the breakpoint handle. This handle should sbe retained for future breakpoint operations. Raises: TypeError: if the given address is not an integer. JLinkException: if the breakpoint could not be set. """ if flash and not ram: flags = enums.JLinkBreakpoint.SW_FLASH elif not flash and ram: flags = enums.JLinkBreakpoint.SW_RAM else: flags = enums.JLinkBreakpoint.SW if thumb: flags = flags | enums.JLinkBreakpoint.THUMB elif arm: flags = flags | enums.JLinkBreakpoint.ARM handle = self._dll.JLINKARM_SetBPEx(int(addr), flags) if handle <= 0: raise errors.JLinkException('Software breakpoint could not be set.') return handle
[ "def", "software_breakpoint_set", "(", "self", ",", "addr", ",", "thumb", "=", "False", ",", "arm", "=", "False", ",", "flash", "=", "False", ",", "ram", "=", "False", ")", ":", "if", "flash", "and", "not", "ram", ":", "flags", "=", "enums", ".", "JLinkBreakpoint", ".", "SW_FLASH", "elif", "not", "flash", "and", "ram", ":", "flags", "=", "enums", ".", "JLinkBreakpoint", ".", "SW_RAM", "else", ":", "flags", "=", "enums", ".", "JLinkBreakpoint", ".", "SW", "if", "thumb", ":", "flags", "=", "flags", "|", "enums", ".", "JLinkBreakpoint", ".", "THUMB", "elif", "arm", ":", "flags", "=", "flags", "|", "enums", ".", "JLinkBreakpoint", ".", "ARM", "handle", "=", "self", ".", "_dll", ".", "JLINKARM_SetBPEx", "(", "int", "(", "addr", ")", ",", "flags", ")", "if", "handle", "<=", "0", ":", "raise", "errors", ".", "JLinkException", "(", "'Software breakpoint could not be set.'", ")", "return", "handle" ]
Sets a software breakpoint at the specified address. If ``thumb`` is ``True``, the breakpoint is set in THUMB-mode, while if ``arm`` is ``True``, the breakpoint is set in ARM-mode, otherwise a normal breakpoint is set. If ``flash`` is ``True``, the breakpoint is set in flash, otherwise if ``ram`` is ``True``, the breakpoint is set in RAM. If both are ``True`` or both are ``False``, then the best option is chosen for setting the breakpoint in software. Args: self (JLink): the ``JLink`` instance addr (int): the address where the breakpoint will be set thumb (bool): boolean indicating to set the breakpoint in THUMB mode arm (bool): boolean indicating to set the breakpoint in ARM mode flash (bool): boolean indicating to set the breakpoint in flash ram (bool): boolean indicating to set the breakpoint in RAM Returns: An integer specifying the breakpoint handle. This handle should sbe retained for future breakpoint operations. Raises: TypeError: if the given address is not an integer. JLinkException: if the breakpoint could not be set.
[ "Sets", "a", "software", "breakpoint", "at", "the", "specified", "address", "." ]
python
train
41.288889
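A hedged usage sketch for the record above; the device name is a placeholder, and the surrounding open/connect calls follow the usual pylink workflow rather than anything shown in this record:

import pylink

jlink = pylink.JLink()
jlink.open()                      # attach to the first J-Link emulator found
jlink.connect('STM32F407IE')      # hypothetical target device name
# THUMB-mode software breakpoint in flash; keep the handle so the
# breakpoint can be removed later.
handle = jlink.software_breakpoint_set(0x08000400, thumb=True, flash=True)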
jopohl/urh
src/urh/models/TableModel.py
https://github.com/jopohl/urh/blob/2eb33b125c8407964cd1092843cde5010eb88aae/src/urh/models/TableModel.py#L337-L373
def find_differences(self, refindex: int): """ Search all differences between protocol messages regarding a reference message :param refindex: index of reference message :rtype: dict[int, set[int]] """ differences = defaultdict(set) if refindex >= len(self.protocol.messages): return differences if self.proto_view == 0: proto = self.protocol.decoded_proto_bits_str elif self.proto_view == 1: proto = self.protocol.decoded_hex_str elif self.proto_view == 2: proto = self.protocol.decoded_ascii_str else: return differences ref_message = proto[refindex] ref_offset = self.get_alignment_offset_at(refindex) for i, message in enumerate(proto): if i == refindex: continue msg_offset = self.get_alignment_offset_at(i) short, long = sorted([len(ref_message) + ref_offset, len(message) + msg_offset]) differences[i] = { j for j in range(max(msg_offset, ref_offset), long) if j >= short or message[j - msg_offset] != ref_message[j - ref_offset] } return differences
[ "def", "find_differences", "(", "self", ",", "refindex", ":", "int", ")", ":", "differences", "=", "defaultdict", "(", "set", ")", "if", "refindex", ">=", "len", "(", "self", ".", "protocol", ".", "messages", ")", ":", "return", "differences", "if", "self", ".", "proto_view", "==", "0", ":", "proto", "=", "self", ".", "protocol", ".", "decoded_proto_bits_str", "elif", "self", ".", "proto_view", "==", "1", ":", "proto", "=", "self", ".", "protocol", ".", "decoded_hex_str", "elif", "self", ".", "proto_view", "==", "2", ":", "proto", "=", "self", ".", "protocol", ".", "decoded_ascii_str", "else", ":", "return", "differences", "ref_message", "=", "proto", "[", "refindex", "]", "ref_offset", "=", "self", ".", "get_alignment_offset_at", "(", "refindex", ")", "for", "i", ",", "message", "in", "enumerate", "(", "proto", ")", ":", "if", "i", "==", "refindex", ":", "continue", "msg_offset", "=", "self", ".", "get_alignment_offset_at", "(", "i", ")", "short", ",", "long", "=", "sorted", "(", "[", "len", "(", "ref_message", ")", "+", "ref_offset", ",", "len", "(", "message", ")", "+", "msg_offset", "]", ")", "differences", "[", "i", "]", "=", "{", "j", "for", "j", "in", "range", "(", "max", "(", "msg_offset", ",", "ref_offset", ")", ",", "long", ")", "if", "j", ">=", "short", "or", "message", "[", "j", "-", "msg_offset", "]", "!=", "ref_message", "[", "j", "-", "ref_offset", "]", "}", "return", "differences" ]
Search all differences between protocol messages regarding a reference message :param refindex: index of reference message :rtype: dict[int, set[int]]
[ "Search", "all", "differences", "between", "protocol", "messages", "regarding", "a", "reference", "message" ]
python
train
32.891892
crate/crate-python
src/crate/client/cursor.py
https://github.com/crate/crate-python/blob/68e39c95f5bbe88b74bbfa26de4347fc644636a8/src/crate/client/cursor.py#L43-L56
def execute(self, sql, parameters=None, bulk_parameters=None): """ Prepare and execute a database operation (query or command). """ if self.connection._closed: raise ProgrammingError("Connection closed") if self._closed: raise ProgrammingError("Cursor closed") self._result = self.connection.client.sql(sql, parameters, bulk_parameters) if "rows" in self._result: self.rows = iter(self._result["rows"])
[ "def", "execute", "(", "self", ",", "sql", ",", "parameters", "=", "None", ",", "bulk_parameters", "=", "None", ")", ":", "if", "self", ".", "connection", ".", "_closed", ":", "raise", "ProgrammingError", "(", "\"Connection closed\"", ")", "if", "self", ".", "_closed", ":", "raise", "ProgrammingError", "(", "\"Cursor closed\"", ")", "self", ".", "_result", "=", "self", ".", "connection", ".", "client", ".", "sql", "(", "sql", ",", "parameters", ",", "bulk_parameters", ")", "if", "\"rows\"", "in", "self", ".", "_result", ":", "self", ".", "rows", "=", "iter", "(", "self", ".", "_result", "[", "\"rows\"", "]", ")" ]
Prepare and execute a database operation (query or command).
[ "Prepare", "and", "execute", "a", "database", "operation", "(", "query", "or", "command", ")", "." ]
python
train
38.071429
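An illustrative DB-API-style call for the record above; the server URL and the second table name are placeholders:

from crate import client

# Standard DB-API flow: connect, get a cursor, execute, fetch.
connection = client.connect('http://localhost:4200')
cursor = connection.cursor()
cursor.execute('SELECT name FROM sys.cluster')
print(cursor.fetchone())
cursor.execute('SELECT name FROM locations WHERE kind = ?', ('Galaxy',))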
inveniosoftware/invenio-records-files
invenio_records_files/api.py
https://github.com/inveniosoftware/invenio-records-files/blob/c410eba986ea43be7e97082d5dcbbdc19ccec39c/invenio_records_files/api.py#L180-L192
def sort_by(self, *ids): """Update files order. :param ids: List of ids specifying the final status of the list. """ # Support sorting by file_ids or keys. files = {str(f_.file_id): f_.key for f_ in self} # self.record['_files'] = [{'key': files.get(id_, id_)} for id_ in ids] self.filesmap = OrderedDict([ (files.get(id_, id_), self[files.get(id_, id_)].dumps()) for id_ in ids ]) self.flush()
[ "def", "sort_by", "(", "self", ",", "*", "ids", ")", ":", "# Support sorting by file_ids or keys.", "files", "=", "{", "str", "(", "f_", ".", "file_id", ")", ":", "f_", ".", "key", "for", "f_", "in", "self", "}", "# self.record['_files'] = [{'key': files.get(id_, id_)} for id_ in ids]", "self", ".", "filesmap", "=", "OrderedDict", "(", "[", "(", "files", ".", "get", "(", "id_", ",", "id_", ")", ",", "self", "[", "files", ".", "get", "(", "id_", ",", "id_", ")", "]", ".", "dumps", "(", ")", ")", "for", "id_", "in", "ids", "]", ")", "self", ".", "flush", "(", ")" ]
Update files order. :param ids: List of ids specifying the final status of the list.
[ "Update", "files", "order", "." ]
python
train
36.846154
sixty-north/cosmic-ray
plugins/execution-engines/celery4/cosmic_ray_celery4_engine/worker.py
https://github.com/sixty-north/cosmic-ray/blob/c654e074afbb7b7fcbc23359083c1287c0d3e991/plugins/execution-engines/celery4/cosmic_ray_celery4_engine/worker.py#L25-L48
def worker_task(work_item, config): """The celery task which performs a single mutation and runs a test suite. This runs `cosmic-ray worker` in a subprocess and returns the results, passing `config` to it via stdin. Args: work_item: A dict describing a WorkItem. config: The configuration to use for the test execution. Returns: An updated WorkItem """ global _workspace _ensure_workspace(config) result = worker( work_item.module_path, config.python_version, work_item.operator_name, work_item.occurrence, config.test_command, config.timeout) return work_item.job_id, result
[ "def", "worker_task", "(", "work_item", ",", "config", ")", ":", "global", "_workspace", "_ensure_workspace", "(", "config", ")", "result", "=", "worker", "(", "work_item", ".", "module_path", ",", "config", ".", "python_version", ",", "work_item", ".", "operator_name", ",", "work_item", ".", "occurrence", ",", "config", ".", "test_command", ",", "config", ".", "timeout", ")", "return", "work_item", ".", "job_id", ",", "result" ]
The celery task which performs a single mutation and runs a test suite. This runs `cosmic-ray worker` in a subprocess and returns the results, passing `config` to it via stdin. Args: work_item: A dict describing a WorkItem. config: The configuration to use for the test execution. Returns: An updated WorkItem
[ "The", "celery", "task", "which", "performs", "a", "single", "mutation", "and", "runs", "a", "test", "suite", "." ]
python
train
27.541667
inspirehep/harvesting-kit
harvestingkit/inspire_cds_package/from_cds.py
https://github.com/inspirehep/harvesting-kit/blob/33a7f8aa9dade1d863110c6d8b27dfd955cb471f/harvestingkit/inspire_cds_package/from_cds.py#L111-L157
def determine_collections(self): """Try to determine which collections this record should belong to.""" for value in record_get_field_values(self.record, '980', code='a'): if 'NOTE' in value.upper(): self.collections.add('NOTE') if 'THESIS' in value.upper(): self.collections.add('THESIS') if 'CONFERENCEPAPER' in value.upper(): self.collections.add('ConferencePaper') if "HIDDEN" in value.upper(): self.hidden = True if self.is_published(): self.collections.add("PUBLISHED") self.collections.add("CITEABLE") if 'NOTE' not in self.collections: from itertools import product # TODO: Move this to a KB kb = ['ATLAS-CONF-', 'CMS-PAS-', 'ATL-', 'CMS-DP-', 'ALICE-INT-', 'LHCb-PUB-'] values = record_get_field_values(self.record, "088", code='a') for val, rep in product(values, kb): if val.startswith(rep): self.collections.add('NOTE') break # 980 Arxiv tag if record_get_field_values(self.record, '035', filter_subfield_code="a", filter_subfield_value="arXiv"): self.collections.add("arXiv") # 980 HEP && CORE self.collections.add('HEP') self.collections.add('CORE') # 980 Conference Note if 'ConferencePaper' not in self.collections: for value in record_get_field_values(self.record, tag='962', code='n'): if value[-2:].isdigit(): self.collections.add('ConferencePaper') break # Clear out any existing ones. record_delete_fields(self.record, "980")
[ "def", "determine_collections", "(", "self", ")", ":", "for", "value", "in", "record_get_field_values", "(", "self", ".", "record", ",", "'980'", ",", "code", "=", "'a'", ")", ":", "if", "'NOTE'", "in", "value", ".", "upper", "(", ")", ":", "self", ".", "collections", ".", "add", "(", "'NOTE'", ")", "if", "'THESIS'", "in", "value", ".", "upper", "(", ")", ":", "self", ".", "collections", ".", "add", "(", "'THESIS'", ")", "if", "'CONFERENCEPAPER'", "in", "value", ".", "upper", "(", ")", ":", "self", ".", "collections", ".", "add", "(", "'ConferencePaper'", ")", "if", "\"HIDDEN\"", "in", "value", ".", "upper", "(", ")", ":", "self", ".", "hidden", "=", "True", "if", "self", ".", "is_published", "(", ")", ":", "self", ".", "collections", ".", "add", "(", "\"PUBLISHED\"", ")", "self", ".", "collections", ".", "add", "(", "\"CITEABLE\"", ")", "if", "'NOTE'", "not", "in", "self", ".", "collections", ":", "from", "itertools", "import", "product", "# TODO: Move this to a KB", "kb", "=", "[", "'ATLAS-CONF-'", ",", "'CMS-PAS-'", ",", "'ATL-'", ",", "'CMS-DP-'", ",", "'ALICE-INT-'", ",", "'LHCb-PUB-'", "]", "values", "=", "record_get_field_values", "(", "self", ".", "record", ",", "\"088\"", ",", "code", "=", "'a'", ")", "for", "val", ",", "rep", "in", "product", "(", "values", ",", "kb", ")", ":", "if", "val", ".", "startswith", "(", "rep", ")", ":", "self", ".", "collections", ".", "add", "(", "'NOTE'", ")", "break", "# 980 Arxiv tag", "if", "record_get_field_values", "(", "self", ".", "record", ",", "'035'", ",", "filter_subfield_code", "=", "\"a\"", ",", "filter_subfield_value", "=", "\"arXiv\"", ")", ":", "self", ".", "collections", ".", "add", "(", "\"arXiv\"", ")", "# 980 HEP && CORE", "self", ".", "collections", ".", "add", "(", "'HEP'", ")", "self", ".", "collections", ".", "add", "(", "'CORE'", ")", "# 980 Conference Note", "if", "'ConferencePaper'", "not", "in", "self", ".", "collections", ":", "for", "value", "in", "record_get_field_values", "(", "self", ".", "record", ",", "tag", "=", "'962'", ",", "code", "=", "'n'", ")", ":", "if", "value", "[", "-", "2", ":", "]", ".", "isdigit", "(", ")", ":", "self", ".", "collections", ".", "add", "(", "'ConferencePaper'", ")", "break", "# Clear out any existing ones.", "record_delete_fields", "(", "self", ".", "record", ",", "\"980\"", ")" ]
Try to determine which collections this record should belong to.
[ "Try", "to", "determine", "which", "collections", "this", "record", "should", "belong", "to", "." ]
python
valid
41
bloomberg/bqplot
bqplot/pyplot.py
https://github.com/bloomberg/bqplot/blob/8eb8b163abe9ee6306f6918067e2f36c1caef2ef/bqplot/pyplot.py#L743-L778
def imshow(image, format, **kwargs): """Draw an image in the current context figure. Parameters ---------- image: image data Image data, depending on the passed format, can be one of: - an instance of an ipywidgets Image - a file name - a raw byte string format: {'widget', 'filename', ...} Type of the input argument. If not 'widget' or 'filename', must be a format supported by the ipywidgets Image. options: dict (default: {}) Options for the scales to be created. If a scale labeled 'x' is required for that mark, options['x'] contains optional keyword arguments for the constructor of the corresponding scale type. axes_options: dict (default: {}) Options for the axes to be created. If an axis labeled 'x' is required for that mark, axes_options['x'] contains optional keyword arguments for the constructor of the corresponding axis type. """ if format == 'widget': ipyimage = image elif format == 'filename': with open(image, 'rb') as f: data = f.read() ipyimage = ipyImage(value=data) else: ipyimage = ipyImage(value=image, format=format) kwargs['image'] = ipyimage kwargs.setdefault('x', [0., 1.]) kwargs.setdefault('y', [0., 1.]) return _draw_mark(Image, **kwargs)
[ "def", "imshow", "(", "image", ",", "format", ",", "*", "*", "kwargs", ")", ":", "if", "format", "==", "'widget'", ":", "ipyimage", "=", "image", "elif", "format", "==", "'filename'", ":", "with", "open", "(", "image", ",", "'rb'", ")", "as", "f", ":", "data", "=", "f", ".", "read", "(", ")", "ipyimage", "=", "ipyImage", "(", "value", "=", "data", ")", "else", ":", "ipyimage", "=", "ipyImage", "(", "value", "=", "image", ",", "format", "=", "format", ")", "kwargs", "[", "'image'", "]", "=", "ipyimage", "kwargs", ".", "setdefault", "(", "'x'", ",", "[", "0.", ",", "1.", "]", ")", "kwargs", ".", "setdefault", "(", "'y'", ",", "[", "0.", ",", "1.", "]", ")", "return", "_draw_mark", "(", "Image", ",", "*", "*", "kwargs", ")" ]
Draw an image in the current context figure. Parameters ---------- image: image data Image data, depending on the passed format, can be one of: - an instance of an ipywidgets Image - a file name - a raw byte string format: {'widget', 'filename', ...} Type of the input argument. If not 'widget' or 'filename', must be a format supported by the ipywidgets Image. options: dict (default: {}) Options for the scales to be created. If a scale labeled 'x' is required for that mark, options['x'] contains optional keyword arguments for the constructor of the corresponding scale type. axes_options: dict (default: {}) Options for the axes to be created. If an axis labeled 'x' is required for that mark, axes_options['x'] contains optional keyword arguments for the constructor of the corresponding axis type.
[ "Draw", "an", "image", "in", "the", "current", "context", "figure", ".", "Parameters", "----------", "image", ":", "image", "data", "Image", "data", "depending", "on", "the", "passed", "format", "can", "be", "one", "of", ":", "-", "an", "instance", "of", "an", "ipywidgets", "Image", "-", "a", "file", "name", "-", "a", "raw", "byte", "string", "format", ":", "{", "widget", "filename", "...", "}", "Type", "of", "the", "input", "argument", ".", "If", "not", "widget", "or", "filename", "must", "be", "a", "format", "supported", "by", "the", "ipywidgets", "Image", ".", "options", ":", "dict", "(", "default", ":", "{}", ")", "Options", "for", "the", "scales", "to", "be", "created", ".", "If", "a", "scale", "labeled", "x", "is", "required", "for", "that", "mark", "options", "[", "x", "]", "contains", "optional", "keyword", "arguments", "for", "the", "constructor", "of", "the", "corresponding", "scale", "type", ".", "axes_options", ":", "dict", "(", "default", ":", "{}", ")", "Options", "for", "the", "axes", "to", "be", "created", ".", "If", "an", "axis", "labeled", "x", "is", "required", "for", "that", "mark", "axes_options", "[", "x", "]", "contains", "optional", "keyword", "arguments", "for", "the", "constructor", "of", "the", "corresponding", "axis", "type", "." ]
python
train
37.916667
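A minimal usage sketch for the imshow helper above; the import path assumes it is bqplot's pyplot module (an assumption, since the record's repo field is not visible here), and the file name is a placeholder:

from bqplot import pyplot as plt

fig = plt.figure(title='Image mark from a PNG file')
plt.imshow('logo.png', format='filename')   # placeholder path; 'filename' makes imshow read the bytes itself
plt.show()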
langloisjp/pysvcmetrics
statsdclient.py
https://github.com/langloisjp/pysvcmetrics/blob/a126fc029ab645d9db46c0f5712c416cdf80e370/statsdclient.py#L40-L48
def gauge(self, stats, value): """ Log gauges >>> client = StatsdClient() >>> client.gauge('example.gauge', 47) >>> client.gauge(('example.gauge41', 'example.gauge43'), 47) """ self.update_stats(stats, value, self.SC_GAUGE)
[ "def", "gauge", "(", "self", ",", "stats", ",", "value", ")", ":", "self", ".", "update_stats", "(", "stats", ",", "value", ",", "self", ".", "SC_GAUGE", ")" ]
Log gauges >>> client = StatsdClient() >>> client.gauge('example.gauge', 47) >>> client.gauge(('example.gauge41', 'example.gauge43'), 47)
[ "Log", "gauges" ]
python
train
30.222222
MonashBI/arcana
arcana/repository/xnat.py
https://github.com/MonashBI/arcana/blob/d6271a29d13733d00422d11417af8d200be62acc/arcana/repository/xnat.py#L582-L592
def convert_subject_ids(self, subject_ids): """ Convert subject ids to strings if they are integers """ # TODO: need to make this generalisable via a # splitting+mapping function passed to the repository if subject_ids is not None: subject_ids = set( ('{:03d}'.format(s) if isinstance(s, int) else s) for s in subject_ids) return subject_ids
[ "def", "convert_subject_ids", "(", "self", ",", "subject_ids", ")", ":", "# TODO: need to make this generalisable via a", "# splitting+mapping function passed to the repository", "if", "subject_ids", "is", "not", "None", ":", "subject_ids", "=", "set", "(", "(", "'{:03d}'", ".", "format", "(", "s", ")", "if", "isinstance", "(", "s", ",", "int", ")", "else", "s", ")", "for", "s", "in", "subject_ids", ")", "return", "subject_ids" ]
Convert subject ids to strings if they are integers
[ "Convert", "subject", "ids", "to", "strings", "if", "they", "are", "integers" ]
python
train
39.818182
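A self-contained sketch of the same id-normalisation rule as a plain function, so it can be tried without constructing the XNAT repository class:

def normalise_subject_ids(subject_ids):
    # Integers become zero-padded three-digit strings (7 -> '007'); strings pass through unchanged.
    if subject_ids is None:
        return None
    return set('{:03d}'.format(s) if isinstance(s, int) else s for s in subject_ids)

print(normalise_subject_ids([1, 42, 'SUBJ_103']))   # {'001', '042', 'SUBJ_103'} (set order may vary)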
Cito/DBUtils
DBUtils/SteadyPg.py
https://github.com/Cito/DBUtils/blob/90e8825e038f08c82044b8e50831480175fa026a/DBUtils/SteadyPg.py#L188-L207
def reopen(self): """Reopen the tough connection. It will not complain if the connection cannot be reopened. """ try: self._con.reopen() except Exception: if self._transaction: self._transaction = False try: self._con.query('rollback') except Exception: pass else: self._transaction = False self._closed = False self._setsession() self._usage = 0
[ "def", "reopen", "(", "self", ")", ":", "try", ":", "self", ".", "_con", ".", "reopen", "(", ")", "except", "Exception", ":", "if", "self", ".", "_transcation", ":", "self", ".", "_transaction", "=", "False", "try", ":", "self", ".", "_con", ".", "query", "(", "'rollback'", ")", "except", "Exception", ":", "pass", "else", ":", "self", ".", "_transaction", "=", "False", "self", ".", "_closed", "=", "False", "self", ".", "_setsession", "(", ")", "self", ".", "_usage", "=", "0" ]
Reopen the tough connection. It will not complain if the connection cannot be reopened.
[ "Reopen", "the", "tough", "connection", "." ]
python
train
26.85
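A hedged usage sketch for the tough connection that owns reopen(); SteadyPgConnection and its keyword arguments follow the DBUtils API, PyGreSQL must be installed, and the database credentials are placeholders:

from DBUtils.SteadyPg import SteadyPgConnection

con = SteadyPgConnection(maxusage=1000, dbname='example', user='example')   # placeholder connection parameters
con.query('SELECT 1')
con.reopen()        # silently re-establishes the underlying pg connection and resets the session
con.close()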
ssalentin/plip
plip/modules/mp.py
https://github.com/ssalentin/plip/blob/906c8d36463689779b403f6c2c9ed06174acaf9a/plip/modules/mp.py#L30-L32
def pool_args(function, sequence, kwargs): """Return a single iterator of n elements of lists of length 3, given a sequence of len n.""" return zip(itertools.repeat(function), sequence, itertools.repeat(kwargs))
[ "def", "pool_args", "(", "function", ",", "sequence", ",", "kwargs", ")", ":", "return", "zip", "(", "itertools", ".", "repeat", "(", "function", ")", ",", "sequence", ",", "itertools", ".", "repeat", "(", "kwargs", ")", ")" ]
Return a single iterator of n elements of lists of length 3, given a sequence of len n.
[ "Return", "a", "single", "iterator", "of", "n", "elements", "of", "lists", "of", "length", "3", "given", "a", "sequence", "of", "len", "n", "." ]
python
train
72.333333
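pool_args builds (function, item, kwargs) triples for use with multiprocessing; a self-contained sketch with an illustrative unpacking wrapper (the wrapper is not part of the source):

import itertools
from multiprocessing import Pool

def pool_args(function, sequence, kwargs):
    return zip(itertools.repeat(function), sequence, itertools.repeat(kwargs))

def unpack(triple):
    # Each triple is (function, item, kwargs); call function(item, **kwargs).
    function, item, kwargs = triple
    return function(item, **kwargs)

def scale(x, factor=1):
    return x * factor

if __name__ == '__main__':
    with Pool(2) as pool:
        print(pool.map(unpack, pool_args(scale, [1, 2, 3], {'factor': 10})))   # [10, 20, 30]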
tensorflow/mesh
mesh_tensorflow/transformer/utils.py
https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/transformer/utils.py#L524-L712
def run(tpu_job_name, tpu, gcp_project, tpu_zone, model_dir, model_type="bitransformer", vocabulary=gin.REQUIRED, train_dataset_fn=None, eval_dataset_fn=None, dataset_split="train", autostack=True, checkpoint_path="", mode="train", iterations_per_loop=100, save_checkpoints_steps=1000, eval_steps=10, train_steps=1000000, batch_size=auto_batch_size, sequence_length=gin.REQUIRED, mesh_shape=gin.REQUIRED, layout_rules=gin.REQUIRED, get_components_fn=None): """Run training/eval/inference. Args: tpu_job_name: string, name of TPU worker binary tpu: string, the Cloud TPU to use for training gcp_project: string, project name for the Cloud TPU-enabled project tpu_zone: string, GCE zone where the Cloud TPU is located in model_dir: string, estimator model_dir model_type: a string - either "bitransformer", "lm" or "aligned" vocabulary: a vocabulary.Vocabulary or (inputs_vocabulary, targets_vocabulary) tuple. train_dataset_fn: A function returning a tf.data.Dataset. Must be provided for mode=train eval_dataset_fn: A function returning a tf.data.Dataset. Must be provided for model=eval dataset_split: a string autostack: boolean, internally combine variables checkpoint_path: a string - which checkpoint to load for inference mode: string, train/evaluate/infer iterations_per_loop: integer, steps per train loop save_checkpoints_steps: integer, steps per checkpoint eval_steps: integer, number of evaluation steps train_steps: Total number of training steps. batch_size: An integer or a function with the same signature as auto_batch_size(). Mini-batch size for the training. Note that this is the global batch size and not the per-shard batch size. sequence_length: an integer mesh_shape: an input to mtf.convert_to_shape() layout_rules: an input to mtf.convert_to_layout_rules() get_components_fn: an optional function that gets a list of tuples of (metric_names, component) for each component. 
Required if mode is "continuous_eval" """ if not isinstance(batch_size, int): batch_size = batch_size(sequence_length, mesh_shape, layout_rules) tf.logging.info("mode=%s" % mode,) tf.logging.info("batch_size=%s" % batch_size,) tf.logging.info("sequence_length=%s" % sequence_length,) tf.logging.info("mesh_shape=%s" % mesh_shape,) tf.logging.info("layout_rules=%s" % layout_rules,) if mode == "train" and dataset_split != "train": raise ValueError("mode==\"train\" requires dataset_split==\"train\"") mesh_shape = mtf.convert_to_shape(mesh_shape) layout_rules = mtf.convert_to_layout_rules(layout_rules) cluster = tf.contrib.cluster_resolver.TPUClusterResolver( tpu if (tpu) else "", zone=tpu_zone, project=gcp_project) tf.logging.info( "Building TPUConfig with tpu_job_name={}".format(tpu_job_name) ) my_tpu_config = tpu_config.TPUConfig( tpu_job_name=tpu_job_name, iterations_per_loop=iterations_per_loop, num_cores_per_replica=1, per_host_input_for_training=tpu_config.InputPipelineConfig.BROADCAST, ) run_config = tpu_config.RunConfig( cluster=cluster, model_dir=model_dir, save_checkpoints_steps=save_checkpoints_steps, tpu_config=my_tpu_config) transformer_model = build_model( model_type=model_type, input_vocab_size=inputs_vocabulary(vocabulary).vocab_size, output_vocab_size=targets_vocabulary(vocabulary).vocab_size, layout_rules=layout_rules, mesh_shape=mesh_shape) model_fn = tpu_estimator_model_fn( model_type=model_type, transformer_model=transformer_model, model_dir=model_dir, use_tpu=tpu, mesh_shape=mesh_shape, layout_rules=layout_rules, batch_size=batch_size, sequence_length=sequence_length, autostack=autostack, metric_names=None) estimator = tpu_estimator.TPUEstimator( model_fn=model_fn, config=run_config, train_batch_size=batch_size, eval_batch_size=batch_size, predict_batch_size=batch_size, use_tpu=tpu, export_to_tpu=False, params={}) if mode == "train": if train_dataset_fn is None: raise ValueError("Must provide train_dataset_fn through gin for train.") def input_fn(params): del params dataset = train_dataset_fn(batch_size=batch_size, sequence_length=sequence_length, vocabulary=vocabulary, dataset_split=dataset_split) return dataset estimator.train(input_fn=input_fn, max_steps=train_steps) elif mode == "continuous_eval": if get_components_fn is None: raise ValueError("Must provide get_components_fn through gin for eval.") if eval_dataset_fn is None: raise ValueError("Must provide eval_dataset_fn through gin for eval.") metrics_inputs = get_components_fn() for _ in tf.contrib.training.checkpoints_iterator(estimator.model_dir): for metric_names, component in metrics_inputs: tf.logging.info("Evaluating {}".format(component.__dict__)) tf.logging.info("on split {}".format(dataset_split)) # Prepend eval tag and split name to metric names metric_names = [ "eval/{}/{}".format(dataset_split, n) for n in metric_names ] # Regenerate the estimator model_fn = tpu_estimator_model_fn( model_type=model_type, transformer_model=transformer_model, model_dir=model_dir, use_tpu=tpu, mesh_shape=mesh_shape, layout_rules=layout_rules, batch_size=batch_size, sequence_length=sequence_length, autostack=autostack, metric_names=metric_names) estimator = tpu_estimator.TPUEstimator( model_fn=model_fn, config=run_config, train_batch_size=batch_size, eval_batch_size=batch_size, predict_batch_size=batch_size, use_tpu=tpu, export_to_tpu=False, params={}) def input_fn(params): del params dataset = eval_dataset_fn(component, # pylint: disable=cell-var-from-loop batch_size=batch_size, sequence_length=sequence_length, 
vocabulary=vocabulary, dataset_split=dataset_split, pack=False) return dataset eval_args = {"eval": (input_fn, eval_steps)} _ = evaluate(estimator, eval_args) elif mode == "infer": decode_from_file( estimator, vocabulary=vocabulary, model_type=model_type, batch_size=batch_size, sequence_length=sequence_length, checkpoint_path=checkpoint_path) else: raise ValueError( "unknown mode %s - must be train/evaluate/continuous_eval/infer" % mode)
[ "def", "run", "(", "tpu_job_name", ",", "tpu", ",", "gcp_project", ",", "tpu_zone", ",", "model_dir", ",", "model_type", "=", "\"bitransformer\"", ",", "vocabulary", "=", "gin", ".", "REQUIRED", ",", "train_dataset_fn", "=", "None", ",", "eval_dataset_fn", "=", "None", ",", "dataset_split", "=", "\"train\"", ",", "autostack", "=", "True", ",", "checkpoint_path", "=", "\"\"", ",", "mode", "=", "\"train\"", ",", "iterations_per_loop", "=", "100", ",", "save_checkpoints_steps", "=", "1000", ",", "eval_steps", "=", "10", ",", "train_steps", "=", "1000000", ",", "batch_size", "=", "auto_batch_size", ",", "sequence_length", "=", "gin", ".", "REQUIRED", ",", "mesh_shape", "=", "gin", ".", "REQUIRED", ",", "layout_rules", "=", "gin", ".", "REQUIRED", ",", "get_components_fn", "=", "None", ")", ":", "if", "not", "isinstance", "(", "batch_size", ",", "int", ")", ":", "batch_size", "=", "batch_size", "(", "sequence_length", ",", "mesh_shape", ",", "layout_rules", ")", "tf", ".", "logging", ".", "info", "(", "\"mode=%s\"", "%", "mode", ",", ")", "tf", ".", "logging", ".", "info", "(", "\"batch_size=%s\"", "%", "batch_size", ",", ")", "tf", ".", "logging", ".", "info", "(", "\"sequence_length=%s\"", "%", "sequence_length", ",", ")", "tf", ".", "logging", ".", "info", "(", "\"mesh_shape=%s\"", "%", "mesh_shape", ",", ")", "tf", ".", "logging", ".", "info", "(", "\"layout_rules=%s\"", "%", "layout_rules", ",", ")", "if", "mode", "==", "\"train\"", "and", "dataset_split", "!=", "\"train\"", ":", "raise", "ValueError", "(", "\"mode==\\\"train\\\" requires dataset_split==\\\"train\\\"\"", ")", "mesh_shape", "=", "mtf", ".", "convert_to_shape", "(", "mesh_shape", ")", "layout_rules", "=", "mtf", ".", "convert_to_layout_rules", "(", "layout_rules", ")", "cluster", "=", "tf", ".", "contrib", ".", "cluster_resolver", ".", "TPUClusterResolver", "(", "tpu", "if", "(", "tpu", ")", "else", "\"\"", ",", "zone", "=", "tpu_zone", ",", "project", "=", "gcp_project", ")", "tf", ".", "logging", ".", "info", "(", "\"Building TPUConfig with tpu_job_name={}\"", ".", "format", "(", "tpu_job_name", ")", ")", "my_tpu_config", "=", "tpu_config", ".", "TPUConfig", "(", "tpu_job_name", "=", "tpu_job_name", ",", "iterations_per_loop", "=", "iterations_per_loop", ",", "num_cores_per_replica", "=", "1", ",", "per_host_input_for_training", "=", "tpu_config", ".", "InputPipelineConfig", ".", "BROADCAST", ",", ")", "run_config", "=", "tpu_config", ".", "RunConfig", "(", "cluster", "=", "cluster", ",", "model_dir", "=", "model_dir", ",", "save_checkpoints_steps", "=", "save_checkpoints_steps", ",", "tpu_config", "=", "my_tpu_config", ")", "transformer_model", "=", "build_model", "(", "model_type", "=", "model_type", ",", "input_vocab_size", "=", "inputs_vocabulary", "(", "vocabulary", ")", ".", "vocab_size", ",", "output_vocab_size", "=", "targets_vocabulary", "(", "vocabulary", ")", ".", "vocab_size", ",", "layout_rules", "=", "layout_rules", ",", "mesh_shape", "=", "mesh_shape", ")", "model_fn", "=", "tpu_estimator_model_fn", "(", "model_type", "=", "model_type", ",", "transformer_model", "=", "transformer_model", ",", "model_dir", "=", "model_dir", ",", "use_tpu", "=", "tpu", ",", "mesh_shape", "=", "mesh_shape", ",", "layout_rules", "=", "layout_rules", ",", "batch_size", "=", "batch_size", ",", "sequence_length", "=", "sequence_length", ",", "autostack", "=", "autostack", ",", "metric_names", "=", "None", ")", "estimator", "=", "tpu_estimator", ".", "TPUEstimator", "(", "model_fn", "=", "model_fn", ",", "config", "=", 
"run_config", ",", "train_batch_size", "=", "batch_size", ",", "eval_batch_size", "=", "batch_size", ",", "predict_batch_size", "=", "batch_size", ",", "use_tpu", "=", "tpu", ",", "export_to_tpu", "=", "False", ",", "params", "=", "{", "}", ")", "if", "mode", "==", "\"train\"", ":", "if", "train_dataset_fn", "is", "None", ":", "raise", "ValueError", "(", "\"Must provide train_dataset_fn through gin for train.\"", ")", "def", "input_fn", "(", "params", ")", ":", "del", "params", "dataset", "=", "train_dataset_fn", "(", "batch_size", "=", "batch_size", ",", "sequence_length", "=", "sequence_length", ",", "vocabulary", "=", "vocabulary", ",", "dataset_split", "=", "dataset_split", ")", "return", "dataset", "estimator", ".", "train", "(", "input_fn", "=", "input_fn", ",", "max_steps", "=", "train_steps", ")", "elif", "mode", "==", "\"continuous_eval\"", ":", "if", "get_components_fn", "is", "None", ":", "raise", "ValueError", "(", "\"Must provide get_components_fn through gin for eval.\"", ")", "if", "eval_dataset_fn", "is", "None", ":", "raise", "ValueError", "(", "\"Must provide eval_dataset_fn through gin for eval.\"", ")", "metrics_inputs", "=", "get_components_fn", "(", ")", "for", "_", "in", "tf", ".", "contrib", ".", "training", ".", "checkpoints_iterator", "(", "estimator", ".", "model_dir", ")", ":", "for", "metric_names", ",", "component", "in", "metrics_inputs", ":", "tf", ".", "logging", ".", "info", "(", "\"Evaluating {}\"", ".", "format", "(", "component", ".", "__dict__", ")", ")", "tf", ".", "logging", ".", "info", "(", "\"on split {}\"", ".", "format", "(", "dataset_split", ")", ")", "# Prepend eval tag and split name to metric names", "metric_names", "=", "[", "\"eval/{}/{}\"", ".", "format", "(", "dataset_split", ",", "n", ")", "for", "n", "in", "metric_names", "]", "# Regenerate the estimator", "model_fn", "=", "tpu_estimator_model_fn", "(", "model_type", "=", "model_type", ",", "transformer_model", "=", "transformer_model", ",", "model_dir", "=", "model_dir", ",", "use_tpu", "=", "tpu", ",", "mesh_shape", "=", "mesh_shape", ",", "layout_rules", "=", "layout_rules", ",", "batch_size", "=", "batch_size", ",", "sequence_length", "=", "sequence_length", ",", "autostack", "=", "autostack", ",", "metric_names", "=", "metric_names", ")", "estimator", "=", "tpu_estimator", ".", "TPUEstimator", "(", "model_fn", "=", "model_fn", ",", "config", "=", "run_config", ",", "train_batch_size", "=", "batch_size", ",", "eval_batch_size", "=", "batch_size", ",", "predict_batch_size", "=", "batch_size", ",", "use_tpu", "=", "tpu", ",", "export_to_tpu", "=", "False", ",", "params", "=", "{", "}", ")", "def", "input_fn", "(", "params", ")", ":", "del", "params", "dataset", "=", "eval_dataset_fn", "(", "component", ",", "# pylint: disable=cell-var-from-loop", "batch_size", "=", "batch_size", ",", "sequence_length", "=", "sequence_length", ",", "vocabulary", "=", "vocabulary", ",", "dataset_split", "=", "dataset_split", ",", "pack", "=", "False", ")", "return", "dataset", "eval_args", "=", "{", "\"eval\"", ":", "(", "input_fn", ",", "eval_steps", ")", "}", "_", "=", "evaluate", "(", "estimator", ",", "eval_args", ")", "elif", "mode", "==", "\"infer\"", ":", "decode_from_file", "(", "estimator", ",", "vocabulary", "=", "vocabulary", ",", "model_type", "=", "model_type", ",", "batch_size", "=", "batch_size", ",", "sequence_length", "=", "sequence_length", ",", "checkpoint_path", "=", "checkpoint_path", ")", "else", ":", "raise", "ValueError", "(", "\"unknown mode %s - must be 
train/evaluate/continuous_eval/infer\"", "%", "mode", ")" ]
Run training/eval/inference. Args: tpu_job_name: string, name of TPU worker binary tpu: string, the Cloud TPU to use for training gcp_project: string, project name for the Cloud TPU-enabled project tpu_zone: string, GCE zone where the Cloud TPU is located in model_dir: string, estimator model_dir model_type: a string - either "bitransformer", "lm" or "aligned" vocabulary: a vocabulary.Vocabulary or (inputs_vocabulary, targets_vocabulary) tuple. train_dataset_fn: A function returning a tf.data.Dataset. Must be provided for mode=train eval_dataset_fn: A function returning a tf.data.Dataset. Must be provided for model=eval dataset_split: a string autostack: boolean, internally combine variables checkpoint_path: a string - which checkpoint to load for inference mode: string, train/evaluate/infer iterations_per_loop: integer, steps per train loop save_checkpoints_steps: integer, steps per checkpoint eval_steps: integer, number of evaluation steps train_steps: Total number of training steps. batch_size: An integer or a function with the same signature as auto_batch_size(). Mini-batch size for the training. Note that this is the global batch size and not the per-shard batch size. sequence_length: an integer mesh_shape: an input to mtf.convert_to_shape() layout_rules: an input to mtf.convert_to_layout_rules() get_components_fn: an optional function that gets a list of tuples of (metric_names, component) for each component. Required if mode is "continuous_eval"
[ "Run", "training", "/", "eval", "/", "inference", "." ]
python
train
37.15873
fabiobatalha/crossrefapi
crossref/restful.py
https://github.com/fabiobatalha/crossrefapi/blob/53f84ee0d8a8fc6ad9b2493f51c5151e66d2faf7/crossref/restful.py#L1811-L1839
def request_doi_status_by_batch_id(self, doi_batch_id, data_type='result'): """ This method retrieves the DOI request status. doi_batch_id: Used as unique ID to identify a deposit. data_type: [contents, result] contents - retrieve the XML submitted by the publisher result - retrieve an XML with the status of the submission """ endpoint = self.get_endpoint('submissionDownload') params = { 'usr': self.api_user, 'pwd': self.api_key, 'doi_batch_id': doi_batch_id, 'type': data_type } result = self.do_http_request( 'get', endpoint, data=params, timeout=10, custom_header=str(self.etiquette) ) return result
[ "def", "request_doi_status_by_batch_id", "(", "self", ",", "doi_batch_id", ",", "data_type", "=", "'result'", ")", ":", "endpoint", "=", "self", ".", "get_endpoint", "(", "'submissionDownload'", ")", "params", "=", "{", "'usr'", ":", "self", ".", "api_user", ",", "'pwd'", ":", "self", ".", "api_key", ",", "'doi_batch_id'", ":", "doi_batch_id", ",", "'type'", ":", "data_type", "}", "result", "=", "self", ".", "do_http_request", "(", "'get'", ",", "endpoint", ",", "data", "=", "params", ",", "timeout", "=", "10", ",", "custom_header", "=", "str", "(", "self", ".", "etiquette", ")", ")", "return", "result" ]
This method retrieves the DOI request status. doi_batch_id: Used as unique ID to identify a deposit. data_type: [contents, result] contents - retrieve the XML submitted by the publisher result - retrieve an XML with the status of the submission
[ "This", "method", "retrieve", "the", "DOI", "requests", "status", "." ]
python
train
27.551724
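A hedged usage sketch; Depositor and its constructor arguments follow the crossref.restful API, and the prefix, credentials and batch id are placeholders:

from crossref.restful import Depositor

depositor = Depositor(prefix='10.5555', api_user='user', api_key='secret')
response = depositor.request_doi_status_by_batch_id('my-batch-0001', data_type='result')
print(response.status_code)
print(response.text)   # XML describing the submission status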
JNRowe/upoints
upoints/gpx.py
https://github.com/JNRowe/upoints/blob/1e4b7a53ed2a06cd854523d54c36aabdccea3830/upoints/gpx.py#L207-L217
def range(self, location, distance): """Test whether locations are within a given range of ``location``. Args: location (Point): Location to test range against distance (float): Distance to test location is within Returns: list of list of Point: Groups of points in range per segment """ return (segment.range(location, distance) for segment in self)
[ "def", "range", "(", "self", ",", "location", ",", "distance", ")", ":", "return", "(", "segment", ".", "range", "(", "location", ",", "distance", ")", "for", "segment", "in", "self", ")" ]
Test whether locations are within a given range of ``location``. Args: location (Point): Location to test range against distance (float): Distance to test location is within Returns: list of list of Point: Groups of points in range per segment
[ "Test", "whether", "locations", "are", "within", "a", "given", "range", "of", "location", "." ]
python
train
38
crm416/semantic
semantic/units.py
https://github.com/crm416/semantic/blob/46deb8fefb3ea58aad2fedc8d0d62f3ee254b8fe/semantic/units.py#L125-L146
def convert(self, inp): """Converts a string representation of some quantity of units into a quantities object. Args: inp (str): A textual representation of some quantity of units, e.g., "fifty kilograms". Returns: A quantities object representing the described quantity and its units. """ inp = self._preprocess(inp) n = NumberService().longestNumber(inp) units = self.extractUnits(inp) # Convert to quantity object, attempt conversion quantity = pq.Quantity(float(n), units[0]) quantity.units = units[1] return quantity
[ "def", "convert", "(", "self", ",", "inp", ")", ":", "inp", "=", "self", ".", "_preprocess", "(", "inp", ")", "n", "=", "NumberService", "(", ")", ".", "longestNumber", "(", "inp", ")", "units", "=", "self", ".", "extractUnits", "(", "inp", ")", "# Convert to quantity object, attempt conversion", "quantity", "=", "pq", ".", "Quantity", "(", "float", "(", "n", ")", ",", "units", "[", "0", "]", ")", "quantity", ".", "units", "=", "units", "[", "1", "]", "return", "quantity" ]
Converts a string representation of some quantity of units into a quantities object. Args: inp (str): A textual representation of some quantity of units, e.g., "fifty kilograms". Returns: A quantities object representing the described quantity and its units.
[ "Converts", "a", "string", "representation", "of", "some", "quantity", "of", "units", "into", "a", "quantities", "object", "." ]
python
train
29.681818
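A hedged usage sketch; ConversionService is the class the library's own examples use for this units module, and the phrase is illustrative:

from semantic.units import ConversionService

service = ConversionService()
quantity = service.convert('fifty kilograms to pounds')
print(quantity)   # a quantities.Quantity of roughly 110.2 lb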
Asana/python-asana
asana/resources/gen/sections.py
https://github.com/Asana/python-asana/blob/6deb7a34495db23f44858e53b6bb2c9eccff7872/asana/resources/gen/sections.py#L25-L34
def find_by_project(self, project, params={}, **options): """Returns the compact records for all sections in the specified project. Parameters ---------- project : {Id} The project to get sections from. [params] : {Object} Parameters for the request """ path = "/projects/%s/sections" % (project) return self.client.get(path, params, **options)
[ "def", "find_by_project", "(", "self", ",", "project", ",", "params", "=", "{", "}", ",", "*", "*", "options", ")", ":", "path", "=", "\"/projects/%s/sections\"", "%", "(", "project", ")", "return", "self", ".", "client", ".", "get", "(", "path", ",", "params", ",", "*", "*", "options", ")" ]
Returns the compact records for all sections in the specified project. Parameters ---------- project : {Id} The project to get sections from. [params] : {Object} Parameters for the request
[ "Returns", "the", "compact", "records", "for", "all", "sections", "in", "the", "specified", "project", "." ]
python
train
40.1
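A hedged usage sketch with the python-asana client; the personal access token and project id are placeholders, and the compact record fields follow the classic Asana API:

import asana

client = asana.Client.access_token('0/0123456789abcdef')           # placeholder token
for section in client.sections.find_by_project(1199112345678901):  # placeholder project id
    print(section['name'])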
Alignak-monitoring/alignak
alignak/daemons/schedulerdaemon.py
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/daemons/schedulerdaemon.py#L503-L514
def clean_previous_run(self): """Clean variables from previous configuration :return: None """ # Execute the base class treatment... super(Alignak, self).clean_previous_run() # Clean all lists self.pollers.clear() self.reactionners.clear() self.brokers.clear()
[ "def", "clean_previous_run", "(", "self", ")", ":", "# Execute the base class treatment...", "super", "(", "Alignak", ",", "self", ")", ".", "clean_previous_run", "(", ")", "# Clean all lists", "self", ".", "pollers", ".", "clear", "(", ")", "self", ".", "reactionners", ".", "clear", "(", ")", "self", ".", "brokers", ".", "clear", "(", ")" ]
Clean variables from previous configuration :return: None
[ "Clean", "variables", "from", "previous", "configuration" ]
python
train
26.916667
mila/pyoo
pyoo.py
https://github.com/mila/pyoo/blob/1e024999f608c87ea72cd443e39c89eb0ba3cc62/pyoo.py#L1918-L1923
def open_spreadsheet(self, path, as_template=False): """ Opens an existing spreadsheet document on the local file system. """ desktop = self.cls(self.hostname, self.port) return desktop.open_spreadsheet(path, as_template=as_template)
[ "def", "open_spreadsheet", "(", "self", ",", "path", ",", "as_template", "=", "False", ")", ":", "desktop", "=", "self", ".", "cls", "(", "self", ".", "hostname", ",", "self", ".", "port", ")", "return", "desktop", ".", "open_spreadsheet", "(", "path", ",", "as_template", "=", "as_template", ")" ]
Opens an existing spreadsheet document on the local file system.
[ "Opens", "an", "exiting", "spreadsheet", "document", "on", "the", "local", "file", "system", "." ]
python
train
44.5
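A hedged usage sketch; it assumes a LibreOffice instance is listening on localhost:2002 and that the spreadsheet path exists:

import pyoo

# Start LibreOffice first, e.g.:
#   soffice --accept="socket,host=localhost,port=2002;urp;" --norestore --nologo
desktop = pyoo.Desktop('localhost', 2002)
doc = desktop.open_spreadsheet('/tmp/report.ods')   # placeholder path
print(doc.sheets[0].name)
doc.close()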
AmesCornish/buttersink
buttersink/progress.py
https://github.com/AmesCornish/buttersink/blob/5cc37e30d9f8071fcf3497dca8b8a91b910321ea/buttersink/progress.py#L82-L88
def close(self): """ Stop overwriting display, or update parent. """ if self.parent: self.parent.update(self.parent.offset + self.offset) return self.output.write("\n") self.output.flush()
[ "def", "close", "(", "self", ")", ":", "if", "self", ".", "parent", ":", "self", ".", "parent", ".", "update", "(", "self", ".", "parent", ".", "offset", "+", "self", ".", "offset", ")", "return", "self", ".", "output", ".", "write", "(", "\"\\n\"", ")", "self", ".", "output", ".", "flush", "(", ")" ]
Stop overwriting display, or update parent.
[ "Stop", "overwriting", "display", "or", "update", "parent", "." ]
python
train
34
geophysics-ubonn/crtomo_tools
src/td_plot.py
https://github.com/geophysics-ubonn/crtomo_tools/blob/27c3e21a557f8df1c12455b96c4c2e00e08a5b4a/src/td_plot.py#L719-L836
def create_singleplots(plotman, cov, mag, pha, pha_fpi, alpha, options): '''Plot the data of the tomodir in individual plots. ''' magunit = 'log_rho' if not pha == []: [real, imag] = calc_complex(mag, pha) if not pha_fpi == []: [real_fpi, imag_fpi] = calc_complex(mag, pha_fpi) if options.cmaglin: mag = np.power(10, mag) magunit = 'rho' data = np.column_stack((mag, cov, pha, real, imag, pha_fpi, real_fpi, imag_fpi)) titles = ['Magnitude', 'Coverage', 'Phase', 'Real Part', 'Imaginary Part', 'FPI Phase', 'FPI Real Part', 'FPI Imaginary Part'] unites = [ magunit, 'cov', 'phi', 'log_real', 'log_imag', 'phi', 'log_real', 'log_imag' ] vmins = [options.mag_vmin, options.cov_vmin, options.pha_vmin, options.real_vmin, options.imag_vmin, options.pha_vmin, options.real_vmin, options.imag_vmin] vmaxs = [options.mag_vmax, options.cov_vmax, options.pha_vmax, options.real_vmax, options.imag_vmax, options.pha_vmax, options.real_vmax, options.imag_vmax] cmaps = ['jet', 'GnBu', 'jet_r', 'jet_r', 'plasma_r', 'plasma', 'jet_r', 'plasma_r'] saves = ['rho', 'cov', 'phi', 'real', 'imag', 'fpi_phi', 'fpi_real', 'fpi_imag'] else: if options.cmaglin: mag = np.power(10, mag) magunit = 'rho' data = np.column_stack((mag, cov, pha, real, imag)) titles = ['Magnitude', 'Coverage', 'Phase', 'Real Part', 'Imaginary Part'] unites = [magunit, 'cov', 'phi', 'log_real', 'log_imag'] vmins = [options.mag_vmin, options.cov_vmin, options.pha_vmin, options.real_vmin, options.imag_vmin] vmaxs = [options.mag_vmax, options.cov_vmax, options.pha_vmax, options.real_vmax, options.imag_vmax] cmaps = ['jet', 'GnBu', 'jet_r', 'jet_r', 'plasma_r'] saves = ['rho', 'cov', 'phi', 'real', 'imag'] else: data = np.column_stack((mag, cov)) titles = ['Magnitude', 'Coverage'] unites = [magunit, 'cov'] vmins = [options.mag_vmin, options.cov_vmin] vmaxs = [options.mag_vmax, options.cov_vmax] cmaps = ['jet', 'GnBu'] saves = ['rho', 'cov'] try: mod_rho = np.genfromtxt('rho/rho.dat', skip_header=1, usecols=([0])) mod_pha = np.genfromtxt('rho/rho.dat', skip_header=1, usecols=([1])) data = np.column_stack((data, mod_rho, mod_pha)) titles.append('Model') titles.append('Model') unites.append('rho') unites.append('phi') vmins.append(options.mag_vmin) vmins.append(options.pha_vmin) vmaxs.append(options.mag_vmax) vmaxs.append(options.pha_vmax) cmaps.append('jet') cmaps.append('plasma') saves.append('rhomod') saves.append('phamod') except: pass for datum, title, unit, vmin, vmax, cm, save in zip( np.transpose(data), titles, unites, vmins, vmaxs, cmaps, saves): sizex, sizez = getfigsize(plotman) f, ax = plt.subplots(1, figsize=(sizex, sizez)) cid = plotman.parman.add_data(datum) # handle options cblabel = units.get_label(unit) if options.title is not None: title = options.title zlabel = 'z [' + options.unit + ']' xlabel = 'x [' + options.unit + ']' xmin, xmax, zmin, zmax, vmin, vmax = check_minmax( plotman, cid, options.xmin, options.xmax, options.zmin, options.zmax, vmin, vmax ) # plot cmap = mpl_cm.get_cmap(cm) fig, ax, cnorm, cmap, cb, scalarMap = plotman.plot_elements_to_ax( cid=cid, cid_alpha=alpha, ax=ax, xmin=xmin, xmax=xmax, zmin=zmin, zmax=zmax, cblabel=cblabel, title=title, zlabel=zlabel, xlabel=xlabel, plot_colorbar=True, cmap_name=cm, over=cmap(1.0), under=cmap(0.0), no_elecs=options.no_elecs, cbmin=vmin, cbmax=vmax, ) f.tight_layout() f.savefig(save + '.png', dpi=300)
[ "def", "create_singleplots", "(", "plotman", ",", "cov", ",", "mag", ",", "pha", ",", "pha_fpi", ",", "alpha", ",", "options", ")", ":", "magunit", "=", "'log_rho'", "if", "not", "pha", "==", "[", "]", ":", "[", "real", ",", "imag", "]", "=", "calc_complex", "(", "mag", ",", "pha", ")", "if", "not", "pha_fpi", "==", "[", "]", ":", "[", "real_fpi", ",", "imag_fpi", "]", "=", "calc_complex", "(", "mag", ",", "pha_fpi", ")", "if", "options", ".", "cmaglin", ":", "mag", "=", "np", ".", "power", "(", "10", ",", "mag", ")", "magunit", "=", "'rho'", "data", "=", "np", ".", "column_stack", "(", "(", "mag", ",", "cov", ",", "pha", ",", "real", ",", "imag", ",", "pha_fpi", ",", "real_fpi", ",", "imag_fpi", ")", ")", "titles", "=", "[", "'Magnitude'", ",", "'Coverage'", ",", "'Phase'", ",", "'Real Part'", ",", "'Imaginary Part'", ",", "'FPI Phase'", ",", "'FPI Real Part'", ",", "'FPI Imaginary Part'", "]", "unites", "=", "[", "magunit", ",", "'cov'", ",", "'phi'", ",", "'log_real'", ",", "'log_imag'", ",", "'phi'", ",", "'log_real'", ",", "'log_imag'", "]", "vmins", "=", "[", "options", ".", "mag_vmin", ",", "options", ".", "cov_vmin", ",", "options", ".", "pha_vmin", ",", "options", ".", "real_vmin", ",", "options", ".", "imag_vmin", ",", "options", ".", "pha_vmin", ",", "options", ".", "real_vmin", ",", "options", ".", "imag_vmin", "]", "vmaxs", "=", "[", "options", ".", "mag_vmax", ",", "options", ".", "cov_vmax", ",", "options", ".", "pha_vmax", ",", "options", ".", "real_vmax", ",", "options", ".", "imag_vmax", ",", "options", ".", "pha_vmax", ",", "options", ".", "real_vmax", ",", "options", ".", "imag_vmax", "]", "cmaps", "=", "[", "'jet'", ",", "'GnBu'", ",", "'jet_r'", ",", "'jet_r'", ",", "'plasma_r'", ",", "'plasma'", ",", "'jet_r'", ",", "'plasma_r'", "]", "saves", "=", "[", "'rho'", ",", "'cov'", ",", "'phi'", ",", "'real'", ",", "'imag'", ",", "'fpi_phi'", ",", "'fpi_real'", ",", "'fpi_imag'", "]", "else", ":", "if", "options", ".", "cmaglin", ":", "mag", "=", "np", ".", "power", "(", "10", ",", "mag", ")", "magunit", "=", "'rho'", "data", "=", "np", ".", "column_stack", "(", "(", "mag", ",", "cov", ",", "pha", ",", "real", ",", "imag", ")", ")", "titles", "=", "[", "'Magnitude'", ",", "'Coverage'", ",", "'Phase'", ",", "'Real Part'", ",", "'Imaginary Part'", "]", "unites", "=", "[", "magunit", ",", "'cov'", ",", "'phi'", ",", "'log_real'", ",", "'log_imag'", "]", "vmins", "=", "[", "options", ".", "mag_vmin", ",", "options", ".", "cov_vmin", ",", "options", ".", "pha_vmin", ",", "options", ".", "real_vmin", ",", "options", ".", "imag_vmin", "]", "vmaxs", "=", "[", "options", ".", "mag_vmax", ",", "options", ".", "cov_vmax", ",", "options", ".", "pha_vmax", ",", "options", ".", "real_vmax", ",", "options", ".", "imag_vmax", "]", "cmaps", "=", "[", "'jet'", ",", "'GnBu'", ",", "'jet_r'", ",", "'jet_r'", ",", "'plasma_r'", "]", "saves", "=", "[", "'rho'", ",", "'cov'", ",", "'phi'", ",", "'real'", ",", "'imag'", "]", "else", ":", "data", "=", "np", ".", "column_stack", "(", "(", "mag", ",", "cov", ")", ")", "titles", "=", "[", "'Magnitude'", ",", "'Coverage'", "]", "unites", "=", "[", "magunit", ",", "'cov'", "]", "vmins", "=", "[", "options", ".", "mag_vmin", ",", "options", ".", "cov_vmin", "]", "vmaxs", "=", "[", "options", ".", "mag_vmax", ",", "options", ".", "cov_vmax", "]", "cmaps", "=", "[", "'jet'", ",", "'GnBu'", "]", "saves", "=", "[", "'rho'", ",", "'cov'", "]", "try", ":", "mod_rho", "=", "np", ".", "genfromtxt", "(", "'rho/rho.dat'", ",", "skip_header", "=", "1", ",", 
"usecols", "=", "(", "[", "0", "]", ")", ")", "mod_pha", "=", "np", ".", "genfromtxt", "(", "'rho/rho.dat'", ",", "skip_header", "=", "1", ",", "usecols", "=", "(", "[", "1", "]", ")", ")", "data", "=", "np", ".", "column_stack", "(", "(", "data", ",", "mod_rho", ",", "mod_pha", ")", ")", "titles", ".", "append", "(", "'Model'", ")", "titles", ".", "append", "(", "'Model'", ")", "unites", ".", "append", "(", "'rho'", ")", "unites", ".", "append", "(", "'phi'", ")", "vmins", ".", "append", "(", "options", ".", "mag_vmin", ")", "vmins", ".", "append", "(", "options", ".", "pha_vmin", ")", "vmaxs", ".", "append", "(", "options", ".", "mag_vmax", ")", "vmaxs", ".", "append", "(", "options", ".", "pha_vmax", ")", "cmaps", ".", "append", "(", "'jet'", ")", "cmaps", ".", "append", "(", "'plasma'", ")", "saves", ".", "append", "(", "'rhomod'", ")", "saves", ".", "append", "(", "'phamod'", ")", "except", ":", "pass", "for", "datum", ",", "title", ",", "unit", ",", "vmin", ",", "vmax", ",", "cm", ",", "save", "in", "zip", "(", "np", ".", "transpose", "(", "data", ")", ",", "titles", ",", "unites", ",", "vmins", ",", "vmaxs", ",", "cmaps", ",", "saves", ")", ":", "sizex", ",", "sizez", "=", "getfigsize", "(", "plotman", ")", "f", ",", "ax", "=", "plt", ".", "subplots", "(", "1", ",", "figsize", "=", "(", "sizex", ",", "sizez", ")", ")", "cid", "=", "plotman", ".", "parman", ".", "add_data", "(", "datum", ")", "# handle options", "cblabel", "=", "units", ".", "get_label", "(", "unit", ")", "if", "options", ".", "title", "is", "not", "None", ":", "title", "=", "options", ".", "title", "zlabel", "=", "'z ['", "+", "options", ".", "unit", "+", "']'", "xlabel", "=", "'x ['", "+", "options", ".", "unit", "+", "']'", "xmin", ",", "xmax", ",", "zmin", ",", "zmax", ",", "vmin", ",", "vmax", "=", "check_minmax", "(", "plotman", ",", "cid", ",", "options", ".", "xmin", ",", "options", ".", "xmax", ",", "options", ".", "zmin", ",", "options", ".", "zmax", ",", "vmin", ",", "vmax", ")", "# plot", "cmap", "=", "mpl_cm", ".", "get_cmap", "(", "cm", ")", "fig", ",", "ax", ",", "cnorm", ",", "cmap", ",", "cb", ",", "scalarMap", "=", "plotman", ".", "plot_elements_to_ax", "(", "cid", "=", "cid", ",", "cid_alpha", "=", "alpha", ",", "ax", "=", "ax", ",", "xmin", "=", "xmin", ",", "xmax", "=", "xmax", ",", "zmin", "=", "zmin", ",", "zmax", "=", "zmax", ",", "cblabel", "=", "cblabel", ",", "title", "=", "title", ",", "zlabel", "=", "zlabel", ",", "xlabel", "=", "xlabel", ",", "plot_colorbar", "=", "True", ",", "cmap_name", "=", "cm", ",", "over", "=", "cmap", "(", "1.0", ")", ",", "under", "=", "cmap", "(", "0.0", ")", ",", "no_elecs", "=", "options", ".", "no_elecs", ",", "cbmin", "=", "vmin", ",", "cbmax", "=", "vmax", ",", ")", "f", ".", "tight_layout", "(", ")", "f", ".", "savefig", "(", "save", "+", "'.png'", ",", "dpi", "=", "300", ")" ]
Plot the data of the tomodir in individual plots.
[ "Plot", "the", "data", "of", "the", "tomodir", "in", "individual", "plots", "." ]
python
train
40.042373
RJT1990/pyflux
pyflux/garch/segarch.py
https://github.com/RJT1990/pyflux/blob/297f2afc2095acd97c12e827dd500e8ea5da0c0f/pyflux/garch/segarch.py#L219-L272
def _mean_prediction(self, lmda, Y, scores, h, t_params): """ Creates a h-step ahead mean prediction Parameters ---------- lmda : np.array The past predicted values Y : np.array The past data scores : np.array The past scores h : int How many steps ahead for the prediction t_params : np.array A vector of (transformed) latent variables Returns ---------- h-length vector of mean predictions """ # Create arrays to iteratre over lmda_exp = lmda.copy() scores_exp = scores.copy() Y_exp = Y.copy() m1 = (np.sqrt(t_params[-2])*sp.gamma((t_params[-2]-1.0)/2.0))/(np.sqrt(np.pi)*sp.gamma(t_params[-2]/2.0)) temp_theta = t_params[-1] + (t_params[-3] - (1.0/t_params[-3]))*np.exp(lmda_exp[-1]/2.0)*m1 # Loop over h time periods for t in range(0,h): new_value = t_params[0] if self.p != 0: for j in range(1,self.p+1): new_value += t_params[j]*lmda_exp[-j] if self.q != 0: for k in range(1,self.q+1): new_value += t_params[k+self.p]*scores_exp[-k] if self.leverage is True: m1 = (np.sqrt(t_params[-2])*sp.gamma((t_params[-2]-1.0)/2.0))/(np.sqrt(np.pi)*sp.gamma(t_params[-2]/2.0)) new_value += t_params[1+self.p+self.q]*np.sign(-(Y_exp[-1]-temp_theta))*(scores_exp[-1]+1) temp_theta = t_params[-1] + (t_params[-3] - (1.0/t_params[-3]))*np.exp(new_value/2.0)*m1 lmda_exp = np.append(lmda_exp,[new_value]) # For indexing consistency scores_exp = np.append(scores_exp,[0]) # expectation of score is zero Y_exp = np.append(Y_exp,[temp_theta]) return lmda_exp
[ "def", "_mean_prediction", "(", "self", ",", "lmda", ",", "Y", ",", "scores", ",", "h", ",", "t_params", ")", ":", "# Create arrays to iteratre over", "lmda_exp", "=", "lmda", ".", "copy", "(", ")", "scores_exp", "=", "scores", ".", "copy", "(", ")", "Y_exp", "=", "Y", ".", "copy", "(", ")", "m1", "=", "(", "np", ".", "sqrt", "(", "t_params", "[", "-", "2", "]", ")", "*", "sp", ".", "gamma", "(", "(", "t_params", "[", "-", "2", "]", "-", "1.0", ")", "/", "2.0", ")", ")", "/", "(", "np", ".", "sqrt", "(", "np", ".", "pi", ")", "*", "sp", ".", "gamma", "(", "t_params", "[", "-", "2", "]", "/", "2.0", ")", ")", "temp_theta", "=", "t_params", "[", "-", "1", "]", "+", "(", "t_params", "[", "-", "3", "]", "-", "(", "1.0", "/", "t_params", "[", "-", "3", "]", ")", ")", "*", "np", ".", "exp", "(", "lmda_exp", "[", "-", "1", "]", "/", "2.0", ")", "*", "m1", "# Loop over h time periods ", "for", "t", "in", "range", "(", "0", ",", "h", ")", ":", "new_value", "=", "t_params", "[", "0", "]", "if", "self", ".", "p", "!=", "0", ":", "for", "j", "in", "range", "(", "1", ",", "self", ".", "p", "+", "1", ")", ":", "new_value", "+=", "t_params", "[", "j", "]", "*", "lmda_exp", "[", "-", "j", "]", "if", "self", ".", "q", "!=", "0", ":", "for", "k", "in", "range", "(", "1", ",", "self", ".", "q", "+", "1", ")", ":", "new_value", "+=", "t_params", "[", "k", "+", "self", ".", "p", "]", "*", "scores_exp", "[", "-", "k", "]", "if", "self", ".", "leverage", "is", "True", ":", "m1", "=", "(", "np", ".", "sqrt", "(", "t_params", "[", "-", "2", "]", ")", "*", "sp", ".", "gamma", "(", "(", "t_params", "[", "-", "2", "]", "-", "1.0", ")", "/", "2.0", ")", ")", "/", "(", "np", ".", "sqrt", "(", "np", ".", "pi", ")", "*", "sp", ".", "gamma", "(", "t_params", "[", "-", "2", "]", "/", "2.0", ")", ")", "new_value", "+=", "t_params", "[", "1", "+", "self", ".", "p", "+", "self", ".", "q", "]", "*", "np", ".", "sign", "(", "-", "(", "Y_exp", "[", "-", "1", "]", "-", "temp_theta", ")", ")", "*", "(", "scores_exp", "[", "-", "1", "]", "+", "1", ")", "temp_theta", "=", "t_params", "[", "-", "1", "]", "+", "(", "t_params", "[", "-", "3", "]", "-", "(", "1.0", "/", "t_params", "[", "-", "3", "]", ")", ")", "*", "np", ".", "exp", "(", "new_value", "/", "2.0", ")", "*", "m1", "lmda_exp", "=", "np", ".", "append", "(", "lmda_exp", ",", "[", "new_value", "]", ")", "# For indexing consistency", "scores_exp", "=", "np", ".", "append", "(", "scores_exp", ",", "[", "0", "]", ")", "# expectation of score is zero", "Y_exp", "=", "np", ".", "append", "(", "Y_exp", ",", "[", "temp_theta", "]", ")", "return", "lmda_exp" ]
Creates a h-step ahead mean prediction Parameters ---------- lmda : np.array The past predicted values Y : np.array The past data scores : np.array The past scores h : int How many steps ahead for the prediction t_params : np.array A vector of (transformed) latent variables Returns ---------- h-length vector of mean predictions
[ "Creates", "a", "h", "-", "step", "ahead", "mean", "prediction" ]
python
train
34.203704
tantale/deprecated
docs/source/tutorial/v3/liberty.py
https://github.com/tantale/deprecated/blob/3dc742c571de7cebbbdaaf4c554f2f36fc61b3db/docs/source/tutorial/v3/liberty.py#L18-L25
def better_print(self, printer=None): """ Print the value using a *printer*. :param printer: Callable used to print the value, by default: :func:`pprint.pprint` """ printer = printer or pprint.pprint printer(self.value)
[ "def", "better_print", "(", "self", ",", "printer", "=", "None", ")", ":", "printer", "=", "printer", "or", "pprint", ".", "pprint", "printer", "(", "self", ".", "value", ")" ]
Print the value using a *printer*. :param printer: Callable used to print the value, by default: :func:`pprint.pprint`
[ "Print", "the", "value", "using", "a", "*", "printer", "*", "." ]
python
train
32.625
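The snippet above comes from a documentation example, so its owning class is not shown; a self-contained sketch of the same printer-injection pattern with a stand-in class:

import json
import pprint

class Holder:
    # Minimal stand-in for the tutorial class that owns better_print().
    def __init__(self, value):
        self.value = value

    def better_print(self, printer=None):
        printer = printer or pprint.pprint
        printer(self.value)

obj = Holder({'spam': [1, 2, 3]})
obj.better_print()                                  # pretty-printed with pprint
obj.better_print(lambda v: print(json.dumps(v)))    # any callable works as the printer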
baguette-io/baguette-messaging
farine/connectors/sql/__init__.py
https://github.com/baguette-io/baguette-messaging/blob/8d1c4707ea7eace8617fed2d97df2fcc9d0cdee1/farine/connectors/sql/__init__.py#L19-L26
def to_json(self, extras=None): """ Convert a model into a json using the playhouse shortcut. """ extras = extras or {} to_dict = model_to_dict(self) to_dict.update(extras) return json.dumps(to_dict, cls=sel.serializers.JsonEncoder)
[ "def", "to_json", "(", "self", ",", "extras", "=", "None", ")", ":", "extras", "=", "extras", "or", "{", "}", "to_dict", "=", "model_to_dict", "(", "self", ")", "to_dict", ".", "update", "(", "extras", ")", "return", "json", ".", "dumps", "(", "to_dict", ",", "cls", "=", "sel", ".", "serializers", ".", "JsonEncoder", ")" ]
Convert a model into a json using the playhouse shortcut.
[ "Convert", "a", "model", "into", "a", "json", "using", "the", "playhouse", "shortcut", "." ]
python
train
35.125
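A hedged sketch of the same pattern; it keeps peewee's playhouse model_to_dict shortcut but swaps the project's custom JsonEncoder for json.dumps with a str fallback:

import json
import peewee
from playhouse.shortcuts import model_to_dict

db = peewee.SqliteDatabase(':memory:')

class User(peewee.Model):
    name = peewee.CharField()

    class Meta:
        database = db

    def to_json(self, extras=None):
        extras = extras or {}
        data = model_to_dict(self)
        data.update(extras)
        return json.dumps(data, default=str)   # stand-in for the project's JsonEncoder

db.create_tables([User])
user = User.create(name='ada')
print(user.to_json(extras={'source': 'example'}))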
inveniosoftware/invenio-pidrelations
invenio_pidrelations/models.py
https://github.com/inveniosoftware/invenio-pidrelations/blob/a49f3725cf595b663c5b04814280b231f88bc333/invenio_pidrelations/models.py#L106-L122
def create(cls, parent, child, relation_type, index=None): """Create a PID relation for given parent and child.""" try: with db.session.begin_nested(): obj = cls(parent_id=parent.id, child_id=child.id, relation_type=relation_type, index=index) db.session.add(obj) except IntegrityError: raise Exception("PID Relation already exists.") # msg = "PIDRelation already exists: " \ # "{0} -> {1} ({2})".format( # parent_pid, child_pid, relation_type) # logger.exception(msg) # raise Exception(msg) return obj
[ "def", "create", "(", "cls", ",", "parent", ",", "child", ",", "relation_type", ",", "index", "=", "None", ")", ":", "try", ":", "with", "db", ".", "session", ".", "begin_nested", "(", ")", ":", "obj", "=", "cls", "(", "parent_id", "=", "parent", ".", "id", ",", "child_id", "=", "child", ".", "id", ",", "relation_type", "=", "relation_type", ",", "index", "=", "index", ")", "db", ".", "session", ".", "add", "(", "obj", ")", "except", "IntegrityError", ":", "raise", "Exception", "(", "\"PID Relation already exists.\"", ")", "# msg = \"PIDRelation already exists: \" \\", "# \"{0} -> {1} ({2})\".format(", "# parent_pid, child_pid, relation_type)", "# logger.exception(msg)", "# raise Exception(msg)", "return", "obj" ]
Create a PID relation for given parent and child.
[ "Create", "a", "PID", "relation", "for", "given", "parent", "and", "child", "." ]
python
train
42.705882
tomplus/kubernetes_asyncio
kubernetes_asyncio/client/api/admissionregistration_v1beta1_api.py
https://github.com/tomplus/kubernetes_asyncio/blob/f9ab15317ec921409714c7afef11aeb0f579985d/kubernetes_asyncio/client/api/admissionregistration_v1beta1_api.py#L1307-L1330
def read_mutating_webhook_configuration(self, name, **kwargs): # noqa: E501 """read_mutating_webhook_configuration # noqa: E501 read the specified MutatingWebhookConfiguration # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.read_mutating_webhook_configuration(name, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the MutatingWebhookConfiguration (required) :param str pretty: If 'true', then the output is pretty printed. :param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'. :param bool export: Should this value be exported. Export strips fields that a user can not specify. :return: V1beta1MutatingWebhookConfiguration If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.read_mutating_webhook_configuration_with_http_info(name, **kwargs) # noqa: E501 else: (data) = self.read_mutating_webhook_configuration_with_http_info(name, **kwargs) # noqa: E501 return data
[ "def", "read_mutating_webhook_configuration", "(", "self", ",", "name", ",", "*", "*", "kwargs", ")", ":", "# noqa: E501", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'async_req'", ")", ":", "return", "self", ".", "read_mutating_webhook_configuration_with_http_info", "(", "name", ",", "*", "*", "kwargs", ")", "# noqa: E501", "else", ":", "(", "data", ")", "=", "self", ".", "read_mutating_webhook_configuration_with_http_info", "(", "name", ",", "*", "*", "kwargs", ")", "# noqa: E501", "return", "data" ]
read_mutating_webhook_configuration # noqa: E501 read the specified MutatingWebhookConfiguration # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.read_mutating_webhook_configuration(name, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the MutatingWebhookConfiguration (required) :param str pretty: If 'true', then the output is pretty printed. :param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'. :param bool export: Should this value be exported. Export strips fields that a user can not specify. :return: V1beta1MutatingWebhookConfiguration If the method is called asynchronously, returns the request thread.
[ "read_mutating_webhook_configuration", "#", "noqa", ":", "E501" ]
python
train
56.375
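A hedged async usage sketch; it assumes a reachable cluster in ~/.kube/config, a kubernetes_asyncio release that still ships the v1beta1 admissionregistration API, and a placeholder webhook name:

import asyncio
from kubernetes_asyncio import client, config

async def main():
    await config.load_kube_config()
    async with client.ApiClient() as api_client:
        api = client.AdmissionregistrationV1beta1Api(api_client)
        webhook = await api.read_mutating_webhook_configuration('my-webhook')
        print(webhook.metadata.name)

asyncio.run(main())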
collectiveacuity/labPack
labpack/location/find.py
https://github.com/collectiveacuity/labPack/blob/52949ece35e72e3cc308f54d9ffa6bfbd96805b8/labpack/location/find.py#L104-L130
def get_locations(self): ''' a method to retrieve all the locations tracked by the model :return: dictionary with location id keys NOTE: results are added to self.locations property { 'location.id': { ' } } ''' import requests url = self.endpoint + '/locations' params = { 'group': self.group_name } response = requests.get(url, params=params) response_details = response.json() if 'locations' in response_details.keys(): self.locations = response_details['locations'] return self.locations
[ "def", "get_locations", "(", "self", ")", ":", "import", "requests", "url", "=", "self", ".", "endpoint", "+", "'/locations'", "params", "=", "{", "'group'", ":", "self", ".", "group_name", "}", "response", "=", "requests", ".", "get", "(", "url", ",", "params", "=", "params", ")", "response_details", "=", "response", ".", "json", "(", ")", "if", "'locations'", "in", "response_details", ".", "keys", "(", ")", ":", "self", ".", "locations", "=", "response_details", "[", "'locations'", "]", "return", "self", ".", "locations" ]
a method to retrieve all the locations tracked by the model :return: dictionary with location id keys NOTE: results are added to self.locations property { 'location.id': { ' } }
[ "a", "method", "to", "retrieve", "all", "the", "locations", "tracked", "by", "the", "model", ":", "return", ":", "dictionary", "with", "location", "id", "keys", "NOTE", ":", "results", "are", "added", "to", "self", ".", "locations", "property", "{", "location", ".", "id", ":", "{", "}", "}" ]
python
train
26.222222
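get_locations above is a thin wrapper around one HTTP call; a standalone sketch of the equivalent request with requests, using a placeholder FIND server endpoint and group name:

import requests

endpoint = 'https://cloud.internalpositioning.com'   # placeholder server URL
group_name = 'my-group'                              # placeholder group

response = requests.get(endpoint + '/locations', params={'group': group_name})
details = response.json()
print(details.get('locations', {}))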
kyper-data/python-highcharts
highcharts/highstock/highstock.py
https://github.com/kyper-data/python-highcharts/blob/a4c488ae5c2e125616efad5a722f3dfd8a9bc450/highcharts/highstock/highstock.py#L388-L396
def save_file(self, filename = 'StockChart'): """ save htmlcontent as .html file """ filename = filename + '.html' with open(filename, 'w') as f: #self.buildhtml() f.write(self.htmlcontent) f.closed
[ "def", "save_file", "(", "self", ",", "filename", "=", "'StockChart'", ")", ":", "filename", "=", "filename", "+", "'.html'", "with", "open", "(", "filename", ",", "'w'", ")", "as", "f", ":", "#self.buildhtml()", "f", ".", "write", "(", "self", ".", "htmlcontent", ")", "f", ".", "closed" ]
save htmlcontent as .html file
[ "save", "htmlcontent", "as", ".", "html", "file" ]
python
train
29.333333
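A hedged usage sketch with the python-highcharts Highstock wrapper; the series data is illustrative:

from highcharts import Highstock

chart = Highstock()
chart.add_data_set([[1, 2], [2, 4], [3, 3]], series_type='line', name='demo')
chart.buildhtml()              # populate htmlcontent before writing it out
chart.save_file('DemoChart')   # writes DemoChart.html in the working directory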
bast/flanders
cmake/autocmake/configure.py
https://github.com/bast/flanders/blob/792f9eed8511cb553e67a25b6c5ce60fd6ae97bc/cmake/autocmake/configure.py#L36-L52
def setup_build_path(build_path): """ Create build directory. If this already exists, print informative error message and quit. """ if os.path.isdir(build_path): fname = os.path.join(build_path, 'CMakeCache.txt') if os.path.exists(fname): sys.stderr.write('aborting setup\n') sys.stderr.write( 'build directory {0} which contains CMakeCache.txt already exists\n'. format(build_path)) sys.stderr.write( 'remove the build directory and then rerun setup\n') sys.exit(1) else: os.makedirs(build_path, 0o755)
[ "def", "setup_build_path", "(", "build_path", ")", ":", "if", "os", ".", "path", ".", "isdir", "(", "build_path", ")", ":", "fname", "=", "os", ".", "path", ".", "join", "(", "build_path", ",", "'CMakeCache.txt'", ")", "if", "os", ".", "path", ".", "exists", "(", "fname", ")", ":", "sys", ".", "stderr", ".", "write", "(", "'aborting setup\\n'", ")", "sys", ".", "stderr", ".", "write", "(", "'build directory {0} which contains CMakeCache.txt already exists\\n'", ".", "format", "(", "build_path", ")", ")", "sys", ".", "stderr", ".", "write", "(", "'remove the build directory and then rerun setup\\n'", ")", "sys", ".", "exit", "(", "1", ")", "else", ":", "os", ".", "makedirs", "(", "build_path", ",", "0o755", ")" ]
Create build directory. If this already exists, print informative error message and quit.
[ "Create", "build", "directory", ".", "If", "this", "already", "exists", "print", "informative", "error", "message", "and", "quit", "." ]
python
train
37.117647
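A small usage sketch, assuming setup_build_path from the record above is already in scope (it lives in a build-script module rather than an installable package):

import os
import tempfile

build_path = os.path.join(tempfile.mkdtemp(), 'build')
setup_build_path(build_path)            # creates the directory with mode 0o755
print(os.path.isdir(build_path))        # True
setup_build_path(build_path)            # no-op: directory exists but holds no CMakeCache.txt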
zooniverse/panoptes-python-client
panoptes_client/panoptes.py
https://github.com/zooniverse/panoptes-python-client/blob/138d93cb03378501a8d349428e381ad73f928680/panoptes_client/panoptes.py#L657-L672
def where(cls, **kwargs): """ Returns a generator which yields instances matching the given query arguments. For example, this would yield all :py:class:`.Project`:: Project.where() And this would yield all launch approved :py:class:`.Project`:: Project.where(launch_approved=True) """ _id = kwargs.pop('id', '') return cls.paginated_results(*cls.http_get(_id, params=kwargs))
[ "def", "where", "(", "cls", ",", "*", "*", "kwargs", ")", ":", "_id", "=", "kwargs", ".", "pop", "(", "'id'", ",", "''", ")", "return", "cls", ".", "paginated_results", "(", "*", "cls", ".", "http_get", "(", "_id", ",", "params", "=", "kwargs", ")", ")" ]
Returns a generator which yields instances matching the given query arguments. For example, this would yield all :py:class:`.Project`:: Project.where() And this would yield all launch approved :py:class:`.Project`:: Project.where(launch_approved=True)
[ "Returns", "a", "generator", "which", "yields", "instances", "matching", "the", "given", "query", "arguments", "." ]
python
train
28.375
vpelletier/python-libusb1
usb1/libusb1.py
https://github.com/vpelletier/python-libusb1/blob/740c9778e28523e4ec3543415d95f5400ae0fa24/usb1/libusb1.py#L1083-L1095
def get_iso_packet_buffer_list(transfer_p): """ Python-specific helper extracting a list of iso packet buffers. """ transfer = transfer_p.contents offset = 0 result = [] append = result.append for iso_transfer in _get_iso_packet_list(transfer): length = iso_transfer.length append(_get_iso_packet_buffer(transfer, offset, length)) offset += length return result
[ "def", "get_iso_packet_buffer_list", "(", "transfer_p", ")", ":", "transfer", "=", "transfer_p", ".", "contents", "offset", "=", "0", "result", "=", "[", "]", "append", "=", "result", ".", "append", "for", "iso_transfer", "in", "_get_iso_packet_list", "(", "transfer", ")", ":", "length", "=", "iso_transfer", ".", "length", "append", "(", "_get_iso_packet_buffer", "(", "transfer", ",", "offset", ",", "length", ")", ")", "offset", "+=", "length", "return", "result" ]
Python-specific helper extracting a list of iso packet buffers.
[ "Python", "-", "specific", "helper", "extracting", "a", "list", "of", "iso", "packet", "buffers", "." ]
python
train
31.461538
cedricbonhomme/Stegano
stegano/lsbset/generators.py
https://github.com/cedricbonhomme/Stegano/blob/502e6303791d348e479290c22108551ba3be254f/stegano/lsbset/generators.py#L110-L118
def ackermann_naive(m: int, n: int) -> int: """Ackermann number. """ if m == 0: return n + 1 elif n == 0: return ackermann(m - 1, 1) else: return ackermann(m - 1, ackermann(m, n - 1))
[ "def", "ackermann_naive", "(", "m", ":", "int", ",", "n", ":", "int", ")", "->", "int", ":", "if", "m", "==", "0", ":", "return", "n", "+", "1", "elif", "n", "==", "0", ":", "return", "ackermann", "(", "m", "-", "1", ",", "1", ")", "else", ":", "return", "ackermann", "(", "m", "-", "1", ",", "ackermann", "(", "m", ",", "n", "-", "1", ")", ")" ]
Ackermann number.
[ "Ackermann", "number", "." ]
python
train
24.333333
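The recursive branches above call an ackermann() helper assumed to be defined elsewhere in the same module; a fully self-contained recursive version for checking small values:

def ackermann(m: int, n: int) -> int:
    # Textbook recursion; only feasible for very small m and n.
    if m == 0:
        return n + 1
    elif n == 0:
        return ackermann(m - 1, 1)
    else:
        return ackermann(m - 1, ackermann(m, n - 1))

print(ackermann(2, 3))   # 9
print(ackermann(3, 3))   # 61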
pydata/xarray
xarray/core/utils.py
https://github.com/pydata/xarray/blob/6d93a95d05bdbfc33fff24064f67d29dd891ab58/xarray/core/utils.py#L258-L265
def to_0d_array(value: Any) -> np.ndarray: """Given a value, wrap it in a 0-D numpy.ndarray. """ if np.isscalar(value) or (isinstance(value, np.ndarray) and value.ndim == 0): return np.array(value) else: return to_0d_object_array(value)
[ "def", "to_0d_array", "(", "value", ":", "Any", ")", "->", "np", ".", "ndarray", ":", "if", "np", ".", "isscalar", "(", "value", ")", "or", "(", "isinstance", "(", "value", ",", "np", ".", "ndarray", ")", "and", "value", ".", "ndim", "==", "0", ")", ":", "return", "np", ".", "array", "(", "value", ")", "else", ":", "return", "to_0d_object_array", "(", "value", ")" ]
Given a value, wrap it in a 0-D numpy.ndarray.
[ "Given", "a", "value", "wrap", "it", "in", "a", "0", "-", "D", "numpy", ".", "ndarray", "." ]
python
train
36.375
joelfrederico/SciSalt
scisalt/matplotlib/pcolor_axes.py
https://github.com/joelfrederico/SciSalt/blob/7bf57c49c7dde0a8b0aa337fbd2fbd527ce7a67f/scisalt/matplotlib/pcolor_axes.py#L11-L30
def pcolor_axes(array, px_to_units=px_to_units): """ Return axes :code:`x, y` for *array* to be used with :func:`matplotlib.pyplot.color`. *px_to_units* is a function to convert pixels to units. By default, returns pixels. """ # ====================================== # Coords need to be +1 larger than array # ====================================== x_size = array.shape[0]+1 y_size = array.shape[1]+1 x = _np.empty((x_size, y_size)) y = _np.empty((x_size, y_size)) for i in range(x_size): for j in range(y_size): x[i, j], y[i, j] = px_to_units(i-0.5, j-0.5) return x, y
[ "def", "pcolor_axes", "(", "array", ",", "px_to_units", "=", "px_to_units", ")", ":", "# ======================================", "# Coords need to be +1 larger than array", "# ======================================", "x_size", "=", "array", ".", "shape", "[", "0", "]", "+", "1", "y_size", "=", "array", ".", "shape", "[", "1", "]", "+", "1", "x", "=", "_np", ".", "empty", "(", "(", "x_size", ",", "y_size", ")", ")", "y", "=", "_np", ".", "empty", "(", "(", "x_size", ",", "y_size", ")", ")", "for", "i", "in", "range", "(", "x_size", ")", ":", "for", "j", "in", "range", "(", "y_size", ")", ":", "x", "[", "i", ",", "j", "]", ",", "y", "[", "i", ",", "j", "]", "=", "px_to_units", "(", "i", "-", "0.5", ",", "j", "-", "0.5", ")", "return", "x", ",", "y" ]
Return axes :code:`x, y` for *array* to be used with :func:`matplotlib.pyplot.color`. *px_to_units* is a function to convert pixels to units. By default, returns pixels.
[ "Return", "axes", ":", "code", ":", "x", "y", "for", "*", "array", "*", "to", "be", "used", "with", ":", "func", ":", "matplotlib", ".", "pyplot", ".", "color", "." ]
python
valid
31.35
jamieleshaw/lurklib
lurklib/connection.py
https://github.com/jamieleshaw/lurklib/blob/a861f35d880140422103dd78ec3239814e85fd7e/lurklib/connection.py#L280-L304
def umode(self, nick, modes=''):
    """ Sets/gets user modes.
    Required arguments:
    * nick - Nick to set/get user modes for.
    Optional arguments:
    * modes='' - Sets these user modes on a nick.
    """
    with self.lock:
        if not modes:
            self.send('MODE %s' % nick)
            if self.readable():
                msg = self._recv(expected_replies=('221',))
                if msg[0] == '221':
                    modes = msg[2].replace('+', '').replace(':', '', 1)
            return modes
        self.send('MODE %s %s' % (nick, modes))
        if self.readable():
            msg = self._recv(expected_replies=('MODE',))
            if msg[0] == 'MODE':
                if not self.hide_called_events:
                    self.stepback()
                return msg[2].replace(':', '', 1)
[ "def", "umode", "(", "self", ",", "nick", ",", "modes", "=", "''", ")", ":", "with", "self", ".", "lock", ":", "if", "not", "modes", ":", "self", ".", "send", "(", "'MODE %s'", "%", "nick", ")", "if", "self", ".", "readable", "(", ")", ":", "msg", "=", "self", ".", "_recv", "(", "expected_replies", "=", "(", "'221'", ",", ")", ")", "if", "msg", "[", "0", "]", "==", "'221'", ":", "modes", "=", "msg", "[", "2", "]", ".", "replace", "(", "'+'", ",", "''", ")", ".", "replace", "(", "':'", ",", "''", ",", "1", ")", "return", "modes", "self", ".", "send", "(", "'MODE %s %s'", "%", "(", "nick", ",", "modes", ")", ")", "if", "self", ".", "readable", "(", ")", ":", "msg", "=", "self", ".", "_recv", "(", "expected_replies", "=", "(", "'MODE'", ",", ")", ")", "if", "msg", "[", "0", "]", "==", "'MODE'", ":", "if", "not", "self", ".", "hide_called_events", ":", "self", ".", "stepback", "(", ")", "return", "msg", "[", "2", "]", ".", "replace", "(", "':'", ",", "''", ",", "1", ")" ]
Sets/gets user modes. Required arguments: * nick - Nick to set/get user modes for. Optional arguments: * modes='' - Sets these user modes on a nick.
[ "Sets", "/", "gets", "user", "modes", ".", "Required", "arguments", ":", "*", "nick", "-", "Nick", "to", "set", "/", "get", "user", "modes", "for", ".", "Optional", "arguments", ":", "*", "modes", "=", "-", "Sets", "these", "user", "modes", "on", "a", "nick", "." ]
python
train
35.6
alefnula/tea
tea/console/color.py
https://github.com/alefnula/tea/blob/f5a0a724a425ec4f9dd2c7fe966ef06faf3a15a3/tea/console/color.py#L286-L308
def _colorize_single_line(line, regexp, color_def):
    """Print single line to console with ability to colorize parts of it."""
    match = regexp.match(line)
    groupdict = match.groupdict()
    groups = match.groups()
    if not groupdict:
        # no named groups, just colorize whole line
        color = color_def[0]
        dark = color_def[1]
        cprint("%s\n" % line, color, fg_dark=dark)
    else:
        rev_groups = {v: k for k, v in groupdict.items()}
        for part in groups:
            if part in rev_groups and rev_groups[part] in color_def:
                group_name = rev_groups[part]
                cprint(
                    part,
                    color_def[group_name][0],
                    fg_dark=color_def[group_name][1],
                )
            else:
                cprint(part)
    cprint("\n")
[ "def", "_colorize_single_line", "(", "line", ",", "regexp", ",", "color_def", ")", ":", "match", "=", "regexp", ".", "match", "(", "line", ")", "groupdict", "=", "match", ".", "groupdict", "(", ")", "groups", "=", "match", ".", "groups", "(", ")", "if", "not", "groupdict", ":", "# no named groups, just colorize whole line\r", "color", "=", "color_def", "[", "0", "]", "dark", "=", "color_def", "[", "1", "]", "cprint", "(", "\"%s\\n\"", "%", "line", ",", "color", ",", "fg_dark", "=", "dark", ")", "else", ":", "rev_groups", "=", "{", "v", ":", "k", "for", "k", ",", "v", "in", "groupdict", ".", "items", "(", ")", "}", "for", "part", "in", "groups", ":", "if", "part", "in", "rev_groups", "and", "rev_groups", "[", "part", "]", "in", "color_def", ":", "group_name", "=", "rev_groups", "[", "part", "]", "cprint", "(", "part", ",", "color_def", "[", "group_name", "]", "[", "0", "]", ",", "fg_dark", "=", "color_def", "[", "group_name", "]", "[", "1", "]", ",", ")", "else", ":", "cprint", "(", "part", ")", "cprint", "(", "\"\\n\"", ")" ]
Print single line to console with ability to colorize parts of it.
[ "Print", "single", "line", "to", "console", "with", "ability", "to", "colorize", "parts", "of", "it", "." ]
python
train
36.956522
blockstack/blockstack-core
blockstack/lib/nameset/db.py
https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/lib/nameset/db.py#L2632-L2673
def namedb_get_name_preorder( db, preorder_hash, current_block ):
    """
    Get a (singular) name preorder record outstanding at the given block, given the preorder hash.
    NOTE: returns expired preorders.

    Return the preorder record on success.
    Return None if not found.
    """
    select_query = "SELECT * FROM preorders WHERE preorder_hash = ? AND op = ? AND block_number < ?;"
    args = (preorder_hash, NAME_PREORDER, current_block + NAME_PREORDER_EXPIRE)

    cur = db.cursor()
    preorder_rows = namedb_query_execute( cur, select_query, args )
    preorder_row = preorder_rows.fetchone()
    if preorder_row is None:
        # no such preorder
        return None

    preorder_rec = {}
    preorder_rec.update( preorder_row )

    unexpired_query, unexpired_args = namedb_select_where_unexpired_names( current_block )

    # make sure that the name doesn't already exist
    select_query = "SELECT name_records.preorder_hash " + \
                   "FROM name_records JOIN namespaces ON name_records.namespace_id = namespaces.namespace_id " + \
                   "WHERE name_records.preorder_hash = ? AND " + \
                   unexpired_query + ";"

    args = (preorder_hash,) + unexpired_args

    cur = db.cursor()
    nm_rows = namedb_query_execute( cur, select_query, args )
    nm_row = nm_rows.fetchone()
    if nm_row is not None:
        # name with this preorder exists
        return None

    return preorder_rec
[ "def", "namedb_get_name_preorder", "(", "db", ",", "preorder_hash", ",", "current_block", ")", ":", "select_query", "=", "\"SELECT * FROM preorders WHERE preorder_hash = ? AND op = ? AND block_number < ?;\"", "args", "=", "(", "preorder_hash", ",", "NAME_PREORDER", ",", "current_block", "+", "NAME_PREORDER_EXPIRE", ")", "cur", "=", "db", ".", "cursor", "(", ")", "preorder_rows", "=", "namedb_query_execute", "(", "cur", ",", "select_query", ",", "args", ")", "preorder_row", "=", "preorder_rows", ".", "fetchone", "(", ")", "if", "preorder_row", "is", "None", ":", "# no such preorder ", "return", "None", "preorder_rec", "=", "{", "}", "preorder_rec", ".", "update", "(", "preorder_row", ")", "unexpired_query", ",", "unexpired_args", "=", "namedb_select_where_unexpired_names", "(", "current_block", ")", "# make sure that the name doesn't already exist ", "select_query", "=", "\"SELECT name_records.preorder_hash \"", "+", "\"FROM name_records JOIN namespaces ON name_records.namespace_id = namespaces.namespace_id \"", "+", "\"WHERE name_records.preorder_hash = ? AND \"", "+", "unexpired_query", "+", "\";\"", "args", "=", "(", "preorder_hash", ",", ")", "+", "unexpired_args", "cur", "=", "db", ".", "cursor", "(", ")", "nm_rows", "=", "namedb_query_execute", "(", "cur", ",", "select_query", ",", "args", ")", "nm_row", "=", "nm_rows", ".", "fetchone", "(", ")", "if", "nm_row", "is", "not", "None", ":", "# name with this preorder exists ", "return", "None", "return", "preorder_rec" ]
Get a (singular) name preorder record outstanding at the given block, given the preorder hash. NOTE: returns expired preorders. Return the preorder record on success. Return None if not found.
[ "Get", "a", "(", "singular", ")", "name", "preorder", "record", "outstanding", "at", "the", "given", "block", "given", "the", "preorder", "hash", ".", "NOTE", ":", "returns", "expired", "preorders", ".", "Return", "the", "preorder", "record", "on", "success", ".", "Return", "None", "if", "not", "found", "." ]
python
train
34.047619
artefactual-labs/mets-reader-writer
metsrw/mets.py
https://github.com/artefactual-labs/mets-reader-writer/blob/d95939cabdfdc25cb1bf67df0c84bd0d6e6a73ff/metsrw/mets.py#L342-L385
def _parse_tree_structmap(self, tree, parent_elem, normative_parent_elem=None):
    """Recursively parse all the children of parent_elem, including amdSecs and dmdSecs.

    :param lxml._ElementTree tree: encodes the entire METS file.
    :param lxml._Element parent_elem: the element whose children we are parsing.
    :param lxml._Element normative_parent_elem: the normative counterpart of
        ``parent_elem`` taken from the logical structMap labelled
        "Normative Directory Structure".
    """
    siblings = []
    el_to_normative = self._get_el_to_normative(parent_elem, normative_parent_elem)
    for elem, normative_elem in el_to_normative.items():
        if elem.tag != utils.lxmlns("mets") + "div":
            continue  # Only handle divs, not fptrs
        entry_type = elem.get("TYPE")
        label = elem.get("LABEL")
        fptr_elems = elem.findall("mets:fptr", namespaces=utils.NAMESPACES)
        # Directories are walked recursively. Additionally, they may
        # contain direct fptrs.
        if entry_type.lower() == "directory":
            children = self._parse_tree_structmap(
                tree, elem, normative_parent_elem=normative_elem
            )
            fs_entry = fsentry.FSEntry.dir(label, children)
            self._add_dmdsecs_to_fs_entry(elem, fs_entry, tree)
            siblings.append(fs_entry)
            for fptr_elem in fptr_elems:
                fptr = self._analyze_fptr(fptr_elem, tree, entry_type)
                fs_entry = fsentry.FSEntry.from_fptr(
                    label=None, type_=u"Item", fptr=fptr
                )
                self._add_amdsecs_to_fs_entry(fptr.amdids, fs_entry, tree)
                siblings.append(fs_entry)
            continue
        # Other types, e.g.: items, aips...
        if not len(fptr_elems):
            continue
        fptr = self._analyze_fptr(fptr_elems[0], tree, entry_type)
        fs_entry = fsentry.FSEntry.from_fptr(label, entry_type, fptr)
        self._add_dmdsecs_to_fs_entry(elem, fs_entry, tree)
        self._add_amdsecs_to_fs_entry(fptr.amdids, fs_entry, tree)
        siblings.append(fs_entry)
    return siblings
[ "def", "_parse_tree_structmap", "(", "self", ",", "tree", ",", "parent_elem", ",", "normative_parent_elem", "=", "None", ")", ":", "siblings", "=", "[", "]", "el_to_normative", "=", "self", ".", "_get_el_to_normative", "(", "parent_elem", ",", "normative_parent_elem", ")", "for", "elem", ",", "normative_elem", "in", "el_to_normative", ".", "items", "(", ")", ":", "if", "elem", ".", "tag", "!=", "utils", ".", "lxmlns", "(", "\"mets\"", ")", "+", "\"div\"", ":", "continue", "# Only handle divs, not fptrs", "entry_type", "=", "elem", ".", "get", "(", "\"TYPE\"", ")", "label", "=", "elem", ".", "get", "(", "\"LABEL\"", ")", "fptr_elems", "=", "elem", ".", "findall", "(", "\"mets:fptr\"", ",", "namespaces", "=", "utils", ".", "NAMESPACES", ")", "# Directories are walked recursively. Additionally, they may", "# contain direct fptrs.", "if", "entry_type", ".", "lower", "(", ")", "==", "\"directory\"", ":", "children", "=", "self", ".", "_parse_tree_structmap", "(", "tree", ",", "elem", ",", "normative_parent_elem", "=", "normative_elem", ")", "fs_entry", "=", "fsentry", ".", "FSEntry", ".", "dir", "(", "label", ",", "children", ")", "self", ".", "_add_dmdsecs_to_fs_entry", "(", "elem", ",", "fs_entry", ",", "tree", ")", "siblings", ".", "append", "(", "fs_entry", ")", "for", "fptr_elem", "in", "fptr_elems", ":", "fptr", "=", "self", ".", "_analyze_fptr", "(", "fptr_elem", ",", "tree", ",", "entry_type", ")", "fs_entry", "=", "fsentry", ".", "FSEntry", ".", "from_fptr", "(", "label", "=", "None", ",", "type_", "=", "u\"Item\"", ",", "fptr", "=", "fptr", ")", "self", ".", "_add_amdsecs_to_fs_entry", "(", "fptr", ".", "amdids", ",", "fs_entry", ",", "tree", ")", "siblings", ".", "append", "(", "fs_entry", ")", "continue", "# Other types, e.g.: items, aips...", "if", "not", "len", "(", "fptr_elems", ")", ":", "continue", "fptr", "=", "self", ".", "_analyze_fptr", "(", "fptr_elems", "[", "0", "]", ",", "tree", ",", "entry_type", ")", "fs_entry", "=", "fsentry", ".", "FSEntry", ".", "from_fptr", "(", "label", ",", "entry_type", ",", "fptr", ")", "self", ".", "_add_dmdsecs_to_fs_entry", "(", "elem", ",", "fs_entry", ",", "tree", ")", "self", ".", "_add_amdsecs_to_fs_entry", "(", "fptr", ".", "amdids", ",", "fs_entry", ",", "tree", ")", "siblings", ".", "append", "(", "fs_entry", ")", "return", "siblings" ]
Recursively parse all the children of parent_elem, including amdSecs and dmdSecs. :param lxml._ElementTree tree: encodes the entire METS file. :param lxml._Element parent_elem: the element whose children we are parsing. :param lxml._Element normative_parent_elem: the normative counterpart of ``parent_elem`` taken from the logical structMap labelled "Normative Directory Structure".
[ "Recursively", "parse", "all", "the", "children", "of", "parent_elem", "including", "amdSecs", "and", "dmdSecs", ".", ":", "param", "lxml", ".", "_ElementTree", "tree", ":", "encodes", "the", "entire", "METS", "file", ".", ":", "param", "lxml", ".", "_Element", "parent_elem", ":", "the", "element", "whose", "children", "we", "are", "parsing", ".", ":", "param", "lxml", ".", "_Element", "normative_parent_elem", ":", "the", "normative", "counterpart", "of", "parent_elem", "taken", "from", "the", "logical", "structMap", "labelled", "Normative", "Directory", "Structure", "." ]
python
train
52.090909
bcbio/bcbio-nextgen
bcbio/illumina/flowcell.py
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/illumina/flowcell.py#L39-L54
def get_fastq_dir(fc_dir):
    """Retrieve the fastq directory within Solexa flowcell output.
    """
    full_goat_bc = glob.glob(os.path.join(fc_dir, "Data", "*Firecrest*", "Bustard*"))
    bustard_bc = glob.glob(os.path.join(fc_dir, "Data", "Intensities", "*Bustard*"))
    machine_bc = os.path.join(fc_dir, "Data", "Intensities", "BaseCalls")
    if os.path.exists(machine_bc):
        return os.path.join(machine_bc, "fastq")
    elif len(full_goat_bc) > 0:
        return os.path.join(full_goat_bc[0], "fastq")
    elif len(bustard_bc) > 0:
        return os.path.join(bustard_bc[0], "fastq")
    # otherwise assume we are in the fastq directory
    # XXX What other cases can we end up with here?
    else:
        return fc_dir
[ "def", "get_fastq_dir", "(", "fc_dir", ")", ":", "full_goat_bc", "=", "glob", ".", "glob", "(", "os", ".", "path", ".", "join", "(", "fc_dir", ",", "\"Data\"", ",", "\"*Firecrest*\"", ",", "\"Bustard*\"", ")", ")", "bustard_bc", "=", "glob", ".", "glob", "(", "os", ".", "path", ".", "join", "(", "fc_dir", ",", "\"Data\"", ",", "\"Intensities\"", ",", "\"*Bustard*\"", ")", ")", "machine_bc", "=", "os", ".", "path", ".", "join", "(", "fc_dir", ",", "\"Data\"", ",", "\"Intensities\"", ",", "\"BaseCalls\"", ")", "if", "os", ".", "path", ".", "exists", "(", "machine_bc", ")", ":", "return", "os", ".", "path", ".", "join", "(", "machine_bc", ",", "\"fastq\"", ")", "elif", "len", "(", "full_goat_bc", ")", ">", "0", ":", "return", "os", ".", "path", ".", "join", "(", "full_goat_bc", "[", "0", "]", ",", "\"fastq\"", ")", "elif", "len", "(", "bustard_bc", ")", ">", "0", ":", "return", "os", ".", "path", ".", "join", "(", "bustard_bc", "[", "0", "]", ",", "\"fastq\"", ")", "# otherwise assume we are in the fastq directory", "# XXX What other cases can we end up with here?", "else", ":", "return", "fc_dir" ]
Retrieve the fastq directory within Solexa flowcell output.
[ "Retrieve", "the", "fastq", "directory", "within", "Solexa", "flowcell", "output", "." ]
python
train
45
PmagPy/PmagPy
pmagpy/pmag.py
https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/pmag.py#L5150-L5165
def Dir_anis_corr(InDir, AniSpec):
    """
    takes the 6 element 's' vector and the Dec,Inc 'InDir' data,
    performs simple anisotropy correction. returns corrected Dec, Inc
    """
    Dir = np.zeros((3), 'f')
    Dir[0] = InDir[0]
    Dir[1] = InDir[1]
    Dir[2] = 1.
    chi, chi_inv = check_F(AniSpec)
    if chi[0][0] == 1.:
        return Dir  # isotropic
    X = dir2cart(Dir)
    M = np.array(X)
    H = np.dot(M, chi_inv)
    return cart2dir(H)
[ "def", "Dir_anis_corr", "(", "InDir", ",", "AniSpec", ")", ":", "Dir", "=", "np", ".", "zeros", "(", "(", "3", ")", ",", "'f'", ")", "Dir", "[", "0", "]", "=", "InDir", "[", "0", "]", "Dir", "[", "1", "]", "=", "InDir", "[", "1", "]", "Dir", "[", "2", "]", "=", "1.", "chi", ",", "chi_inv", "=", "check_F", "(", "AniSpec", ")", "if", "chi", "[", "0", "]", "[", "0", "]", "==", "1.", ":", "return", "Dir", "# isotropic", "X", "=", "dir2cart", "(", "Dir", ")", "M", "=", "np", ".", "array", "(", "X", ")", "H", "=", "np", ".", "dot", "(", "M", ",", "chi_inv", ")", "return", "cart2dir", "(", "H", ")" ]
takes the 6 element 's' vector and the Dec,Inc 'InDir' data, performs simple anisotropy correction. returns corrected Dec, Inc
[ "takes", "the", "6", "element", "s", "vector", "and", "the", "Dec", "Inc", "InDir", "data", "performs", "simple", "anisotropy", "correction", ".", "returns", "corrected", "Dec", "Inc" ]
python
train
27.6875
OpenTreeOfLife/peyotl
peyotl/git_storage/git_shard.py
https://github.com/OpenTreeOfLife/peyotl/blob/5e4e52a0fdbd17f490aa644ad79fda6ea2eda7c0/peyotl/git_storage/git_shard.py#L275-L287
def _read_master_branch_resource(self, fn, is_json=False):
    """This will force the current branch to master! """
    with self._master_branch_repo_lock:
        ga = self._create_git_action_for_global_resource()
        with ga.lock():
            ga.checkout_master()
            if os.path.exists(fn):
                if is_json:
                    return read_as_json(fn)
                with codecs.open(fn, 'rU', encoding='utf-8') as f:
                    ret = f.read()
                return ret
            return None
[ "def", "_read_master_branch_resource", "(", "self", ",", "fn", ",", "is_json", "=", "False", ")", ":", "with", "self", ".", "_master_branch_repo_lock", ":", "ga", "=", "self", ".", "_create_git_action_for_global_resource", "(", ")", "with", "ga", ".", "lock", "(", ")", ":", "ga", ".", "checkout_master", "(", ")", "if", "os", ".", "path", ".", "exists", "(", "fn", ")", ":", "if", "is_json", ":", "return", "read_as_json", "(", "fn", ")", "with", "codecs", ".", "open", "(", "fn", ",", "'rU'", ",", "encoding", "=", "'utf-8'", ")", "as", "f", ":", "ret", "=", "f", ".", "read", "(", ")", "return", "ret", "return", "None" ]
This will force the current branch to master!
[ "This", "will", "force", "the", "current", "branch", "to", "master!" ]
python
train
43.615385
druids/django-chamber
chamber/models/__init__.py
https://github.com/druids/django-chamber/blob/eef4169923557e96877a664fa254e8c0814f3f23/chamber/models/__init__.py#L258-L264
def change_and_save(self, update_only_changed_fields=False, **changed_fields):
    """
    Changes a given `changed_fields` on each object in the queryset, saves objects
    and returns the changed objects in the queryset.
    """
    bulk_change_and_save(self, update_only_changed_fields=update_only_changed_fields, **changed_fields)
    return self.filter()
[ "def", "change_and_save", "(", "self", ",", "update_only_changed_fields", "=", "False", ",", "*", "*", "changed_fields", ")", ":", "bulk_change_and_save", "(", "self", ",", "update_only_changed_fields", "=", "update_only_changed_fields", ",", "*", "*", "changed_fields", ")", "return", "self", ".", "filter", "(", ")" ]
Changes a given `changed_fields` on each object in the queryset, saves objects and returns the changed objects in the queryset.
[ "Changes", "a", "given", "changed_fields", "on", "each", "object", "in", "the", "queryset", "saves", "objects", "and", "returns", "the", "changed", "objects", "in", "the", "queryset", "." ]
python
train
53.857143
tethysplatform/condorpy
condorpy/job.py
https://github.com/tethysplatform/condorpy/blob/a5aaaef0d73198f7d9756dda7abe98b4e209f1f4/condorpy/job.py#L183-L191
def log_file(self):
    """The path to the log file for this job.

    """
    log_file = self.get('log')
    if not log_file:
        log_file = '%s.log' % (self.name)
        self.set('log', log_file)
    return os.path.join(self.initial_dir, self.get('log'))
[ "def", "log_file", "(", "self", ")", ":", "log_file", "=", "self", ".", "get", "(", "'log'", ")", "if", "not", "log_file", ":", "log_file", "=", "'%s.log'", "%", "(", "self", ".", "name", ")", "self", ".", "set", "(", "'log'", ",", "log_file", ")", "return", "os", ".", "path", ".", "join", "(", "self", ".", "initial_dir", ",", "self", ".", "get", "(", "'log'", ")", ")" ]
The path to the log file for this job.
[ "The", "path", "to", "the", "log", "file", "for", "this", "job", "." ]
python
train
31.222222
ewels/MultiQC
multiqc/modules/fastqc/fastqc.py
https://github.com/ewels/MultiQC/blob/2037d6322b2554146a74efbf869156ad20d4c4ec/multiqc/modules/fastqc/fastqc.py#L939-L951
def avg_bp_from_range(self, bp):
    """ Helper function - FastQC often gives base pair ranges (eg. 10-15)
    which are not helpful when plotting. This returns the average from such
    ranges as an int, which is helpful. If not a range, just returns the int """
    try:
        if '-' in bp:
            maxlen = float(bp.split("-",1)[1])
            minlen = float(bp.split("-",1)[0])
            bp = ((maxlen - minlen)/2) + minlen
    except TypeError:
        pass
    return(int(bp))
[ "def", "avg_bp_from_range", "(", "self", ",", "bp", ")", ":", "try", ":", "if", "'-'", "in", "bp", ":", "maxlen", "=", "float", "(", "bp", ".", "split", "(", "\"-\"", ",", "1", ")", "[", "1", "]", ")", "minlen", "=", "float", "(", "bp", ".", "split", "(", "\"-\"", ",", "1", ")", "[", "0", "]", ")", "bp", "=", "(", "(", "maxlen", "-", "minlen", ")", "/", "2", ")", "+", "minlen", "except", "TypeError", ":", "pass", "return", "(", "int", "(", "bp", ")", ")" ]
Helper function - FastQC often gives base pair ranges (eg. 10-15) which are not helpful when plotting. This returns the average from such ranges as an int, which is helpful. If not a range, just returns the int
[ "Helper", "function", "-", "FastQC", "often", "gives", "base", "pair", "ranges", "(", "eg", ".", "10", "-", "15", ")", "which", "are", "not", "helpful", "when", "plotting", ".", "This", "returns", "the", "average", "from", "such", "ranges", "as", "an", "int", "which", "is", "helpful", ".", "If", "not", "a", "range", "just", "returns", "the", "int" ]
python
train
40.307692
aio-libs/aioftp
ftpbench.py
https://github.com/aio-libs/aioftp/blob/b45395b1aba41301b898040acade7010e6878a08/ftpbench.py#L159-L174
def human2bytes(s):
    """
    >>> human2bytes('1M')
    1048576
    >>> human2bytes('1G')
    1073741824
    """
    symbols = ('B', 'K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')
    letter = s[-1:].strip().upper()
    num = s[:-1]
    assert num.isdigit() and letter in symbols, s
    num = float(num)
    prefix = {symbols[0]: 1}
    for i, s in enumerate(symbols[1:]):
        prefix[s] = 1 << (i + 1) * 10
    return int(num * prefix[letter])
[ "def", "human2bytes", "(", "s", ")", ":", "symbols", "=", "(", "'B'", ",", "'K'", ",", "'M'", ",", "'G'", ",", "'T'", ",", "'P'", ",", "'E'", ",", "'Z'", ",", "'Y'", ")", "letter", "=", "s", "[", "-", "1", ":", "]", ".", "strip", "(", ")", ".", "upper", "(", ")", "num", "=", "s", "[", ":", "-", "1", "]", "assert", "num", ".", "isdigit", "(", ")", "and", "letter", "in", "symbols", ",", "s", "num", "=", "float", "(", "num", ")", "prefix", "=", "{", "symbols", "[", "0", "]", ":", "1", "}", "for", "i", ",", "s", "in", "enumerate", "(", "symbols", "[", "1", ":", "]", ")", ":", "prefix", "[", "s", "]", "=", "1", "<<", "(", "i", "+", "1", ")", "*", "10", "return", "int", "(", "num", "*", "prefix", "[", "letter", "]", ")" ]
>>> human2bytes('1M') 1048576 >>> human2bytes('1G') 1073741824
[ ">>>", "human2bytes", "(", "1M", ")", "1048576", ">>>", "human2bytes", "(", "1G", ")", "1073741824" ]
python
valid
26.6875
vilmibm/done
parsedatetime/parsedatetime.py
https://github.com/vilmibm/done/blob/7e5b60d2900ceddefa49de352a19b794199b51a8/parsedatetime/parsedatetime.py#L389-L440
def parseDateText(self, dateString):
    """
    Parse long-form date strings::

        'May 31st, 2006'
        'Jan 1st'
        'July 2006'

    @type dateString: string
    @param dateString: text to convert to a datetime

    @rtype: struct_time
    @return: calculated C{struct_time} value of dateString
    """
    yr, mth, dy, hr, mn, sec, wd, yd, isdst = time.localtime()

    currentMth = mth
    currentDy = dy

    s = dateString.lower()
    m = self.ptc.CRE_DATE3.search(s)

    mth = m.group('mthname')
    mth = self.ptc.MonthOffsets[mth]

    if m.group('day') != None:
        dy = int(m.group('day'))
    else:
        dy = 1

    if m.group('year') != None:
        yr = int(m.group('year'))

        # birthday epoch constraint
        if yr < self.ptc.BirthdayEpoch:
            yr += 2000
        elif yr < 100:
            yr += 1900

    elif (mth < currentMth) or (mth == currentMth and dy < currentDy):
        # if that day and month have already passed in this year,
        # then increment the year by 1
        yr += 1

    if dy > 0 and dy <= self.ptc.daysInMonth(mth, yr):
        sourceTime = (yr, mth, dy, hr, mn, sec, wd, yd, isdst)
    else:
        # Return current time if date string is invalid
        self.dateFlag = 0
        self.timeFlag = 0

        sourceTime = time.localtime()

    return sourceTime
[ "def", "parseDateText", "(", "self", ",", "dateString", ")", ":", "yr", ",", "mth", ",", "dy", ",", "hr", ",", "mn", ",", "sec", ",", "wd", ",", "yd", ",", "isdst", "=", "time", ".", "localtime", "(", ")", "currentMth", "=", "mth", "currentDy", "=", "dy", "s", "=", "dateString", ".", "lower", "(", ")", "m", "=", "self", ".", "ptc", ".", "CRE_DATE3", ".", "search", "(", "s", ")", "mth", "=", "m", ".", "group", "(", "'mthname'", ")", "mth", "=", "self", ".", "ptc", ".", "MonthOffsets", "[", "mth", "]", "if", "m", ".", "group", "(", "'day'", ")", "!=", "None", ":", "dy", "=", "int", "(", "m", ".", "group", "(", "'day'", ")", ")", "else", ":", "dy", "=", "1", "if", "m", ".", "group", "(", "'year'", ")", "!=", "None", ":", "yr", "=", "int", "(", "m", ".", "group", "(", "'year'", ")", ")", "# birthday epoch constraint\r", "if", "yr", "<", "self", ".", "ptc", ".", "BirthdayEpoch", ":", "yr", "+=", "2000", "elif", "yr", "<", "100", ":", "yr", "+=", "1900", "elif", "(", "mth", "<", "currentMth", ")", "or", "(", "mth", "==", "currentMth", "and", "dy", "<", "currentDy", ")", ":", "# if that day and month have already passed in this year,\r", "# then increment the year by 1\r", "yr", "+=", "1", "if", "dy", ">", "0", "and", "dy", "<=", "self", ".", "ptc", ".", "daysInMonth", "(", "mth", ",", "yr", ")", ":", "sourceTime", "=", "(", "yr", ",", "mth", ",", "dy", ",", "hr", ",", "mn", ",", "sec", ",", "wd", ",", "yd", ",", "isdst", ")", "else", ":", "# Return current time if date string is invalid\r", "self", ".", "dateFlag", "=", "0", "self", ".", "timeFlag", "=", "0", "sourceTime", "=", "time", ".", "localtime", "(", ")", "return", "sourceTime" ]
Parse long-form date strings:: 'May 31st, 2006' 'Jan 1st' 'July 2006' @type dateString: string @param dateString: text to convert to a datetime @rtype: struct_time @return: calculated C{struct_time} value of dateString
[ "Parse", "long", "-", "form", "date", "strings", "::", "May", "31st", "2006", "Jan", "1st", "July", "2006" ]
python
train
29.211538
peergradeio/flask-mongo-profiler
flask_mongo_profiler/contrib/flask_admin/formatters/polymorphic_relations.py
https://github.com/peergradeio/flask-mongo-profiler/blob/a267eeb49fea07c9a24fb370bd9d7a90ed313ccf/flask_mongo_profiler/contrib/flask_admin/formatters/polymorphic_relations.py#L27-L65
def generic_ref_formatter(view, context, model, name, lazy=False):
    """
    For GenericReferenceField and LazyGenericReferenceField

    See Also
    --------
    diff_formatter
    """
    try:
        if lazy:
            rel_model = getattr(model, name).fetch()
        else:
            rel_model = getattr(model, name)
    except (mongoengine.DoesNotExist, AttributeError) as e:
        # custom_field_type_formatters seems to fix the issue of stale references
        # crashing pages, since it intercepts the display of all ReferenceField's.
        return Markup(
            '<span class="label label-danger">Error</span> <small>%s</small>' % e
        )

    if rel_model is None:
        return ''

    try:
        return Markup(
            '<a href="%s">%s</a>'
            % (
                url_for(
                    # Flask-Admin creates URL's namespaced w/ model class name, lowercase.
                    '%s.details_view' % rel_model.__class__.__name__.lower(),
                    id=rel_model.id,
                ),
                rel_model,
            )
        )
    except werkzeug.routing.BuildError as e:
        return Markup(
            '<span class="label label-danger">Error</span> <small>%s</small>' % e
        )
[ "def", "generic_ref_formatter", "(", "view", ",", "context", ",", "model", ",", "name", ",", "lazy", "=", "False", ")", ":", "try", ":", "if", "lazy", ":", "rel_model", "=", "getattr", "(", "model", ",", "name", ")", ".", "fetch", "(", ")", "else", ":", "rel_model", "=", "getattr", "(", "model", ",", "name", ")", "except", "(", "mongoengine", ".", "DoesNotExist", ",", "AttributeError", ")", "as", "e", ":", "# custom_field_type_formatters seems to fix the issue of stale references", "# crashing pages, since it intercepts the display of all ReferenceField's.", "return", "Markup", "(", "'<span class=\"label label-danger\">Error</span> <small>%s</small>'", "%", "e", ")", "if", "rel_model", "is", "None", ":", "return", "''", "try", ":", "return", "Markup", "(", "'<a href=\"%s\">%s</a>'", "%", "(", "url_for", "(", "# Flask-Admin creates URL's namespaced w/ model class name, lowercase.", "'%s.details_view'", "%", "rel_model", ".", "__class__", ".", "__name__", ".", "lower", "(", ")", ",", "id", "=", "rel_model", ".", "id", ",", ")", ",", "rel_model", ",", ")", ")", "except", "werkzeug", ".", "routing", ".", "BuildError", "as", "e", ":", "return", "Markup", "(", "'<span class=\"label label-danger\">Error</span> <small>%s</small>'", "%", "e", ")" ]
For GenericReferenceField and LazyGenericReferenceField See Also -------- diff_formatter
[ "For", "GenericReferenceField", "and", "LazyGenericReferenceField" ]
python
train
31.205128
psd-tools/psd-tools
src/psd_tools/api/layers.py
https://github.com/psd-tools/psd-tools/blob/4952b57bcf1cf2c1f16fd9d6d51d4fa0b53bce4e/src/psd_tools/api/layers.py#L342-L351
def compose(self, *args, **kwargs):
    """
    Compose layer and masks (mask, vector mask, and clipping layers).

    :return: :py:class:`PIL.Image`, or `None` if the layer has no pixel.
    """
    from psd_tools.api.composer import compose_layer
    if self.bbox == (0, 0, 0, 0):
        return None
    return compose_layer(self, *args, **kwargs)
[ "def", "compose", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "from", "psd_tools", ".", "api", ".", "composer", "import", "compose_layer", "if", "self", ".", "bbox", "==", "(", "0", ",", "0", ",", "0", ",", "0", ")", ":", "return", "None", "return", "compose_layer", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
Compose layer and masks (mask, vector mask, and clipping layers). :return: :py:class:`PIL.Image`, or `None` if the layer has no pixel.
[ "Compose", "layer", "and", "masks", "(", "mask", "vector", "mask", "and", "clipping", "layers", ")", "." ]
python
train
37.3
jmcarp/flask-apispec
flask_apispec/annotations.py
https://github.com/jmcarp/flask-apispec/blob/d8cb658fa427f051568e58d6af201b8e9924c325/flask_apispec/annotations.py#L74-L91
def doc(inherit=None, **kwargs):
    """Annotate the decorated view function or class with the specified Swagger
    attributes.

    Usage:

    .. code-block:: python

        @doc(tags=['pet'], description='a pet store')
        def get_pet(pet_id):
            return Pet.query.filter(Pet.id == pet_id).one()

    :param inherit: Inherit Swagger documentation from parent classes
    """
    def wrapper(func):
        annotate(func, 'docs', [kwargs], inherit=inherit)
        return activate(func)
    return wrapper
[ "def", "doc", "(", "inherit", "=", "None", ",", "*", "*", "kwargs", ")", ":", "def", "wrapper", "(", "func", ")", ":", "annotate", "(", "func", ",", "'docs'", ",", "[", "kwargs", "]", ",", "inherit", "=", "inherit", ")", "return", "activate", "(", "func", ")", "return", "wrapper" ]
Annotate the decorated view function or class with the specified Swagger attributes. Usage: .. code-block:: python @doc(tags=['pet'], description='a pet store') def get_pet(pet_id): return Pet.query.filter(Pet.id == pet_id).one() :param inherit: Inherit Swagger documentation from parent classes
[ "Annotate", "the", "decorated", "view", "function", "or", "class", "with", "the", "specified", "Swagger", "attributes", "." ]
python
train
28
O365/python-o365
release.py
https://github.com/O365/python-o365/blob/02a71cf3775cc6a3c042e003365d6a07c8c75a73/release.py#L50-L69
def upload(ctx, release, rebuild):
    """ Uploads distribuition files to pypi or pypitest. """
    dist_path = Path(DIST_PATH)
    if rebuild is False:
        if not dist_path.exists() or not list(dist_path.glob('*')):
            print("No distribution files found. Please run 'build' command first")
            return
    else:
        ctx.invoke(build, force=True)

    if release:
        args = ['twine', 'upload', 'dist/*']
    else:
        repository = 'https://test.pypi.org/legacy/'
        args = ['twine', 'upload', '--repository-url', repository, 'dist/*']

    env = os.environ.copy()
    p = subprocess.Popen(args, env=env)
    p.wait()
[ "def", "upload", "(", "ctx", ",", "release", ",", "rebuild", ")", ":", "dist_path", "=", "Path", "(", "DIST_PATH", ")", "if", "rebuild", "is", "False", ":", "if", "not", "dist_path", ".", "exists", "(", ")", "or", "not", "list", "(", "dist_path", ".", "glob", "(", "'*'", ")", ")", ":", "print", "(", "\"No distribution files found. Please run 'build' command first\"", ")", "return", "else", ":", "ctx", ".", "invoke", "(", "build", ",", "force", "=", "True", ")", "if", "release", ":", "args", "=", "[", "'twine'", ",", "'upload'", ",", "'dist/*'", "]", "else", ":", "repository", "=", "'https://test.pypi.org/legacy/'", "args", "=", "[", "'twine'", ",", "'upload'", ",", "'--repository-url'", ",", "repository", ",", "'dist/*'", "]", "env", "=", "os", ".", "environ", ".", "copy", "(", ")", "p", "=", "subprocess", ".", "Popen", "(", "args", ",", "env", "=", "env", ")", "p", ".", "wait", "(", ")" ]
Uploads distribuition files to pypi or pypitest.
[ "Uploads", "distribuition", "files", "to", "pypi", "or", "pypitest", "." ]
python
train
31.8
Chilipp/psy-simple
psy_simple/base.py
https://github.com/Chilipp/psy-simple/blob/7d916406a6d3c3c27c0b7102f98fef07a4da0a61/psy_simple/base.py#L133-L167
def get_fig_data_attrs(self, delimiter=None):
    """Join the data attributes with other plotters in the project

    This method joins the attributes of the
    :class:`~psyplot.InteractiveBase` instances in the project that draw on
    the same figure as this instance does.

    Parameters
    ----------
    delimiter: str
        Specifies the delimiter with what the attributes are joined. If
        None, the :attr:`delimiter` attribute of this instance or (if the
        latter is also None), the rcParams['texts.delimiter'] item is used.

    Returns
    -------
    dict
        A dictionary with all the meta attributes joined by the specified
        `delimiter`"""
    if self.project is not None:
        delimiter = next(filter(lambda d: d is not None, [
            delimiter, self.delimiter, self.rc['delimiter']]))
        figs = self.project.figs
        fig = self.ax.get_figure()
        if self.plotter._initialized and fig in figs:
            ret = figs[fig].joined_attrs(delimiter=delimiter,
                                         plot_data=True)
        else:
            ret = self.get_enhanced_attrs(self.plotter.plot_data)
            self.logger.debug(
                'Can not get the figure attributes because plot has not '
                'yet been initialized!')
        return ret
    else:
        return self.get_enhanced_attrs(self.plotter.plot_data)
[ "def", "get_fig_data_attrs", "(", "self", ",", "delimiter", "=", "None", ")", ":", "if", "self", ".", "project", "is", "not", "None", ":", "delimiter", "=", "next", "(", "filter", "(", "lambda", "d", ":", "d", "is", "not", "None", ",", "[", "delimiter", ",", "self", ".", "delimiter", ",", "self", ".", "rc", "[", "'delimiter'", "]", "]", ")", ")", "figs", "=", "self", ".", "project", ".", "figs", "fig", "=", "self", ".", "ax", ".", "get_figure", "(", ")", "if", "self", ".", "plotter", ".", "_initialized", "and", "fig", "in", "figs", ":", "ret", "=", "figs", "[", "fig", "]", ".", "joined_attrs", "(", "delimiter", "=", "delimiter", ",", "plot_data", "=", "True", ")", "else", ":", "ret", "=", "self", ".", "get_enhanced_attrs", "(", "self", ".", "plotter", ".", "plot_data", ")", "self", ".", "logger", ".", "debug", "(", "'Can not get the figure attributes because plot has not '", "'yet been initialized!'", ")", "return", "ret", "else", ":", "return", "self", ".", "get_enhanced_attrs", "(", "self", ".", "plotter", ".", "plot_data", ")" ]
Join the data attributes with other plotters in the project This method joins the attributes of the :class:`~psyplot.InteractiveBase` instances in the project that draw on the same figure as this instance does. Parameters ---------- delimiter: str Specifies the delimiter with what the attributes are joined. If None, the :attr:`delimiter` attribute of this instance or (if the latter is also None), the rcParams['texts.delimiter'] item is used. Returns ------- dict A dictionary with all the meta attributes joined by the specified `delimiter`
[ "Join", "the", "data", "attributes", "with", "other", "plotters", "in", "the", "project" ]
python
train
42.371429
Robpol86/docoptcfg
docoptcfg.py
https://github.com/Robpol86/docoptcfg/blob/3746dc263549f7f3ef5a86e739d588546b084bde/docoptcfg.py#L234-L279
def docoptcfg(doc, argv=None, env_prefix=None, config_option=None, ignore=None, *args, **kwargs):
    """Pass most args/kwargs to docopt. Handle `env_prefix` and `config_option`.

    :raise DocoptcfgError: If `config_option` isn't found in docstring.
    :raise DocoptcfgFileError: On any error while trying to read and parse config file (if enabled).

    :param str doc: Docstring passed to docopt.
    :param iter argv: sys.argv[1:] passed to docopt.
    :param str env_prefix: Enable environment variable support, prefix of said variables.
    :param str config_option: Enable config file support, docopt option defining path to config file.
    :param iter ignore: Options to ignore. Default is --help and --version.
    :param iter args: Additional positional arguments passed to docopt.
    :param dict kwargs: Additional keyword arguments passed to docopt.

    :return: Dictionary constructed by docopt and updated by docoptcfg.
    :rtype: dict
    """
    docopt_dict = docopt.docopt(doc, argv, *args, **kwargs)
    if env_prefix is None and config_option is None:
        return docopt_dict  # Nothing to do.
    if argv is None:
        argv = sys.argv[1:]
    if ignore is None:
        ignore = ('--help', '--version')
    settable, booleans, repeatable, short_map = settable_options(doc, argv, ignore, kwargs.get('options_first', False))
    if not settable:
        return docopt_dict  # Nothing to do.

    # Handle environment variables defaults.
    if env_prefix is not None:
        defaults = values_from_env(env_prefix, settable, booleans, repeatable)
        settable -= set(defaults.keys())  # No longer settable by values_from_file().
        docopt_dict.update(defaults)

    # Handle config file defaults.
    if config_option is not None:
        defaults = values_from_file(
            docopt_dict,
            short_map.get(config_option, config_option),
            settable,
            booleans,
            repeatable,
        )
        docopt_dict.update(defaults)

    return docopt_dict
[ "def", "docoptcfg", "(", "doc", ",", "argv", "=", "None", ",", "env_prefix", "=", "None", ",", "config_option", "=", "None", ",", "ignore", "=", "None", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "docopt_dict", "=", "docopt", ".", "docopt", "(", "doc", ",", "argv", ",", "*", "args", ",", "*", "*", "kwargs", ")", "if", "env_prefix", "is", "None", "and", "config_option", "is", "None", ":", "return", "docopt_dict", "# Nothing to do.", "if", "argv", "is", "None", ":", "argv", "=", "sys", ".", "argv", "[", "1", ":", "]", "if", "ignore", "is", "None", ":", "ignore", "=", "(", "'--help'", ",", "'--version'", ")", "settable", ",", "booleans", ",", "repeatable", ",", "short_map", "=", "settable_options", "(", "doc", ",", "argv", ",", "ignore", ",", "kwargs", ".", "get", "(", "'options_first'", ",", "False", ")", ")", "if", "not", "settable", ":", "return", "docopt_dict", "# Nothing to do.", "# Handle environment variables defaults.", "if", "env_prefix", "is", "not", "None", ":", "defaults", "=", "values_from_env", "(", "env_prefix", ",", "settable", ",", "booleans", ",", "repeatable", ")", "settable", "-=", "set", "(", "defaults", ".", "keys", "(", ")", ")", "# No longer settable by values_from_file().", "docopt_dict", ".", "update", "(", "defaults", ")", "# Handle config file defaults.", "if", "config_option", "is", "not", "None", ":", "defaults", "=", "values_from_file", "(", "docopt_dict", ",", "short_map", ".", "get", "(", "config_option", ",", "config_option", ")", ",", "settable", ",", "booleans", ",", "repeatable", ",", ")", "docopt_dict", ".", "update", "(", "defaults", ")", "return", "docopt_dict" ]
Pass most args/kwargs to docopt. Handle `env_prefix` and `config_option`. :raise DocoptcfgError: If `config_option` isn't found in docstring. :raise DocoptcfgFileError: On any error while trying to read and parse config file (if enabled). :param str doc: Docstring passed to docopt. :param iter argv: sys.argv[1:] passed to docopt. :param str env_prefix: Enable environment variable support, prefix of said variables. :param str config_option: Enable config file support, docopt option defining path to config file. :param iter ignore: Options to ignore. Default is --help and --version. :param iter args: Additional positional arguments passed to docopt. :param dict kwargs: Additional keyword arguments passed to docopt. :return: Dictionary constructed by docopt and updated by docoptcfg. :rtype: dict
[ "Pass", "most", "args", "/", "kwargs", "to", "docopt", ".", "Handle", "env_prefix", "and", "config_option", "." ]
python
train
43.086957
sdispater/orator
orator/orm/builder.py
https://github.com/sdispater/orator/blob/bd90bf198ee897751848f9a92e49d18e60a74136/orator/orm/builder.py#L63-L85
def without_global_scope(self, scope):
    """
    Remove a registered global scope.

    :param scope: The scope to remove
    :type scope: Scope or str

    :rtype: Builder
    """
    if isinstance(scope, basestring):
        del self._scopes[scope]

        return self

    keys = []
    for key, value in self._scopes.items():
        if scope == value.__class__ or isinstance(scope, value.__class__):
            keys.append(key)

    for key in keys:
        del self._scopes[key]

    return self
[ "def", "without_global_scope", "(", "self", ",", "scope", ")", ":", "if", "isinstance", "(", "scope", ",", "basestring", ")", ":", "del", "self", ".", "_scopes", "[", "scope", "]", "return", "self", "keys", "=", "[", "]", "for", "key", ",", "value", "in", "self", ".", "_scopes", ".", "items", "(", ")", ":", "if", "scope", "==", "value", ".", "__class__", "or", "isinstance", "(", "scope", ",", "value", ".", "__class__", ")", ":", "keys", ".", "append", "(", "key", ")", "for", "key", "in", "keys", ":", "del", "self", ".", "_scopes", "[", "key", "]", "return", "self" ]
Remove a registered global scope. :param scope: The scope to remove :type scope: Scope or str :rtype: Builder
[ "Remove", "a", "registered", "global", "scope", "." ]
python
train
23.782609
rbarrois/aionotify
aionotify/base.py
https://github.com/rbarrois/aionotify/blob/6cfa35b26a2660f77f29a92d3efb7d1dde685b43/aionotify/base.py#L84-L93
def setup(self, loop):
    """Start the watcher, registering new watches if any."""
    self._loop = loop

    self._fd = LibC.inotify_init()
    for alias, (path, flags) in self.requests.items():
        self._setup_watch(alias, path, flags)

    # We pass ownership of the fd to the transport; it will close it.
    self._stream, self._transport = yield from aioutils.stream_from_fd(self._fd, loop)
[ "def", "setup", "(", "self", ",", "loop", ")", ":", "self", ".", "_loop", "=", "loop", "self", ".", "_fd", "=", "LibC", ".", "inotify_init", "(", ")", "for", "alias", ",", "(", "path", ",", "flags", ")", "in", "self", ".", "requests", ".", "items", "(", ")", ":", "self", ".", "_setup_watch", "(", "alias", ",", "path", ",", "flags", ")", "# We pass ownership of the fd to the transport; it will close it.", "self", ".", "_stream", ",", "self", ".", "_transport", "=", "yield", "from", "aioutils", ".", "stream_from_fd", "(", "self", ".", "_fd", ",", "loop", ")" ]
Start the watcher, registering new watches if any.
[ "Start", "the", "watcher", "registering", "new", "watches", "if", "any", "." ]
python
test
41.9
automl/HpBandSter
hpbandster/optimizers/kde/mvkde.py
https://github.com/automl/HpBandSter/blob/841db4b827f342e5eb7f725723ea6461ac52d45a/hpbandster/optimizers/kde/mvkde.py#L210-L237
def _get_types(self):
    """ extracts the needed types from the configspace for faster retrival later

    type = 0 - numerical (continuous or integer) parameter
    type >=1 - categorical parameter

    TODO: figure out a way to properly handle ordinal parameters
    """
    types = []
    num_values = []
    for hp in self.configspace.get_hyperparameters():
        #print(hp)
        if isinstance(hp, CS.CategoricalHyperparameter):
            types.append('U')
            num_values.append(len(hp.choices))
        elif isinstance(hp, CS.UniformIntegerHyperparameter):
            types.append('I')
            num_values.append((hp.upper - hp.lower + 1))
        elif isinstance(hp, CS.UniformFloatHyperparameter):
            types.append('C')
            num_values.append(np.inf)
        elif isinstance(hp, CS.OrdinalHyperparameter):
            types.append('O')
            num_values.append(len(hp.sequence))
        else:
            raise ValueError('Unsupported Parametertype %s'%type(hp))
    return(types, num_values)
[ "def", "_get_types", "(", "self", ")", ":", "types", "=", "[", "]", "num_values", "=", "[", "]", "for", "hp", "in", "self", ".", "configspace", ".", "get_hyperparameters", "(", ")", ":", "#print(hp)", "if", "isinstance", "(", "hp", ",", "CS", ".", "CategoricalHyperparameter", ")", ":", "types", ".", "append", "(", "'U'", ")", "num_values", ".", "append", "(", "len", "(", "hp", ".", "choices", ")", ")", "elif", "isinstance", "(", "hp", ",", "CS", ".", "UniformIntegerHyperparameter", ")", ":", "types", ".", "append", "(", "'I'", ")", "num_values", ".", "append", "(", "(", "hp", ".", "upper", "-", "hp", ".", "lower", "+", "1", ")", ")", "elif", "isinstance", "(", "hp", ",", "CS", ".", "UniformFloatHyperparameter", ")", ":", "types", ".", "append", "(", "'C'", ")", "num_values", ".", "append", "(", "np", ".", "inf", ")", "elif", "isinstance", "(", "hp", ",", "CS", ".", "OrdinalHyperparameter", ")", ":", "types", ".", "append", "(", "'O'", ")", "num_values", ".", "append", "(", "len", "(", "hp", ".", "sequence", ")", ")", "else", ":", "raise", "ValueError", "(", "'Unsupported Parametertype %s'", "%", "type", "(", "hp", ")", ")", "return", "(", "types", ",", "num_values", ")" ]
extracts the needed types from the configspace for faster retrival later type = 0 - numerical (continuous or integer) parameter type >=1 - categorical parameter TODO: figure out a way to properly handle ordinal parameters
[ "extracts", "the", "needed", "types", "from", "the", "configspace", "for", "faster", "retrival", "later", "type", "=", "0", "-", "numerical", "(", "continuous", "or", "integer", ")", "parameter", "type", ">", "=", "1", "-", "categorical", "parameter", "TODO", ":", "figure", "out", "a", "way", "to", "properly", "handle", "ordinal", "parameters" ]
python
train
32.25
PetrochukM/PyTorch-NLP
torchnlp/utils.py
https://github.com/PetrochukM/PyTorch-NLP/blob/5f7320da5c8d781df072fab3f7e421c6347e5bfa/torchnlp/utils.py#L241-L281
def tensors_to(tensors, *args, **kwargs):
    """ Apply ``torch.Tensor.to`` to tensors in a generic data structure.

    Inspired by:
    https://github.com/pytorch/pytorch/blob/master/torch/utils/data/_utils/collate.py#L31

    Args:
        tensors (tensor, dict, list, namedtuple or tuple): Data structure with tensor values to
            move.
        *args: Arguments passed to ``torch.Tensor.to``.
        **kwargs: Keyword arguments passed to ``torch.Tensor.to``.

    Example use case:
        This is useful as a complementary function to ``collate_tensors``. Following collating,
        it's important to move your tensors to the appropriate device.

    Returns:
        The inputted ``tensors`` with ``torch.Tensor.to`` applied.

    Example:
        >>> import torch
        >>> batch = [
        ...   { 'column_a': torch.randn(5), 'column_b': torch.randn(5) },
        ...   { 'column_a': torch.randn(5), 'column_b': torch.randn(5) },
        ... ]
        >>> tensors_to(batch, torch.device('cpu')) # doctest: +ELLIPSIS
        [{'column_a': tensor(...}]
    """
    if torch.is_tensor(tensors):
        return tensors.to(*args, **kwargs)
    elif isinstance(tensors, dict):
        return {k: tensors_to(v, *args, **kwargs) for k, v in tensors.items()}
    elif hasattr(tensors, '_asdict') and isinstance(tensors, tuple):  # Handle ``namedtuple``
        return tensors.__class__(**tensors_to(tensors._asdict(), *args, **kwargs))
    elif isinstance(tensors, list):
        return [tensors_to(t, *args, **kwargs) for t in tensors]
    elif isinstance(tensors, tuple):
        return tuple([tensors_to(t, *args, **kwargs) for t in tensors])
    else:
        return tensors
[ "def", "tensors_to", "(", "tensors", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "torch", ".", "is_tensor", "(", "tensors", ")", ":", "return", "tensors", ".", "to", "(", "*", "args", ",", "*", "*", "kwargs", ")", "elif", "isinstance", "(", "tensors", ",", "dict", ")", ":", "return", "{", "k", ":", "tensors_to", "(", "v", ",", "*", "args", ",", "*", "*", "kwargs", ")", "for", "k", ",", "v", "in", "tensors", ".", "items", "(", ")", "}", "elif", "hasattr", "(", "tensors", ",", "'_asdict'", ")", "and", "isinstance", "(", "tensors", ",", "tuple", ")", ":", "# Handle ``namedtuple``", "return", "tensors", ".", "__class__", "(", "*", "*", "tensors_to", "(", "tensors", ".", "_asdict", "(", ")", ",", "*", "args", ",", "*", "*", "kwargs", ")", ")", "elif", "isinstance", "(", "tensors", ",", "list", ")", ":", "return", "[", "tensors_to", "(", "t", ",", "*", "args", ",", "*", "*", "kwargs", ")", "for", "t", "in", "tensors", "]", "elif", "isinstance", "(", "tensors", ",", "tuple", ")", ":", "return", "tuple", "(", "[", "tensors_to", "(", "t", ",", "*", "args", ",", "*", "*", "kwargs", ")", "for", "t", "in", "tensors", "]", ")", "else", ":", "return", "tensors" ]
Apply ``torch.Tensor.to`` to tensors in a generic data structure. Inspired by: https://github.com/pytorch/pytorch/blob/master/torch/utils/data/_utils/collate.py#L31 Args: tensors (tensor, dict, list, namedtuple or tuple): Data structure with tensor values to move. *args: Arguments passed to ``torch.Tensor.to``. **kwargs: Keyword arguments passed to ``torch.Tensor.to``. Example use case: This is useful as a complementary function to ``collate_tensors``. Following collating, it's important to move your tensors to the appropriate device. Returns: The inputted ``tensors`` with ``torch.Tensor.to`` applied. Example: >>> import torch >>> batch = [ ... { 'column_a': torch.randn(5), 'column_b': torch.randn(5) }, ... { 'column_a': torch.randn(5), 'column_b': torch.randn(5) }, ... ] >>> tensors_to(batch, torch.device('cpu')) # doctest: +ELLIPSIS [{'column_a': tensor(...}]
[ "Apply", "torch", ".", "Tensor", ".", "to", "to", "tensors", "in", "a", "generic", "data", "structure", "." ]
python
train
40.317073
joelfrederico/SciSalt
scisalt/facettools/logbookForm.py
https://github.com/joelfrederico/SciSalt/blob/7bf57c49c7dde0a8b0aa337fbd2fbd527ce7a67f/scisalt/facettools/logbookForm.py#L359-L390
def submitEntry(self):
    """Process user inputs and subit logbook entry when user clicks Submit button"""

    # logType = self.logui.logType.currentText()
    mcclogs, physlogs = self.selectedLogs()
    success = True

    if mcclogs != []:
        if not self.acceptedUser("MCC"):
            QMessageBox().warning(self, "Invalid User", "Please enter a valid user name!")
            return

        fileName = self.xmlSetup("MCC", mcclogs)
        if fileName is None:
            return

        if not self.imagePixmap.isNull():
            self.prepareImages(fileName, "MCC")
        success = self.sendToLogbook(fileName, "MCC")

    if physlogs != []:
        for i in range(len(physlogs)):
            fileName = self.xmlSetup("Physics", physlogs[i])
            if fileName is None:
                return

            if not self.imagePixmap.isNull():
                self.prepareImages(fileName, "Physics")
            success_phys = self.sendToLogbook(fileName, "Physics", physlogs[i])
            success = success and success_phys

    self.done(success)
[ "def", "submitEntry", "(", "self", ")", ":", "# logType = self.logui.logType.currentText()", "mcclogs", ",", "physlogs", "=", "self", ".", "selectedLogs", "(", ")", "success", "=", "True", "if", "mcclogs", "!=", "[", "]", ":", "if", "not", "self", ".", "acceptedUser", "(", "\"MCC\"", ")", ":", "QMessageBox", "(", ")", ".", "warning", "(", "self", ",", "\"Invalid User\"", ",", "\"Please enter a valid user name!\"", ")", "return", "fileName", "=", "self", ".", "xmlSetup", "(", "\"MCC\"", ",", "mcclogs", ")", "if", "fileName", "is", "None", ":", "return", "if", "not", "self", ".", "imagePixmap", ".", "isNull", "(", ")", ":", "self", ".", "prepareImages", "(", "fileName", ",", "\"MCC\"", ")", "success", "=", "self", ".", "sendToLogbook", "(", "fileName", ",", "\"MCC\"", ")", "if", "physlogs", "!=", "[", "]", ":", "for", "i", "in", "range", "(", "len", "(", "physlogs", ")", ")", ":", "fileName", "=", "self", ".", "xmlSetup", "(", "\"Physics\"", ",", "physlogs", "[", "i", "]", ")", "if", "fileName", "is", "None", ":", "return", "if", "not", "self", ".", "imagePixmap", ".", "isNull", "(", ")", ":", "self", ".", "prepareImages", "(", "fileName", ",", "\"Physics\"", ")", "success_phys", "=", "self", ".", "sendToLogbook", "(", "fileName", ",", "\"Physics\"", ",", "physlogs", "[", "i", "]", ")", "success", "=", "success", "and", "success_phys", "self", ".", "done", "(", "success", ")" ]
Process user inputs and subit logbook entry when user clicks Submit button
[ "Process", "user", "inputs", "and", "subit", "logbook", "entry", "when", "user", "clicks", "Submit", "button" ]
python
valid
37.75
woolfson-group/isambard
isambard/ampal/protein.py
https://github.com/woolfson-group/isambard/blob/ebc33b48a28ad217e18f93b910dfba46e6e71e07/isambard/ampal/protein.py#L1069-L1141
def n_cap(self, n_cap='acetyl', cap_dihedral=None):
    """Adds an N-terminal acetamide cap.

    Notes
    -----
    Default behaviour is to duplicate the dihedral angle of the succeeding
    residues such that the orientation of the carbonyl of the acetyl will
    resemble that of the first residue. This can be adjusted by supplying a
    cap_dihedral value.

    Currently only acetyl cap is supported, but this structure should work
    for other caps.

    Parameters
    ----------
    cap : str, optional
        Type of cap to be added. Options: 'acetyl'
    cap_dihedral : bool
        Alternate psi angle to be used when added cap.
    """
    if n_cap == 'acetyl':
        methylacetamide = Ligand(
            atoms=None, mol_code='UNK', is_hetero=True)

        atoms = OrderedDict()
        atoms['C'] = Atom([0.9500, -0.2290, 0.5090], 'C', res_label='C')
        atoms['CA'] = Atom([0.7450, -0.9430, 1.8040], 'C', res_label='CA')
        atoms['O'] = Atom([0.1660, -2.0230, 1.8130], 'O', res_label='O')
        atoms['N'] = Atom([1.2540, -0.2750, 2.9010], 'N', res_label='N')
        atoms['CME'] = Atom([1.1630, -0.7870, 4.2500], 'C', res_label='CME')
        # these coordinates seem ok, but could review
        # and use a different fragment if necessary
        methylacetamide.atoms = atoms

        s1, e1, s2, e2 = [
            x._vector for x in [methylacetamide['N'],
                                methylacetamide['CME'],
                                self._monomers[0]['N'],
                                self._monomers[0]['CA']]]
        translation, angle, axis, point = find_transformations(
            s1, e1, s2, e2, radians=False)
        methylacetamide.rotate(
            angle=angle, axis=axis, point=point, radians=False)
        methylacetamide.translate(vector=translation)

        start_angle = dihedral(
            methylacetamide['C'], self._monomers[0]['N'],
            self._monomers[0]['CA'], self._monomers[0]['C'])
        ref_angle = dihedral(
            self._monomers[0]['C'], self._monomers[1]['N'],
            self._monomers[1]['CA'], self._monomers[1]['C'])
        if cap_dihedral is not None:
            methylacetamide.rotate(ref_angle - start_angle + cap_dihedral,
                                   axis=methylacetamide['N']._vector -
                                   self._monomers[0]['CA']._vector,
                                   point=methylacetamide['N']._vector)
        else:
            methylacetamide.rotate(ref_angle - start_angle,
                                   axis=methylacetamide['N']._vector -
                                   self._monomers[0]['CA']._vector,
                                   point=methylacetamide['N']._vector)

        if self.ligands is None:
            self.ligands = LigandGroup(ampal_parent=self)
        acetamide = Ligand(mol_code='ACM', ampal_parent=self.ligands)
        acetamide_atoms = OrderedDict()
        acetamide_atoms['C'] = atoms['C']
        acetamide_atoms['CA'] = atoms['CA']
        acetamide_atoms['O'] = atoms['O']
        for atom in acetamide_atoms.values():
            atom.ampal_parent = acetamide
        acetamide.atoms = acetamide_atoms
        self.ligands.append(acetamide)
    else:
        pass  # just in case we want to build different caps in later
    self.tags['assigned_ff'] = False
    return
[ "def", "n_cap", "(", "self", ",", "n_cap", "=", "'acetyl'", ",", "cap_dihedral", "=", "None", ")", ":", "if", "n_cap", "==", "'acetyl'", ":", "methylacetamide", "=", "Ligand", "(", "atoms", "=", "None", ",", "mol_code", "=", "'UNK'", ",", "is_hetero", "=", "True", ")", "atoms", "=", "OrderedDict", "(", ")", "atoms", "[", "'C'", "]", "=", "Atom", "(", "[", "0.9500", ",", "-", "0.2290", ",", "0.5090", "]", ",", "'C'", ",", "res_label", "=", "'C'", ")", "atoms", "[", "'CA'", "]", "=", "Atom", "(", "[", "0.7450", ",", "-", "0.9430", ",", "1.8040", "]", ",", "'C'", ",", "res_label", "=", "'CA'", ")", "atoms", "[", "'O'", "]", "=", "Atom", "(", "[", "0.1660", ",", "-", "2.0230", ",", "1.8130", "]", ",", "'O'", ",", "res_label", "=", "'O'", ")", "atoms", "[", "'N'", "]", "=", "Atom", "(", "[", "1.2540", ",", "-", "0.2750", ",", "2.9010", "]", ",", "'N'", ",", "res_label", "=", "'N'", ")", "atoms", "[", "'CME'", "]", "=", "Atom", "(", "[", "1.1630", ",", "-", "0.7870", ",", "4.2500", "]", ",", "'C'", ",", "res_label", "=", "'CME'", ")", "# these coordinates seem ok, but could review", "# and use a different fragment if necessary", "methylacetamide", ".", "atoms", "=", "atoms", "s1", ",", "e1", ",", "s2", ",", "e2", "=", "[", "x", ".", "_vector", "for", "x", "in", "[", "methylacetamide", "[", "'N'", "]", ",", "methylacetamide", "[", "'CME'", "]", ",", "self", ".", "_monomers", "[", "0", "]", "[", "'N'", "]", ",", "self", ".", "_monomers", "[", "0", "]", "[", "'CA'", "]", "]", "]", "translation", ",", "angle", ",", "axis", ",", "point", "=", "find_transformations", "(", "s1", ",", "e1", ",", "s2", ",", "e2", ",", "radians", "=", "False", ")", "methylacetamide", ".", "rotate", "(", "angle", "=", "angle", ",", "axis", "=", "axis", ",", "point", "=", "point", ",", "radians", "=", "False", ")", "methylacetamide", ".", "translate", "(", "vector", "=", "translation", ")", "start_angle", "=", "dihedral", "(", "methylacetamide", "[", "'C'", "]", ",", "self", ".", "_monomers", "[", "0", "]", "[", "'N'", "]", ",", "self", ".", "_monomers", "[", "0", "]", "[", "'CA'", "]", ",", "self", ".", "_monomers", "[", "0", "]", "[", "'C'", "]", ")", "ref_angle", "=", "dihedral", "(", "self", ".", "_monomers", "[", "0", "]", "[", "'C'", "]", ",", "self", ".", "_monomers", "[", "1", "]", "[", "'N'", "]", ",", "self", ".", "_monomers", "[", "1", "]", "[", "'CA'", "]", ",", "self", ".", "_monomers", "[", "1", "]", "[", "'C'", "]", ")", "if", "cap_dihedral", "is", "not", "None", ":", "methylacetamide", ".", "rotate", "(", "ref_angle", "-", "start_angle", "+", "cap_dihedral", ",", "axis", "=", "methylacetamide", "[", "'N'", "]", ".", "_vector", "-", "self", ".", "_monomers", "[", "0", "]", "[", "'CA'", "]", ".", "_vector", ",", "point", "=", "methylacetamide", "[", "'N'", "]", ".", "_vector", ")", "else", ":", "methylacetamide", ".", "rotate", "(", "ref_angle", "-", "start_angle", ",", "axis", "=", "methylacetamide", "[", "'N'", "]", ".", "_vector", "-", "self", ".", "_monomers", "[", "0", "]", "[", "'CA'", "]", ".", "_vector", ",", "point", "=", "methylacetamide", "[", "'N'", "]", ".", "_vector", ")", "if", "self", ".", "ligands", "is", "None", ":", "self", ".", "ligands", "=", "LigandGroup", "(", "ampal_parent", "=", "self", ")", "acetamide", "=", "Ligand", "(", "mol_code", "=", "'ACM'", ",", "ampal_parent", "=", "self", ".", "ligands", ")", "acetamide_atoms", "=", "OrderedDict", "(", ")", "acetamide_atoms", "[", "'C'", "]", "=", "atoms", "[", "'C'", "]", "acetamide_atoms", "[", "'CA'", "]", "=", "atoms", "[", "'CA'", 
"]", "acetamide_atoms", "[", "'O'", "]", "=", "atoms", "[", "'O'", "]", "for", "atom", "in", "acetamide_atoms", ".", "values", "(", ")", ":", "atom", ".", "ampal_parent", "=", "acetamide", "acetamide", ".", "atoms", "=", "acetamide_atoms", "self", ".", "ligands", ".", "append", "(", "acetamide", ")", "else", ":", "pass", "# just in case we want to build different caps in later", "self", ".", "tags", "[", "'assigned_ff'", "]", "=", "False", "return" ]
Adds an N-terminal acetamide cap. Notes ----- Default behaviour is to duplicate the dihedral angle of the succeeding residues such that the orientation of the carbonyl of the acetyl will resemble that of the first residue. This can be adjusted by supplying a cap_dihedral value. Currently only the acetyl cap is supported, but this structure should work for other caps. Parameters ---------- n_cap : str, optional Type of cap to be added. Options: 'acetyl' cap_dihedral : float, optional Alternate psi angle to be used when adding the cap.
[ "Adds", "an", "N", "-", "terminal", "acetamide", "cap", "." ]
python
train
49.164384
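The cap-placement code above hinges on measuring dihedral angles (start_angle vs ref_angle) and rotating by their difference. The following is a standalone numpy sketch of such a dihedral measurement, for illustration only; it is not ISAMBARD's own `dihedral` helper, and the sample coordinates are made up.

import numpy as np

def dihedral_deg(p0, p1, p2, p3):
    # Dihedral angle p0-p1-p2-p3 in degrees, signed by the right-hand rule.
    b0 = p0 - p1
    b1 = p2 - p1
    b2 = p3 - p2
    b1 = b1 / np.linalg.norm(b1)
    v = b0 - np.dot(b0, b1) * b1   # component of b0 orthogonal to b1
    w = b2 - np.dot(b2, b1) * b1   # component of b2 orthogonal to b1
    x = np.dot(v, w)
    y = np.dot(np.cross(b1, v), w)
    return np.degrees(np.arctan2(y, x))

p0, p1, p2, p3 = (np.array(v, dtype=float) for v in
                  ([1, 0, 0], [0, 0, 0], [0, 1, 0], [1, 1, 1]))
print(dihedral_deg(p0, p1, p2, p3))   # -> -45.0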
happyleavesaoc/python-limitlessled
limitlessled/group/commands/legacy.py
https://github.com/happyleavesaoc/python-limitlessled/blob/70307c2bf8c91430a99579d2ad18b228ec7a8488/limitlessled/group/commands/legacy.py#L29-L42
def get_bytes(self, bridge): """ Gets the full command as bytes. :param bridge: The bridge, to which the command should be sent. """ if self.cmd_2 is not None: cmd = [self.cmd_1, self.cmd_2] else: cmd = [self.cmd_1, self.SUFFIX_BYTE] if bridge.version < self.BRIDGE_SHORT_VERSION_MIN: cmd.append(self.BRIDGE_LONG_BYTE) return bytearray(cmd)
[ "def", "get_bytes", "(", "self", ",", "bridge", ")", ":", "if", "self", ".", "cmd_2", "is", "not", "None", ":", "cmd", "=", "[", "self", ".", "cmd_1", ",", "self", ".", "cmd_2", "]", "else", ":", "cmd", "=", "[", "self", ".", "cmd_1", ",", "self", ".", "SUFFIX_BYTE", "]", "if", "bridge", ".", "version", "<", "self", ".", "BRIDGE_SHORT_VERSION_MIN", ":", "cmd", ".", "append", "(", "self", ".", "BRIDGE_LONG_BYTE", ")", "return", "bytearray", "(", "cmd", ")" ]
Gets the full command as bytes. :param bridge: The bridge, to which the command should be sent.
[ "Gets", "the", "full", "command", "as", "bytes", ".", ":", "param", "bridge", ":", "The", "bridge", "to", "which", "the", "command", "should", "be", "sent", "." ]
python
train
30.642857
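A self-contained sketch of the same branching logic; the byte values are made up stand-ins for the class constants (cmd_1, cmd_2, SUFFIX_BYTE and BRIDGE_LONG_BYTE are attributes of the real command class, not module globals).

SUFFIX_BYTE = 0x55               # placeholder values for illustration only
BRIDGE_LONG_BYTE = 0x00
BRIDGE_SHORT_VERSION_MIN = 3

def build_command(cmd_1, cmd_2, bridge_version):
    # two-byte command, padded with the suffix byte when there is no second byte
    cmd = [cmd_1, cmd_2] if cmd_2 is not None else [cmd_1, SUFFIX_BYTE]
    if bridge_version < BRIDGE_SHORT_VERSION_MIN:
        cmd.append(BRIDGE_LONG_BYTE)  # older bridges expect an extra trailing byte
    return bytearray(cmd)

print(len(build_command(0x45, None, bridge_version=2)))  # 3 bytes for a legacy bridge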
jlesquembre/autopilot
src/autopilot/main.py
https://github.com/jlesquembre/autopilot/blob/ca5f36269ba0173bd29c39db6971dac57a58513d/src/autopilot/main.py#L28-L46
def new(project_name): """Creates a new project""" try: locale.setlocale(locale.LC_ALL, '') except: print("Warning: Unable to set locale. Expect encoding problems.") config = utils.get_config() config['new_project']['project_name'] = project_name values = new_project_ui(config) if type(values) is not str: print('New project options:') pprint.pprint(values) project_dir = render.render_project(**values) git.init_repo(project_dir, **values) else: print(values)
[ "def", "new", "(", "project_name", ")", ":", "try", ":", "locale", ".", "setlocale", "(", "locale", ".", "LC_ALL", ",", "''", ")", "except", ":", "print", "(", "\"Warning: Unable to set locale. Expect encoding problems.\"", ")", "config", "=", "utils", ".", "get_config", "(", ")", "config", "[", "'new_project'", "]", "[", "'project_name'", "]", "=", "project_name", "values", "=", "new_project_ui", "(", "config", ")", "if", "type", "(", "values", ")", "is", "not", "str", ":", "print", "(", "'New project options:'", ")", "pprint", ".", "pprint", "(", "values", ")", "project_dir", "=", "render", ".", "render_project", "(", "*", "*", "values", ")", "git", ".", "init_repo", "(", "project_dir", ",", "*", "*", "values", ")", "else", ":", "print", "(", "values", ")" ]
Creates a new project
[ "Creates", "a", "new", "project" ]
python
train
28.157895
spyder-ide/spyder
spyder/plugins/editor/utils/editor.py
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/plugins/editor/utils/editor.py#L267-L276
def word_under_mouse_cursor(self): """ Selects the word under the **mouse** cursor. :return: A QTextCursor with the word under mouse cursor selected. """ editor = self._editor text_cursor = editor.cursorForPosition(editor._last_mouse_pos) text_cursor = self.word_under_cursor(True, text_cursor) return text_cursor
[ "def", "word_under_mouse_cursor", "(", "self", ")", ":", "editor", "=", "self", ".", "_editor", "text_cursor", "=", "editor", ".", "cursorForPosition", "(", "editor", ".", "_last_mouse_pos", ")", "text_cursor", "=", "self", ".", "word_under_cursor", "(", "True", ",", "text_cursor", ")", "return", "text_cursor" ]
Selects the word under the **mouse** cursor. :return: A QTextCursor with the word under mouse cursor selected.
[ "Selects", "the", "word", "under", "the", "**", "mouse", "**", "cursor", "." ]
python
train
36.9
facetoe/zenpy
zenpy/lib/api.py
https://github.com/facetoe/zenpy/blob/34c54c7e408b9ed01604ddf8b3422204c8bf31ea/zenpy/lib/api.py#L1055-L1063
def comments(self, ticket, include_inline_images=False): """ Retrieve the comments for a ticket. :param ticket: Ticket object or id :param include_inline_images: Boolean. If `True`, inline image attachments will be returned in each comments' `attachments` field alongside non-inline attachments """ return self._query_zendesk(self.endpoint.comments, 'comment', id=ticket, include_inline_images=repr(include_inline_images).lower())
[ "def", "comments", "(", "self", ",", "ticket", ",", "include_inline_images", "=", "False", ")", ":", "return", "self", ".", "_query_zendesk", "(", "self", ".", "endpoint", ".", "comments", ",", "'comment'", ",", "id", "=", "ticket", ",", "include_inline_images", "=", "repr", "(", "include_inline_images", ")", ".", "lower", "(", ")", ")" ]
Retrieve the comments for a ticket. :param ticket: Ticket object or id :param include_inline_images: Boolean. If `True`, inline image attachments will be returned in each comment's `attachments` field alongside non-inline attachments
[ "Retrieve", "the", "comments", "for", "a", "ticket", "." ]
python
train
53.666667
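A hypothetical usage sketch; the credentials and ticket id are placeholders, and it assumes the standard Zenpy client construction.

from zenpy import Zenpy

client = Zenpy(subdomain='example', email='agent@example.com', token='...')
# include_inline_images=True asks Zendesk to list inline images among the attachments
for comment in client.tickets.comments(ticket=12345, include_inline_images=True):
    print(comment.id, comment.body)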
boundlessgeo/gsconfig
src/geoserver/catalog.py
https://github.com/boundlessgeo/gsconfig/blob/532f561f32b91ea8debea0573c503dd20988bf40/src/geoserver/catalog.py#L618-L669
def add_granule(self, data, store, workspace=None): '''Harvest/add a granule into an existing imagemosaic''' ext = os.path.splitext(data)[-1] if ext == ".zip": type = "file.imagemosaic" upload_data = open(data, 'rb') headers = { "Content-type": "application/zip", "Accept": "application/xml" } else: type = "external.imagemosaic" upload_data = data if data.startswith("file:") else "file:{data}".format(data=data) headers = { "Content-type": "text/plain", "Accept": "application/xml" } params = dict() workspace_name = workspace if isinstance(store, basestring): store_name = store else: store_name = store.name workspace_name = store.workspace.name if workspace_name is None: raise ValueError("Must specify workspace") url = build_url( self.service_url, [ "workspaces", workspace_name, "coveragestores", store_name, type ], params ) try: resp = self.http_request(url, method='post', data=upload_data, headers=headers) if resp.status_code != 202: FailedRequestError('Failed to add granule to mosaic {} : {}, {}'.format(store, resp.status_code, resp.text)) self._cache.clear() finally: if hasattr(upload_data, "close"): upload_data.close() # maybe return a list of all granules? return None
[ "def", "add_granule", "(", "self", ",", "data", ",", "store", ",", "workspace", "=", "None", ")", ":", "ext", "=", "os", ".", "path", ".", "splitext", "(", "data", ")", "[", "-", "1", "]", "if", "ext", "==", "\".zip\"", ":", "type", "=", "\"file.imagemosaic\"", "upload_data", "=", "open", "(", "data", ",", "'rb'", ")", "headers", "=", "{", "\"Content-type\"", ":", "\"application/zip\"", ",", "\"Accept\"", ":", "\"application/xml\"", "}", "else", ":", "type", "=", "\"external.imagemosaic\"", "upload_data", "=", "data", "if", "data", ".", "startswith", "(", "\"file:\"", ")", "else", "\"file:{data}\"", ".", "format", "(", "data", "=", "data", ")", "headers", "=", "{", "\"Content-type\"", ":", "\"text/plain\"", ",", "\"Accept\"", ":", "\"application/xml\"", "}", "params", "=", "dict", "(", ")", "workspace_name", "=", "workspace", "if", "isinstance", "(", "store", ",", "basestring", ")", ":", "store_name", "=", "store", "else", ":", "store_name", "=", "store", ".", "name", "workspace_name", "=", "store", ".", "workspace", ".", "name", "if", "workspace_name", "is", "None", ":", "raise", "ValueError", "(", "\"Must specify workspace\"", ")", "url", "=", "build_url", "(", "self", ".", "service_url", ",", "[", "\"workspaces\"", ",", "workspace_name", ",", "\"coveragestores\"", ",", "store_name", ",", "type", "]", ",", "params", ")", "try", ":", "resp", "=", "self", ".", "http_request", "(", "url", ",", "method", "=", "'post'", ",", "data", "=", "upload_data", ",", "headers", "=", "headers", ")", "if", "resp", ".", "status_code", "!=", "202", ":", "FailedRequestError", "(", "'Failed to add granule to mosaic {} : {}, {}'", ".", "format", "(", "store", ",", "resp", ".", "status_code", ",", "resp", ".", "text", ")", ")", "self", ".", "_cache", ".", "clear", "(", ")", "finally", ":", "if", "hasattr", "(", "upload_data", ",", "\"close\"", ")", ":", "upload_data", ".", "close", "(", ")", "# maybe return a list of all granules?", "return", "None" ]
Harvest/add a granule into an existing imagemosaic
[ "Harvest", "/", "add", "a", "granule", "into", "an", "existing", "imagemosaic" ]
python
valid
32.288462
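Note that in the body above the FailedRequestError is constructed but never raised on a non-202 response; presumably the intent was something like the following sketch.

if resp.status_code != 202:
    # actually surface the failure instead of discarding the exception object
    raise FailedRequestError(
        'Failed to add granule to mosaic {} : {}, {}'.format(
            store, resp.status_code, resp.text))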
limodou/uliweb
uliweb/contrib/auth/__init__.py
https://github.com/limodou/uliweb/blob/34472f25e4bc0b954a35346672f94e84ef18b076/uliweb/contrib/auth/__init__.py#L90-L125
def set_user_session(user): """ Set user session :param user: user object chould be model instance or dict :return: """ from uliweb import settings, request user_fieldname = settings.get_var('AUTH/GET_AUTH_USER_FIELDNAME', 'id') share_session = settings.get_var('AUTH/AUTH_SHARE_USER_SESSION', False) if isinstance(user, dict): user_id = user[user_fieldname] else: user_id = getattr(user, user_fieldname) if share_session: cache = functions.get_cache() key = get_user_session_key(user_id) session_id = cache.get(key, None) log.debug('Auth: user session user_id={}, session_id={}, key={}'.format(user_id, session_id, key)) if not session_id: request.session.save() log.debug('Auth: set user session mapping userid={}, ' 'session_id={}, expiry time={}'.format(user_id, request.session.key, request.session.expiry_time)) cache.set(key, request.session.key, expire=request.session.expiry_time) elif session_id != request.session.key: log.debug('Auth: load oldkey={}, key={}'.format(request.session.key, session_id)) request.session.delete() request.session.load(session_id) if isinstance(user, dict): request.session[_get_auth_key()] = user else: request.session[_get_auth_key()] = user_id request.user = user
[ "def", "set_user_session", "(", "user", ")", ":", "from", "uliweb", "import", "settings", ",", "request", "user_fieldname", "=", "settings", ".", "get_var", "(", "'AUTH/GET_AUTH_USER_FIELDNAME'", ",", "'id'", ")", "share_session", "=", "settings", ".", "get_var", "(", "'AUTH/AUTH_SHARE_USER_SESSION'", ",", "False", ")", "if", "isinstance", "(", "user", ",", "dict", ")", ":", "user_id", "=", "user", "[", "user_fieldname", "]", "else", ":", "user_id", "=", "getattr", "(", "user", ",", "user_fieldname", ")", "if", "share_session", ":", "cache", "=", "functions", ".", "get_cache", "(", ")", "key", "=", "get_user_session_key", "(", "user_id", ")", "session_id", "=", "cache", ".", "get", "(", "key", ",", "None", ")", "log", ".", "debug", "(", "'Auth: user session user_id={}, session_id={}, key={}'", ".", "format", "(", "user_id", ",", "session_id", ",", "key", ")", ")", "if", "not", "session_id", ":", "request", ".", "session", ".", "save", "(", ")", "log", ".", "debug", "(", "'Auth: set user session mapping userid={}, '", "'session_id={}, expiry time={}'", ".", "format", "(", "user_id", ",", "request", ".", "session", ".", "key", ",", "request", ".", "session", ".", "expiry_time", ")", ")", "cache", ".", "set", "(", "key", ",", "request", ".", "session", ".", "key", ",", "expire", "=", "request", ".", "session", ".", "expiry_time", ")", "elif", "session_id", "!=", "request", ".", "session", ".", "key", ":", "log", ".", "debug", "(", "'Auth: load oldkey={}, key={}'", ".", "format", "(", "request", ".", "session", ".", "key", ",", "session_id", ")", ")", "request", ".", "session", ".", "delete", "(", ")", "request", ".", "session", ".", "load", "(", "session_id", ")", "if", "isinstance", "(", "user", ",", "dict", ")", ":", "request", ".", "session", "[", "_get_auth_key", "(", ")", "]", "=", "user", "else", ":", "request", ".", "session", "[", "_get_auth_key", "(", ")", "]", "=", "user_id", "request", ".", "user", "=", "user" ]
Set user session :param user: user object could be model instance or dict :return:
[ "Set", "user", "session", ":", "param", "user", ":", "user", "object", "chould", "be", "model", "instance", "or", "dict", ":", "return", ":" ]
python
train
43.305556
googledatalab/pydatalab
solutionbox/structured_data/mltoolbox/_structured_data/trainer/util.py
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/solutionbox/structured_data/mltoolbox/_structured_data/trainer/util.py#L315-L359
def read_examples(input_files, batch_size, shuffle, num_epochs=None): """Creates readers and queues for reading example protos.""" files = [] for e in input_files: for path in e.split(','): files.extend(file_io.get_matching_files(path)) thread_count = multiprocessing.cpu_count() # The minimum number of instances in a queue from which examples are drawn # randomly. The larger this number, the more randomness at the expense of # higher memory requirements. min_after_dequeue = 1000 # When batching data, the queue's capacity will be larger than the batch_size # by some factor. The recommended formula is (num_threads + a small safety # margin). For now, we use a single thread for reading, so this can be small. queue_size_multiplier = thread_count + 3 # Convert num_epochs == 0 -> num_epochs is None, if necessary num_epochs = num_epochs or None # Build a queue of the filenames to be read. filename_queue = tf.train.string_input_producer(files, num_epochs, shuffle) example_id, encoded_example = tf.TextLineReader().read_up_to( filename_queue, batch_size) if shuffle: capacity = min_after_dequeue + queue_size_multiplier * batch_size return tf.train.shuffle_batch( [example_id, encoded_example], batch_size, capacity, min_after_dequeue, enqueue_many=True, num_threads=thread_count) else: capacity = queue_size_multiplier * batch_size return tf.train.batch( [example_id, encoded_example], batch_size, capacity=capacity, enqueue_many=True, num_threads=thread_count)
[ "def", "read_examples", "(", "input_files", ",", "batch_size", ",", "shuffle", ",", "num_epochs", "=", "None", ")", ":", "files", "=", "[", "]", "for", "e", "in", "input_files", ":", "for", "path", "in", "e", ".", "split", "(", "','", ")", ":", "files", ".", "extend", "(", "file_io", ".", "get_matching_files", "(", "path", ")", ")", "thread_count", "=", "multiprocessing", ".", "cpu_count", "(", ")", "# The minimum number of instances in a queue from which examples are drawn", "# randomly. The larger this number, the more randomness at the expense of", "# higher memory requirements.", "min_after_dequeue", "=", "1000", "# When batching data, the queue's capacity will be larger than the batch_size", "# by some factor. The recommended formula is (num_threads + a small safety", "# margin). For now, we use a single thread for reading, so this can be small.", "queue_size_multiplier", "=", "thread_count", "+", "3", "# Convert num_epochs == 0 -> num_epochs is None, if necessary", "num_epochs", "=", "num_epochs", "or", "None", "# Build a queue of the filenames to be read.", "filename_queue", "=", "tf", ".", "train", ".", "string_input_producer", "(", "files", ",", "num_epochs", ",", "shuffle", ")", "example_id", ",", "encoded_example", "=", "tf", ".", "TextLineReader", "(", ")", ".", "read_up_to", "(", "filename_queue", ",", "batch_size", ")", "if", "shuffle", ":", "capacity", "=", "min_after_dequeue", "+", "queue_size_multiplier", "*", "batch_size", "return", "tf", ".", "train", ".", "shuffle_batch", "(", "[", "example_id", ",", "encoded_example", "]", ",", "batch_size", ",", "capacity", ",", "min_after_dequeue", ",", "enqueue_many", "=", "True", ",", "num_threads", "=", "thread_count", ")", "else", ":", "capacity", "=", "queue_size_multiplier", "*", "batch_size", "return", "tf", ".", "train", ".", "batch", "(", "[", "example_id", ",", "encoded_example", "]", ",", "batch_size", ",", "capacity", "=", "capacity", ",", "enqueue_many", "=", "True", ",", "num_threads", "=", "thread_count", ")" ]
Creates readers and queues for reading example protos.
[ "Creates", "readers", "and", "queues", "for", "reading", "example", "protos", "." ]
python
train
35.311111
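A hedged usage sketch assuming the TensorFlow 1.x queue-runner API this helper targets (these APIs were removed in TF 2.x); the file pattern is a placeholder.

import tensorflow as tf  # TF 1.x assumed; queue runners do not exist in TF 2.x

ids_op, rows_op = read_examples(['data/train-*.csv'], batch_size=64,
                                shuffle=True, num_epochs=1)
with tf.Session() as sess:
    # string_input_producer with num_epochs set creates a local variable
    sess.run([tf.global_variables_initializer(), tf.local_variables_initializer()])
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    try:
        while not coord.should_stop():
            ids, rows = sess.run([ids_op, rows_op])  # one batch of example ids and CSV lines
    except tf.errors.OutOfRangeError:
        pass  # the single epoch is exhausted
    finally:
        coord.request_stop()
        coord.join(threads)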
saltstack/salt
salt/utils/minions.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/minions.py#L355-L363
def _check_pillar_exact_minions(self, expr, delimiter, greedy): ''' Return the minions found by looking via pillar ''' return self._check_cache_minions(expr, delimiter, greedy, 'pillar', exact_match=True)
[ "def", "_check_pillar_exact_minions", "(", "self", ",", "expr", ",", "delimiter", ",", "greedy", ")", ":", "return", "self", ".", "_check_cache_minions", "(", "expr", ",", "delimiter", ",", "greedy", ",", "'pillar'", ",", "exact_match", "=", "True", ")" ]
Return the minions found by looking via pillar
[ "Return", "the", "minions", "found", "by", "looking", "via", "pillar" ]
python
train
43.555556
lowandrew/OLCTools
spadespipeline/quality.py
https://github.com/lowandrew/OLCTools/blob/88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a/spadespipeline/quality.py#L695-L712
def run_qaml(self): """ Create and run the GenomeQAML system call """ logging.info('Running GenomeQAML quality assessment') qaml_call = 'classify.py -t {tf} -r {rf}'\ .format(tf=self.qaml_path, rf=self.qaml_report) make_path(self.reportpath) # Only attempt to assess assemblies if the report doesn't already exist if not os.path.isfile(self.qaml_report): # Run the system calls out, err = run_subprocess(qaml_call) # Acquire thread lock, and write the logs to file self.threadlock.acquire() write_to_logfile(qaml_call, qaml_call, self.logfile) write_to_logfile(out, err, self.logfile) self.threadlock.release()
[ "def", "run_qaml", "(", "self", ")", ":", "logging", ".", "info", "(", "'Running GenomeQAML quality assessment'", ")", "qaml_call", "=", "'classify.py -t {tf} -r {rf}'", ".", "format", "(", "tf", "=", "self", ".", "qaml_path", ",", "rf", "=", "self", ".", "qaml_report", ")", "make_path", "(", "self", ".", "reportpath", ")", "# Only attempt to assess assemblies if the report doesn't already exist", "if", "not", "os", ".", "path", ".", "isfile", "(", "self", ".", "qaml_report", ")", ":", "# Run the system calls", "out", ",", "err", "=", "run_subprocess", "(", "qaml_call", ")", "# Acquire thread lock, and write the logs to file", "self", ".", "threadlock", ".", "acquire", "(", ")", "write_to_logfile", "(", "qaml_call", ",", "qaml_call", ",", "self", ".", "logfile", ")", "write_to_logfile", "(", "out", ",", "err", ",", "self", ".", "logfile", ")", "self", ".", "threadlock", ".", "release", "(", ")" ]
Create and run the GenomeQAML system call
[ "Create", "and", "run", "the", "GenomeQAML", "system", "call" ]
python
train
42.944444
xtrementl/focus
focus/parser/parser.py
https://github.com/xtrementl/focus/blob/cbbbc0b49a7409f9e0dc899de5b7e057f50838e4/focus/parser/parser.py#L312-L322
def _rule_option(self): """ Parses the production rule:: option : NAME value ';' Returns list (name, value_list). """ name = self._get_token(self.RE_NAME) value = self._rule_value() self._expect_token(';') return [name, value]
[ "def", "_rule_option", "(", "self", ")", ":", "name", "=", "self", ".", "_get_token", "(", "self", ".", "RE_NAME", ")", "value", "=", "self", ".", "_rule_value", "(", ")", "self", ".", "_expect_token", "(", "';'", ")", "return", "[", "name", ",", "value", "]" ]
Parses the production rule:: option : NAME value ';' Returns list (name, value_list).
[ "Parses", "the", "production", "rule", "::", "option", ":", "NAME", "value", ";" ]
python
train
27.090909
beregond/jsonmodels
jsonmodels/utilities.py
https://github.com/beregond/jsonmodels/blob/97a1a6b90a49490fc5a6078f49027055d2e13541/jsonmodels/utilities.py#L60-L90
def compare_schemas(one, two): """Compare two structures that represents JSON schemas. For comparison you can't use normal comparison, because in JSON schema lists DO NOT keep order (and Python lists do), so this must be taken into account during comparison. Note this wont check all configurations, only first one that seems to match, which can lead to wrong results. :param one: First schema to compare. :param two: Second schema to compare. :rtype: `bool` """ one = _normalize_string_type(one) two = _normalize_string_type(two) _assert_same_types(one, two) if isinstance(one, list): return _compare_lists(one, two) elif isinstance(one, dict): return _compare_dicts(one, two) elif isinstance(one, SCALAR_TYPES): return one == two elif one is None: return one is two else: raise RuntimeError('Not allowed type "{type}"'.format( type=type(one).__name__))
[ "def", "compare_schemas", "(", "one", ",", "two", ")", ":", "one", "=", "_normalize_string_type", "(", "one", ")", "two", "=", "_normalize_string_type", "(", "two", ")", "_assert_same_types", "(", "one", ",", "two", ")", "if", "isinstance", "(", "one", ",", "list", ")", ":", "return", "_compare_lists", "(", "one", ",", "two", ")", "elif", "isinstance", "(", "one", ",", "dict", ")", ":", "return", "_compare_dicts", "(", "one", ",", "two", ")", "elif", "isinstance", "(", "one", ",", "SCALAR_TYPES", ")", ":", "return", "one", "==", "two", "elif", "one", "is", "None", ":", "return", "one", "is", "two", "else", ":", "raise", "RuntimeError", "(", "'Not allowed type \"{type}\"'", ".", "format", "(", "type", "=", "type", "(", "one", ")", ".", "__name__", ")", ")" ]
Compare two structures that represent JSON schemas. For comparison you can't use normal comparison, because in JSON schema lists DO NOT keep order (and Python lists do), so this must be taken into account during comparison. Note this won't check all configurations, only the first one that seems to match, which can lead to wrong results. :param one: First schema to compare. :param two: Second schema to compare. :rtype: `bool`
[ "Compare", "two", "structures", "that", "represents", "JSON", "schemas", "." ]
python
train
30.774194
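An illustrative check of the order-insensitive comparison described in the docstring, assuming the helper is importable from jsonmodels.utilities as the file path suggests.

from jsonmodels.utilities import compare_schemas

one = {'type': 'object', 'required': ['id', 'name']}
two = {'type': 'object', 'required': ['name', 'id']}  # same schema, list order differs
assert compare_schemas(one, two)   # lists are compared without regard to order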
costastf/locationsharinglib
_CI/library/patch.py
https://github.com/costastf/locationsharinglib/blob/dcd74b0cdb59b951345df84987238763e50ef282/_CI/library/patch.py#L198-L203
def pathstrip(path, n): """ Strip n leading components from the given path """ pathlist = [path] while os.path.dirname(pathlist[0]) != b'': pathlist[0:1] = os.path.split(pathlist[0]) return b'/'.join(pathlist[n:])
[ "def", "pathstrip", "(", "path", ",", "n", ")", ":", "pathlist", "=", "[", "path", "]", "while", "os", ".", "path", ".", "dirname", "(", "pathlist", "[", "0", "]", ")", "!=", "b''", ":", "pathlist", "[", "0", ":", "1", "]", "=", "os", ".", "path", ".", "split", "(", "pathlist", "[", "0", "]", ")", "return", "b'/'", ".", "join", "(", "pathlist", "[", "n", ":", "]", ")" ]
Strip n leading components from the given path
[ "Strip", "n", "leading", "components", "from", "the", "given", "path" ]
python
train
36.666667
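For illustration, the stripping behaviour on a bytes path (patch.py works with bytes paths throughout); the expected values follow directly from the code above.

assert pathstrip(b'a/b/c/d.patch', 1) == b'b/c/d.patch'   # drop the leading 'a/'
assert pathstrip(b'a/b/c/d.patch', 2) == b'c/d.patch'     # drop 'a/b/'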
chimera0/accel-brain-code
Automatic-Summarization/demo/demo_with_n_gram_japanese_web_page.py
https://github.com/chimera0/accel-brain-code/blob/03661f6f544bed656269fcd4b3c23c9061629daa/Automatic-Summarization/demo/demo_with_n_gram_japanese_web_page.py#L10-L41
def Main(url): ''' Entry Point. Args: url: target url. ''' # The object of Web-Scraping. web_scrape = WebScraping() # Execute Web-Scraping. document = web_scrape.scrape(url) # The object of automatic summarization with N-gram. auto_abstractor = NgramAutoAbstractor() # n-gram object auto_abstractor.n_gram = Ngram() # n of n-gram auto_abstractor.n = 3 # Set tokenizer. This is japanese tokenizer with MeCab. auto_abstractor.tokenizable_doc = MeCabTokenizer() # Object of abstracting and filtering document. abstractable_doc = TopNRankAbstractor() # Execute summarization. result_dict = auto_abstractor.summarize(document, abstractable_doc) # Output 3 summarized sentences. limit = 3 i = 1 for sentence in result_dict["summarize_result"]: print(sentence) if i >= limit: break i += 1
[ "def", "Main", "(", "url", ")", ":", "# The object of Web-Scraping.", "web_scrape", "=", "WebScraping", "(", ")", "# Execute Web-Scraping.", "document", "=", "web_scrape", ".", "scrape", "(", "url", ")", "# The object of automatic summarization with N-gram.", "auto_abstractor", "=", "NgramAutoAbstractor", "(", ")", "# n-gram object", "auto_abstractor", ".", "n_gram", "=", "Ngram", "(", ")", "# n of n-gram", "auto_abstractor", ".", "n", "=", "3", "# Set tokenizer. This is japanese tokenizer with MeCab.", "auto_abstractor", ".", "tokenizable_doc", "=", "MeCabTokenizer", "(", ")", "# Object of abstracting and filtering document.", "abstractable_doc", "=", "TopNRankAbstractor", "(", ")", "# Execute summarization.", "result_dict", "=", "auto_abstractor", ".", "summarize", "(", "document", ",", "abstractable_doc", ")", "# Output 3 summarized sentences.", "limit", "=", "3", "i", "=", "1", "for", "sentence", "in", "result_dict", "[", "\"summarize_result\"", "]", ":", "print", "(", "sentence", ")", "if", "i", ">=", "limit", ":", "break", "i", "+=", "1" ]
Entry Point. Args: url: target url.
[ "Entry", "Point", ".", "Args", ":", "url", ":", "target", "url", "." ]
python
train
28.09375
Phyks/libbmc
libbmc/bibtex.py
https://github.com/Phyks/libbmc/blob/9ef1a29d2514157d1edd6c13ecbd61b07ae9315e/libbmc/bibtex.py#L43-L51
def write(filename, data): """ Create a new BibTeX file. :param filename: The name of the BibTeX file to write. :param data: A ``bibtexparser.BibDatabase`` object. """ with open(filename, 'w') as fh: fh.write(bibdatabase2bibtex(data))
[ "def", "write", "(", "filename", ",", "data", ")", ":", "with", "open", "(", "filename", ",", "'w'", ")", "as", "fh", ":", "fh", ".", "write", "(", "bibdatabase2bibtex", "(", "data", ")", ")" ]
Create a new BibTeX file. :param filename: The name of the BibTeX file to write. :param data: A ``bibtexparser.BibDatabase`` object.
[ "Create", "a", "new", "BibTeX", "file", "." ]
python
train
28.777778
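A usage sketch, assuming bibtexparser's BibDatabase as the data container (per the type named in the docstring); the entry contents are made up.

from bibtexparser.bibdatabase import BibDatabase
from libbmc.bibtex import write

db = BibDatabase()
db.entries = [{
    'ENTRYTYPE': 'article',
    'ID': 'doe2020',
    'author': 'Doe, Jane',
    'title': 'An example entry',
    'year': '2020',
}]
write('refs.bib', db)   # serializes the database and writes refs.bib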
fabioz/PyDev.Debugger
_pydev_imps/_pydev_inspect.py
https://github.com/fabioz/PyDev.Debugger/blob/ed9c4307662a5593b8a7f1f3389ecd0e79b8c503/_pydev_imps/_pydev_inspect.py#L312-L320
def getmoduleinfo(path): """Get the module name, suffix, mode, and module type for a given file.""" filename = os.path.basename(path) suffixes = map(lambda (suffix, mode, mtype): (-len(suffix), suffix, mode, mtype), imp.get_suffixes()) suffixes.sort() # try longest suffixes first, in case they overlap for neglen, suffix, mode, mtype in suffixes: if filename[neglen:] == suffix: return filename[:neglen], suffix, mode, mtype
[ "def", "getmoduleinfo", "(", "path", ")", ":", "filename", "=", "os", ".", "path", ".", "basename", "(", "path", ")", "suffixes", "=", "map", "(", "lambda", "(", "suffix", ",", "mode", ",", "mtype", ")", ":", "(", "-", "len", "(", "suffix", ")", ",", "suffix", ",", "mode", ",", "mtype", ")", ",", "imp", ".", "get_suffixes", "(", ")", ")", "suffixes", ".", "sort", "(", ")", "# try longest suffixes first, in case they overlap", "for", "neglen", ",", "suffix", ",", "mode", ",", "mtype", "in", "suffixes", ":", "if", "filename", "[", "neglen", ":", "]", "==", "suffix", ":", "return", "filename", "[", ":", "neglen", "]", ",", "suffix", ",", "mode", ",", "mtype" ]
Get the module name, suffix, mode, and module type for a given file.
[ "Get", "the", "module", "name", "suffix", "mode", "and", "module", "type", "for", "a", "given", "file", "." ]
python
train
52.888889
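Note that the function above is Python 2 only: the lambda relies on tuple-parameter unpacking and the `imp` module is long deprecated. A rough, illustrative Python 3 analogue follows (suffix matching only; `imp`'s mode/type metadata has no direct modern equivalent).

import os
import importlib.machinery

def module_name_and_suffix(path):
    filename = os.path.basename(path)
    # try longest suffixes first, mirroring the original's sort order
    for suffix in sorted(importlib.machinery.all_suffixes(), key=len, reverse=True):
        if filename.endswith(suffix):
            return filename[:-len(suffix)], suffix
    return None

print(module_name_and_suffix('/tmp/foo.py'))   # ('foo', '.py')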
markfinger/python-js-host
js_host/bin.py
https://github.com/markfinger/python-js-host/blob/7727138c1eae779335d55fb4d7734698225a6322/js_host/bin.py#L78-L109
def spawn_managed_host(config_file, manager, connect_on_start=True): """ Spawns a managed host, if it is not already running """ data = manager.request_host_status(config_file) is_running = data['started'] # Managed hosts run as persistent processes, so it may already be running if is_running: host_status = json.loads(data['host']['output']) logfile = data['host']['logfile'] else: data = manager.start_host(config_file) host_status = json.loads(data['output']) logfile = data['logfile'] host = JSHost( status=host_status, logfile=logfile, config_file=config_file, manager=manager ) if not is_running and settings.VERBOSITY >= verbosity.PROCESS_START: print('Started {}'.format(host.get_name())) if connect_on_start: host.connect() return host
[ "def", "spawn_managed_host", "(", "config_file", ",", "manager", ",", "connect_on_start", "=", "True", ")", ":", "data", "=", "manager", ".", "request_host_status", "(", "config_file", ")", "is_running", "=", "data", "[", "'started'", "]", "# Managed hosts run as persistent processes, so it may already be running", "if", "is_running", ":", "host_status", "=", "json", ".", "loads", "(", "data", "[", "'host'", "]", "[", "'output'", "]", ")", "logfile", "=", "data", "[", "'host'", "]", "[", "'logfile'", "]", "else", ":", "data", "=", "manager", ".", "start_host", "(", "config_file", ")", "host_status", "=", "json", ".", "loads", "(", "data", "[", "'output'", "]", ")", "logfile", "=", "data", "[", "'logfile'", "]", "host", "=", "JSHost", "(", "status", "=", "host_status", ",", "logfile", "=", "logfile", ",", "config_file", "=", "config_file", ",", "manager", "=", "manager", ")", "if", "not", "is_running", "and", "settings", ".", "VERBOSITY", ">=", "verbosity", ".", "PROCESS_START", ":", "print", "(", "'Started {}'", ".", "format", "(", "host", ".", "get_name", "(", ")", ")", ")", "if", "connect_on_start", ":", "host", ".", "connect", "(", ")", "return", "host" ]
Spawns a managed host, if it is not already running
[ "Spawns", "a", "managed", "host", "if", "it", "is", "not", "already", "running" ]
python
train
26.90625
zikzakmedia/python-mediawiki
mediawiki/wikimarkup/__init__.py
https://github.com/zikzakmedia/python-mediawiki/blob/7c26732efa520e16c35350815ce98cd7610a0bcb/mediawiki/wikimarkup/__init__.py#L690-L707
def decodeTagAttributes(self, text): """docstring for decodeTagAttributes""" attribs = {} if text.strip() == u'': return attribs scanner = _attributePat.scanner(text) match = scanner.search() while match: key, val1, val2, val3, val4 = match.groups() value = val1 or val2 or val3 or val4 if value: value = _space.sub(u' ', value).strip() else: value = '' attribs[key] = self.decodeCharReferences(value) match = scanner.search() return attribs
[ "def", "decodeTagAttributes", "(", "self", ",", "text", ")", ":", "attribs", "=", "{", "}", "if", "text", ".", "strip", "(", ")", "==", "u''", ":", "return", "attribs", "scanner", "=", "_attributePat", ".", "scanner", "(", "text", ")", "match", "=", "scanner", ".", "search", "(", ")", "while", "match", ":", "key", ",", "val1", ",", "val2", ",", "val3", ",", "val4", "=", "match", ".", "groups", "(", ")", "value", "=", "val1", "or", "val2", "or", "val3", "or", "val4", "if", "value", ":", "value", "=", "_space", ".", "sub", "(", "u' '", ",", "value", ")", ".", "strip", "(", ")", "else", ":", "value", "=", "''", "attribs", "[", "key", "]", "=", "self", ".", "decodeCharReferences", "(", "value", ")", "match", "=", "scanner", ".", "search", "(", ")", "return", "attribs" ]
docstring for decodeTagAttributes
[ "docstring", "for", "decodeTagAttributes" ]
python
train
26
aloetesting/aloe_webdriver
aloe_webdriver/util.py
https://github.com/aloetesting/aloe_webdriver/blob/65d847da4bdc63f9c015cb19d4efdee87df8ffad/aloe_webdriver/util.py#L280-L308
def find_button(browser, value): """ Find a button with the given value. Searches for the following different kinds of buttons: <input type="submit"> <input type="reset"> <input type="button"> <input type="image"> <button> <{a,p,div,span,...} role="button"> Returns: an :class:`ElementSelector` """ field_types = ( 'submit', 'reset', 'button-element', 'button', 'image', 'button-role', ) return reduce( operator.add, (find_field_with_value(browser, field_type, value) for field_type in field_types) )
[ "def", "find_button", "(", "browser", ",", "value", ")", ":", "field_types", "=", "(", "'submit'", ",", "'reset'", ",", "'button-element'", ",", "'button'", ",", "'image'", ",", "'button-role'", ",", ")", "return", "reduce", "(", "operator", ".", "add", ",", "(", "find_field_with_value", "(", "browser", ",", "field_type", ",", "value", ")", "for", "field_type", "in", "field_types", ")", ")" ]
Find a button with the given value. Searches for the following different kinds of buttons: <input type="submit"> <input type="reset"> <input type="button"> <input type="image"> <button> <{a,p,div,span,...} role="button"> Returns: an :class:`ElementSelector`
[ "Find", "a", "button", "with", "the", "given", "value", "." ]
python
train
21.827586
iqbal-lab-org/cluster_vcf_records
cluster_vcf_records/vcf_record.py
https://github.com/iqbal-lab-org/cluster_vcf_records/blob/0db26af36b6da97a7361364457d2152dc756055c/cluster_vcf_records/vcf_record.py#L402-L410
def to_record_per_alt(self): '''Returns list of vcf_records. One per variant in the ALT column. Does not change INFO/FORMAT etc columns, which means that they are now broken''' record_list = [] for alt in self.ALT: record_list.append(copy.copy(self)) record_list[-1].ALT = [alt] return record_list
[ "def", "to_record_per_alt", "(", "self", ")", ":", "record_list", "=", "[", "]", "for", "alt", "in", "self", ".", "ALT", ":", "record_list", ".", "append", "(", "copy", ".", "copy", "(", "self", ")", ")", "record_list", "[", "-", "1", "]", ".", "ALT", "=", "[", "alt", "]", "return", "record_list" ]
Returns list of vcf_records. One per variant in the ALT column. Does not change INFO/FORMAT etc columns, which means that they are now broken
[ "Returns", "list", "of", "vcf_records", ".", "One", "per", "variant", "in", "the", "ALT", "column", ".", "Does", "not", "change", "INFO", "/", "FORMAT", "etc", "columns", "which", "means", "that", "they", "are", "now", "broken" ]
python
train
40.111111
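A toy illustration of the shallow-copy behaviour the docstring warns about; FakeRecord is a stand-in, not the library's VcfRecord class.

import copy

class FakeRecord(object):
    def __init__(self, chrom, pos, alt):
        self.CHROM, self.POS, self.ALT = chrom, pos, alt

rec = FakeRecord('1', 42, ['A', 'T', 'TG'])
per_alt = []
for alt in rec.ALT:
    new_rec = copy.copy(rec)   # shallow copy: INFO/FORMAT-style fields would stay shared
    new_rec.ALT = [alt]
    per_alt.append(new_rec)

assert [r.ALT for r in per_alt] == [['A'], ['T'], ['TG']]
assert rec.ALT == ['A', 'T', 'TG']   # original record is untouched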
JensRantil/rewind
rewind/server/eventstores.py
https://github.com/JensRantil/rewind/blob/7f645d20186c1db55cfe53a0310c9fd6292f91ea/rewind/server/eventstores.py#L915-L951
def from_config(config, **options): """Instantiate an `SyncedRotationEventStores` from config. Parameters: config -- the configuration file options read from file(s). **options -- various options given to the specific event store. Shall not be used with this event store. Warning will be logged for every extra non-recognized option. The only required key to this function is 'path'. returns -- a newly instantiated `SyncedRotationEventStores`. """ required_args = ('storage-backends',) optional_args = {'events_per_batch': 25000} rconfig.check_config_options("SyncedRotationEventStores", required_args, tuple(optional_args.keys()), options) if "events_per_batch" in options: events_per_batch = int(options["events_per_batch"]) else: events_per_batch = optional_args["events_per_batch"] estore = SyncedRotationEventStores(events_per_batch) for section in options['storage-backends'].split(' '): try: substore = rconfig.construct_eventstore(config, section) estore.add_rotated_store(substore) except Exception as e: _logger.exception('Could not instantiate substore from' ' section %s', section) estore.close() raise return estore
[ "def", "from_config", "(", "config", ",", "*", "*", "options", ")", ":", "required_args", "=", "(", "'storage-backends'", ",", ")", "optional_args", "=", "{", "'events_per_batch'", ":", "25000", "}", "rconfig", ".", "check_config_options", "(", "\"SyncedRotationEventStores\"", ",", "required_args", ",", "tuple", "(", "optional_args", ".", "keys", "(", ")", ")", ",", "options", ")", "if", "\"events_per_batch\"", "in", "options", ":", "events_per_batch", "=", "int", "(", "options", "[", "\"events_per_batch\"", "]", ")", "else", ":", "events_per_batch", "=", "optional_args", "[", "\"events_per_batch\"", "]", "estore", "=", "SyncedRotationEventStores", "(", "events_per_batch", ")", "for", "section", "in", "options", "[", "'storage-backends'", "]", ".", "split", "(", "' '", ")", ":", "try", ":", "substore", "=", "rconfig", ".", "construct_eventstore", "(", "config", ",", "section", ")", "estore", ".", "add_rotated_store", "(", "substore", ")", "except", "Exception", "as", "e", ":", "_logger", ".", "exception", "(", "'Could not instantiate substore from'", "' section %s'", ",", "section", ")", "estore", ".", "close", "(", ")", "raise", "return", "estore" ]
Instantiate an `SyncedRotationEventStores` from config. Parameters: config -- the configuration file options read from file(s). **options -- various options given to the specific event store. Shall not be used with this event store. Warning will be logged for every extra non-recognized option. The only required key to this function is 'path'. returns -- a newly instantiated `SyncedRotationEventStores`.
[ "Instantiate", "an", "SyncedRotationEventStores", "from", "config", "." ]
python
train
40.972973
JamesRamm/longclaw
longclaw/orders/api.py
https://github.com/JamesRamm/longclaw/blob/8bbf2e6d703271b815ec111813c7c5d1d4e4e810/longclaw/orders/api.py#L14-L19
def refund_order(self, request, pk): """Refund the order specified by the pk """ order = Order.objects.get(id=pk) order.refund() return Response(status=status.HTTP_204_NO_CONTENT)
[ "def", "refund_order", "(", "self", ",", "request", ",", "pk", ")", ":", "order", "=", "Order", ".", "objects", ".", "get", "(", "id", "=", "pk", ")", "order", ".", "refund", "(", ")", "return", "Response", "(", "status", "=", "status", ".", "HTTP_204_NO_CONTENT", ")" ]
Refund the order specified by the pk
[ "Refund", "the", "order", "specified", "by", "the", "pk" ]
python
train
35.666667
PySimpleGUI/PySimpleGUI
PySimpleGUIWx/PySimpleGUIWx.py
https://github.com/PySimpleGUI/PySimpleGUI/blob/08184197f5bd4580ab5e5aca28bdda30f87b86fc/PySimpleGUIWx/PySimpleGUIWx.py#L6768-L6821
def PopupGetFolder(message, title=None, default_path='', no_window=False, size=(None, None), button_color=None, background_color=None, text_color=None, icon=DEFAULT_WINDOW_ICON, font=None, no_titlebar=False, grab_anywhere=False, keep_on_top=False, location=(None, None), initial_folder=None): """ Display popup with text entry field and browse button. Browse for folder :param message: :param default_path: :param no_window: :param size: :param button_color: :param background_color: :param text_color: :param icon: :param font: :param no_titlebar: :param grab_anywhere: :param keep_on_top: :param location: :return: Contents of text field. None if closed using X or cancelled """ if no_window: app = wx.App(False) frame = wx.Frame() if initial_folder: dialog = wx.DirDialog(frame, style=wx.FD_OPEN) else: dialog = wx.DirDialog(frame) folder_name = '' if dialog.ShowModal() == wx.ID_OK: folder_name = dialog.GetPath() return folder_name layout = [[Text(message, auto_size_text=True, text_color=text_color, background_color=background_color)], [InputText(default_text=default_path, size=size), FolderBrowse(initial_folder=initial_folder)], [Button('Ok', size=(60, 20), bind_return_key=True), Button('Cancel', size=(60, 20))]] _title = title if title is not None else message window = Window(title=_title, icon=icon, auto_size_text=True, button_color=button_color, background_color=background_color, font=font, no_titlebar=no_titlebar, grab_anywhere=grab_anywhere, keep_on_top=keep_on_top, location=location) (button, input_values) = window.Layout(layout).Read() window.Close() if button != 'Ok': return None else: path = input_values[0] return path
[ "def", "PopupGetFolder", "(", "message", ",", "title", "=", "None", ",", "default_path", "=", "''", ",", "no_window", "=", "False", ",", "size", "=", "(", "None", ",", "None", ")", ",", "button_color", "=", "None", ",", "background_color", "=", "None", ",", "text_color", "=", "None", ",", "icon", "=", "DEFAULT_WINDOW_ICON", ",", "font", "=", "None", ",", "no_titlebar", "=", "False", ",", "grab_anywhere", "=", "False", ",", "keep_on_top", "=", "False", ",", "location", "=", "(", "None", ",", "None", ")", ",", "initial_folder", "=", "None", ")", ":", "if", "no_window", ":", "app", "=", "wx", ".", "App", "(", "False", ")", "frame", "=", "wx", ".", "Frame", "(", ")", "if", "initial_folder", ":", "dialog", "=", "wx", ".", "DirDialog", "(", "frame", ",", "style", "=", "wx", ".", "FD_OPEN", ")", "else", ":", "dialog", "=", "wx", ".", "DirDialog", "(", "frame", ")", "folder_name", "=", "''", "if", "dialog", ".", "ShowModal", "(", ")", "==", "wx", ".", "ID_OK", ":", "folder_name", "=", "dialog", ".", "GetPath", "(", ")", "return", "folder_name", "layout", "=", "[", "[", "Text", "(", "message", ",", "auto_size_text", "=", "True", ",", "text_color", "=", "text_color", ",", "background_color", "=", "background_color", ")", "]", ",", "[", "InputText", "(", "default_text", "=", "default_path", ",", "size", "=", "size", ")", ",", "FolderBrowse", "(", "initial_folder", "=", "initial_folder", ")", "]", ",", "[", "Button", "(", "'Ok'", ",", "size", "=", "(", "60", ",", "20", ")", ",", "bind_return_key", "=", "True", ")", ",", "Button", "(", "'Cancel'", ",", "size", "=", "(", "60", ",", "20", ")", ")", "]", "]", "_title", "=", "title", "if", "title", "is", "not", "None", "else", "message", "window", "=", "Window", "(", "title", "=", "_title", ",", "icon", "=", "icon", ",", "auto_size_text", "=", "True", ",", "button_color", "=", "button_color", ",", "background_color", "=", "background_color", ",", "font", "=", "font", ",", "no_titlebar", "=", "no_titlebar", ",", "grab_anywhere", "=", "grab_anywhere", ",", "keep_on_top", "=", "keep_on_top", ",", "location", "=", "location", ")", "(", "button", ",", "input_values", ")", "=", "window", ".", "Layout", "(", "layout", ")", ".", "Read", "(", ")", "window", ".", "Close", "(", ")", "if", "button", "!=", "'Ok'", ":", "return", "None", "else", ":", "path", "=", "input_values", "[", "0", "]", "return", "path" ]
Display popup with text entry field and browse button. Browse for folder :param message: :param default_path: :param no_window: :param size: :param button_color: :param background_color: :param text_color: :param icon: :param font: :param no_titlebar: :param grab_anywhere: :param keep_on_top: :param location: :return: Contents of text field. None if closed using X or cancelled
[ "Display", "popup", "with", "text", "entry", "field", "and", "browse", "button", ".", "Browse", "for", "folder", ":", "param", "message", ":", ":", "param", "default_path", ":", ":", "param", "no_window", ":", ":", "param", "size", ":", ":", "param", "button_color", ":", ":", "param", "background_color", ":", ":", "param", "text_color", ":", ":", "param", "icon", ":", ":", "param", "font", ":", ":", "param", "no_titlebar", ":", ":", "param", "grab_anywhere", ":", ":", "param", "keep_on_top", ":", ":", "param", "location", ":", ":", "return", ":", "Contents", "of", "text", "field", ".", "None", "if", "closed", "using", "X", "or", "cancelled" ]
python
train
36
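A hypothetical call; the module alias and paths are placeholders.

import PySimpleGUIWx as sg

folder = sg.PopupGetFolder('Choose an output folder', default_path='/tmp')
if folder is None:
    print('Cancelled')         # window closed or Cancel pressed
else:
    print('Selected:', folder)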
hanguokai/youku
youku/youku_users.py
https://github.com/hanguokai/youku/blob/b2df060c7dccfad990bcfa289fff68bb77d1e69b/youku/youku_users.py#L115-L129
def create_friendship(self, access_token, user_id=None, user_name=None): """doc: http://open.youku.com/docs/doc?id=28 """ url = 'https://openapi.youku.com/v2/users/friendship/create.json' data = { 'client_id': self.client_id, 'access_token': access_token, 'user_id': user_id, 'user_name': user_name } data = remove_none_value(data) r = requests.post(url, data=data) check_error(r) return r.json()
[ "def", "create_friendship", "(", "self", ",", "access_token", ",", "user_id", "=", "None", ",", "user_name", "=", "None", ")", ":", "url", "=", "'https://openapi.youku.com/v2/users/friendship/create.json'", "data", "=", "{", "'client_id'", ":", "self", ".", "client_id", ",", "'access_token'", ":", "access_token", ",", "'user_id'", ":", "user_id", ",", "'user_name'", ":", "user_name", "}", "data", "=", "remove_none_value", "(", "data", ")", "r", "=", "requests", ".", "post", "(", "url", ",", "data", "=", "data", ")", "check_error", "(", "r", ")", "return", "r", ".", "json", "(", ")" ]
doc: http://open.youku.com/docs/doc?id=28
[ "doc", ":", "http", ":", "//", "open", ".", "youku", ".", "com", "/", "docs", "/", "doc?id", "=", "28" ]
python
train
35.2
SandstoneHPC/sandstone-ide
sandstone/lib/filesystem/handlers.py
https://github.com/SandstoneHPC/sandstone-ide/blob/7a47947fb07281c3e3018042863dc67e7e56dc04/sandstone/lib/filesystem/handlers.py#L56-L66
def _rename(self): """ Called during a PUT request where the action specifies a rename operation. Returns resource URI of the renamed file. """ newname = self.action['newname'] try: newpath = self.fs.rename(self.fp,newname) except OSError: raise tornado.web.HTTPError(400) return newpath
[ "def", "_rename", "(", "self", ")", ":", "newname", "=", "self", ".", "action", "[", "'newname'", "]", "try", ":", "newpath", "=", "self", ".", "fs", ".", "rename", "(", "self", ".", "fp", ",", "newname", ")", "except", "OSError", ":", "raise", "tornado", ".", "web", ".", "HTTPError", "(", "400", ")", "return", "newpath" ]
Called during a PUT request where the action specifies a rename operation. Returns resource URI of the renamed file.
[ "Called", "during", "a", "PUT", "request", "where", "the", "action", "specifies", "a", "rename", "operation", ".", "Returns", "resource", "URI", "of", "the", "renamed", "file", "." ]
python
train
33.181818
thespacedoctor/qubits
qubits/cl_utils.py
https://github.com/thespacedoctor/qubits/blob/3c02ace7226389841c6bb838d045c11bed61a3c2/qubits/cl_utils.py#L49-L433
def main(arguments=None): """ *The main function used when ``cl_utils.py`` is run as a single script from the cl, or when installed as a cl command* """ # setup the command-line util settings su = tools( arguments=arguments, docString=__doc__, logLevel="WARNING", options_first=False, projectName="qubits" ) arguments, settings, log, dbConn = su.setup() # unpack remaining cl arguments using `exec` to setup the variable names # automatically for arg, val in arguments.iteritems(): if arg[0] == "-": varname = arg.replace("-", "") + "Flag" else: varname = arg.replace("<", "").replace(">", "") if varname == "import": varname = "iimport" if isinstance(val, str) or isinstance(val, unicode): exec(varname + " = '%s'" % (val,)) else: exec(varname + " = %s" % (val,)) if arg == "--dbConn": dbConn = val log.debug('%s = %s' % (varname, val,)) ## START LOGGING ## startTime = times.get_now_sql_datetime() log.info( '--- STARTING TO RUN THE cl_utils.py AT %s' % (startTime,)) if init: from . import workspace ws = workspace( log=log, pathToWorkspace=pathToWorkspace ) ws.setup() return # IMPORT THE SIMULATION SETTINGS (allSettings, programSettings, limitingMags, sampleNumber, peakMagnitudeDistributions, explosionDaysFromSettings, extendLightCurveTail, relativeSNRates, lowerRedshiftLimit, upperRedshiftLimit, redshiftResolution, restFrameFilter, kCorrectionTemporalResolution, kCorPolyOrder, kCorMinimumDataPoints, extinctionType, extinctionConstant, hostExtinctionDistributions, galacticExtinctionDistribution, surveyCadenceSettings, snLightCurves, surveyArea, CCSNRateFraction, transientToCCSNRateFraction, extraSurveyConstraints, lightCurvePolyOrder, logLevel) = cu.read_in_survey_parameters( log, pathToSettingsFile=pathToSettingsFile ) logFilePath = pathToOutputDirectory + "/qubits.log" del log log = _set_up_command_line_tool( level=str(logLevel), logFilePath=logFilePath ) # dbConn, log = cu.settings( # pathToSettingsFile=pathToSettingsFile, # dbConn=False, # log=True # ) ## START LOGGING ## startTime = dcu.get_now_sql_datetime() log.info('--- STARTING TO RUN THE qubits AT %s' % (startTime,)) resultsDict = {} pathToOutputPlotDirectory = pathToOutputDirectory + "/plots/" dcu.dryx_mkdir( log, directoryPath=pathToOutputPlotDirectory ) pathToResultsFolder = pathToOutputDirectory + "/results/" dcu.dryx_mkdir( log, directoryPath=pathToResultsFolder ) if not programSettings['Extract Lightcurves from Spectra'] and not programSettings['Generate KCorrection Database'] and not programSettings['Run the Simulation'] and not programSettings['Compile and Plot Results']: print "All stages of the simulatation have been switched off. 
Please switch on at least one stage of the simulation under the 'Programming Settings' in the settings file `%(pathToSettingsFile)s`" % locals() # GENERATE THE DATA FOR SIMULATIONS if programSettings['Extract Lightcurves from Spectra']: log.info('generating the Lightcurves') dg.generate_model_lightcurves( log=log, pathToSpectralDatabase=pathToSpectralDatabase, pathToOutputDirectory=pathToOutputDirectory, pathToOutputPlotDirectory=pathToOutputPlotDirectory, explosionDaysFromSettings=explosionDaysFromSettings, extendLightCurveTail=extendLightCurveTail, polyOrder=lightCurvePolyOrder ) print "The lightcurve file can be found here: %(pathToOutputDirectory)stransient_light_curves.yaml" % locals() print "The lightcurve plots can be found in %(pathToOutputPlotDirectory)s" % locals() if programSettings['Generate KCorrection Database']: log.info('generating the kcorrection data') dg.generate_kcorrection_listing_database( log, pathToOutputDirectory=pathToOutputDirectory, pathToSpectralDatabase=pathToSpectralDatabase, restFrameFilter=restFrameFilter, temporalResolution=kCorrectionTemporalResolution, redshiftResolution=redshiftResolution, redshiftLower=lowerRedshiftLimit, redshiftUpper=upperRedshiftLimit + redshiftResolution) log.info('generating the kcorrection polynomials') dg.generate_kcorrection_polynomial_database( log, pathToOutputDirectory=pathToOutputDirectory, restFrameFilter=restFrameFilter, kCorPolyOrder=kCorPolyOrder, # ORDER OF THE POLYNOMIAL TO FIT kCorMinimumDataPoints=kCorMinimumDataPoints, redshiftResolution=redshiftResolution, redshiftLower=lowerRedshiftLimit, redshiftUpper=upperRedshiftLimit + redshiftResolution, plot=programSettings['Generate KCorrection Plots']) print "The k-correction database has been generated here: %(pathToOutputDirectory)sk_corrections" % locals() if programSettings['Generate KCorrection Plots']: print "The k-correction polynomial plots can also be found in %(pathToOutputDirectory)sk_corrections" % locals() if programSettings['Run the Simulation']: # CREATE THE OBSERVABLE UNIVERSE! 
log.info('generating the redshift array') redshiftArray = u.random_redshift_array( log, sampleNumber, lowerRedshiftLimit, upperRedshiftLimit, redshiftResolution=redshiftResolution, pathToOutputPlotDirectory=pathToOutputPlotDirectory, plot=programSettings['Plot Simulation Helper Plots']) resultsDict['Redshifts'] = redshiftArray.tolist() log.info('generating the SN type array') snTypesArray = u.random_sn_types_array( log, sampleNumber, relativeSNRates, pathToOutputPlotDirectory=pathToOutputPlotDirectory, plot=programSettings['Plot Simulation Helper Plots']) resultsDict['SN Types'] = snTypesArray.tolist() log.info('generating peak magnitudes for the SNe') peakMagnitudesArray = u.random_peak_magnitudes( log, peakMagnitudeDistributions, snTypesArray, plot=programSettings['Plot Simulation Helper Plots']) log.info('generating the SN host extictions array') hostExtinctionArray = u.random_host_extinction( log, sampleNumber, extinctionType, extinctionConstant, hostExtinctionDistributions, plot=programSettings['Plot Simulation Helper Plots']) log.info('generating the SN galactic extictions array') galacticExtinctionArray = u.random_galactic_extinction( log, sampleNumber, extinctionType, extinctionConstant, galacticExtinctionDistribution, plot=programSettings['Plot Simulation Helper Plots']) log.info('generating the raw lightcurves for the SNe') rawLightCurveDict = u.generate_numpy_polynomial_lightcurves( log, snLightCurves=snLightCurves, pathToOutputDirectory=pathToOutputDirectory, pathToOutputPlotDirectory=pathToOutputPlotDirectory, plot=programSettings['Plot Simulation Helper Plots']) log.info('generating the k-correction array for the SNe') kCorrectionArray = u.build_kcorrection_array( log, redshiftArray, snTypesArray, snLightCurves, pathToOutputDirectory=pathToOutputDirectory, plot=programSettings['Plot Simulation Helper Plots']) log.info('generating the observed lightcurves for the SNe') observedFrameLightCurveInfo, peakAppMagList = u.convert_lightcurves_to_observered_frame( log, snLightCurves=snLightCurves, rawLightCurveDict=rawLightCurveDict, redshiftArray=redshiftArray, snTypesArray=snTypesArray, peakMagnitudesArray=peakMagnitudesArray, kCorrectionArray=kCorrectionArray, hostExtinctionArray=hostExtinctionArray, galacticExtinctionArray=galacticExtinctionArray, restFrameFilter=restFrameFilter, pathToOutputDirectory=pathToOutputDirectory, pathToOutputPlotDirectory=pathToOutputPlotDirectory, polyOrder=lightCurvePolyOrder, plot=programSettings['Plot Simulation Helper Plots']) log.info('generating the survey observation cadence') cadenceDictionary = ss.survey_cadence_arrays( log, surveyCadenceSettings, pathToOutputDirectory=pathToOutputDirectory, pathToOutputPlotDirectory=pathToOutputPlotDirectory, plot=programSettings['Plot Simulation Helper Plots']) log.info('determining if the SNe are discoverable by the survey') discoverableList = ss.determine_if_sne_are_discoverable( log, redshiftArray=redshiftArray, limitingMags=limitingMags, observedFrameLightCurveInfo=observedFrameLightCurveInfo, pathToOutputDirectory=pathToOutputDirectory, pathToOutputPlotDirectory=pathToOutputPlotDirectory, plot=programSettings['Plot Simulation Helper Plots']) log.info( 'determining the day (if and) when each SN is first discoverable by the survey') ripeDayList = ss.determine_when_sne_are_ripe_for_discovery( log, redshiftArray=redshiftArray, limitingMags=limitingMags, discoverableList=discoverableList, observedFrameLightCurveInfo=observedFrameLightCurveInfo, plot=programSettings['Plot Simulation Helper Plots']) # 
log.info('determining the day when each SN is disappears fainter than the survey limiting mags') # disappearDayList = determine_when_discovered_sne_disappear( # log, # redshiftArray=redshiftArray, # limitingMags=limitingMags, # ripeDayList=ripeDayList, # observedFrameLightCurveInfo=observedFrameLightCurveInfo, # plot=programSettings['Plot Simulation Helper Plots']) log.info('determining if and when each SN is discovered by the survey') lightCurveDiscoveryDayList, surveyDiscoveryDayList, snCampaignLengthList = ss.determine_if_sne_are_discovered( log, limitingMags=limitingMags, ripeDayList=ripeDayList, cadenceDictionary=cadenceDictionary, observedFrameLightCurveInfo=observedFrameLightCurveInfo, extraSurveyConstraints=extraSurveyConstraints, plot=programSettings['Plot Simulation Helper Plots']) resultsDict[ 'Discoveries Relative to Peak Magnitudes'] = lightCurveDiscoveryDayList resultsDict[ 'Discoveries Relative to Survey Year'] = surveyDiscoveryDayList resultsDict['Campaign Length'] = snCampaignLengthList resultsDict['Cadence Dictionary'] = cadenceDictionary resultsDict['Peak Apparent Magnitudes'] = peakAppMagList now = datetime.now() now = now.strftime("%Y%m%dt%H%M%S") fileName = pathToOutputDirectory + \ "/simulation_results_%s.yaml" % (now,) stream = file(fileName, 'w') yamlContent = dict(allSettings.items() + resultsDict.items()) yaml.dump(yamlContent, stream, default_flow_style=False) stream.close() print "The simulation output file can be found here: %(fileName)s. Remember to update your settings file 'Simulation Results File Used for Plots' parameter with this filename before compiling the results." % locals() if programSettings['Plot Simulation Helper Plots']: print "The simulation helper-plots found in %(pathToOutputPlotDirectory)s" % locals() # COMPILE AND PLOT THE RESULTS if programSettings['Compile and Plot Results']: pathToYamlFile = pathToOutputDirectory + "/" + \ programSettings['Simulation Results File Used for Plots'] result_log = r.log_the_survey_settings(log, pathToYamlFile) snSurveyDiscoveryTimes, lightCurveDiscoveryTimes, snTypes, redshifts, cadenceDictionary, peakAppMagList, snCampaignLengthList = r.import_results( log, pathToYamlFile) snRatePlotLink, totalRate, tooFaintRate, shortCampaignRate = r.determine_sn_rate( log, lightCurveDiscoveryTimes, snSurveyDiscoveryTimes, redshifts, surveyCadenceSettings=surveyCadenceSettings, lowerRedshiftLimit=lowerRedshiftLimit, upperRedshiftLimit=upperRedshiftLimit, redshiftResolution=redshiftResolution, surveyArea=surveyArea, CCSNRateFraction=CCSNRateFraction, transientToCCSNRateFraction=transientToCCSNRateFraction, peakAppMagList=peakAppMagList, snCampaignLengthList=snCampaignLengthList, extraSurveyConstraints=extraSurveyConstraints, pathToOutputPlotFolder=pathToOutputPlotDirectory) result_log += """ ## Results ## This simulated survey discovered a total of **%s** transients per year. An extra **%s** transients were detected but deemed too faint to constrain a positive transient identification and a further **%s** transients where detected but an observational campaign of more than **%s** days could not be completed to ensure identification. See below for the various output plots. """ % (totalRate, tooFaintRate, shortCampaignRate, extraSurveyConstraints["Observable for at least ? 
number of days"]) cadenceWheelLink = r.plot_cadence_wheel( log, cadenceDictionary, pathToOutputPlotFolder=pathToOutputPlotDirectory) result_log += """%s""" % (cadenceWheelLink,) discoveryMapLink = r.plot_sn_discovery_map( log, snSurveyDiscoveryTimes, peakAppMagList, snCampaignLengthList, redshifts, extraSurveyConstraints, pathToOutputPlotFolder=pathToOutputPlotDirectory) result_log += """%s""" % (discoveryMapLink,) ratioMapLink = r.plot_sn_discovery_ratio_map( log, snSurveyDiscoveryTimes, redshifts, peakAppMagList, snCampaignLengthList, extraSurveyConstraints, pathToOutputPlotFolder=pathToOutputPlotDirectory) result_log += """%s""" % (ratioMapLink,) result_log += """%s""" % (snRatePlotLink,) now = datetime.now() now = now.strftime("%Y%m%dt%H%M%S") mdLogPath = pathToResultsFolder + \ "simulation_result_log_%s.md" % (now,) mdLog = open(mdLogPath, 'w') mdLog.write(result_log) mdLog.close() dmd.convert_to_html( log=log, pathToMMDFile=mdLogPath, css="amblin" ) print "Results can be found here: %(pathToResultsFolder)s" % locals() html = mdLogPath.replace(".md", ".html") print "Open this file in your browser: %(html)s" % locals() if "dbConn" in locals() and dbConn: dbConn.commit() dbConn.close() ## FINISH LOGGING ## endTime = times.get_now_sql_datetime() runningTime = times.calculate_time_difference(startTime, endTime) log.info('-- FINISHED ATTEMPT TO RUN THE cl_utils.py AT %s (RUNTIME: %s) --' % (endTime, runningTime, )) return
[ "def", "main", "(", "arguments", "=", "None", ")", ":", "# setup the command-line util settings", "su", "=", "tools", "(", "arguments", "=", "arguments", ",", "docString", "=", "__doc__", ",", "logLevel", "=", "\"WARNING\"", ",", "options_first", "=", "False", ",", "projectName", "=", "\"qubits\"", ")", "arguments", ",", "settings", ",", "log", ",", "dbConn", "=", "su", ".", "setup", "(", ")", "# unpack remaining cl arguments using `exec` to setup the variable names", "# automatically", "for", "arg", ",", "val", "in", "arguments", ".", "iteritems", "(", ")", ":", "if", "arg", "[", "0", "]", "==", "\"-\"", ":", "varname", "=", "arg", ".", "replace", "(", "\"-\"", ",", "\"\"", ")", "+", "\"Flag\"", "else", ":", "varname", "=", "arg", ".", "replace", "(", "\"<\"", ",", "\"\"", ")", ".", "replace", "(", "\">\"", ",", "\"\"", ")", "if", "varname", "==", "\"import\"", ":", "varname", "=", "\"iimport\"", "if", "isinstance", "(", "val", ",", "str", ")", "or", "isinstance", "(", "val", ",", "unicode", ")", ":", "exec", "(", "varname", "+", "\" = '%s'\"", "%", "(", "val", ",", ")", ")", "else", ":", "exec", "(", "varname", "+", "\" = %s\"", "%", "(", "val", ",", ")", ")", "if", "arg", "==", "\"--dbConn\"", ":", "dbConn", "=", "val", "log", ".", "debug", "(", "'%s = %s'", "%", "(", "varname", ",", "val", ",", ")", ")", "## START LOGGING ##", "startTime", "=", "times", ".", "get_now_sql_datetime", "(", ")", "log", ".", "info", "(", "'--- STARTING TO RUN THE cl_utils.py AT %s'", "%", "(", "startTime", ",", ")", ")", "if", "init", ":", "from", ".", "import", "workspace", "ws", "=", "workspace", "(", "log", "=", "log", ",", "pathToWorkspace", "=", "pathToWorkspace", ")", "ws", ".", "setup", "(", ")", "return", "# IMPORT THE SIMULATION SETTINGS", "(", "allSettings", ",", "programSettings", ",", "limitingMags", ",", "sampleNumber", ",", "peakMagnitudeDistributions", ",", "explosionDaysFromSettings", ",", "extendLightCurveTail", ",", "relativeSNRates", ",", "lowerRedshiftLimit", ",", "upperRedshiftLimit", ",", "redshiftResolution", ",", "restFrameFilter", ",", "kCorrectionTemporalResolution", ",", "kCorPolyOrder", ",", "kCorMinimumDataPoints", ",", "extinctionType", ",", "extinctionConstant", ",", "hostExtinctionDistributions", ",", "galacticExtinctionDistribution", ",", "surveyCadenceSettings", ",", "snLightCurves", ",", "surveyArea", ",", "CCSNRateFraction", ",", "transientToCCSNRateFraction", ",", "extraSurveyConstraints", ",", "lightCurvePolyOrder", ",", "logLevel", ")", "=", "cu", ".", "read_in_survey_parameters", "(", "log", ",", "pathToSettingsFile", "=", "pathToSettingsFile", ")", "logFilePath", "=", "pathToOutputDirectory", "+", "\"/qubits.log\"", "del", "log", "log", "=", "_set_up_command_line_tool", "(", "level", "=", "str", "(", "logLevel", ")", ",", "logFilePath", "=", "logFilePath", ")", "# dbConn, log = cu.settings(", "# pathToSettingsFile=pathToSettingsFile,", "# dbConn=False,", "# log=True", "# )", "## START LOGGING ##", "startTime", "=", "dcu", ".", "get_now_sql_datetime", "(", ")", "log", ".", "info", "(", "'--- STARTING TO RUN THE qubits AT %s'", "%", "(", "startTime", ",", ")", ")", "resultsDict", "=", "{", "}", "pathToOutputPlotDirectory", "=", "pathToOutputDirectory", "+", "\"/plots/\"", "dcu", ".", "dryx_mkdir", "(", "log", ",", "directoryPath", "=", "pathToOutputPlotDirectory", ")", "pathToResultsFolder", "=", "pathToOutputDirectory", "+", "\"/results/\"", "dcu", ".", "dryx_mkdir", "(", "log", ",", "directoryPath", "=", "pathToResultsFolder", ")", "if", "not", "programSettings", 
"[", "'Extract Lightcurves from Spectra'", "]", "and", "not", "programSettings", "[", "'Generate KCorrection Database'", "]", "and", "not", "programSettings", "[", "'Run the Simulation'", "]", "and", "not", "programSettings", "[", "'Compile and Plot Results'", "]", ":", "print", "\"All stages of the simulatation have been switched off. Please switch on at least one stage of the simulation under the 'Programming Settings' in the settings file `%(pathToSettingsFile)s`\"", "%", "locals", "(", ")", "# GENERATE THE DATA FOR SIMULATIONS", "if", "programSettings", "[", "'Extract Lightcurves from Spectra'", "]", ":", "log", ".", "info", "(", "'generating the Lightcurves'", ")", "dg", ".", "generate_model_lightcurves", "(", "log", "=", "log", ",", "pathToSpectralDatabase", "=", "pathToSpectralDatabase", ",", "pathToOutputDirectory", "=", "pathToOutputDirectory", ",", "pathToOutputPlotDirectory", "=", "pathToOutputPlotDirectory", ",", "explosionDaysFromSettings", "=", "explosionDaysFromSettings", ",", "extendLightCurveTail", "=", "extendLightCurveTail", ",", "polyOrder", "=", "lightCurvePolyOrder", ")", "print", "\"The lightcurve file can be found here: %(pathToOutputDirectory)stransient_light_curves.yaml\"", "%", "locals", "(", ")", "print", "\"The lightcurve plots can be found in %(pathToOutputPlotDirectory)s\"", "%", "locals", "(", ")", "if", "programSettings", "[", "'Generate KCorrection Database'", "]", ":", "log", ".", "info", "(", "'generating the kcorrection data'", ")", "dg", ".", "generate_kcorrection_listing_database", "(", "log", ",", "pathToOutputDirectory", "=", "pathToOutputDirectory", ",", "pathToSpectralDatabase", "=", "pathToSpectralDatabase", ",", "restFrameFilter", "=", "restFrameFilter", ",", "temporalResolution", "=", "kCorrectionTemporalResolution", ",", "redshiftResolution", "=", "redshiftResolution", ",", "redshiftLower", "=", "lowerRedshiftLimit", ",", "redshiftUpper", "=", "upperRedshiftLimit", "+", "redshiftResolution", ")", "log", ".", "info", "(", "'generating the kcorrection polynomials'", ")", "dg", ".", "generate_kcorrection_polynomial_database", "(", "log", ",", "pathToOutputDirectory", "=", "pathToOutputDirectory", ",", "restFrameFilter", "=", "restFrameFilter", ",", "kCorPolyOrder", "=", "kCorPolyOrder", ",", "# ORDER OF THE POLYNOMIAL TO FIT", "kCorMinimumDataPoints", "=", "kCorMinimumDataPoints", ",", "redshiftResolution", "=", "redshiftResolution", ",", "redshiftLower", "=", "lowerRedshiftLimit", ",", "redshiftUpper", "=", "upperRedshiftLimit", "+", "redshiftResolution", ",", "plot", "=", "programSettings", "[", "'Generate KCorrection Plots'", "]", ")", "print", "\"The k-correction database has been generated here: %(pathToOutputDirectory)sk_corrections\"", "%", "locals", "(", ")", "if", "programSettings", "[", "'Generate KCorrection Plots'", "]", ":", "print", "\"The k-correction polynomial plots can also be found in %(pathToOutputDirectory)sk_corrections\"", "%", "locals", "(", ")", "if", "programSettings", "[", "'Run the Simulation'", "]", ":", "# CREATE THE OBSERVABLE UNIVERSE!", "log", ".", "info", "(", "'generating the redshift array'", ")", "redshiftArray", "=", "u", ".", "random_redshift_array", "(", "log", ",", "sampleNumber", ",", "lowerRedshiftLimit", ",", "upperRedshiftLimit", ",", "redshiftResolution", "=", "redshiftResolution", ",", "pathToOutputPlotDirectory", "=", "pathToOutputPlotDirectory", ",", "plot", "=", "programSettings", "[", "'Plot Simulation Helper Plots'", "]", ")", "resultsDict", "[", "'Redshifts'", "]", "=", "redshiftArray", ".", 
"tolist", "(", ")", "log", ".", "info", "(", "'generating the SN type array'", ")", "snTypesArray", "=", "u", ".", "random_sn_types_array", "(", "log", ",", "sampleNumber", ",", "relativeSNRates", ",", "pathToOutputPlotDirectory", "=", "pathToOutputPlotDirectory", ",", "plot", "=", "programSettings", "[", "'Plot Simulation Helper Plots'", "]", ")", "resultsDict", "[", "'SN Types'", "]", "=", "snTypesArray", ".", "tolist", "(", ")", "log", ".", "info", "(", "'generating peak magnitudes for the SNe'", ")", "peakMagnitudesArray", "=", "u", ".", "random_peak_magnitudes", "(", "log", ",", "peakMagnitudeDistributions", ",", "snTypesArray", ",", "plot", "=", "programSettings", "[", "'Plot Simulation Helper Plots'", "]", ")", "log", ".", "info", "(", "'generating the SN host extictions array'", ")", "hostExtinctionArray", "=", "u", ".", "random_host_extinction", "(", "log", ",", "sampleNumber", ",", "extinctionType", ",", "extinctionConstant", ",", "hostExtinctionDistributions", ",", "plot", "=", "programSettings", "[", "'Plot Simulation Helper Plots'", "]", ")", "log", ".", "info", "(", "'generating the SN galactic extictions array'", ")", "galacticExtinctionArray", "=", "u", ".", "random_galactic_extinction", "(", "log", ",", "sampleNumber", ",", "extinctionType", ",", "extinctionConstant", ",", "galacticExtinctionDistribution", ",", "plot", "=", "programSettings", "[", "'Plot Simulation Helper Plots'", "]", ")", "log", ".", "info", "(", "'generating the raw lightcurves for the SNe'", ")", "rawLightCurveDict", "=", "u", ".", "generate_numpy_polynomial_lightcurves", "(", "log", ",", "snLightCurves", "=", "snLightCurves", ",", "pathToOutputDirectory", "=", "pathToOutputDirectory", ",", "pathToOutputPlotDirectory", "=", "pathToOutputPlotDirectory", ",", "plot", "=", "programSettings", "[", "'Plot Simulation Helper Plots'", "]", ")", "log", ".", "info", "(", "'generating the k-correction array for the SNe'", ")", "kCorrectionArray", "=", "u", ".", "build_kcorrection_array", "(", "log", ",", "redshiftArray", ",", "snTypesArray", ",", "snLightCurves", ",", "pathToOutputDirectory", "=", "pathToOutputDirectory", ",", "plot", "=", "programSettings", "[", "'Plot Simulation Helper Plots'", "]", ")", "log", ".", "info", "(", "'generating the observed lightcurves for the SNe'", ")", "observedFrameLightCurveInfo", ",", "peakAppMagList", "=", "u", ".", "convert_lightcurves_to_observered_frame", "(", "log", ",", "snLightCurves", "=", "snLightCurves", ",", "rawLightCurveDict", "=", "rawLightCurveDict", ",", "redshiftArray", "=", "redshiftArray", ",", "snTypesArray", "=", "snTypesArray", ",", "peakMagnitudesArray", "=", "peakMagnitudesArray", ",", "kCorrectionArray", "=", "kCorrectionArray", ",", "hostExtinctionArray", "=", "hostExtinctionArray", ",", "galacticExtinctionArray", "=", "galacticExtinctionArray", ",", "restFrameFilter", "=", "restFrameFilter", ",", "pathToOutputDirectory", "=", "pathToOutputDirectory", ",", "pathToOutputPlotDirectory", "=", "pathToOutputPlotDirectory", ",", "polyOrder", "=", "lightCurvePolyOrder", ",", "plot", "=", "programSettings", "[", "'Plot Simulation Helper Plots'", "]", ")", "log", ".", "info", "(", "'generating the survey observation cadence'", ")", "cadenceDictionary", "=", "ss", ".", "survey_cadence_arrays", "(", "log", ",", "surveyCadenceSettings", ",", "pathToOutputDirectory", "=", "pathToOutputDirectory", ",", "pathToOutputPlotDirectory", "=", "pathToOutputPlotDirectory", ",", "plot", "=", "programSettings", "[", "'Plot Simulation Helper Plots'", "]", ")", "log", ".", 
"info", "(", "'determining if the SNe are discoverable by the survey'", ")", "discoverableList", "=", "ss", ".", "determine_if_sne_are_discoverable", "(", "log", ",", "redshiftArray", "=", "redshiftArray", ",", "limitingMags", "=", "limitingMags", ",", "observedFrameLightCurveInfo", "=", "observedFrameLightCurveInfo", ",", "pathToOutputDirectory", "=", "pathToOutputDirectory", ",", "pathToOutputPlotDirectory", "=", "pathToOutputPlotDirectory", ",", "plot", "=", "programSettings", "[", "'Plot Simulation Helper Plots'", "]", ")", "log", ".", "info", "(", "'determining the day (if and) when each SN is first discoverable by the survey'", ")", "ripeDayList", "=", "ss", ".", "determine_when_sne_are_ripe_for_discovery", "(", "log", ",", "redshiftArray", "=", "redshiftArray", ",", "limitingMags", "=", "limitingMags", ",", "discoverableList", "=", "discoverableList", ",", "observedFrameLightCurveInfo", "=", "observedFrameLightCurveInfo", ",", "plot", "=", "programSettings", "[", "'Plot Simulation Helper Plots'", "]", ")", "# log.info('determining the day when each SN is disappears fainter than the survey limiting mags')", "# disappearDayList = determine_when_discovered_sne_disappear(", "# log,", "# redshiftArray=redshiftArray,", "# limitingMags=limitingMags,", "# ripeDayList=ripeDayList,", "# observedFrameLightCurveInfo=observedFrameLightCurveInfo,", "# plot=programSettings['Plot Simulation Helper Plots'])", "log", ".", "info", "(", "'determining if and when each SN is discovered by the survey'", ")", "lightCurveDiscoveryDayList", ",", "surveyDiscoveryDayList", ",", "snCampaignLengthList", "=", "ss", ".", "determine_if_sne_are_discovered", "(", "log", ",", "limitingMags", "=", "limitingMags", ",", "ripeDayList", "=", "ripeDayList", ",", "cadenceDictionary", "=", "cadenceDictionary", ",", "observedFrameLightCurveInfo", "=", "observedFrameLightCurveInfo", ",", "extraSurveyConstraints", "=", "extraSurveyConstraints", ",", "plot", "=", "programSettings", "[", "'Plot Simulation Helper Plots'", "]", ")", "resultsDict", "[", "'Discoveries Relative to Peak Magnitudes'", "]", "=", "lightCurveDiscoveryDayList", "resultsDict", "[", "'Discoveries Relative to Survey Year'", "]", "=", "surveyDiscoveryDayList", "resultsDict", "[", "'Campaign Length'", "]", "=", "snCampaignLengthList", "resultsDict", "[", "'Cadence Dictionary'", "]", "=", "cadenceDictionary", "resultsDict", "[", "'Peak Apparent Magnitudes'", "]", "=", "peakAppMagList", "now", "=", "datetime", ".", "now", "(", ")", "now", "=", "now", ".", "strftime", "(", "\"%Y%m%dt%H%M%S\"", ")", "fileName", "=", "pathToOutputDirectory", "+", "\"/simulation_results_%s.yaml\"", "%", "(", "now", ",", ")", "stream", "=", "file", "(", "fileName", ",", "'w'", ")", "yamlContent", "=", "dict", "(", "allSettings", ".", "items", "(", ")", "+", "resultsDict", ".", "items", "(", ")", ")", "yaml", ".", "dump", "(", "yamlContent", ",", "stream", ",", "default_flow_style", "=", "False", ")", "stream", ".", "close", "(", ")", "print", "\"The simulation output file can be found here: %(fileName)s. 
Remember to update your settings file 'Simulation Results File Used for Plots' parameter with this filename before compiling the results.\"", "%", "locals", "(", ")", "if", "programSettings", "[", "'Plot Simulation Helper Plots'", "]", ":", "print", "\"The simulation helper-plots found in %(pathToOutputPlotDirectory)s\"", "%", "locals", "(", ")", "# COMPILE AND PLOT THE RESULTS", "if", "programSettings", "[", "'Compile and Plot Results'", "]", ":", "pathToYamlFile", "=", "pathToOutputDirectory", "+", "\"/\"", "+", "programSettings", "[", "'Simulation Results File Used for Plots'", "]", "result_log", "=", "r", ".", "log_the_survey_settings", "(", "log", ",", "pathToYamlFile", ")", "snSurveyDiscoveryTimes", ",", "lightCurveDiscoveryTimes", ",", "snTypes", ",", "redshifts", ",", "cadenceDictionary", ",", "peakAppMagList", ",", "snCampaignLengthList", "=", "r", ".", "import_results", "(", "log", ",", "pathToYamlFile", ")", "snRatePlotLink", ",", "totalRate", ",", "tooFaintRate", ",", "shortCampaignRate", "=", "r", ".", "determine_sn_rate", "(", "log", ",", "lightCurveDiscoveryTimes", ",", "snSurveyDiscoveryTimes", ",", "redshifts", ",", "surveyCadenceSettings", "=", "surveyCadenceSettings", ",", "lowerRedshiftLimit", "=", "lowerRedshiftLimit", ",", "upperRedshiftLimit", "=", "upperRedshiftLimit", ",", "redshiftResolution", "=", "redshiftResolution", ",", "surveyArea", "=", "surveyArea", ",", "CCSNRateFraction", "=", "CCSNRateFraction", ",", "transientToCCSNRateFraction", "=", "transientToCCSNRateFraction", ",", "peakAppMagList", "=", "peakAppMagList", ",", "snCampaignLengthList", "=", "snCampaignLengthList", ",", "extraSurveyConstraints", "=", "extraSurveyConstraints", ",", "pathToOutputPlotFolder", "=", "pathToOutputPlotDirectory", ")", "result_log", "+=", "\"\"\"\n## Results ##\n\nThis simulated survey discovered a total of **%s** transients per year. An extra **%s** transients were detected but deemed too faint to constrain a positive transient identification and a further **%s** transients where detected but an observational campaign of more than **%s** days could not be completed to ensure identification. See below for the various output plots.\n\n \"\"\"", "%", "(", "totalRate", ",", "tooFaintRate", ",", "shortCampaignRate", ",", "extraSurveyConstraints", "[", "\"Observable for at least ? 
number of days\"", "]", ")", "cadenceWheelLink", "=", "r", ".", "plot_cadence_wheel", "(", "log", ",", "cadenceDictionary", ",", "pathToOutputPlotFolder", "=", "pathToOutputPlotDirectory", ")", "result_log", "+=", "\"\"\"%s\"\"\"", "%", "(", "cadenceWheelLink", ",", ")", "discoveryMapLink", "=", "r", ".", "plot_sn_discovery_map", "(", "log", ",", "snSurveyDiscoveryTimes", ",", "peakAppMagList", ",", "snCampaignLengthList", ",", "redshifts", ",", "extraSurveyConstraints", ",", "pathToOutputPlotFolder", "=", "pathToOutputPlotDirectory", ")", "result_log", "+=", "\"\"\"%s\"\"\"", "%", "(", "discoveryMapLink", ",", ")", "ratioMapLink", "=", "r", ".", "plot_sn_discovery_ratio_map", "(", "log", ",", "snSurveyDiscoveryTimes", ",", "redshifts", ",", "peakAppMagList", ",", "snCampaignLengthList", ",", "extraSurveyConstraints", ",", "pathToOutputPlotFolder", "=", "pathToOutputPlotDirectory", ")", "result_log", "+=", "\"\"\"%s\"\"\"", "%", "(", "ratioMapLink", ",", ")", "result_log", "+=", "\"\"\"%s\"\"\"", "%", "(", "snRatePlotLink", ",", ")", "now", "=", "datetime", ".", "now", "(", ")", "now", "=", "now", ".", "strftime", "(", "\"%Y%m%dt%H%M%S\"", ")", "mdLogPath", "=", "pathToResultsFolder", "+", "\"simulation_result_log_%s.md\"", "%", "(", "now", ",", ")", "mdLog", "=", "open", "(", "mdLogPath", ",", "'w'", ")", "mdLog", ".", "write", "(", "result_log", ")", "mdLog", ".", "close", "(", ")", "dmd", ".", "convert_to_html", "(", "log", "=", "log", ",", "pathToMMDFile", "=", "mdLogPath", ",", "css", "=", "\"amblin\"", ")", "print", "\"Results can be found here: %(pathToResultsFolder)s\"", "%", "locals", "(", ")", "html", "=", "mdLogPath", ".", "replace", "(", "\".md\"", ",", "\".html\"", ")", "print", "\"Open this file in your browser: %(html)s\"", "%", "locals", "(", ")", "if", "\"dbConn\"", "in", "locals", "(", ")", "and", "dbConn", ":", "dbConn", ".", "commit", "(", ")", "dbConn", ".", "close", "(", ")", "## FINISH LOGGING ##", "endTime", "=", "times", ".", "get_now_sql_datetime", "(", ")", "runningTime", "=", "times", ".", "calculate_time_difference", "(", "startTime", ",", "endTime", ")", "log", ".", "info", "(", "'-- FINISHED ATTEMPT TO RUN THE cl_utils.py AT %s (RUNTIME: %s) --'", "%", "(", "endTime", ",", "runningTime", ",", ")", ")", "return" ]
*The main function used when ``cl_utils.py`` is run as a single script from the cl, or when installed as a cl command*
[ "*", "The", "main", "function", "used", "when", "cl_utils", ".", "py", "is", "run", "as", "a", "single", "script", "from", "the", "cl", "or", "when", "installed", "as", "a", "cl", "command", "*" ]
python
train
40.937662
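The stages of the qubits pipeline above are gated by boolean flags read from the settings file. A minimal sketch of that gating, using the flag names that appear in the code (the real values come from the 'Programming Settings' block of the YAML settings file, so this dict is only illustrative):

programSettings = {
    'Extract Lightcurves from Spectra': False,
    'Generate KCorrection Database': False,
    'Run the Simulation': True,
    'Compile and Plot Results': True,
    'Plot Simulation Helper Plots': False,
}

stages = ('Extract Lightcurves from Spectra', 'Generate KCorrection Database',
          'Run the Simulation', 'Compile and Plot Results')
if not any(programSettings[s] for s in stages):
    # mirrors the warning printed by main() when every stage is switched off
    print("All stages of the simulation have been switched off - enable at "
          "least one under 'Programming Settings' in the settings file.")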
tanghaibao/goatools
goatools/grouper/wr_sections.py
https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/grouper/wr_sections.py#L182-L202
def wr_txt_section_hdrgos(self, fout_txt, sortby=None, prt_section=True):
    """Write high GO IDs that are actually used to group current set of GO IDs."""
    sec2d_go = self.grprobj.get_sections_2d()    # lists of GO IDs
    sec2d_nt = self.get_sections_2dnt(sec2d_go)  # lists of GO Grouper namedtuples
    if sortby is None:
        sortby = self.fncsortnt
    with open(fout_txt, 'w') as prt:
        self.prt_ver(prt)
        prt.write("# GROUP NAME: {NAME}\n".format(NAME=self.grprobj.grpname))
        for section_name, nthdrgos_actual in sec2d_nt:
            if prt_section:
                prt.write("# SECTION: {SECTION}\n".format(SECTION=section_name))
            self.prt_ntgos(prt, nthdrgos_actual)
            if prt_section:
                prt.write("\n")
        dat = SummarySec2dHdrGos().summarize_sec2hdrgos(sec2d_go)
        sys.stdout.write(self.grprobj.fmtsum.format(
            GO_DESC='hdr', SECs=len(dat['S']), GOs=len(dat['G']), UNGRP=len(dat['U']),
            undesc="unused", ACTION="WROTE:", FILE=fout_txt))
        return sec2d_nt
[ "def", "wr_txt_section_hdrgos", "(", "self", ",", "fout_txt", ",", "sortby", "=", "None", ",", "prt_section", "=", "True", ")", ":", "sec2d_go", "=", "self", ".", "grprobj", ".", "get_sections_2d", "(", ")", "# lists of GO IDs", "sec2d_nt", "=", "self", ".", "get_sections_2dnt", "(", "sec2d_go", ")", "# lists of GO Grouper namedtuples", "if", "sortby", "is", "None", ":", "sortby", "=", "self", ".", "fncsortnt", "with", "open", "(", "fout_txt", ",", "'w'", ")", "as", "prt", ":", "self", ".", "prt_ver", "(", "prt", ")", "prt", ".", "write", "(", "\"# GROUP NAME: {NAME}\\n\"", ".", "format", "(", "NAME", "=", "self", ".", "grprobj", ".", "grpname", ")", ")", "for", "section_name", ",", "nthdrgos_actual", "in", "sec2d_nt", ":", "if", "prt_section", ":", "prt", ".", "write", "(", "\"# SECTION: {SECTION}\\n\"", ".", "format", "(", "SECTION", "=", "section_name", ")", ")", "self", ".", "prt_ntgos", "(", "prt", ",", "nthdrgos_actual", ")", "if", "prt_section", ":", "prt", ".", "write", "(", "\"\\n\"", ")", "dat", "=", "SummarySec2dHdrGos", "(", ")", ".", "summarize_sec2hdrgos", "(", "sec2d_go", ")", "sys", ".", "stdout", ".", "write", "(", "self", ".", "grprobj", ".", "fmtsum", ".", "format", "(", "GO_DESC", "=", "'hdr'", ",", "SECs", "=", "len", "(", "dat", "[", "'S'", "]", ")", ",", "GOs", "=", "len", "(", "dat", "[", "'G'", "]", ")", ",", "UNGRP", "=", "len", "(", "dat", "[", "'U'", "]", ")", ",", "undesc", "=", "\"unused\"", ",", "ACTION", "=", "\"WROTE:\"", ",", "FILE", "=", "fout_txt", ")", ")", "return", "sec2d_nt" ]
Write high GO IDs that are actually used to group current set of GO IDs.
[ "Write", "high", "GO", "IDs", "that", "are", "actually", "used", "to", "group", "current", "set", "of", "GO", "IDs", "." ]
python
train
54.047619
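A hedged usage sketch for wr_txt_section_hdrgos; `wrobj` is a placeholder for whatever grouper/writer object exposes this method (it must carry a populated `grprobj`), so only the call pattern is shown:

def write_header_go_sections(wrobj, fout_txt="sections_hdrgos.txt"):
    """Write the grouped header GO IDs and report section sizes."""
    sec2d_nt = wrobj.wr_txt_section_hdrgos(fout_txt, prt_section=True)
    for section_name, nthdrgos in sec2d_nt:
        print("{SEC}: {N} header GO IDs".format(SEC=section_name, N=len(nthdrgos)))
    return sec2d_nt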
cackharot/suds-py3
suds/sax/element.py
https://github.com/cackharot/suds-py3/blob/7387ec7806e9be29aad0a711bea5cb3c9396469c/suds/sax/element.py#L369-L386
def replaceChild(self, child, content):
    """
    Replace I{child} with the specified I{content}.
    @param child: A child element.
    @type child: L{Element}
    @param content: An element or collection of elements.
    @type content: L{Element} or [L{Element},]
    """
    if child not in self.children:
        raise Exception('child not-found')
    index = self.children.index(child)
    self.remove(child)
    if not isinstance(content, (list, tuple)):
        content = (content,)
    for node in content:
        self.children.insert(index, node.detach())
        node.parent = self
        index += 1
[ "def", "replaceChild", "(", "self", ",", "child", ",", "content", ")", ":", "if", "child", "not", "in", "self", ".", "children", ":", "raise", "Exception", "(", "'child not-found'", ")", "index", "=", "self", ".", "children", ".", "index", "(", "child", ")", "self", ".", "remove", "(", "child", ")", "if", "not", "isinstance", "(", "content", ",", "(", "list", ",", "tuple", ")", ")", ":", "content", "=", "(", "content", ",", ")", "for", "node", "in", "content", ":", "self", ".", "children", ".", "insert", "(", "index", ",", "node", ".", "detach", "(", ")", ")", "node", ".", "parent", "=", "self", "index", "+=", "1" ]
Replace I{child} with the specified I{content}.
@param child: A child element.
@type child: L{Element}
@param content: An element or collection of elements.
@type content: L{Element} or [L{Element},]
[ "Replace", "I", "{", "child", "}", "with", "the", "specified", "I", "{", "content", "}", "." ]
python
train
36.888889
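A short usage sketch for replaceChild, assuming Element construction and append() behave as in suds.sax.element; the element names are placeholders:

from suds.sax.element import Element

root = Element('Envelope')
old = Element('Old')
root.append(old)

# swap 'Old' for two new children in the same position
root.replaceChild(old, [Element('NewA'), Element('NewB')])
print([c.name for c in root.children])   # expected: ['NewA', 'NewB']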
rpcope1/PythonConfluenceAPI
PythonConfluenceAPI/api.py
https://github.com/rpcope1/PythonConfluenceAPI/blob/b7f0ca2a390f964715fdf3a60b5b0c5ef7116d40/PythonConfluenceAPI/api.py#L691-L705
def get_space_information(self, space_key, expand=None, callback=None):
    """
    Returns information about a space.
    :param space_key (string): A string containing the key of the space.
    :param expand (string): OPTIONAL: A comma separated list of properties to expand on the space.
                            Default: Empty.
    :param callback: OPTIONAL: The callback to execute on the resulting data, before the method returns.
                     Default: None (no callback, raw data returned).
    :return: The JSON data returned from the space/{spaceKey} endpoint, or the results of the callback.
             Will raise requests.HTTPError on bad input, potentially.
    """
    params = {}
    if expand:
        params["expand"] = expand
    return self._service_get_request("rest/api/space/{key}".format(key=space_key),
                                     params=params, callback=callback)
[ "def", "get_space_information", "(", "self", ",", "space_key", ",", "expand", "=", "None", ",", "callback", "=", "None", ")", ":", "params", "=", "{", "}", "if", "expand", ":", "params", "[", "\"expand\"", "]", "=", "expand", "return", "self", ".", "_service_get_request", "(", "\"rest/api/space/{key}\"", ".", "format", "(", "key", "=", "space_key", ")", ",", "params", "=", "params", ",", "callback", "=", "callback", ")" ]
Returns information about a space.
:param space_key (string): A string containing the key of the space.
:param expand (string): OPTIONAL: A comma separated list of properties to expand on the space. Default: Empty.
:param callback: OPTIONAL: The callback to execute on the resulting data, before the method returns. Default: None (no callback, raw data returned).
:return: The JSON data returned from the space/{spaceKey} endpoint, or the results of the callback. Will raise requests.HTTPError on bad input, potentially.
[ "Returns", "information", "about", "a", "space", ".", ":", "param", "space_key", "(", "string", ")", ":", "A", "string", "containing", "the", "key", "of", "the", "space", ".", ":", "param", "expand", "(", "string", ")", ":", "OPTIONAL", ":", "A", "comma", "separated", "list", "of", "properties", "to", "expand", "on", "the", "space", ".", "Default", ":", "Empty", ".", ":", "param", "callback", ":", "OPTIONAL", ":", "The", "callback", "to", "execute", "on", "the", "resulting", "data", "before", "the", "method", "returns", ".", "Default", ":", "None", "(", "no", "callback", "raw", "data", "returned", ")", ".", ":", "return", ":", "The", "JSON", "data", "returned", "from", "the", "space", "/", "{", "spaceKey", "}", "endpoint", "or", "the", "results", "of", "the", "callback", ".", "Will", "raise", "requests", ".", "HTTPError", "on", "bad", "input", "potentially", "." ]
python
train
61.533333
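A hedged call sketch for get_space_information; the site URL, credentials and space key are placeholders, and the expand string is just one example of the comma separated property list the docstring describes:

from PythonConfluenceAPI import ConfluenceAPI

api = ConfluenceAPI('username', 'password', 'https://wiki.example.com/')
space = api.get_space_information('DEMO', expand='description.plain,homepage')
print(space.get('key'), space.get('name'))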
galaxyproject/pulsar
pulsar/managers/queued_external_drmaa.py
https://github.com/galaxyproject/pulsar/blob/9ab6683802884324652da0a9f0808c7eb59d3ab4/pulsar/managers/queued_external_drmaa.py#L91-L108
def _handle_default(value, script_name):
    """
    There are two potential variants of these scripts, the Bash scripts
    that are meant to be run within PULSAR_ROOT for older-style installs
    and the binaries created by setup.py as part of a proper pulsar
    installation.

    This method first looks for the newer style variant of these scripts
    and returns the full path to them if needed and falls back to the
    bash scripts if these cannot be found.
    """
    if value:
        return value

    installed_script = which("pulsar-%s" % script_name.replace("_", "-"))
    if installed_script:
        return installed_script
    else:
        return "scripts/%s.bash" % script_name
[ "def", "_handle_default", "(", "value", ",", "script_name", ")", ":", "if", "value", ":", "return", "value", "installed_script", "=", "which", "(", "\"pulsar-%s\"", "%", "script_name", ".", "replace", "(", "\"_\"", ",", "\"-\"", ")", ")", "if", "installed_script", ":", "return", "installed_script", "else", ":", "return", "\"scripts/%s.bash\"", "%", "script_name" ]
There are two potential variants of these scripts, the Bash scripts that are meant to be run within PULSAR_ROOT for older-style installs and the binaries created by setup.py as part of a proper pulsar installation. This method first looks for the newer style variant of these scripts and returns the full path to them if needed and falls back to the bash scripts if these cannot be found.
[ "There", "are", "two", "potential", "variants", "of", "these", "scripts", "the", "Bash", "scripts", "that", "are", "meant", "to", "be", "run", "within", "PULSAR_ROOT", "for", "older", "-", "style", "installs", "and", "the", "binaries", "created", "by", "setup", ".", "py", "as", "part", "of", "a", "proper", "pulsar", "installation", "." ]
python
train
37.611111
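To illustrate the resolution order of _handle_default, here is a standalone mirror of the same logic (shutil.which stands in for pulsar's own `which` helper, and the script name is hypothetical):

from shutil import which  # stand-in for the `which` helper used above

def resolve_script(value, script_name):
    """Explicit value -> installed `pulsar-*` console script -> legacy bash script."""
    if value:
        return value
    installed = which("pulsar-%s" % script_name.replace("_", "-"))
    return installed or ("scripts/%s.bash" % script_name)

# e.g. '/usr/local/bin/pulsar-drmaa-launch' if installed, else 'scripts/drmaa_launch.bash'
print(resolve_script(None, "drmaa_launch"))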
saltstack/salt
salt/returners/postgres_local_cache.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/returners/postgres_local_cache.py#L224-L253
def returner(load):
    '''
    Return data to a postgres server
    '''
    conn = _get_conn()
    if conn is None:
        return None
    cur = conn.cursor()
    sql = '''INSERT INTO salt_returns
            (fun, jid, return, id, success)
            VALUES (%s, %s, %s, %s, %s)'''
    try:
        ret = six.text_type(load['return'])
    except UnicodeDecodeError:
        ret = str(load['return'])
    job_ret = {'return': ret}
    if 'retcode' in load:
        job_ret['retcode'] = load['retcode']
    if 'success' in load:
        job_ret['success'] = load['success']
    cur.execute(
        sql, (
            load['fun'],
            load['jid'],
            salt.utils.json.dumps(job_ret),
            load['id'],
            load.get('success'),
        )
    )
    _close_conn(conn)
[ "def", "returner", "(", "load", ")", ":", "conn", "=", "_get_conn", "(", ")", "if", "conn", "is", "None", ":", "return", "None", "cur", "=", "conn", ".", "cursor", "(", ")", "sql", "=", "'''INSERT INTO salt_returns\n (fun, jid, return, id, success)\n VALUES (%s, %s, %s, %s, %s)'''", "try", ":", "ret", "=", "six", ".", "text_type", "(", "load", "[", "'return'", "]", ")", "except", "UnicodeDecodeError", ":", "ret", "=", "str", "(", "load", "[", "'return'", "]", ")", "job_ret", "=", "{", "'return'", ":", "ret", "}", "if", "'retcode'", "in", "load", ":", "job_ret", "[", "'retcode'", "]", "=", "load", "[", "'retcode'", "]", "if", "'success'", "in", "load", ":", "job_ret", "[", "'success'", "]", "=", "load", "[", "'success'", "]", "cur", ".", "execute", "(", "sql", ",", "(", "load", "[", "'fun'", "]", ",", "load", "[", "'jid'", "]", ",", "salt", ".", "utils", ".", "json", ".", "dumps", "(", "job_ret", ")", ",", "load", "[", "'id'", "]", ",", "load", ".", "get", "(", "'success'", ")", ",", ")", ")", "_close_conn", "(", "conn", ")" ]
Return data to a postgres server
[ "Return", "data", "to", "a", "postgres", "server" ]
python
train
25.566667
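For reference, a hypothetical `load` payload of the shape returner() expects (field values are made up); the function JSON-serialises the derived job_ret dict into the `return` column of salt_returns:

load = {
    'fun': 'test.ping',
    'jid': '20240101120000000000',
    'return': True,
    'retcode': 0,
    'success': True,
    'id': 'minion-01',
}
# returner(load) then executes one INSERT with
# ('test.ping', '20240101120000000000',
#  '{"return": "True", "retcode": 0, "success": true}', 'minion-01', True)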
astrocatalogs/astrocats
astrocats/catalog/catalog.py
https://github.com/astrocatalogs/astrocats/blob/11abc3131c6366ecd23964369e55ff264add7805/astrocats/catalog/catalog.py#L754-L851
def load_stubs(self, log_mem=False):
    """Load all events in their `stub` (name, alias, etc only) form.

    Used in `update` mode.
    """
    # Initialize parameter related to diagnostic output of memory usage
    if log_mem:
        import psutil
        process = psutil.Process(os.getpid())
        rss = process.memory_info().rss
        LOG_MEMORY_INT = 1000
        MEMORY_LIMIT = 1000.0

    def _add_stub_manually(_fname):
        """Create and add a 'stub' by manually loading parameters from
        JSON files.

        Previously this was done by creating a full `Entry` instance, then
        using the `Entry.get_stub()` method to trim it down. This was very
        slow and memory intensive, hence this improved approach.
        """
        # FIX: should this be ``fi.endswith(``.gz')`` ?
        fname = uncompress_gz(_fname) if '.gz' in _fname else _fname

        stub = None
        stub_name = None
        with codecs.open(fname, 'r') as jfil:
            # Load the full JSON file
            data = json.load(jfil, object_pairs_hook=OrderedDict)
            # Extract the top-level keys (should just be the name of the entry)
            stub_name = list(data.keys())
            # Make sure there is only a single top-level entry
            if len(stub_name) != 1:
                err = "json file '{}' has multiple keys: {}".format(
                    fname, list(stub_name))
                self._log.error(err)
                raise ValueError(err)
            stub_name = stub_name[0]

            # Make sure a non-stub entry doesnt already exist with this name
            if stub_name in self.entries and not self.entries[stub_name]._stub:
                err_str = (
                    "ERROR: non-stub entry already exists with name '{}'"
                    .format(stub_name))
                self.log.error(err_str)
                raise RuntimeError(err_str)

            # Remove the outmost dict level
            data = data[stub_name]
            # Create a new `Entry` (subclass) instance
            proto = self.proto
            stub = proto(catalog=self, name=stub_name, stub=True)

            # Add stub parameters if they are available
            if proto._KEYS.ALIAS in data:
                stub[proto._KEYS.ALIAS] = data[proto._KEYS.ALIAS]
            if proto._KEYS.DISTINCT_FROM in data:
                stub[proto._KEYS.DISTINCT_FROM] = data[proto._KEYS.DISTINCT_FROM]
            if proto._KEYS.RA in data:
                stub[proto._KEYS.RA] = data[proto._KEYS.RA]
            if proto._KEYS.DEC in data:
                stub[proto._KEYS.DEC] = data[proto._KEYS.DEC]
            if proto._KEYS.DISCOVER_DATE in data:
                stub[proto._KEYS.DISCOVER_DATE] = data[proto._KEYS.DISCOVER_DATE]
            if proto._KEYS.SOURCES in data:
                stub[proto._KEYS.SOURCES] = data[proto._KEYS.SOURCES]

        # Store the stub
        self.entries[stub_name] = stub
        self.log.debug("Added stub for '{}'".format(stub_name))

    currenttask = 'Loading entry stubs'
    files = self.PATHS.get_repo_output_file_list()
    for ii, _fname in enumerate(pbar(files, currenttask)):
        # Run normally
        # _add_stub(_fname)
        # Run 'manually' (extract stub parameters directly from JSON)
        _add_stub_manually(_fname)

        if log_mem:
            rss = process.memory_info().rss / 1024 / 1024
            if ii % LOG_MEMORY_INT == 0 or rss > MEMORY_LIMIT:
                log_memory(self.log, "\nLoaded stub {}".format(ii), logging.INFO)
                if rss > MEMORY_LIMIT:
                    err = (
                        "Memory usage {}, has exceeded {} on file {} '{}'".
                        format(rss, MEMORY_LIMIT, ii, _fname))
                    self.log.error(err)
                    raise RuntimeError(err)

    return self.entries
[ "def", "load_stubs", "(", "self", ",", "log_mem", "=", "False", ")", ":", "# Initialize parameter related to diagnostic output of memory usage", "if", "log_mem", ":", "import", "psutil", "process", "=", "psutil", ".", "Process", "(", "os", ".", "getpid", "(", ")", ")", "rss", "=", "process", ".", "memory_info", "(", ")", ".", "rss", "LOG_MEMORY_INT", "=", "1000", "MEMORY_LIMIT", "=", "1000.0", "def", "_add_stub_manually", "(", "_fname", ")", ":", "\"\"\"Create and add a 'stub' by manually loading parameters from\n JSON files.\n\n Previously this was done by creating a full `Entry` instance, then\n using the `Entry.get_stub()` method to trim it down. This was very\n slow and memory intensive, hence this improved approach.\n \"\"\"", "# FIX: should this be ``fi.endswith(``.gz')`` ?", "fname", "=", "uncompress_gz", "(", "_fname", ")", "if", "'.gz'", "in", "_fname", "else", "_fname", "stub", "=", "None", "stub_name", "=", "None", "with", "codecs", ".", "open", "(", "fname", ",", "'r'", ")", "as", "jfil", ":", "# Load the full JSON file", "data", "=", "json", ".", "load", "(", "jfil", ",", "object_pairs_hook", "=", "OrderedDict", ")", "# Extract the top-level keys (should just be the name of the", "# entry)", "stub_name", "=", "list", "(", "data", ".", "keys", "(", ")", ")", "# Make sure there is only a single top-level entry", "if", "len", "(", "stub_name", ")", "!=", "1", ":", "err", "=", "\"json file '{}' has multiple keys: {}\"", ".", "format", "(", "fname", ",", "list", "(", "stub_name", ")", ")", "self", ".", "_log", ".", "error", "(", "err", ")", "raise", "ValueError", "(", "err", ")", "stub_name", "=", "stub_name", "[", "0", "]", "# Make sure a non-stub entry doesnt already exist with this", "# name", "if", "stub_name", "in", "self", ".", "entries", "and", "not", "self", ".", "entries", "[", "stub_name", "]", ".", "_stub", ":", "err_str", "=", "(", "\"ERROR: non-stub entry already exists with name '{}'\"", ".", "format", "(", "stub_name", ")", ")", "self", ".", "log", ".", "error", "(", "err_str", ")", "raise", "RuntimeError", "(", "err_str", ")", "# Remove the outmost dict level", "data", "=", "data", "[", "stub_name", "]", "# Create a new `Entry` (subclass) instance", "proto", "=", "self", ".", "proto", "stub", "=", "proto", "(", "catalog", "=", "self", ",", "name", "=", "stub_name", ",", "stub", "=", "True", ")", "# Add stub parameters if they are available", "if", "proto", ".", "_KEYS", ".", "ALIAS", "in", "data", ":", "stub", "[", "proto", ".", "_KEYS", ".", "ALIAS", "]", "=", "data", "[", "proto", ".", "_KEYS", ".", "ALIAS", "]", "if", "proto", ".", "_KEYS", ".", "DISTINCT_FROM", "in", "data", ":", "stub", "[", "proto", ".", "_KEYS", ".", "DISTINCT_FROM", "]", "=", "data", "[", "proto", ".", "_KEYS", ".", "DISTINCT_FROM", "]", "if", "proto", ".", "_KEYS", ".", "RA", "in", "data", ":", "stub", "[", "proto", ".", "_KEYS", ".", "RA", "]", "=", "data", "[", "proto", ".", "_KEYS", ".", "RA", "]", "if", "proto", ".", "_KEYS", ".", "DEC", "in", "data", ":", "stub", "[", "proto", ".", "_KEYS", ".", "DEC", "]", "=", "data", "[", "proto", ".", "_KEYS", ".", "DEC", "]", "if", "proto", ".", "_KEYS", ".", "DISCOVER_DATE", "in", "data", ":", "stub", "[", "proto", ".", "_KEYS", ".", "DISCOVER_DATE", "]", "=", "data", "[", "proto", ".", "_KEYS", ".", "DISCOVER_DATE", "]", "if", "proto", ".", "_KEYS", ".", "SOURCES", "in", "data", ":", "stub", "[", "proto", ".", "_KEYS", ".", "SOURCES", "]", "=", "data", "[", "proto", ".", "_KEYS", ".", "SOURCES", "]", "# Store the stub", "self", ".", "entries", 
"[", "stub_name", "]", "=", "stub", "self", ".", "log", ".", "debug", "(", "\"Added stub for '{}'\"", ".", "format", "(", "stub_name", ")", ")", "currenttask", "=", "'Loading entry stubs'", "files", "=", "self", ".", "PATHS", ".", "get_repo_output_file_list", "(", ")", "for", "ii", ",", "_fname", "in", "enumerate", "(", "pbar", "(", "files", ",", "currenttask", ")", ")", ":", "# Run normally", "# _add_stub(_fname)", "# Run 'manually' (extract stub parameters directly from JSON)", "_add_stub_manually", "(", "_fname", ")", "if", "log_mem", ":", "rss", "=", "process", ".", "memory_info", "(", ")", ".", "rss", "/", "1024", "/", "1024", "if", "ii", "%", "LOG_MEMORY_INT", "==", "0", "or", "rss", ">", "MEMORY_LIMIT", ":", "log_memory", "(", "self", ".", "log", ",", "\"\\nLoaded stub {}\"", ".", "format", "(", "ii", ")", ",", "logging", ".", "INFO", ")", "if", "rss", ">", "MEMORY_LIMIT", ":", "err", "=", "(", "\"Memory usage {}, has exceeded {} on file {} '{}'\"", ".", "format", "(", "rss", ",", "MEMORY_LIMIT", ",", "ii", ",", "_fname", ")", ")", "self", ".", "log", ".", "error", "(", "err", ")", "raise", "RuntimeError", "(", "err", ")", "return", "self", ".", "entries" ]
Load all events in their `stub` (name, alias, etc only) form. Used in `update` mode.
[ "Load", "all", "events", "in", "their", "stub", "(", "name", "alias", "etc", "only", ")", "form", "." ]
python
train
43.285714
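A brief, hedged usage sketch; `catalog` stands in for an already-constructed Catalog subclass instance whose output repositories are checked out:

def preload_stubs(catalog, watch_memory=False):
    """Load name/alias/position stubs for every entry before an `update` run."""
    entries = catalog.load_stubs(log_mem=watch_memory)
    print("loaded {} stubs".format(len(entries)))
    return entries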
JetBrains/teamcity-messages
teamcity/pylint_reporter.py
https://github.com/JetBrains/teamcity-messages/blob/44f6d1fde33a48547a8f9fe31814522347a87b39/teamcity/pylint_reporter.py#L64-L71
def display_reports(self, layout):
    """Issues the final PyLint score as a TeamCity build statistic value"""
    try:
        score = self.linter.stats['global_note']
    except (AttributeError, KeyError):
        pass
    else:
        self.tc.message('buildStatisticValue', key='PyLintScore', value=str(score))
[ "def", "display_reports", "(", "self", ",", "layout", ")", ":", "try", ":", "score", "=", "self", ".", "linter", ".", "stats", "[", "'global_note'", "]", "except", "(", "AttributeError", ",", "KeyError", ")", ":", "pass", "else", ":", "self", ".", "tc", ".", "message", "(", "'buildStatisticValue'", ",", "key", "=", "'PyLintScore'", ",", "value", "=", "str", "(", "score", ")", ")" ]
Issues the final PyLint score as a TeamCity build statistic value
[ "Issues", "the", "final", "PyLint", "score", "as", "a", "TeamCity", "build", "statistic", "value" ]
python
train
41.875
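What display_reports boils down to when pylint has computed a global score is a buildStatisticValue service message on stdout. A small, self-contained sketch (the score is made up; the real reporter delegates message formatting and escaping to the teamcity-messages library):

stats = {'global_note': 9.25}           # what pylint stores after a run
score = stats.get('global_note')
if score is not None:
    print("##teamcity[buildStatisticValue key='PyLintScore' value='%s']" % score)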
saltstack/salt
salt/modules/pkgng.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/pkgng.py#L129-L141
def _contextkey(jail=None, chroot=None, root=None, prefix='pkg.list_pkgs'):
    '''
    As this module is designed to manipulate packages in jails and chroots, use
    the passed jail/chroot to ensure that a key in the __context__ dict that is
    unique to that jail/chroot is used.
    '''
    if jail:
        return six.text_type(prefix) + '.jail_{0}'.format(jail)
    elif chroot:
        return six.text_type(prefix) + '.chroot_{0}'.format(chroot)
    elif root:
        return six.text_type(prefix) + '.root_{0}'.format(root)
    return prefix
[ "def", "_contextkey", "(", "jail", "=", "None", ",", "chroot", "=", "None", ",", "root", "=", "None", ",", "prefix", "=", "'pkg.list_pkgs'", ")", ":", "if", "jail", ":", "return", "six", ".", "text_type", "(", "prefix", ")", "+", "'.jail_{0}'", ".", "format", "(", "jail", ")", "elif", "chroot", ":", "return", "six", ".", "text_type", "(", "prefix", ")", "+", "'.chroot_{0}'", ".", "format", "(", "chroot", ")", "elif", "root", ":", "return", "six", ".", "text_type", "(", "prefix", ")", "+", "'.root_{0}'", ".", "format", "(", "root", ")", "return", "prefix" ]
As this module is designed to manipulate packages in jails and chroots, use the passed jail/chroot to ensure that a key in the __context__ dict that is unique to that jail/chroot is used.
[ "As", "this", "module", "is", "designed", "to", "manipulate", "packages", "in", "jails", "and", "chroots", "use", "the", "passed", "jail", "/", "chroot", "to", "ensure", "that", "a", "key", "in", "the", "__context__", "dict", "that", "is", "unique", "to", "that", "jail", "/", "chroot", "is", "used", "." ]
python
train
41.384615
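The keys _contextkey builds for the __context__ cache, assuming the function above is in scope (six.text_type is simply str on Python 3); note that jail takes precedence over chroot, which takes precedence over root:

print(_contextkey())                                        # 'pkg.list_pkgs'
print(_contextkey(jail='web1'))                             # 'pkg.list_pkgs.jail_web1'
print(_contextkey(chroot='/buildroot'))                     # 'pkg.list_pkgs.chroot_/buildroot'
print(_contextkey(root='/altroot', prefix='pkg.version'))   # 'pkg.version.root_/altroot'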