Dataset schema (per-column type and observed range):

repo              string, 7 to 55 chars
path              string, 4 to 223 chars
url               string, 87 to 315 chars
code              string, 75 to 104k chars
code_tokens       list
docstring         string, 1 to 46.9k chars
docstring_tokens  list
language          string, 1 distinct value
partition         string, 3 distinct values
avg_line_len      float64, 7.91 to 980

repo: codelv/enaml-native
path: src/enamlnative/android/app.py
url:  https://github.com/codelv/enaml-native/blob/c33986e9eda468c508806e0a3e73c771401e5718/src/enamlnative/android/app.py#L153-L171

def show_toast(self, msg, long=True):
    """ Show a toast message for the given duration.
    This is an android specific api.

    Parameters
    -----------
    msg: str
        Text to display in the toast message
    long: bool
        Display for a long or short (system defined) duration

    """
    from .android_toast import Toast

    def on_toast(ref):
        t = Toast(__id__=ref)
        t.show()

    Toast.makeText(self, msg, 1 if long else 0).then(on_toast)

language: python | partition: train | avg_line_len: 27

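A minimal usage sketch for this record's function (hypothetical: `app` stands in for the running enaml-native Android application object that defines show_toast):

# Hypothetical usage; `app` is the active application instance.
app.show_toast("Download complete")   # long, system-defined duration
app.show_toast("Saved", long=False)   # short duration
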
repo: idlesign/uwsgiconf
path: uwsgiconf/config.py
url:  https://github.com/idlesign/uwsgiconf/blob/475407acb44199edbf7e0a66261bfeb51de1afae/uwsgiconf/config.py#L363-L375

def set_fallback(self, target):
    """Sets a fallback configuration for section.

    Re-exec uWSGI with the specified config when exit code is 1.

    :param str|unicode|Section target: File path or Section to include.

    """
    if isinstance(target, Section):
        target = ':' + target.name

    self._set('fallback-config', target)

    return self

language: python | partition: train | avg_line_len: 29.076923

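A sketch of how this fluent setter chains, assuming uwsgiconf's Section can be constructed with a name (the names here are illustrative):

from uwsgiconf.config import Section

main = Section(name='main')
backup = Section(name='backup')

# A Section argument becomes ':<name>' (a section in the same file);
# a plain string is treated as a file path.
main.set_fallback(backup)            # fallback-config = :backup
main.set_fallback('emergency.ini')   # fallback-config = emergency.ini
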
repo: trec-kba/streamcorpus-pipeline
path: streamcorpus_pipeline/_pipeline.py
url:  https://github.com/trec-kba/streamcorpus-pipeline/blob/8bb82ea1beb83c6b40ed03fa1659df2897c2292a/streamcorpus_pipeline/_pipeline.py#L352-L371

def _init_all_stages(self, config):
    '''Create stages that are used for the pipeline.

    :param dict config: `streamcorpus_pipeline` configuration
    :return: tuple of (reader, incremental transforms, batch
      transforms, post-batch incremental transforms, writers,
      temporary directory)

    '''
    reader = self._init_stage(config, 'reader')
    incremental_transforms = self._init_stages(
        config, 'incremental_transforms')
    batch_transforms = self._init_stages(config, 'batch_transforms')
    post_batch_incremental_transforms = self._init_stages(
        config, 'post_batch_incremental_transforms')
    writers = self._init_stages(config, 'writers')
    tmp_dir_path = os.path.join(config['tmp_dir_path'], self.tmp_dir_suffix)
    return (reader, incremental_transforms, batch_transforms,
            post_batch_incremental_transforms, writers, tmp_dir_path)

language: python | partition: test | avg_line_len: 48.5

repo: DarkEnergySurvey/ugali
path: ugali/isochrone/model.py
url:  https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/isochrone/model.py#L350-L381

def absolute_magnitude(self, richness=1, steps=1e4):
    """
    Calculate the absolute visual magnitude (Mv) from the richness
    by transforming the isochrone in the SDSS system and using the
    g,r -> V transform equations from Jester 2005
    [astro-ph/0506022].

    Parameters:
    -----------
    richness : isochrone normalization parameter
    steps    : number of isochrone sampling steps

    Returns:
    --------
    abs_mag : Absolute magnitude (Mv)
    """
    # Using the SDSS g,r -> V from Jester 2005 [astro-ph/0506022]
    # for stars with R-I < 1.15
    # V = g_sdss - 0.59*(g_sdss - r_sdss) - 0.01

    # Create a copy of the isochrone in the SDSS system
    params = {k: v.value for k, v in self._params.items()}
    params.update(band_1='g', band_2='r', survey='sdss')
    iso = self.__class__(**params)

    # g, r are absolute magnitude
    mass_init, mass_pdf, mass_act, sdss_g, sdss_r = iso.sample(mass_steps=steps)
    V = jester_mag_v(sdss_g, sdss_r)

    # Sum the V-band absolute magnitudes
    return sum_mags(V, weights=mass_pdf * richness)

language: python | partition: train | avg_line_len: 35.53125

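A hypothetical call, assuming `iso` is an instantiated ugali isochrone model (the richness value is illustrative):

# Integrated Mv for a richness of ~1000 stars' worth of normalization.
Mv = iso.absolute_magnitude(richness=1000, steps=1e4)
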
repo: jtwhite79/pyemu
path: pyemu/utils/helpers.py
url:  https://github.com/jtwhite79/pyemu/blob/c504d8e7a4097cec07655a6318d275739bd8148a/pyemu/utils/helpers.py#L3628-L3677

def write_const_tpl(name, tpl_file, suffix, zn_array=None, shape=None,
                    spatial_reference=None, longnames=False):
    """ write a constant (uniform) template file

    Parameters
    ----------
    name : str
        the base parameter name
    tpl_file : str
        the template file to write - include path
    zn_array : numpy.ndarray
        an array used to skip inactive cells

    Returns
    -------
    df : pandas.DataFrame
        a dataframe with parameter information

    """
    if shape is None and zn_array is None:
        raise Exception("must pass either zn_array or shape")
    elif shape is None:
        shape = zn_array.shape

    parnme = []
    with open(tpl_file, 'w') as f:
        f.write("ptf ~\n")
        for i in range(shape[0]):
            for j in range(shape[1]):
                if zn_array is not None and zn_array[i, j] < 1:
                    pname = " 1.0 "
                else:
                    if longnames:
                        pname = "const_{0}_{1}".format(name, suffix)
                    else:
                        pname = "{0}{1}".format(name, suffix)
                        if len(pname) > 12:
                            # note: the original raised a bare string here,
                            # which is a TypeError; raise a real exception
                            raise Exception("zone pname too long:{0}".format(pname))
                    parnme.append(pname)
                    pname = " ~ {0} ~".format(pname)
                f.write(pname)
            f.write("\n")
    df = pd.DataFrame({"parnme": parnme}, index=parnme)
    # df.loc[:,"pargp"] = "{0}{1}".format(self.cn_suffixname)
    df.loc[:, "pargp"] = "{0}_{1}".format(name, suffix.replace('_', ''))
    df.loc[:, "tpl"] = tpl_file
    return df

language: python | partition: train | avg_line_len: 32.96

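A short usage sketch under stated assumptions (numpy and pandas imported as in the module; file and parameter names are illustrative):

import numpy as np

# Cells with zone value < 1 are written as the fixed string " 1.0 ".
zn = np.ones((3, 4), dtype=int)
zn[0, 0] = 0

df = write_const_tpl("hk", "hk_const.dat.tpl", "cn", zn_array=zn)
print(df.parnme.unique())   # one constant parameter name, here 'hkcn'
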
repo: pri22296/beautifultable
path: beautifultable/beautifultable.py
url:  https://github.com/pri22296/beautifultable/blob/c9638f73dff4bb1f341c9ee783e4e47f26efba0b/beautifultable/beautifultable.py#L629-L701

def _calculate_column_widths(self):
    """Calculate width of column automatically based on data."""
    table_width = self.get_table_width()
    lpw, rpw = self._left_padding_widths, self._right_padding_widths
    pad_widths = [(lpw[i] + rpw[i]) for i in range(self._column_count)]
    max_widths = [0 for index in range(self._column_count)]
    offset = table_width - sum(self._column_widths) + sum(pad_widths)
    self._max_table_width = max(self._max_table_width,
                                offset + self._column_count)

    for index, column in enumerate(zip(*self._table)):
        max_length = 0
        for i in column:
            for j in to_unicode(i).split('\n'):
                output_str = get_output_str(j, self.detect_numerics,
                                            self.numeric_precision,
                                            self.sign_mode.value)
                max_length = max(max_length, termwidth(output_str))
        for i in to_unicode(self._column_headers[index]).split('\n'):
            output_str = get_output_str(i, self.detect_numerics,
                                        self.numeric_precision,
                                        self.sign_mode.value)
            max_length = max(max_length, termwidth(output_str))
        max_widths[index] += max_length

    sum_ = sum(max_widths)
    desired_sum = self._max_table_width - offset

    # Set flag for columns who are within their fair share
    temp_sum = 0
    flag = [0] * len(max_widths)
    for i, width in enumerate(max_widths):
        if width <= int(desired_sum / self._column_count):
            temp_sum += width
            flag[i] = 1
        else:
            # Allocate atleast 1 character width to the column
            temp_sum += 1

    avail_space = desired_sum - temp_sum
    actual_space = sum_ - temp_sum
    shrinked_columns = {}

    # Columns which exceed their fair share should be shrinked based on
    # how much space is left for the table
    for i, width in enumerate(max_widths):
        self.column_widths[i] = width
        if not flag[i]:
            new_width = 1 + int((width - 1) * avail_space / actual_space)
            if new_width < width:
                self.column_widths[i] = new_width
                shrinked_columns[new_width] = i

    # Divide any remaining space among shrinked columns
    if shrinked_columns:
        extra = (self._max_table_width - offset
                 - sum(self.column_widths))
        actual_space = sum(shrinked_columns)
        if extra > 0:
            for i, width in enumerate(sorted(shrinked_columns)):
                index = shrinked_columns[width]
                extra_width = int(width * extra / actual_space)
                self.column_widths[i] += extra_width
                if i == (len(shrinked_columns) - 1):
                    extra = (self._max_table_width - offset
                             - sum(self.column_widths))
                    self.column_widths[index] += extra

    for i in range(self.column_count):
        self.column_widths[i] += pad_widths[i]

language: python | partition: train | avg_line_len: 45.246575

repo: ScriptSmith/socialreaper
path: socialreaper/tools.py
url:  https://github.com/ScriptSmith/socialreaper/blob/87fcc3b74bbed6c4f8e7f49a5f0eb8a616cf38da/socialreaper/tools.py#L31-L49

def fill_gaps(list_dicts):
    """
    Fill gaps in a list of dictionaries. Add empty keys to dictionaries in
    the list that don't contain other entries' keys

    :param list_dicts: A list of dictionaries
    :return: A list of field names, a list of dictionaries with identical keys
    """
    field_names = []  # != set bc. preserving order is better for output
    for datum in list_dicts:
        for key in datum.keys():
            if key not in field_names:
                field_names.append(key)
    for datum in list_dicts:
        for key in field_names:
            if key not in datum:
                datum[key] = ''
    return list(field_names), list_dicts

language: python | partition: valid | avg_line_len: 35.631579

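Because fill_gaps is self-contained, a runnable example is easy (outputs shown in comments):

rows = [{"a": 1}, {"b": 2, "a": 3}]
fields, filled = fill_gaps(rows)
print(fields)   # ['a', 'b']
print(filled)   # [{'a': 1, 'b': ''}, {'b': 2, 'a': 3}]
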
repo: Microsoft/LightGBM
path: python-package/lightgbm/basic.py
url:  https://github.com/Microsoft/LightGBM/blob/8d2ec69f4f685b0ab1c4624d59ee2d3287bb3147/python-package/lightgbm/basic.py#L1114-L1160

def set_field(self, field_name, data):
    """Set property into the Dataset.

    Parameters
    ----------
    field_name : string
        The field name of the information.
    data : list, numpy 1-D array, pandas Series or None
        The array of data to be set.

    Returns
    -------
    self : Dataset
        Dataset with set property.
    """
    if self.handle is None:
        raise Exception("Cannot set %s before construct dataset" % field_name)
    if data is None:
        # set to None
        _safe_call(_LIB.LGBM_DatasetSetField(
            self.handle,
            c_str(field_name),
            None,
            ctypes.c_int(0),
            ctypes.c_int(FIELD_TYPE_MAPPER[field_name])))
        return self
    dtype = np.float32
    if field_name == 'group':
        dtype = np.int32
    elif field_name == 'init_score':
        dtype = np.float64
    data = list_to_1d_numpy(data, dtype, name=field_name)
    if data.dtype == np.float32 or data.dtype == np.float64:
        ptr_data, type_data, _ = c_float_array(data)
    elif data.dtype == np.int32:
        ptr_data, type_data, _ = c_int_array(data)
    else:
        raise TypeError("Excepted np.float32/64 or np.int32, meet type({})".format(data.dtype))
    if type_data != FIELD_TYPE_MAPPER[field_name]:
        raise TypeError("Input type error for set_field")
    _safe_call(_LIB.LGBM_DatasetSetField(
        self.handle,
        c_str(field_name),
        ptr_data,
        ctypes.c_int(len(data)),
        ctypes.c_int(type_data)))
    return self

language: python | partition: train | avg_line_len: 35.425532

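A hedged usage sketch (set_field is usually reached indirectly via set_label and friends; the data shapes are illustrative):

import numpy as np
import lightgbm as lgb

X = np.random.rand(100, 5)
ds = lgb.Dataset(X).construct()   # set_field needs a constructed Dataset
ds.set_field('label', np.random.rand(100))
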
repo: twisted/axiom
path: axiom/item.py
url:  https://github.com/twisted/axiom/blob/7de70bc8fe1bb81f9c2339fba8daec9eb2e92b68/axiom/item.py#L517-L525

def _schemaPrepareInsert(self, store):
    """
    Prepare each attribute in my schema for insertion into a given store,
    either by upgrade or by creation.  This makes sure all references
    point to this store and all relative paths point to this store's
    files directory.
    """
    for name, atr in self.getSchema():
        atr.prepareInsert(self, store)

language: python | partition: train | avg_line_len: 43.333333

repo: google/grr
path: grr/server/grr_response_server/file_store.py
url:  https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/file_store.py#L211-L304

def AddFilesWithUnknownHashes(client_path_blob_refs,
                              use_external_stores=True):
    """Adds new files consisting of given blob references.

    Args:
      client_path_blob_refs: A dictionary mapping `db.ClientPath` instances
        to lists of blob references.
      use_external_stores: A flag indicating if the files should also be
        added to external file stores.

    Returns:
      A dictionary mapping `db.ClientPath` to hash ids of the file.

    Raises:
      BlobNotFoundError: If one of the referenced blobs cannot be found.
    """
    hash_id_blob_refs = dict()
    client_path_hash_id = dict()
    metadatas = dict()

    all_client_path_blob_refs = list()
    for client_path, blob_refs in iteritems(client_path_blob_refs):
        # In the special case where there is only one blob, we don't need to
        # go to the data store to read said blob and rehash it, we have all
        # that information already available. For empty files without blobs,
        # we can just hash the empty string instead.
        if len(blob_refs) <= 1:
            if blob_refs:
                hash_id = rdf_objects.SHA256HashID.FromBytes(
                    blob_refs[0].blob_id.AsBytes())
            else:
                hash_id = rdf_objects.SHA256HashID.FromData(b"")

            client_path_hash_id[client_path] = hash_id
            hash_id_blob_refs[hash_id] = blob_refs
            metadatas[hash_id] = FileMetadata(
                client_path=client_path, blob_refs=blob_refs)
        else:
            for blob_ref in blob_refs:
                all_client_path_blob_refs.append((client_path, blob_ref))

    client_path_offset = collections.defaultdict(lambda: 0)
    client_path_sha256 = collections.defaultdict(hashlib.sha256)
    verified_client_path_blob_refs = collections.defaultdict(list)

    client_path_blob_ref_batches = collection.Batch(
        items=all_client_path_blob_refs, size=_BLOBS_READ_BATCH_SIZE)
    for client_path_blob_ref_batch in client_path_blob_ref_batches:
        blob_id_batch = set(
            blob_ref.blob_id for _, blob_ref in client_path_blob_ref_batch)
        blobs = data_store.BLOBS.ReadBlobs(blob_id_batch)

        for client_path, blob_ref in client_path_blob_ref_batch:
            blob = blobs[blob_ref.blob_id]
            if blob is None:
                message = "Could not find one of referenced blobs: {}".format(
                    blob_ref.blob_id)
                raise BlobNotFoundError(message)

            offset = client_path_offset[client_path]
            if blob_ref.size != len(blob):
                raise ValueError(
                    "Got conflicting size information for blob %s: %d vs %d." %
                    (blob_ref.blob_id, blob_ref.size, len(blob)))
            if blob_ref.offset != offset:
                raise ValueError(
                    "Got conflicting offset information for blob %s: %d vs %d." %
                    (blob_ref.blob_id, blob_ref.offset, offset))

            verified_client_path_blob_refs[client_path].append(blob_ref)
            client_path_offset[client_path] = offset + len(blob)
            client_path_sha256[client_path].update(blob)

    for client_path in iterkeys(client_path_sha256):
        sha256 = client_path_sha256[client_path].digest()
        hash_id = rdf_objects.SHA256HashID.FromBytes(sha256)

        client_path_hash_id[client_path] = hash_id
        hash_id_blob_refs[hash_id] = verified_client_path_blob_refs[client_path]

    data_store.REL_DB.WriteHashBlobReferences(hash_id_blob_refs)

    if use_external_stores:
        for client_path in iterkeys(verified_client_path_blob_refs):
            metadatas[client_path_hash_id[client_path]] = FileMetadata(
                client_path=client_path,
                blob_refs=verified_client_path_blob_refs[client_path])

        EXTERNAL_FILE_STORE.AddFiles(metadatas)

    return client_path_hash_id

language: python | partition: train | avg_line_len: 37.244681

repo: gwastro/pycbc
path: pycbc/workflow/psdfiles.py
url:  https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/workflow/psdfiles.py#L91-L151

def setup_psd_pregenerated(workflow, tags=None):
    '''
    Setup CBC workflow to use pregenerated psd files.
    The file given in cp.get('workflow','pregenerated-psd-file-(ifo)') will
    be used as the --psd-file argument to geom_nonspinbank, geom_aligned_bank
    and pycbc_plot_psd_file.

    Parameters
    ----------
    workflow: pycbc.workflow.core.Workflow
        An instanced class that manages the constructed workflow.
    tags : list of strings
        If given these tags are used to uniquely name and identify output
        files that would be produced in multiple calls to this function.

    Returns
    --------
    psd_files : pycbc.workflow.core.FileList
        The FileList holding the gating files
    '''
    if tags is None:
        tags = []
    psd_files = FileList([])

    cp = workflow.cp
    global_seg = workflow.analysis_time
    user_tag = "PREGEN_PSD"

    # Check for one psd for all ifos
    try:
        pre_gen_file = cp.get_opt_tags('workflow-psd',
                                       'psd-pregenerated-file', tags)
        pre_gen_file = resolve_url(pre_gen_file)
        file_url = urlparse.urljoin('file:',
                                    urllib.pathname2url(pre_gen_file))
        curr_file = File(workflow.ifos, user_tag, global_seg, file_url,
                         tags=tags)
        curr_file.PFN(file_url, site='local')
        psd_files.append(curr_file)
    except ConfigParser.Error:
        # Check for one psd per ifo
        for ifo in workflow.ifos:
            try:
                pre_gen_file = cp.get_opt_tags('workflow-psd',
                                               'psd-pregenerated-file-%s' % ifo.lower(),
                                               tags)
                pre_gen_file = resolve_url(pre_gen_file)
                file_url = urlparse.urljoin('file:',
                                            urllib.pathname2url(pre_gen_file))
                curr_file = File(ifo, user_tag, global_seg, file_url,
                                 tags=tags)
                curr_file.PFN(file_url, site='local')
                psd_files.append(curr_file)
            except ConfigParser.Error:
                # It's unlikely, but not impossible, that only some ifos
                # will have pregenerated PSDs
                logging.warn("No psd file specified for IFO %s." % (ifo,))
                pass
    return psd_files

language: python | partition: train | avg_line_len: 38.836066

repo: ibis-project/ibis
path: ibis/clickhouse/client.py
url:  https://github.com/ibis-project/ibis/blob/1e39a5fd9ef088b45c155e8a5f541767ee8ef2e7/ibis/clickhouse/client.py#L317-L336

def list_databases(self, like=None):
    """
    List databases in the Clickhouse cluster.
    Like the SHOW DATABASES command in the clickhouse-shell.

    Parameters
    ----------
    like : string, default None
        e.g. 'foo*' to match all tables starting with 'foo'

    Returns
    -------
    databases : list of strings
    """
    statement = 'SELECT name FROM system.databases'
    if like:
        statement += " WHERE name LIKE '{0}'".format(like)

    data, _, _ = self.raw_sql(statement, results=True)
    return data[0]

language: python | partition: train | avg_line_len: 29.05

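A usage sketch, assuming a ClickHouse connection created through ibis (connection parameters are illustrative; because the pattern is embedded in a SQL LIKE clause, % wildcards apply):

import ibis

con = ibis.clickhouse.connect(host='localhost', port=9000)
print(con.list_databases())              # every database name
print(con.list_databases(like='def%'))   # names starting with 'def'
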
repo: shazow/workerpool
path: samples/blockingworker.py
url:  https://github.com/shazow/workerpool/blob/2c5b29ec64ffbc94fc3623a4531eaf7c7c1a9ab5/samples/blockingworker.py#L16-L20

def put(self, job, result):
    "Perform a job by a member in the pool and return the result."
    self.job.put(job)
    r = result.get()
    return r

language: python | partition: train | avg_line_len: 32.4

repo: pallets/flask-sqlalchemy
path: examples/flaskr/flaskr/blog/views.py
url:  https://github.com/pallets/flask-sqlalchemy/blob/3d3261f4fc6d28f5bf407cf7d523e36a09a8c144/examples/flaskr/flaskr/blog/views.py#L46-L63

def create():
    """Create a new post for the current user."""
    if request.method == "POST":
        title = request.form["title"]
        body = request.form["body"]
        error = None

        if not title:
            error = "Title is required."

        if error is not None:
            flash(error)
        else:
            db.session.add(Post(title=title, body=body, author=g.user))
            db.session.commit()
            return redirect(url_for("blog.index"))

    return render_template("blog/create.html")

language: python | partition: train | avg_line_len: 28.388889

repo: cyrus-/cypy
path: cypy/cg.py
url:  https://github.com/cyrus-/cypy/blob/04bb59e91fa314e8cf987743189c77a9b6bc371d/cypy/cg.py#L298-L308

def pop_context(self):
    """Pops the last set of keyword arguments provided to the processor."""
    processor = getattr(self, 'processor', None)
    if processor is not None:
        pop_context = getattr(processor, 'pop_context', None)
        if pop_context is None:
            pop_context = getattr(processor, 'pop', None)
        if pop_context is not None:
            return pop_context()
    if self._pop_next:
        self._pop_next = False

language: python | partition: train | avg_line_len: 43.818182

repo: mozilla-releng/scriptworker
path: scriptworker/artifacts.py
url:  https://github.com/mozilla-releng/scriptworker/blob/8e97bbd83b9b578565ec57904c966dd6ae4ef0ae/scriptworker/artifacts.py#L111-L129

def guess_content_type_and_encoding(path):
    """Guess the content type of a path, using ``mimetypes``.

    Falls back to "application/binary" if no content type is found.

    Args:
        path (str): the path to guess the mimetype of

    Returns:
        str: the content type of the file

    """
    for ext, content_type in _EXTENSION_TO_MIME_TYPE.items():
        if path.endswith(ext):
            return content_type
    content_type, encoding = mimetypes.guess_type(path)
    content_type = content_type or "application/binary"
    return content_type, encoding

language: python | partition: train | avg_line_len: 29.315789

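A quick sketch (assumes the module-level _EXTENSION_TO_MIME_TYPE map and the mimetypes import shown above; it also assumes '.gz' is not in the override map, so the call falls through to mimetypes):

print(guess_content_type_and_encoding('artifact.tar.gz'))
# -> ('application/x-tar', 'gzip')

# Note the asymmetry in the code: the override-map path returns a bare
# string, while the fallthrough path returns a (content_type, encoding)
# tuple.
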
repo: spencerahill/aospy
path: aospy/data_loader.py
url:  https://github.com/spencerahill/aospy/blob/2f6e775b9b9956c54af117fdcdce2c87196afb6c/aospy/data_loader.py#L277-L324

def load_variable(self, var=None, start_date=None, end_date=None,
                  time_offset=None, grid_attrs=None, **DataAttrs):
    """Load a DataArray for requested variable and time range.

    Automatically renames all grid attributes to match aospy conventions.

    Parameters
    ----------
    var : Var
        aospy Var object
    start_date : datetime.datetime
        start date for interval
    end_date : datetime.datetime
        end date for interval
    time_offset : dict
        Option to add a time offset to the time coordinate to correct for
        incorrect metadata.
    grid_attrs : dict (optional)
        Overriding dictionary of grid attributes mapping aospy internal
        names to names of grid attributes used in a particular model.
    **DataAttrs
        Attributes needed to identify a unique set of files to load from

    Returns
    -------
    da : DataArray
         DataArray for the specified variable, date range, and interval in
    """
    file_set = self._generate_file_set(var=var, start_date=start_date,
                                       end_date=end_date, **DataAttrs)
    ds = _load_data_from_disk(
        file_set, self.preprocess_func, data_vars=self.data_vars,
        coords=self.coords, start_date=start_date, end_date=end_date,
        time_offset=time_offset, grid_attrs=grid_attrs, **DataAttrs
    )
    if var.def_time:
        ds = _prep_time_data(ds)
        start_date = times.maybe_convert_to_index_date_type(
            ds.indexes[TIME_STR], start_date)
        end_date = times.maybe_convert_to_index_date_type(
            ds.indexes[TIME_STR], end_date)
    ds = set_grid_attrs_as_coords(ds)
    da = _sel_var(ds, var, self.upcast_float32)
    if var.def_time:
        da = self._maybe_apply_time_shift(da, time_offset, **DataAttrs)
        return times.sel_time(da, start_date, end_date).load()
    else:
        return da.load()

language: python | partition: train | avg_line_len: 42.479167

repo: yaz/yaz
path: examples/02_food.py
url:  https://github.com/yaz/yaz/blob/48c842fe053bf9cd6446c4b33fb081c65339aa48/examples/02_food.py#L46-L48

def breakfast(self, message="Breakfast is ready", shout: bool = False):
    """Say something in the morning"""
    return self.helper.output(message, shout)

language: python | partition: valid | avg_line_len: 54

repo: manahl/arctic
path: arctic/store/_pandas_ndarray_store.py
url:  https://github.com/manahl/arctic/blob/57e110b6e182dbab00e7e214dc26f7d9ec47c120/arctic/store/_pandas_ndarray_store.py#L74-L90

def _index_range(self, version, symbol, date_range=None, **kwargs):
    """ Given a version, read the segment_index and return the chunks
    associated with the date_range. As the segment index is
    (id -> last datetime) we need to take care in choosing the correct
    chunks. """
    if date_range and 'segment_index' in version:
        # index is read-only but it's never written to
        index = np.frombuffer(decompress(version['segment_index']),
                              dtype=INDEX_DTYPE)
        dtcol = self._datetime64_index(index)
        if dtcol and len(index):
            dts = index[dtcol]
            start, end = _start_end(date_range, dts)
            if start > dts[-1]:
                return -1, -1
            idxstart = min(np.searchsorted(dts, start), len(dts) - 1)
            idxend = min(np.searchsorted(dts, end, side='right'),
                         len(dts) - 1)
            return int(index['index'][idxstart]), int(index['index'][idxend] + 1)
    return super(PandasStore, self)._index_range(version, symbol, **kwargs)

language: python | partition: train | avg_line_len: 61.941176

repo: miguelgrinberg/Flask-MarrowMailer
path: flask_marrowmailer.py
url:  https://github.com/miguelgrinberg/Flask-MarrowMailer/blob/daf1ac0745fb31db2f43f4f7dc24c6f50ae96764/flask_marrowmailer.py#L11-L25

def render_template(self, plain, rich=None, **context):
    '''Render the body of the message from a template. The plain
    body will be rendered from a template named ``plain`` or
    ``plain + '.txt'`` (in that order of preference). The rich
    body will be rendered from ``rich`` if given, or else from
    ``plain + '.html'``. If neither exists, then the message
    will have no rich body.'''
    self.plain = render_template([plain, plain + '.txt'], **context)
    if rich is not None:
        self.rich = render_template(rich, **context)
    else:
        try:
            self.rich = render_template(plain + '.html', **context)
        except TemplateNotFound:
            pass

language: python | partition: train | avg_line_len: 48.933333

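A brief usage sketch (hypothetical: `msg` is a Flask-MarrowMailer message object, `user` is an application object passed as template context, and the templates live in the Flask app's template folder):

# Renders self.plain from 'email/welcome' or 'email/welcome.txt', and
# self.rich from 'email/welcome.html' when that template exists.
msg.render_template('email/welcome', user=user)
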
repo: praekelt/vumi-http-api
path: vumi_http_api/concurrency_limiter.py
url:  https://github.com/praekelt/vumi-http-api/blob/0d7cf1cb71794c93272c19095cf8c37f4c250a59/vumi_http_api/concurrency_limiter.py#L143-L152

def stop(self, key):
    """
    Stop a concurrent operation.

    This gets the concurrency limiter for the given key (creating
    it if necessary) and stops a concurrent operation on it. If
    the concurrency limiter is empty, it is deleted.
    """
    self._get_limiter(key).stop()
    self._cleanup_limiter(key)

language: python | partition: train | avg_line_len: 34.1

repo: fhs/pyhdf
path: pyhdf/SD.py
url:  https://github.com/fhs/pyhdf/blob/dbdc1810a74a38df50dcad81fe903e239d2b388d/pyhdf/SD.py#L2939-L2956

def setname(self, dim_name):
    """Set the dimension name.

    Args::

      dim_name    dimension name; setting 2 dimensions to the same
                  name makes the dimensions "shared"; in order to be
                  shared, the dimensions must be defined similarly.

    Returns::

      None

    C library equivalent : SDsetdimname
    """
    status = _C.SDsetdimname(self._id, dim_name)
    _checkErr('setname', status, 'cannot execute')

language: python | partition: train | avg_line_len: 29.388889

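A hedged end-to-end sketch with pyhdf (file and dataset names are illustrative):

from pyhdf.SD import SD, SDC

sd = SD('sample.hdf', SDC.WRITE | SDC.CREATE)
sds = sd.create('temperature', SDC.FLOAT32, (5, 10))
sds.dim(0).setname('time')     # shared if another SDS names a dim 'time'
sds.dim(1).setname('station')
sds.endaccess()
sd.end()
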
repo: eerimoq/bincopy
path: bincopy.py
url:  https://github.com/eerimoq/bincopy/blob/5e02cd001c3e9b54729425db6bffad5f03e1beac/bincopy.py#L146-L156

def pack_ihex(type_, address, size, data):
    """Create an Intel HEX record of given data.

    """
    line = '{:02X}{:04X}{:02X}'.format(size, address, type_)

    if data:
        line += binascii.hexlify(data).decode('ascii').upper()

    return ':{}{:02X}'.format(line, crc_ihex(line))

language: python | partition: train | avg_line_len: 25.545455

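Since the record format is standard Intel HEX, the expected output can be checked by hand (the checksum is the two's complement of the byte sum; this relies on the module's crc_ihex helper):

# Type-0 data record: two bytes at address 0x0100.
# Byte sum 02+01+00+00+01+02 = 0x06, so the checksum is 0xFA.
print(pack_ihex(0, 0x0100, 2, b'\x01\x02'))
# -> :020100000102FA
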
repo: fastai/fastai
path: old/fastai/nlp.py
url:  https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/old/fastai/nlp.py#L263-L279

def get_model(self, opt_fn, emb_sz, n_hid, n_layers, **kwargs):
    """ Method returns a RNN_Learner object, that wraps an instance of
    the RNN_Encoder module.

    Args:
        opt_fn (Optimizer): the torch optimizer function to use
        emb_sz (int): embedding size
        n_hid (int): number of hidden inputs
        n_layers (int): number of hidden layers
        kwargs: other arguments

    Returns:
        An instance of the RNN_Learner class.
    """
    m = get_language_model(self.nt, emb_sz, n_hid, n_layers, self.pad_idx,
                           **kwargs)
    model = SingleModel(to_gpu(m))
    return RNN_Learner(self, model, opt_fn=opt_fn)
[ "def", "get_model", "(", "self", ",", "opt_fn", ",", "emb_sz", ",", "n_hid", ",", "n_layers", ",", "*", "*", "kwargs", ")", ":", "m", "=", "get_language_model", "(", "self", ".", "nt", ",", "emb_sz", ",", "n_hid", ",", "n_layers", ",", "self", ".", "pad_idx", ",", "*", "*", "kwargs", ")", "model", "=", "SingleModel", "(", "to_gpu", "(", "m", ")", ")", "return", "RNN_Learner", "(", "self", ",", "model", ",", "opt_fn", "=", "opt_fn", ")" ]
Method returns an RNN_Learner object that wraps an instance of the RNN_Encoder module. Args: opt_fn (Optimizer): the torch optimizer function to use emb_sz (int): embedding size n_hid (int): number of hidden inputs n_layers (int): number of hidden layers kwargs: other arguments Returns: An instance of the RNN_Learner class.
[ "Method", "returns", "a", "RNN_Learner", "object", "that", "wraps", "an", "instance", "of", "the", "RNN_Encoder", "module", "." ]
python
train
39.470588
dossier/dossier.fc
python/dossier/fc/feature_tokens.py
https://github.com/dossier/dossier.fc/blob/3e969d0cb2592fc06afc1c849d2b22283450b5e2/python/dossier/fc/feature_tokens.py#L45-L55
def tokens(self, si, k): '''`si` is a stream item and `k` is a key in this feature. The purpose of this method is to dereference the token pointers with respect to the given stream item. That is, it translates each sequence of token pointers to a sequence of `Token`. ''' for tokens in self[k]: yield [si.body.sentences[tagid][sid].tokens[tid] for tagid, sid, tid in tokens]
[ "def", "tokens", "(", "self", ",", "si", ",", "k", ")", ":", "for", "tokens", "in", "self", "[", "k", "]", ":", "yield", "[", "si", ".", "body", ".", "sentences", "[", "tagid", "]", "[", "sid", "]", ".", "tokens", "[", "tid", "]", "for", "tagid", ",", "sid", ",", "tid", "in", "tokens", "]" ]
`si` is a stream item and `k` is a key in this feature. The purpose of this method is to dereference the token pointers with respect to the given stream item. That is, it translates each sequence of token pointers to a sequence of `Token`.
[ "si", "is", "a", "stream", "item", "and", "k", "is", "a", "key", "in", "this", "feature", ".", "The", "purpose", "of", "this", "method", "is", "to", "dereference", "the", "token", "pointers", "with", "respect", "to", "the", "given", "stream", "item", ".", "That", "is", "it", "translates", "each", "sequence", "of", "token", "pointers", "to", "a", "sequence", "of", "Token", "." ]
python
train
40.454545
quintusdias/glymur
glymur/codestream.py
https://github.com/quintusdias/glymur/blob/8b8fb091130fff00f1028dc82219e69e3f9baf6d/glymur/codestream.py#L451-L489
def _parse_plt_segment(self, fptr): """Parse the PLT segment. The packet headers are not parsed, i.e. they remain uninterpreted raw data buffers. Parameters ---------- fptr : file Open file object. Returns ------- PLTsegment The current PLT segment. """ offset = fptr.tell() - 2 read_buffer = fptr.read(3) length, zplt = struct.unpack('>HB', read_buffer) numbytes = length - 3 read_buffer = fptr.read(numbytes) iplt = np.frombuffer(read_buffer, dtype=np.uint8) packet_len = [] plen = 0 for byte in iplt: plen |= (byte & 0x7f) if byte & 0x80: # Continue by or-ing in the next byte. plen <<= 7 else: packet_len.append(plen) plen = 0 iplt = packet_len return PLTsegment(zplt, iplt, length, offset)
[ "def", "_parse_plt_segment", "(", "self", ",", "fptr", ")", ":", "offset", "=", "fptr", ".", "tell", "(", ")", "-", "2", "read_buffer", "=", "fptr", ".", "read", "(", "3", ")", "length", ",", "zplt", "=", "struct", ".", "unpack", "(", "'>HB'", ",", "read_buffer", ")", "numbytes", "=", "length", "-", "3", "read_buffer", "=", "fptr", ".", "read", "(", "numbytes", ")", "iplt", "=", "np", ".", "frombuffer", "(", "read_buffer", ",", "dtype", "=", "np", ".", "uint8", ")", "packet_len", "=", "[", "]", "plen", "=", "0", "for", "byte", "in", "iplt", ":", "plen", "|=", "(", "byte", "&", "0x7f", ")", "if", "byte", "&", "0x80", ":", "# Continue by or-ing in the next byte.", "plen", "<<=", "7", "else", ":", "packet_len", ".", "append", "(", "plen", ")", "plen", "=", "0", "iplt", "=", "packet_len", "return", "PLTsegment", "(", "zplt", ",", "iplt", ",", "length", ",", "offset", ")" ]
Parse the PLT segment. The packet headers are not parsed, i.e. they remain uninterpreted raw data buffers. Parameters ---------- fptr : file Open file object. Returns ------- PLTsegment The current PLT segment.
[ "Parse", "the", "PLT", "segment", "." ]
python
train
24.512821
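The packet-length loop above decodes a 7-bit continuation encoding: each byte contributes its low seven bits, and a set high bit means the value continues in the next byte. A self-contained sketch of just that decoding step (the function name is illustrative, not part of glymur):

def decode_packet_lengths(raw):
    lengths, value = [], 0
    for byte in raw:
        value |= byte & 0x7f
        if byte & 0x80:
            value <<= 7   # high bit set: more bytes follow
        else:
            lengths.append(value)
            value = 0
    return lengths

# 0x81 0x05 encodes (1 << 7) | 5 = 133; 0x07 encodes 7.
assert decode_packet_lengths(bytes([0x81, 0x05, 0x07])) == [133, 7]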
flyingrub/scdl
scdl/scdl.py
https://github.com/flyingrub/scdl/blob/e833a22dd6676311b72fadd8a1c80f4a06acfad9/scdl/scdl.py#L591-L613
def in_download_archive(track): """ Returns True if a track_id exists in the download archive """ global arguments if not arguments['--download-archive']: return archive_filename = arguments.get('--download-archive') try: with open(archive_filename, 'a+', encoding='utf-8') as file: logger.debug('Contents of {0}:'.format(archive_filename)) file.seek(0) track_id = '{0}'.format(track['id']) for line in file: logger.debug('"'+line.strip()+'"') if line.strip() == track_id: return True except IOError as ioe: logger.error('Error trying to read download archive...') logger.debug(ioe) return False
[ "def", "in_download_archive", "(", "track", ")", ":", "global", "arguments", "if", "not", "arguments", "[", "'--download-archive'", "]", ":", "return", "archive_filename", "=", "arguments", ".", "get", "(", "'--download-archive'", ")", "try", ":", "with", "open", "(", "archive_filename", ",", "'a+'", ",", "encoding", "=", "'utf-8'", ")", "as", "file", ":", "logger", ".", "debug", "(", "'Contents of {0}:'", ".", "format", "(", "archive_filename", ")", ")", "file", ".", "seek", "(", "0", ")", "track_id", "=", "'{0}'", ".", "format", "(", "track", "[", "'id'", "]", ")", "for", "line", "in", "file", ":", "logger", ".", "debug", "(", "'\"'", "+", "line", ".", "strip", "(", ")", "+", "'\"'", ")", "if", "line", ".", "strip", "(", ")", "==", "track_id", ":", "return", "True", "except", "IOError", "as", "ioe", ":", "logger", ".", "error", "(", "'Error trying to read download archive...'", ")", "logger", ".", "debug", "(", "ioe", ")", "return", "False" ]
Returns True if a track_id exists in the download archive
[ "Returns", "True", "if", "a", "track_id", "exists", "in", "the", "download", "archive" ]
python
train
32.304348
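The function above reads its filename from the module-global arguments dict; a self-contained sketch of the same membership check with the filename passed in explicitly (the name in_archive is hypothetical):

def in_archive(archive_filename, track_id):
    try:
        # 'a+' creates the file if missing but positions at end-of-file,
        # hence the seek(0) before scanning.
        with open(archive_filename, 'a+', encoding='utf-8') as f:
            f.seek(0)
            return any(line.strip() == str(track_id) for line in f)
    except IOError:
        return False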
google/grr
grr/server/grr_response_server/databases/mysql_hunts.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/databases/mysql_hunts.py#L401-L409
def CountHuntLogEntries(self, hunt_id, cursor=None): """Returns number of hunt log entries of a given hunt.""" hunt_id_int = db_utils.HuntIDToInt(hunt_id) query = ("SELECT COUNT(*) FROM flow_log_entries " "FORCE INDEX(flow_log_entries_by_hunt) " "WHERE hunt_id = %s AND flow_id = hunt_id") cursor.execute(query, [hunt_id_int]) return cursor.fetchone()[0]
[ "def", "CountHuntLogEntries", "(", "self", ",", "hunt_id", ",", "cursor", "=", "None", ")", ":", "hunt_id_int", "=", "db_utils", ".", "HuntIDToInt", "(", "hunt_id", ")", "query", "=", "(", "\"SELECT COUNT(*) FROM flow_log_entries \"", "\"FORCE INDEX(flow_log_entries_by_hunt) \"", "\"WHERE hunt_id = %s AND flow_id = hunt_id\"", ")", "cursor", ".", "execute", "(", "query", ",", "[", "hunt_id_int", "]", ")", "return", "cursor", ".", "fetchone", "(", ")", "[", "0", "]" ]
Returns number of hunt log entries of a given hunt.
[ "Returns", "number", "of", "hunt", "log", "entries", "of", "a", "given", "hunt", "." ]
python
train
43.666667
materialsproject/pymatgen
pymatgen/io/abinit/works.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/io/abinit/works.py#L903-L911
def plot_ebands(self, **kwargs): """ Plot the band structure. kwargs are passed to the plot method of :class:`ElectronBands`. Returns: `matplotlib` figure """ with self.nscf_task.open_gsr() as gsr: return gsr.ebands.plot(**kwargs)
[ "def", "plot_ebands", "(", "self", ",", "*", "*", "kwargs", ")", ":", "with", "self", ".", "nscf_task", ".", "open_gsr", "(", ")", "as", "gsr", ":", "return", "gsr", ".", "ebands", ".", "plot", "(", "*", "*", "kwargs", ")" ]
Plot the band structure. kwargs are passed to the plot method of :class:`ElectronBands`. Returns: `matplotlib` figure
[ "Plot", "the", "band", "structure", ".", "kwargs", "are", "passed", "to", "the", "plot", "method", "of", ":", "class", ":", "ElectronBands", "." ]
python
train
31.888889
wglass/lighthouse
lighthouse/haproxy/control.py
https://github.com/wglass/lighthouse/blob/f4ce6550895acc31e433ede0c05d366718a3ffe5/lighthouse/haproxy/control.py#L152-L160
def enable_node(self, service_name, node_name): """ Enables a given node name for the given service name via the "enable server" HAProxy command. """ logger.info("Enabling server %s/%s", service_name, node_name) return self.send_command( "enable server %s/%s" % (service_name, node_name) )
[ "def", "enable_node", "(", "self", ",", "service_name", ",", "node_name", ")", ":", "logger", ".", "info", "(", "\"Enabling server %s/%s\"", ",", "service_name", ",", "node_name", ")", "return", "self", ".", "send_command", "(", "\"enable server %s/%s\"", "%", "(", "service_name", ",", "node_name", ")", ")" ]
Enables a given node name for the given service name via the "enable server" HAProxy command.
[ "Enables", "a", "given", "node", "name", "for", "the", "given", "service", "name", "via", "the", "enable", "server", "HAProxy", "command", "." ]
python
train
38.777778
jessevdk/cldoc
cldoc/clang/cindex.py
https://github.com/jessevdk/cldoc/blob/fc7f59405c4a891b8367c80a700f5aa3c5c9230c/cldoc/clang/cindex.py#L1569-L1574
def linkage(self): """Return the linkage of this cursor.""" if not hasattr(self, '_linkage'): self._linkage = conf.lib.clang_getCursorLinkage(self) return LinkageKind.from_id(self._linkage)
[ "def", "linkage", "(", "self", ")", ":", "if", "not", "hasattr", "(", "self", ",", "'_linkage'", ")", ":", "self", ".", "_linkage", "=", "conf", ".", "lib", ".", "clang_getCursorLinkage", "(", "self", ")", "return", "LinkageKind", ".", "from_id", "(", "self", ".", "_linkage", ")" ]
Return the linkage of this cursor.
[ "Return", "the", "linkage", "of", "this", "cursor", "." ]
python
train
36.833333
ChrisBeaumont/smother
smother/python.py
https://github.com/ChrisBeaumont/smother/blob/65d1ea6ae0060d213b0dcbb983c5aa8e7fee07bb/smother/python.py#L210-L221
def context(self, line): """ Return the context for a given 1-offset line number. """ # XXX due to a limitation in Visitor, # non-python code after the last python code # in a file is not added to self.lines, so we # have to guard against IndexErrors. idx = line - 1 if idx >= len(self.lines): return self.prefix return self.lines[idx]
[ "def", "context", "(", "self", ",", "line", ")", ":", "# XXX due to a limitation in Visitor,", "# non-python code after the last python code", "# in a file is not added to self.lines, so we", "# have to guard against IndexErrors.", "idx", "=", "line", "-", "1", "if", "idx", ">=", "len", "(", "self", ".", "lines", ")", ":", "return", "self", ".", "prefix", "return", "self", ".", "lines", "[", "idx", "]" ]
Return the context for a given 1-offset line number.
[ "Return", "the", "context", "for", "a", "given", "1", "-", "offset", "line", "number", "." ]
python
train
34.666667
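A standalone sketch of the 1-offset lookup and out-of-range guard described above, with plain arguments standing in for the object's self.lines and self.prefix attributes:

def context(lines, prefix, line):
    idx = line - 1                 # 1-offset line number to 0-offset index
    if idx >= len(lines):
        return prefix              # past the last known python line
    return lines[idx]

assert context(['a', 'b'], 'mod', 1) == 'a'
assert context(['a', 'b'], 'mod', 5) == 'mod'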
djgagne/hagelslag
hagelslag/processing/STObject.py
https://github.com/djgagne/hagelslag/blob/6fb6c3df90bf4867e13a97d3460b14471d107df1/hagelslag/processing/STObject.py#L187-L195
def max_size(self): """ Gets the largest size of the object over all timesteps. Returns: Maximum size of the object in pixels """ sizes = np.array([m.sum() for m in self.masks]) return sizes.max()
[ "def", "max_size", "(", "self", ")", ":", "sizes", "=", "np", ".", "array", "(", "[", "m", ".", "sum", "(", ")", "for", "m", "in", "self", ".", "masks", "]", ")", "return", "sizes", ".", "max", "(", ")" ]
Gets the largest size of the object over all timesteps. Returns: Maximum size of the object in pixels
[ "Gets", "the", "largest", "size", "of", "the", "object", "over", "all", "timesteps", ".", "Returns", ":", "Maximum", "size", "of", "the", "object", "in", "pixels" ]
python
train
28.555556
yolothreat/utilitybelt
utilitybelt/utilitybelt.py
https://github.com/yolothreat/utilitybelt/blob/55ac6c31f87963d5e97be0402a4343c84846d118/utilitybelt/utilitybelt.py#L202-L226
def ip_to_geojson(ipaddress, name="Point"): """Generate GeoJSON for given IP address""" geo = ip_to_geo(ipaddress) point = { "type": "FeatureCollection", "features": [ { "type": "Feature", "properties": { "name": name }, "geometry": { "type": "Point", "coordinates": [ geo["longitude"], geo["latitude"] ] } } ] } return point
[ "def", "ip_to_geojson", "(", "ipaddress", ",", "name", "=", "\"Point\"", ")", ":", "geo", "=", "ip_to_geo", "(", "ipaddress", ")", "point", "=", "{", "\"type\"", ":", "\"FeatureCollection\"", ",", "\"features\"", ":", "[", "{", "\"type\"", ":", "\"Feature\"", ",", "\"properties\"", ":", "{", "\"name\"", ":", "name", "}", ",", "\"geometry\"", ":", "{", "\"type\"", ":", "\"Point\"", ",", "\"coordinates\"", ":", "[", "geo", "[", "\"longitude\"", "]", ",", "geo", "[", "\"latitude\"", "]", "]", "}", "}", "]", "}", "return", "point" ]
Generate GeoJSON for given IP address
[ "Generate", "GeoJSON", "for", "given", "IP", "address" ]
python
train
23.2
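The GeoJSON payload itself is independent of the IP lookup; a sketch that skips ip_to_geo and takes coordinates directly (note GeoJSON's longitude-first ordering, which the record above also uses; the function name and sample coordinates are invented):

import json

def point_feature_collection(longitude, latitude, name="Point"):
    return {
        "type": "FeatureCollection",
        "features": [{
            "type": "Feature",
            "properties": {"name": name},
            "geometry": {"type": "Point", "coordinates": [longitude, latitude]},
        }],
    }

print(json.dumps(point_feature_collection(-122.4194, 37.7749, "SF")))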
h2oai/h2o-3
h2o-py/h2o/frame.py
https://github.com/h2oai/h2o-3/blob/dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8/h2o-py/h2o/frame.py#L3205-L3216
def match(self, table, nomatch=0): """ Make a vector of the positions of (first) matches of its first argument in its second. Only applicable to single-column categorical/string frames. :param List table: the list of items to match against :param int nomatch: value that should be returned when there is no match. :returns: a new H2OFrame containing for each cell from the source frame the index where the pattern ``table`` first occurs within that cell. """ return H2OFrame._expr(expr=ExprNode("match", self, table, nomatch, None))
[ "def", "match", "(", "self", ",", "table", ",", "nomatch", "=", "0", ")", ":", "return", "H2OFrame", ".", "_expr", "(", "expr", "=", "ExprNode", "(", "\"match\"", ",", "self", ",", "table", ",", "nomatch", ",", "None", ")", ")" ]
Make a vector of the positions of (first) matches of its first argument in its second. Only applicable to single-column categorical/string frames. :param List table: the list of items to match against :param int nomatch: value that should be returned when there is no match. :returns: a new H2OFrame containing for each cell from the source frame the index where the pattern ``table`` first occurs within that cell.
[ "Make", "a", "vector", "of", "the", "positions", "of", "(", "first", ")", "matches", "of", "its", "first", "argument", "in", "its", "second", "." ]
python
test
49.916667
googleapis/google-cloud-python
spanner/google/cloud/spanner_v1/instance.py
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/spanner/google/cloud/spanner_v1/instance.py#L164-L180
def copy(self): """Make a copy of this instance. Copies the local data stored as simple types and copies the client attached to this instance. :rtype: :class:`~google.cloud.spanner_v1.instance.Instance` :returns: A copy of the current instance. """ new_client = self._client.copy() return self.__class__( self.instance_id, new_client, self.configuration_name, node_count=self.node_count, display_name=self.display_name, )
[ "def", "copy", "(", "self", ")", ":", "new_client", "=", "self", ".", "_client", ".", "copy", "(", ")", "return", "self", ".", "__class__", "(", "self", ".", "instance_id", ",", "new_client", ",", "self", ".", "configuration_name", ",", "node_count", "=", "self", ".", "node_count", ",", "display_name", "=", "self", ".", "display_name", ",", ")" ]
Make a copy of this instance. Copies the local data stored as simple types and copies the client attached to this instance. :rtype: :class:`~google.cloud.spanner_v1.instance.Instance` :returns: A copy of the current instance.
[ "Make", "a", "copy", "of", "this", "instance", "." ]
python
train
31.705882
llazzaro/analyzerdam
analyzerdam/yahooDAM.py
https://github.com/llazzaro/analyzerdam/blob/c5bc7483dae23bd2e14bbf36147b7a43a0067bc0/analyzerdam/yahooDAM.py#L20-L26
def readQuotes(self, start, end): ''' read quotes from Yahoo Financial''' if self.symbol is None: LOG.debug('Symbol is None') return [] return self.__yf.getQuotes(self.symbol, start, end)
[ "def", "readQuotes", "(", "self", ",", "start", ",", "end", ")", ":", "if", "self", ".", "symbol", "is", "None", ":", "LOG", ".", "debug", "(", "'Symbol is None'", ")", "return", "[", "]", "return", "self", ".", "__yf", ".", "getQuotes", "(", "self", ".", "symbol", ",", "start", ",", "end", ")" ]
read quotes from Yahoo Financial
[ "read", "quotes", "from", "Yahoo", "Financial" ]
python
train
33.714286
briancappello/flask-unchained
flask_unchained/bundles/security/services/security_service.py
https://github.com/briancappello/flask-unchained/blob/4d536cb90e2cc4829c1c05f2c74d3e22901a1399/flask_unchained/bundles/security/services/security_service.py#L229-L247
def send_reset_password_instructions(self, user): """ Sends the reset password instructions email for the specified user. Sends signal `reset_password_instructions_sent`. :param user: The user to send the instructions to. """ token = self.security_utils_service.generate_reset_password_token(user) reset_link = url_for('security_controller.reset_password', token=token, _external=True) self.send_mail( _('flask_unchained.bundles.security:email_subject.reset_password_instructions'), to=user.email, template='security/email/reset_password_instructions.html', user=user, reset_link=reset_link) reset_password_instructions_sent.send(app._get_current_object(), user=user, token=token)
[ "def", "send_reset_password_instructions", "(", "self", ",", "user", ")", ":", "token", "=", "self", ".", "security_utils_service", ".", "generate_reset_password_token", "(", "user", ")", "reset_link", "=", "url_for", "(", "'security_controller.reset_password'", ",", "token", "=", "token", ",", "_external", "=", "True", ")", "self", ".", "send_mail", "(", "_", "(", "'flask_unchained.bundles.security:email_subject.reset_password_instructions'", ")", ",", "to", "=", "user", ".", "email", ",", "template", "=", "'security/email/reset_password_instructions.html'", ",", "user", "=", "user", ",", "reset_link", "=", "reset_link", ")", "reset_password_instructions_sent", ".", "send", "(", "app", ".", "_get_current_object", "(", ")", ",", "user", "=", "user", ",", "token", "=", "token", ")" ]
Sends the reset password instructions email for the specified user. Sends signal `reset_password_instructions_sent`. :param user: The user to send the instructions to.
[ "Sends", "the", "reset", "password", "instructions", "email", "for", "the", "specified", "user", "." ]
python
train
45.842105
Jajcus/pyxmpp2
pyxmpp2/roster.py
https://github.com/Jajcus/pyxmpp2/blob/14a40a3950910a9cd008b55f0d8905aa0186ce18/pyxmpp2/roster.py#L237-L260
def as_xml(self, parent = None): """Make an XML element from self. :Parameters: - `parent`: Parent element :Types: - `parent`: :etree:`ElementTree.Element` """ if parent is not None: element = ElementTree.SubElement(parent, ITEM_TAG) else: element = ElementTree.Element(ITEM_TAG) element.set("jid", unicode(self.jid)) if self.name is not None: element.set("name", self.name) if self.subscription is not None: element.set("subscription", self.subscription) if self.ask: element.set("ask", self.ask) if self.approved: element.set("approved", "true") for group in self.groups: ElementTree.SubElement(element, GROUP_TAG).text = group return element
[ "def", "as_xml", "(", "self", ",", "parent", "=", "None", ")", ":", "if", "parent", "is", "not", "None", ":", "element", "=", "ElementTree", ".", "SubElement", "(", "parent", ",", "ITEM_TAG", ")", "else", ":", "element", "=", "ElementTree", ".", "Element", "(", "ITEM_TAG", ")", "element", ".", "set", "(", "\"jid\"", ",", "unicode", "(", "self", ".", "jid", ")", ")", "if", "self", ".", "name", "is", "not", "None", ":", "element", ".", "set", "(", "\"name\"", ",", "self", ".", "name", ")", "if", "self", ".", "subscription", "is", "not", "None", ":", "element", ".", "set", "(", "\"subscription\"", ",", "self", ".", "subscription", ")", "if", "self", ".", "ask", ":", "element", ".", "set", "(", "\"ask\"", ",", "self", ".", "ask", ")", "if", "self", ".", "approved", ":", "element", ".", "set", "(", "\"approved\"", ",", "\"true\"", ")", "for", "group", "in", "self", ".", "groups", ":", "ElementTree", ".", "SubElement", "(", "element", ",", "GROUP_TAG", ")", ".", "text", "=", "group", "return", "element" ]
Make an XML element from self. :Parameters: - `parent`: Parent element :Types: - `parent`: :etree:`ElementTree.Element`
[ "Make", "an", "XML", "element", "from", "self", "." ]
python
valid
34.75
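A trimmed, runnable sketch of the same ElementTree pattern; the plain 'item'/'group' tag names are simplified stand-ins for pyxmpp2's namespaced ITEM_TAG/GROUP_TAG constants, and the reduced attribute set is an assumption for brevity:

import xml.etree.ElementTree as ET

ITEM_TAG, GROUP_TAG = 'item', 'group'   # simplified stand-ins

def item_as_xml(jid, name=None, groups=()):
    element = ET.Element(ITEM_TAG)
    element.set('jid', jid)
    if name is not None:
        element.set('name', name)
    for group in groups:
        ET.SubElement(element, GROUP_TAG).text = group
    return element

print(ET.tostring(item_as_xml('alice@example.com', 'Alice', ['Friends']),
                  encoding='unicode'))
# <item jid="alice@example.com" name="Alice"><group>Friends</group></item>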
quantopian/metautils
metautils/compat.py
https://github.com/quantopian/metautils/blob/10e11c5bd8bd7ded52b97261f61c3186607bd617/metautils/compat.py#L72-L96
def compose(*fs): """ Compose functions together in order: compose(f, g, h) = lambda n: f(g(h(n))) """ # Pull the iterator out into a tuple so we can call `composed` # more than once. rs = tuple(reversed(fs)) def composed(n): return reduce(lambda a, b: b(a), rs, n) # Attempt to make the function look pretty with # a fresh docstring and name. try: composed.__doc__ = 'lambda n: ' + _composed_doc(fs) except AttributeError: # One of our callables does not have a `__name__`, whatever. pass else: # We already know that for all `f` in `fs`, there exists `f.__name__` composed.__name__ = '_of_'.join(f.__name__ for f in fs) return composed
[ "def", "compose", "(", "*", "fs", ")", ":", "# Pull the iterator out into a tuple so we can call `composed`", "# more than once.", "rs", "=", "tuple", "(", "reversed", "(", "fs", ")", ")", "def", "composed", "(", "n", ")", ":", "return", "reduce", "(", "lambda", "a", ",", "b", ":", "b", "(", "a", ")", ",", "rs", ",", "n", ")", "# Attempt to make the function look pretty with", "# a fresh docstring and name.", "try", ":", "composed", ".", "__doc__", "=", "'lambda n: '", "+", "_composed_doc", "(", "fs", ")", "except", "AttributeError", ":", "# One of our callables does not have a `__name__`, whatever.", "pass", "else", ":", "# We already know that for all `f` in `fs`, there exists `f.__name__`", "composed", ".", "__name__", "=", "'_of_'", ".", "join", "(", "f", ".", "__name__", "for", "f", "in", "fs", ")", "return", "composed" ]
Compose functions together in order: compose(f, g, h) = lambda n: f(g(h(n)))
[ "Compose", "functions", "together", "in", "order", ":" ]
python
train
28.84
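A self-contained sketch of the core reduction above, stripped of the docstring/name prettifying, plus a check of the application order:

from functools import reduce

def compose_sketch(*fs):
    rs = tuple(reversed(fs))
    def composed(n):
        return reduce(lambda a, b: b(a), rs, n)
    return composed

def inc(n): return n + 1
def dbl(n): return n * 2

# compose(f, g)(n) == f(g(n)): the rightmost function runs first.
assert compose_sketch(dbl, inc)(3) == 8   # dbl(inc(3))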
Parsely/birding
src/birding/search.py
https://github.com/Parsely/birding/blob/c7f6eee56424234e361b1a455595de202e744dac/src/birding/search.py#L8-L19
def search_manager_from_config(config, **default_init): """Get a `SearchManager` instance dynamically based on config. `config` is a dictionary containing ``class`` and ``init`` keys as defined in :mod:`birding.config`. """ manager_cls = import_name(config['class'], default_ns='birding.search') init = {} init.update(default_init) init.update(config['init']) manager = manager_cls(**init) return manager
[ "def", "search_manager_from_config", "(", "config", ",", "*", "*", "default_init", ")", ":", "manager_cls", "=", "import_name", "(", "config", "[", "'class'", "]", ",", "default_ns", "=", "'birding.search'", ")", "init", "=", "{", "}", "init", ".", "update", "(", "default_init", ")", "init", ".", "update", "(", "config", "[", "'init'", "]", ")", "manager", "=", "manager_cls", "(", "*", "*", "init", ")", "return", "manager" ]
Get a `SearchManager` instance dynamically based on config. `config` is a dictionary containing ``class`` and ``init`` keys as defined in :mod:`birding.config`.
[ "Get", "a", "SearchManager", "instance", "dynamically", "based", "on", "config", "." ]
python
train
36.166667
rkhleics/wagtailmenus
wagtailmenus/models/menus.py
https://github.com/rkhleics/wagtailmenus/blob/a41f240bed0d362e0d4dd4ef04a230f2b1827a93/wagtailmenus/models/menus.py#L451-L463
def _replace_with_specific_page(page, menu_item): """ If ``page`` is a vanilla ``Page`` object, replace it with a 'specific' version of itself. Also update ``menu_item``, depending on whether it's a ``MenuItem`` object or a ``Page`` object. """ if type(page) is Page: page = page.specific if isinstance(menu_item, MenuItem): menu_item.link_page = page else: menu_item = page return page, menu_item
[ "def", "_replace_with_specific_page", "(", "page", ",", "menu_item", ")", ":", "if", "type", "(", "page", ")", "is", "Page", ":", "page", "=", "page", ".", "specific", "if", "isinstance", "(", "menu_item", ",", "MenuItem", ")", ":", "menu_item", ".", "link_page", "=", "page", "else", ":", "menu_item", "=", "page", "return", "page", ",", "menu_item" ]
If ``page`` is a vanilla ``Page`` object, replace it with a 'specific' version of itself. Also update ``menu_item``, depending on whether it's a ``MenuItem`` object or a ``Page`` object.
[ "If", "page", "is", "a", "vanilla", "Page", "object", "replace", "it", "with", "a", "specific", "version", "of", "itself", ".", "Also", "update", "menu_item", "depending", "on", "whether", "it", "s", "a", "MenuItem", "object", "or", "a", "Page", "object", "." ]
python
train
39.076923
spyder-ide/spyder
spyder/plugins/console/widgets/shell.py
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/plugins/console/widgets/shell.py#L768-L777
def _key_question(self, text): """Action for '?'""" if self.get_current_line_to_cursor(): last_obj = self.get_last_obj() if last_obj and not last_obj.isdigit(): self.show_object_info(last_obj) self.insert_text(text) # In case calltip and completion are shown at the same time: if self.is_completion_widget_visible(): self.completion_text += '?'
[ "def", "_key_question", "(", "self", ",", "text", ")", ":", "if", "self", ".", "get_current_line_to_cursor", "(", ")", ":", "last_obj", "=", "self", ".", "get_last_obj", "(", ")", "if", "last_obj", "and", "not", "last_obj", ".", "isdigit", "(", ")", ":", "self", ".", "show_object_info", "(", "last_obj", ")", "self", ".", "insert_text", "(", "text", ")", "# In case calltip and completion are shown at the same time:\r", "if", "self", ".", "is_completion_widget_visible", "(", ")", ":", "self", ".", "completion_text", "+=", "'?'" ]
Action for '?'
[ "Action", "for", "?" ]
python
train
43.6
RJT1990/pyflux
pyflux/inference/bbvi.py
https://github.com/RJT1990/pyflux/blob/297f2afc2095acd97c12e827dd500e8ea5da0c0f/pyflux/inference/bbvi.py#L138-L147
def get_means_and_scales_from_q(self): """ Gets the means and scales for normal approximating parameters """ means = np.zeros(len(self.q)) scale = np.zeros(len(self.q)) for i in range(len(self.q)): means[i] = self.q[i].mu0 scale[i] = self.q[i].sigma0 return means, scale
[ "def", "get_means_and_scales_from_q", "(", "self", ")", ":", "means", "=", "np", ".", "zeros", "(", "len", "(", "self", ".", "q", ")", ")", "scale", "=", "np", ".", "zeros", "(", "len", "(", "self", ".", "q", ")", ")", "for", "i", "in", "range", "(", "len", "(", "self", ".", "q", ")", ")", ":", "means", "[", "i", "]", "=", "self", ".", "q", "[", "i", "]", ".", "mu0", "scale", "[", "i", "]", "=", "self", ".", "q", "[", "i", "]", ".", "sigma0", "return", "means", ",", "scale" ]
Gets the means and scales for normal approximating parameters
[ "Gets", "the", "mean", "and", "scales", "for", "normal", "approximating", "parameters" ]
python
train
34.1
project-rig/rig
rig/place_and_route/wrapper.py
https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/place_and_route/wrapper.py#L171-L296
def wrapper(vertices_resources, vertices_applications, nets, net_keys, machine, constraints=[], reserve_monitor=True, align_sdram=True, place=default_place, place_kwargs={}, allocate=default_allocate, allocate_kwargs={}, route=default_route, route_kwargs={}, core_resource=Cores, sdram_resource=SDRAM): """Wrapper for core place-and-route tasks for the common case. At a high level this function essentially takes a set of vertices and nets and produces placements, memory allocations, routing tables and application loading information. .. warning:: This function is deprecated. New users should use :py:func:`.place_and_route_wrapper` along with :py:meth:`rig.machine_control.MachineController.get_system_info` in place of this function. The new wrapper automatically reserves cores and SDRAM already in use in the target machine, improving on the behaviour of this wrapper which blindly reserves certain ranges of resources presuming only core 0 (the monitor processor) is not idle. Parameters ---------- vertices_resources : {vertex: {resource: quantity, ...}, ...} A dictionary from vertex to the required resources for that vertex. This dictionary must include an entry for every vertex in the application. Resource requirements are specified by a dictionary `{resource: quantity, ...}` where `resource` is some resource identifier and `quantity` is a non-negative integer representing the quantity of that resource required. vertices_applications : {vertex: application, ...} A dictionary from vertices to the application binary to load onto cores associated with that vertex. Applications are given as a string containing the file name of the binary to load. nets : [:py:class:`~rig.netlist.Net`, ...] A list (in no particular order) defining the nets connecting vertices. net_keys : {:py:class:`~rig.netlist.Net`: (key, mask), ...} A dictionary from nets to (key, mask) tuples to be used in SpiNNaker routing tables for routes implementing this net. The key and mask should be given as 32-bit integers. machine : :py:class:`rig.place_and_route.Machine` A data structure which defines the resources available in the target SpiNNaker machine. constraints : [constraint, ...] A list of constraints on placement, allocation and routing. Available constraints are provided in the :py:mod:`rig.place_and_route.constraints` module. reserve_monitor : bool (Default: True) **Optional.** If True, reserve core zero since it will be used as the monitor processor using a :py:class:`rig.place_and_route.constraints.ReserveResourceConstraint`. align_sdram : bool (Default: True) **Optional.** If True, SDRAM allocations will be aligned to 4-byte addresses. Specifically, the supplied constraints will be augmented with an `AlignResourceConstraint(sdram_resource, 4)`. place : function (Default: :py:func:`rig.place_and_route.place`) **Optional.** Placement algorithm to use. place_kwargs : dict (Default: {}) **Optional.** Algorithm-specific arguments for the placer. allocate : function (Default: :py:func:`rig.place_and_route.allocate`) **Optional.** Allocation algorithm to use. allocate_kwargs : dict (Default: {}) **Optional.** Algorithm-specific arguments for the allocator. route : function (Default: :py:func:`rig.place_and_route.route`) **Optional.** Routing algorithm to use. route_kwargs : dict (Default: {}) **Optional.** Algorithm-specific arguments for the router. core_resource : resource (Default: :py:data:`~rig.place_and_route.Cores`) **Optional.** The resource identifier used for cores. sdram_resource : resource (Default: :py:data:`~rig.place_and_route.SDRAM`) **Optional.** The resource identifier used for SDRAM. Returns ------- placements : {vertex: (x, y), ...} A dictionary from vertices to the chip coordinate produced by placement. allocations : {vertex: {resource: slice, ...}, ...} A dictionary from vertices to the resources allocated to it. Resource allocations are dictionaries from resources to a :py:class:`slice` defining the range of the given resource type allocated to the vertex. These :py:class:`slice` objects have `start` <= `end` and `step` set to None. application_map : {application: {(x, y): set([core_num, ...]), ...}, ...} A dictionary from application to the set of cores it should be loaded onto. The set of cores is given as a dictionary from chip to sets of core numbers. routing_tables : {(x, y): \ [:py:class:`~rig.routing_table.RoutingTableEntry`, \ ...], ...} The generated routing tables. Provided as a dictionary from chip to a list of routing table entries. """ warnings.warn("rig.place_and_route.wrapper is deprecated " "use rig.place_and_route.place_and_route_wrapper instead in " "new applications.", DeprecationWarning) constraints = constraints[:] # Augment constraints with (historically) commonly used constraints if reserve_monitor: constraints.append( ReserveResourceConstraint(core_resource, slice(0, 1))) if align_sdram: constraints.append(AlignResourceConstraint(sdram_resource, 4)) # Place/Allocate/Route placements = place(vertices_resources, nets, machine, constraints, **place_kwargs) allocations = allocate(vertices_resources, nets, machine, constraints, placements, **allocate_kwargs) routes = route(vertices_resources, nets, machine, constraints, placements, allocations, core_resource, **route_kwargs) # Build data-structures ready to feed to the machine loading functions application_map = build_application_map(vertices_applications, placements, allocations, core_resource) # Build data-structures ready to feed to the machine loading functions from rig.place_and_route.utils import build_routing_tables routing_tables = build_routing_tables(routes, net_keys) return placements, allocations, application_map, routing_tables
[ "def", "wrapper", "(", "vertices_resources", ",", "vertices_applications", ",", "nets", ",", "net_keys", ",", "machine", ",", "constraints", "=", "[", "]", ",", "reserve_monitor", "=", "True", ",", "align_sdram", "=", "True", ",", "place", "=", "default_place", ",", "place_kwargs", "=", "{", "}", ",", "allocate", "=", "default_allocate", ",", "allocate_kwargs", "=", "{", "}", ",", "route", "=", "default_route", ",", "route_kwargs", "=", "{", "}", ",", "core_resource", "=", "Cores", ",", "sdram_resource", "=", "SDRAM", ")", ":", "warnings", ".", "warn", "(", "\"rig.place_and_route.wrapper is deprecated \"", "\"use rig.place_and_route.place_and_route_wrapper instead in \"", "\"new applications.\"", ",", "DeprecationWarning", ")", "constraints", "=", "constraints", "[", ":", "]", "# Augment constraints with (historically) commonly used constraints", "if", "reserve_monitor", ":", "constraints", ".", "append", "(", "ReserveResourceConstraint", "(", "core_resource", ",", "slice", "(", "0", ",", "1", ")", ")", ")", "if", "align_sdram", ":", "constraints", ".", "append", "(", "AlignResourceConstraint", "(", "sdram_resource", ",", "4", ")", ")", "# Place/Allocate/Route", "placements", "=", "place", "(", "vertices_resources", ",", "nets", ",", "machine", ",", "constraints", ",", "*", "*", "place_kwargs", ")", "allocations", "=", "allocate", "(", "vertices_resources", ",", "nets", ",", "machine", ",", "constraints", ",", "placements", ",", "*", "*", "allocate_kwargs", ")", "routes", "=", "route", "(", "vertices_resources", ",", "nets", ",", "machine", ",", "constraints", ",", "placements", ",", "allocations", ",", "core_resource", ",", "*", "*", "route_kwargs", ")", "# Build data-structures ready to feed to the machine loading functions", "application_map", "=", "build_application_map", "(", "vertices_applications", ",", "placements", ",", "allocations", ",", "core_resource", ")", "# Build data-structures ready to feed to the machine loading functions", "from", "rig", ".", "place_and_route", ".", "utils", "import", "build_routing_tables", "routing_tables", "=", "build_routing_tables", "(", "routes", ",", "net_keys", ")", "return", "placements", ",", "allocations", ",", "application_map", ",", "routing_tables" ]
Wrapper for core place-and-route tasks for the common case. At a high level this function essentially takes a set of vertices and nets and produces placements, memory allocations, routing tables and application loading information. .. warning:: This function is deprecated. New users should use :py:func:`.place_and_route_wrapper` along with :py:meth:`rig.machine_control.MachineController.get_system_info` in place of this function. The new wrapper automatically reserves cores and SDRAM already in use in the target machine, improving on the behaviour of this wrapper which blindly reserves certain ranges of resources presuming only core 0 (the monitor processor) is not idle. Parameters ---------- vertices_resources : {vertex: {resource: quantity, ...}, ...} A dictionary from vertex to the required resources for that vertex. This dictionary must include an entry for every vertex in the application. Resource requirements are specified by a dictionary `{resource: quantity, ...}` where `resource` is some resource identifier and `quantity` is a non-negative integer representing the quantity of that resource required. vertices_applications : {vertex: application, ...} A dictionary from vertices to the application binary to load onto cores associated with that vertex. Applications are given as a string containing the file name of the binary to load. nets : [:py:class:`~rig.netlist.Net`, ...] A list (in no particular order) defining the nets connecting vertices. net_keys : {:py:class:`~rig.netlist.Net`: (key, mask), ...} A dictionary from nets to (key, mask) tuples to be used in SpiNNaker routing tables for routes implementing this net. The key and mask should be given as 32-bit integers. machine : :py:class:`rig.place_and_route.Machine` A data structure which defines the resources available in the target SpiNNaker machine. constraints : [constraint, ...] A list of constraints on placement, allocation and routing. Available constraints are provided in the :py:mod:`rig.place_and_route.constraints` module. reserve_monitor : bool (Default: True) **Optional.** If True, reserve core zero since it will be used as the monitor processor using a :py:class:`rig.place_and_route.constraints.ReserveResourceConstraint`. align_sdram : bool (Default: True) **Optional.** If True, SDRAM allocations will be aligned to 4-byte addresses. Specifically, the supplied constraints will be augmented with an `AlignResourceConstraint(sdram_resource, 4)`. place : function (Default: :py:func:`rig.place_and_route.place`) **Optional.** Placement algorithm to use. place_kwargs : dict (Default: {}) **Optional.** Algorithm-specific arguments for the placer. allocate : function (Default: :py:func:`rig.place_and_route.allocate`) **Optional.** Allocation algorithm to use. allocate_kwargs : dict (Default: {}) **Optional.** Algorithm-specific arguments for the allocator. route : function (Default: :py:func:`rig.place_and_route.route`) **Optional.** Routing algorithm to use. route_kwargs : dict (Default: {}) **Optional.** Algorithm-specific arguments for the router. core_resource : resource (Default: :py:data:`~rig.place_and_route.Cores`) **Optional.** The resource identifier used for cores. sdram_resource : resource (Default: :py:data:`~rig.place_and_route.SDRAM`) **Optional.** The resource identifier used for SDRAM. Returns ------- placements : {vertex: (x, y), ...} A dictionary from vertices to the chip coordinate produced by placement. allocations : {vertex: {resource: slice, ...}, ...} A dictionary from vertices to the resources allocated to it. Resource allocations are dictionaries from resources to a :py:class:`slice` defining the range of the given resource type allocated to the vertex. These :py:class:`slice` objects have `start` <= `end` and `step` set to None. application_map : {application: {(x, y): set([core_num, ...]), ...}, ...} A dictionary from application to the set of cores it should be loaded onto. The set of cores is given as a dictionary from chip to sets of core numbers. routing_tables : {(x, y): \ [:py:class:`~rig.routing_table.RoutingTableEntry`, \ ...], ...} The generated routing tables. Provided as a dictionary from chip to a list of routing table entries.
[ "Wrapper", "for", "core", "place", "-", "and", "-", "route", "tasks", "for", "the", "common", "case", ".", "At", "a", "high", "level", "this", "function", "essentially", "takes", "a", "set", "of", "vertices", "and", "nets", "and", "produces", "placements", "memory", "allocations", "routing", "tables", "and", "application", "loading", "information", "." ]
python
train
51.634921
erocarrera/pefile
pefile.py
https://github.com/erocarrera/pefile/blob/8a78a2e251a3f2336c232bf411133927b479edf2/pefile.py#L5214-L5220
def get_word_from_offset(self, offset): """Return the word value at the given file offset. (little endian)""" if offset+2 > len(self.__data__): return None return self.get_word_from_data(self.__data__[offset:offset+2], 0)
[ "def", "get_word_from_offset", "(", "self", ",", "offset", ")", ":", "if", "offset", "+", "2", ">", "len", "(", "self", ".", "__data__", ")", ":", "return", "None", "return", "self", ".", "get_word_from_data", "(", "self", ".", "__data__", "[", "offset", ":", "offset", "+", "2", "]", ",", "0", ")" ]
Return the word value at the given file offset. (little endian)
[ "Return", "the", "word", "value", "at", "the", "given", "file", "offset", ".", "(", "little", "endian", ")" ]
python
train
36.142857
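A standalone equivalent of the bounds-checked read above, using struct directly. The helper get_word_from_data is not shown in this record, so '<H' (unsigned 16-bit little-endian) is used here on the stated assumption that it matches; the function name is illustrative.

import struct

def get_word_from_offset_sketch(data, offset):
    # Same guard as the method above: refuse reads past the buffer end.
    if offset + 2 > len(data):
        return None
    return struct.unpack('<H', data[offset:offset + 2])[0]

assert get_word_from_offset_sketch(b'\x4d\x5a\x90\x00', 0) == 0x5a4d  # 'MZ'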
rocky/python3-trepan
trepan/cli.py
https://github.com/rocky/python3-trepan/blob/14e91bc0acce090d67be145b1ac040cab92ac5f3/trepan/cli.py#L41-L242
def main(dbg=None, sys_argv=list(sys.argv)): """Routine which gets run if we were invoked directly""" global __title__ # Save the original just for use in the restart that works via exec. orig_sys_argv = list(sys_argv) opts, dbg_opts, sys_argv = Moptions.process_options(__title__, VERSION, sys_argv) if opts.server is not None: if opts.server == 'tcp': connection_opts={'IO': 'TCP', 'PORT': opts.port} else: connection_opts={'IO': 'FIFO'} intf = Mserver.ServerInterface(connection_opts=connection_opts) dbg_opts['interface'] = intf if 'FIFO' == intf.server_type: print('Starting FIFO server for process %s.' % os.getpid()) elif 'TCP' == intf.server_type: print('Starting TCP server listening on port %s.' % intf.inout.PORT) pass elif opts.client: Mclient.run(opts, sys_argv) return dbg_opts['orig_sys_argv'] = orig_sys_argv if dbg is None: dbg = Mdebugger.Trepan(dbg_opts) dbg.core.add_ignore(main) pass Moptions._postprocess_options(dbg, opts) # process_options has munged sys.argv to remove any options that # options that belong to this debugger. The original options to # invoke the debugger and script are in global sys_argv if len(sys_argv) == 0: # No program given to debug. Set to go into a command loop # anyway mainpyfile = None else: mainpyfile = sys_argv[0] # Get script filename. if not osp.isfile(mainpyfile): mainpyfile=Mclifns.whence_file(mainpyfile) is_readable = Mfile.readable(mainpyfile) if is_readable is None: print("%s: Python script file '%s' does not exist" % (__title__, mainpyfile,)) sys.exit(1) elif not is_readable: print("%s: Can't read Python script file '%s'" % (__title__, mainpyfile, )) sys.exit(1) return if Mfile.is_compiled_py(mainpyfile): try: from xdis import load_module, PYTHON_VERSION, IS_PYPY (python_version, timestamp, magic_int, co, is_pypy, source_size) = load_module(mainpyfile, code_objects=None, fast_load=True) assert is_pypy == IS_PYPY assert python_version == PYTHON_VERSION, \ "bytecode is for version %s but we are version %s" % ( python_version, PYTHON_VERSION) # We should check version magic_int py_file = co.co_filename if osp.isabs(py_file): try_file = py_file else: mainpydir = osp.dirname(mainpyfile) tag = sys.implementation.cache_tag dirnames = [osp.join(mainpydir, tag), mainpydir] + os.environ['PATH'].split(osp.pathsep) + ['.'] try_file = Mclifns.whence_file(py_file, dirnames) if osp.isfile(try_file): mainpyfile = try_file pass else: # Move onto the except branch raise IOError("Python file name embedded in code %s not found" % try_file) except IOError: try: from uncompyle6 import decompile_file except ImportError: print("%s: Compiled python file '%s', but uncompyle6 not found" % (__title__, mainpyfile), file=sys.stderr) sys.exit(1) return short_name = osp.basename(mainpyfile).strip('.pyc') fd = tempfile.NamedTemporaryFile(suffix='.py', prefix=short_name + "_", delete=False) old_write = fd.file.write def write_wrapper(*args, **kwargs): if isinstance(args[0], str): new_args = list(args) new_args[0] = args[0].encode('utf-8') old_write(*new_args, **kwargs) else: old_write(*args, **kwargs) fd.file.write = write_wrapper # from io import StringIO # linemap_io = StringIO() try: decompile_file(mainpyfile, fd.file, mapstream=fd) except: print("%s: error decompiling '%s'" % (__title__, mainpyfile), file=sys.stderr) sys.exit(1) return # # Get the line associations between the original and # # decompiled program # mapline = linemap_io.getvalue() # fd.write(mapline + "\n\n") # linemap = eval(mapline[3:]) mainpyfile = fd.name fd.close() # Since we are actually running the recreated source, # there is little to no need to remap line numbers. # The mapping is given at the end of the file. # However we should consider adding this information # and original file name. print("%s: couldn't find Python source so we recreated it at '%s'" % (__title__, mainpyfile), file=sys.stderr) pass # If mainpyfile is an optimized Python script try to find and # use non-optimized alternative. mainpyfile_noopt = pyficache.pyc2py(mainpyfile) if mainpyfile != mainpyfile_noopt \ and Mfile.readable(mainpyfile_noopt): print("%s: Compiled Python script given and we can't use that." % __title__) print("%s: Substituting non-compiled name: %s" % ( __title__, mainpyfile_noopt,)) mainpyfile = mainpyfile_noopt pass # Replace trepan's dir with script's dir in front of # module search path. sys.path[0] = dbg.main_dirname = osp.dirname(mainpyfile) # XXX If a signal has been received we continue in the loop, otherwise # the loop exits for some reason. dbg.sig_received = False # if not mainpyfile: # print('For now, you need to specify a Python script name!') # sys.exit(2) # pass while True: # Run the debugged script over and over again until we get it # right. try: if dbg.program_sys_argv and mainpyfile: normal_termination = dbg.run_script(mainpyfile) if not normal_termination: break else: dbg.core.execution_status = 'No program' dbg.core.processor.process_commands() pass dbg.core.execution_status = 'Terminated' dbg.intf[-1].msg("The program finished - quit or restart") dbg.core.processor.process_commands() except Mexcept.DebuggerQuit: break except Mexcept.DebuggerRestart: dbg.core.execution_status = 'Restart requested' if dbg.program_sys_argv: sys.argv = list(dbg.program_sys_argv) part1 = ('Restarting %s with arguments:' % dbg.core.filename(mainpyfile)) args = ' '.join(dbg.program_sys_argv[1:]) dbg.intf[-1].msg( Mmisc.wrapped_lines(part1, args, dbg.settings['width'])) else: break except SystemExit: # In most cases SystemExit does not warrant a post-mortem session. break pass # Restore old sys.argv sys.argv = orig_sys_argv return
[ "def", "main", "(", "dbg", "=", "None", ",", "sys_argv", "=", "list", "(", "sys", ".", "argv", ")", ")", ":", "global", "__title__", "# Save the original just for use in the restart that works via exec.", "orig_sys_argv", "=", "list", "(", "sys_argv", ")", "opts", ",", "dbg_opts", ",", "sys_argv", "=", "Moptions", ".", "process_options", "(", "__title__", ",", "VERSION", ",", "sys_argv", ")", "if", "opts", ".", "server", "is", "not", "None", ":", "if", "opts", ".", "server", "==", "'tcp'", ":", "connection_opts", "=", "{", "'IO'", ":", "'TCP'", ",", "'PORT'", ":", "opts", ".", "port", "}", "else", ":", "connection_opts", "=", "{", "'IO'", ":", "'FIFO'", "}", "intf", "=", "Mserver", ".", "ServerInterface", "(", "connection_opts", "=", "connection_opts", ")", "dbg_opts", "[", "'interface'", "]", "=", "intf", "if", "'FIFO'", "==", "intf", ".", "server_type", ":", "print", "(", "'Starting FIFO server for process %s.'", "%", "os", ".", "getpid", "(", ")", ")", "elif", "'TCP'", "==", "intf", ".", "server_type", ":", "print", "(", "'Starting TCP server listening on port %s.'", "%", "intf", ".", "inout", ".", "PORT", ")", "pass", "elif", "opts", ".", "client", ":", "Mclient", ".", "run", "(", "opts", ",", "sys_argv", ")", "return", "dbg_opts", "[", "'orig_sys_argv'", "]", "=", "orig_sys_argv", "if", "dbg", "is", "None", ":", "dbg", "=", "Mdebugger", ".", "Trepan", "(", "dbg_opts", ")", "dbg", ".", "core", ".", "add_ignore", "(", "main", ")", "pass", "Moptions", ".", "_postprocess_options", "(", "dbg", ",", "opts", ")", "# process_options has munged sys.argv to remove any options that", "# options that belong to this debugger. The original options to", "# invoke the debugger and script are in global sys_argv", "if", "len", "(", "sys_argv", ")", "==", "0", ":", "# No program given to debug. 
Set to go into a command loop", "# anyway", "mainpyfile", "=", "None", "else", ":", "mainpyfile", "=", "sys_argv", "[", "0", "]", "# Get script filename.", "if", "not", "osp", ".", "isfile", "(", "mainpyfile", ")", ":", "mainpyfile", "=", "Mclifns", ".", "whence_file", "(", "mainpyfile", ")", "is_readable", "=", "Mfile", ".", "readable", "(", "mainpyfile", ")", "if", "is_readable", "is", "None", ":", "print", "(", "\"%s: Python script file '%s' does not exist\"", "%", "(", "__title__", ",", "mainpyfile", ",", ")", ")", "sys", ".", "exit", "(", "1", ")", "elif", "not", "is_readable", ":", "print", "(", "\"%s: Can't read Python script file '%s'\"", "%", "(", "__title__", ",", "mainpyfile", ",", ")", ")", "sys", ".", "exit", "(", "1", ")", "return", "if", "Mfile", ".", "is_compiled_py", "(", "mainpyfile", ")", ":", "try", ":", "from", "xdis", "import", "load_module", ",", "PYTHON_VERSION", ",", "IS_PYPY", "(", "python_version", ",", "timestamp", ",", "magic_int", ",", "co", ",", "is_pypy", ",", "source_size", ")", "=", "load_module", "(", "mainpyfile", ",", "code_objects", "=", "None", ",", "fast_load", "=", "True", ")", "assert", "is_pypy", "==", "IS_PYPY", "assert", "python_version", "==", "PYTHON_VERSION", ",", "\"bytecode is for version %s but we are version %s\"", "%", "(", "python_version", ",", "PYTHON_VERSION", ")", "# We should we check version magic_int", "py_file", "=", "co", ".", "co_filename", "if", "osp", ".", "isabs", "(", "py_file", ")", ":", "try_file", "=", "py_file", "else", ":", "mainpydir", "=", "osp", ".", "dirname", "(", "mainpyfile", ")", "tag", "=", "sys", ".", "implementation", ".", "cache_tag", "dirnames", "=", "[", "osp", ".", "join", "(", "mainpydir", ",", "tag", ")", ",", "mainpydir", "]", "+", "os", ".", "environ", "[", "'PATH'", "]", ".", "split", "(", "osp", ".", "pathsep", ")", "+", "[", "'.'", "]", "try_file", "=", "Mclifns", ".", "whence_file", "(", "py_file", ",", "dirnames", ")", "if", "osp", ".", "isfile", "(", "try_file", ")", ":", "mainpyfile", "=", "try_file", "pass", "else", ":", "# Move onto the except branch", "raise", "IOError", "(", "\"Python file name embedded in code %s not found\"", "%", "try_file", ")", "except", "IOError", ":", "try", ":", "from", "uncompyle6", "import", "decompile_file", "except", "ImportError", ":", "print", "(", "\"%s: Compiled python file '%s', but uncompyle6 not found\"", "%", "(", "__title__", ",", "mainpyfile", ")", ",", "file", "=", "sys", ".", "stderr", ")", "sys", ".", "exit", "(", "1", ")", "return", "short_name", "=", "osp", ".", "basename", "(", "mainpyfile", ")", ".", "strip", "(", "'.pyc'", ")", "fd", "=", "tempfile", ".", "NamedTemporaryFile", "(", "suffix", "=", "'.py'", ",", "prefix", "=", "short_name", "+", "\"_\"", ",", "delete", "=", "False", ")", "old_write", "=", "fd", ".", "file", ".", "write", "def", "write_wrapper", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "isinstance", "(", "args", "[", "0", "]", ",", "str", ")", ":", "new_args", "=", "list", "(", "args", ")", "new_args", "[", "0", "]", "=", "args", "[", "0", "]", ".", "encode", "(", "'utf-8'", ")", "old_write", "(", "*", "new_args", ",", "*", "*", "kwargs", ")", "else", ":", "old_write", "(", "*", "args", ",", "*", "*", "kwargs", ")", "fd", ".", "file", ".", "write", "=", "write_wrapper", "# from io import StringIO", "# linemap_io = StringIO()", "try", ":", "decompile_file", "(", "mainpyfile", ",", "fd", ".", "file", ",", "mapstream", "=", "fd", ")", "except", ":", "print", "(", "\"%s: error decompiling '%s'\"", "%", "(", 
"__title__", ",", "mainpyfile", ")", ",", "file", "=", "sys", ".", "stderr", ")", "sys", ".", "exit", "(", "1", ")", "return", "# # Get the line associations between the original and", "# # decompiled program", "# mapline = linemap_io.getvalue()", "# fd.write(mapline + \"\\n\\n\")", "# linemap = eval(mapline[3:])", "mainpyfile", "=", "fd", ".", "name", "fd", ".", "close", "(", ")", "# Since we are actually running the recreated source,", "# there is little no need to remap line numbers.", "# The mapping is given at the end of the file.", "# However we should consider adding this information", "# and original file name.", "print", "(", "\"%s: couldn't find Python source so we recreated it at '%s'\"", "%", "(", "__title__", ",", "mainpyfile", ")", ",", "file", "=", "sys", ".", "stderr", ")", "pass", "# If mainpyfile is an optimized Python script try to find and", "# use non-optimized alternative.", "mainpyfile_noopt", "=", "pyficache", ".", "pyc2py", "(", "mainpyfile", ")", "if", "mainpyfile", "!=", "mainpyfile_noopt", "and", "Mfile", ".", "readable", "(", "mainpyfile_noopt", ")", ":", "print", "(", "\"%s: Compiled Python script given and we can't use that.\"", "%", "__title__", ")", "print", "(", "\"%s: Substituting non-compiled name: %s\"", "%", "(", "__title__", ",", "mainpyfile_noopt", ",", ")", ")", "mainpyfile", "=", "mainpyfile_noopt", "pass", "# Replace trepan's dir with script's dir in front of", "# module search path.", "sys", ".", "path", "[", "0", "]", "=", "dbg", ".", "main_dirname", "=", "osp", ".", "dirname", "(", "mainpyfile", ")", "# XXX If a signal has been received we continue in the loop, otherwise", "# the loop exits for some reason.", "dbg", ".", "sig_received", "=", "False", "# if not mainpyfile:", "# print('For now, you need to specify a Python script name!')", "# sys.exit(2)", "# pass", "while", "True", ":", "# Run the debugged script over and over again until we get it", "# right.", "try", ":", "if", "dbg", ".", "program_sys_argv", "and", "mainpyfile", ":", "normal_termination", "=", "dbg", ".", "run_script", "(", "mainpyfile", ")", "if", "not", "normal_termination", ":", "break", "else", ":", "dbg", ".", "core", ".", "execution_status", "=", "'No program'", "dbg", ".", "core", ".", "processor", ".", "process_commands", "(", ")", "pass", "dbg", ".", "core", ".", "execution_status", "=", "'Terminated'", "dbg", ".", "intf", "[", "-", "1", "]", ".", "msg", "(", "\"The program finished - quit or restart\"", ")", "dbg", ".", "core", ".", "processor", ".", "process_commands", "(", ")", "except", "Mexcept", ".", "DebuggerQuit", ":", "break", "except", "Mexcept", ".", "DebuggerRestart", ":", "dbg", ".", "core", ".", "execution_status", "=", "'Restart requested'", "if", "dbg", ".", "program_sys_argv", ":", "sys", ".", "argv", "=", "list", "(", "dbg", ".", "program_sys_argv", ")", "part1", "=", "(", "'Restarting %s with arguments:'", "%", "dbg", ".", "core", ".", "filename", "(", "mainpyfile", ")", ")", "args", "=", "' '", ".", "join", "(", "dbg", ".", "program_sys_argv", "[", "1", ":", "]", ")", "dbg", ".", "intf", "[", "-", "1", "]", ".", "msg", "(", "Mmisc", ".", "wrapped_lines", "(", "part1", ",", "args", ",", "dbg", ".", "settings", "[", "'width'", "]", ")", ")", "else", ":", "break", "except", "SystemExit", ":", "# In most cases SystemExit does not warrant a post-mortem session.", "break", "pass", "# Restore old sys.argv", "sys", ".", "argv", "=", "orig_sys_argv", "return" ]
Routine which gets run if we were invoked directly
[ "Routine", "which", "gets", "run", "if", "we", "were", "invoked", "directly" ]
python
test
39.539604
mandiant/ioc_writer
ioc_writer/managers/upgrade_10.py
https://github.com/mandiant/ioc_writer/blob/712247f3a10bdc2584fa18ac909fc763f71df21a/ioc_writer/managers/upgrade_10.py#L79-L94
def parse(self, fn): """ Parses a file into an lxml.etree structure with namespaces removed. This tree is added to self.iocs. :param fn: File to parse. :return: """ ioc_xml = xmlutils.read_xml_no_ns(fn) if not ioc_xml: return False root = ioc_xml.getroot() iocid = root.get('id', None) if not iocid: return False self.iocs[iocid] = ioc_xml return True
[ "def", "parse", "(", "self", ",", "fn", ")", ":", "ioc_xml", "=", "xmlutils", ".", "read_xml_no_ns", "(", "fn", ")", "if", "not", "ioc_xml", ":", "return", "False", "root", "=", "ioc_xml", ".", "getroot", "(", ")", "iocid", "=", "root", ".", "get", "(", "'id'", ",", "None", ")", "if", "not", "iocid", ":", "return", "False", "self", ".", "iocs", "[", "iocid", "]", "=", "ioc_xml", "return", "True" ]
Parses a file into an lxml.etree structure with namespaces removed. This tree is added to self.iocs. :param fn: File to parse. :return:
[ "Parses", "a", "file", "into", "a", "lxml", ".", "etree", "structure", "with", "namespaces", "remove", ".", "This", "tree", "is", "added", "to", "self", ".", "iocs", "." ]
python
train
28.5
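The pattern above (parse the XML, read the root's id attribute, index the tree by it) can be mirrored standalone with plain lxml. The namespace-stripping helper xmlutils.read_xml_no_ns is package-specific, so this hedged sketch skips it:

from lxml import etree

iocs = {}

def parse_ioc(fn):
    # Parse the IOC file; no namespace stripping here, unlike the package helper.
    tree = etree.parse(fn)
    iocid = tree.getroot().get('id', None)
    if not iocid:
        return False
    iocs[iocid] = tree
    return True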
materialsproject/pymatgen
pymatgen/apps/borg/queen.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/apps/borg/queen.py#L64-L83
def parallel_assimilate(self, rootpath): """ Assimilate the entire subdirectory structure in rootpath. """ logger.info('Scanning for valid paths...') valid_paths = [] for (parent, subdirs, files) in os.walk(rootpath): valid_paths.extend(self._drone.get_valid_paths((parent, subdirs, files))) manager = Manager() data = manager.list() status = manager.dict() status['count'] = 0 status['total'] = len(valid_paths) logger.info('{} valid paths found.'.format(len(valid_paths))) p = Pool(self._num_drones) p.map(order_assimilation, ((path, self._drone, data, status) for path in valid_paths)) for d in data: self._data.append(json.loads(d, cls=MontyDecoder))
[ "def", "parallel_assimilate", "(", "self", ",", "rootpath", ")", ":", "logger", ".", "info", "(", "'Scanning for valid paths...'", ")", "valid_paths", "=", "[", "]", "for", "(", "parent", ",", "subdirs", ",", "files", ")", "in", "os", ".", "walk", "(", "rootpath", ")", ":", "valid_paths", ".", "extend", "(", "self", ".", "_drone", ".", "get_valid_paths", "(", "(", "parent", ",", "subdirs", ",", "files", ")", ")", ")", "manager", "=", "Manager", "(", ")", "data", "=", "manager", ".", "list", "(", ")", "status", "=", "manager", ".", "dict", "(", ")", "status", "[", "'count'", "]", "=", "0", "status", "[", "'total'", "]", "=", "len", "(", "valid_paths", ")", "logger", ".", "info", "(", "'{} valid paths found.'", ".", "format", "(", "len", "(", "valid_paths", ")", ")", ")", "p", "=", "Pool", "(", "self", ".", "_num_drones", ")", "p", ".", "map", "(", "order_assimilation", ",", "(", "(", "path", ",", "self", ".", "_drone", ",", "data", ",", "status", ")", "for", "path", "in", "valid_paths", ")", ")", "for", "d", "in", "data", ":", "self", ".", "_data", ".", "append", "(", "json", ".", "loads", "(", "d", ",", "cls", "=", "MontyDecoder", ")", ")" ]
Assimilate the entire subdirectory structure in rootpath.
[ "Assimilate", "the", "entire", "subdirectory", "structure", "in", "rootpath", "." ]
python
train
43.7
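In practice this method is driven through a BorgQueen instance; the drone class below is a real pymatgen name, but the constructor argument and the get_data accessor are assumptions about the surrounding API:

from pymatgen.apps.borg.hive import VaspToComputedEntryDrone
from pymatgen.apps.borg.queen import BorgQueen

drone = VaspToComputedEntryDrone()
queen = BorgQueen(drone, number_of_drones=4)      # argument name assumed
queen.parallel_assimilate('/path/to/vasp_runs')   # hypothetical root path
entries = queen.get_data()                        # assumed accessor for the assimilated list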
jim-easterbrook/pywws
src/pywws/conversions.py
https://github.com/jim-easterbrook/pywws/blob/4e4d74cee5a3ac5bf42286feaa251cd2ffcaf02c/src/pywws/conversions.py#L212-L238
def usaheatindex(temp, humidity, dew=None): """Calculate Heat Index as per USA National Weather Service Standards See http://en.wikipedia.org/wiki/Heat_index, formula 1. The formula is not valid for T < 26.7C, Dew Point < 12C, or RH < 40% """ if temp is None or humidity is None: return None if dew is None: dew = dew_point(temp, humidity) if temp < 26.7 or humidity < 40 or dew < 12.0: return temp T = (temp * 1.8) + 32.0 R = humidity c_1 = -42.379 c_2 = 2.04901523 c_3 = 10.14333127 c_4 = -0.22475541 c_5 = -0.00683783 c_6 = -0.05481717 c_7 = 0.00122874 c_8 = 0.00085282 c_9 = -0.00000199 return ((c_1 + (c_2 * T) + (c_3 * R) + (c_4 * T * R) + (c_5 * (T**2)) + (c_6 * (R**2)) + (c_7 * (T**2) * R) + (c_8 * T * (R**2)) + (c_9 * (T**2) * (R**2))) - 32.0) / 1.8
[ "def", "usaheatindex", "(", "temp", ",", "humidity", ",", "dew", "=", "None", ")", ":", "if", "temp", "is", "None", "or", "humidity", "is", "None", ":", "return", "None", "if", "dew", "is", "None", ":", "dew", "=", "dew_point", "(", "temp", ",", "humidity", ")", "if", "temp", "<", "26.7", "or", "humidity", "<", "40", "or", "dew", "<", "12.0", ":", "return", "temp", "T", "=", "(", "temp", "*", "1.8", ")", "+", "32.0", "R", "=", "humidity", "c_1", "=", "-", "42.379", "c_2", "=", "2.04901523", "c_3", "=", "10.14333127", "c_4", "=", "-", "0.22475541", "c_5", "=", "-", "0.00683783", "c_6", "=", "-", "0.05481717", "c_7", "=", "0.00122874", "c_8", "=", "0.00085282", "c_9", "=", "-", "0.00000199", "return", "(", "(", "c_1", "+", "(", "c_2", "*", "T", ")", "+", "(", "c_3", "*", "R", ")", "+", "(", "c_4", "*", "T", "*", "R", ")", "+", "(", "c_5", "*", "(", "T", "**", "2", ")", ")", "+", "(", "c_6", "*", "(", "R", "**", "2", ")", ")", "+", "(", "c_7", "*", "(", "T", "**", "2", ")", "*", "R", ")", "+", "(", "c_8", "*", "T", "*", "(", "R", "**", "2", ")", ")", "+", "(", "c_9", "*", "(", "T", "**", "2", ")", "*", "(", "R", "**", "2", ")", ")", ")", "-", "32.0", ")", "/", "1.8" ]
Calculate Heat Index as per USA National Weather Service Standards See http://en.wikipedia.org/wiki/Heat_index, formula 1. The formula is not valid for T < 26.7C, Dew Point < 12C, or RH < 40%
[ "Calculate", "Heat", "Index", "as", "per", "USA", "National", "Weather", "Service", "Standards" ]
python
train
31.925926
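A worked call, assuming the function is importable from pywws.conversions as the record's path suggests; the expected output is an approximation of formula 1:

from pywws.conversions import usaheatindex

# 32 degC at 60% relative humidity is inside the formula's validity range.
hi = usaheatindex(32.0, 60.0)
print(round(hi, 1))              # roughly 37 degC by formula 1
print(usaheatindex(20.0, 30.0))  # 20.0: out of range, input temperature is passed through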
saltstack/salt
salt/modules/junos.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/junos.py#L1367-L1478
def get_table(table, table_file, path=None, target=None, key=None, key_items=None, filters=None, template_args=None): ''' Retrieve data from a Junos device using Tables/Views table (required) Name of PyEZ Table table_file (required) YAML file that has the table specified in table parameter path: Path of location of the YAML file. defaults to op directory in jnpr.junos.op target: if command need to run on FPC, can specify fpc target key: To overwrite key provided in YAML key_items: To select only given key items filters: To select only filter for the dictionary from columns template_args: key/value pair which should render Jinja template command CLI Example: .. code-block:: bash salt 'device_name' junos.get_table ''' conn = __proxy__['junos.conn']() ret = {} ret['out'] = True ret['hostname'] = conn._hostname ret['tablename'] = table get_kvargs = {} if target is not None: get_kvargs['target'] = target if key is not None: get_kvargs['key'] = key if key_items is not None: get_kvargs['key_items'] = key_items if filters is not None: get_kvargs['filters'] = filters if template_args is not None and isinstance(template_args, dict): get_kvargs['args'] = template_args pyez_tables_path = os.path.dirname(os.path.abspath(tables_dir.__file__)) try: if path is not None: file_loc = glob.glob(os.path.join(path, '{}'.format(table_file))) else: file_loc = glob.glob(os.path.join(pyez_tables_path, '{}'.format(table_file))) if len(file_loc) == 1: file_name = file_loc[0] else: ret['message'] = 'Given table file {} cannot be located'.format(table_file) ret['out'] = False return ret try: with salt.utils.files.fopen(file_name) as fp: ret['table'] = yaml.load(fp.read(), Loader=yamlordereddictloader.Loader) globals().update(FactoryLoader().load(ret['table'])) except IOError as err: ret['message'] = 'Uncaught exception during YAML Load - please ' \ 'report: {0}'.format(six.text_type(err)) ret['out'] = False return ret try: data = globals()[table](conn) data.get(**get_kvargs) except KeyError as err: ret['message'] = 'Uncaught exception during get API call - please ' \ 'report: {0}'.format(six.text_type(err)) ret['out'] = False return ret except ConnectClosedError: ret['message'] = 'Got ConnectClosedError exception. Connection lost ' \ 'with {}'.format(conn) ret['out'] = False return ret ret['reply'] = json.loads(data.to_json()) if data.__class__.__bases__[0] == OpTable: # Sets key value if not present in YAML. To be used by returner if ret['table'][table].get('key') is None: ret['table'][table]['key'] = data.ITEM_NAME_XPATH # If key is provided from salt state file. if key is not None: ret['table'][table]['key'] = data.KEY else: if target is not None: ret['table'][table]['target'] = data.TARGET if key is not None: ret['table'][table]['key'] = data.KEY if key_items is not None: ret['table'][table]['key_items'] = data.KEY_ITEMS if template_args is not None: ret['table'][table]['args'] = data.CMD_ARGS ret['table'][table]['command'] = data.GET_CMD except Exception as err: ret['message'] = 'Uncaught exception - please report: {0}'.format( str(err)) traceback.print_exc() ret['out'] = False return ret return ret
[ "def", "get_table", "(", "table", ",", "table_file", ",", "path", "=", "None", ",", "target", "=", "None", ",", "key", "=", "None", ",", "key_items", "=", "None", ",", "filters", "=", "None", ",", "template_args", "=", "None", ")", ":", "conn", "=", "__proxy__", "[", "'junos.conn'", "]", "(", ")", "ret", "=", "{", "}", "ret", "[", "'out'", "]", "=", "True", "ret", "[", "'hostname'", "]", "=", "conn", ".", "_hostname", "ret", "[", "'tablename'", "]", "=", "table", "get_kvargs", "=", "{", "}", "if", "target", "is", "not", "None", ":", "get_kvargs", "[", "'target'", "]", "=", "target", "if", "key", "is", "not", "None", ":", "get_kvargs", "[", "'key'", "]", "=", "key", "if", "key_items", "is", "not", "None", ":", "get_kvargs", "[", "'key_items'", "]", "=", "key_items", "if", "filters", "is", "not", "None", ":", "get_kvargs", "[", "'filters'", "]", "=", "filters", "if", "template_args", "is", "not", "None", "and", "isinstance", "(", "template_args", ",", "dict", ")", ":", "get_kvargs", "[", "'args'", "]", "=", "template_args", "pyez_tables_path", "=", "os", ".", "path", ".", "dirname", "(", "os", ".", "path", ".", "abspath", "(", "tables_dir", ".", "__file__", ")", ")", "try", ":", "if", "path", "is", "not", "None", ":", "file_loc", "=", "glob", ".", "glob", "(", "os", ".", "path", ".", "join", "(", "path", ",", "'{}'", ".", "format", "(", "table_file", ")", ")", ")", "else", ":", "file_loc", "=", "glob", ".", "glob", "(", "os", ".", "path", ".", "join", "(", "pyez_tables_path", ",", "'{}'", ".", "format", "(", "table_file", ")", ")", ")", "if", "len", "(", "file_loc", ")", "==", "1", ":", "file_name", "=", "file_loc", "[", "0", "]", "else", ":", "ret", "[", "'message'", "]", "=", "'Given table file {} cannot be located'", ".", "format", "(", "table_file", ")", "ret", "[", "'out'", "]", "=", "False", "return", "ret", "try", ":", "with", "salt", ".", "utils", ".", "files", ".", "fopen", "(", "file_name", ")", "as", "fp", ":", "ret", "[", "'table'", "]", "=", "yaml", ".", "load", "(", "fp", ".", "read", "(", ")", ",", "Loader", "=", "yamlordereddictloader", ".", "Loader", ")", "globals", "(", ")", ".", "update", "(", "FactoryLoader", "(", ")", ".", "load", "(", "ret", "[", "'table'", "]", ")", ")", "except", "IOError", "as", "err", ":", "ret", "[", "'message'", "]", "=", "'Uncaught exception during YAML Load - please '", "'report: {0}'", ".", "format", "(", "six", ".", "text_type", "(", "err", ")", ")", "ret", "[", "'out'", "]", "=", "False", "return", "ret", "try", ":", "data", "=", "globals", "(", ")", "[", "table", "]", "(", "conn", ")", "data", ".", "get", "(", "*", "*", "get_kvargs", ")", "except", "KeyError", "as", "err", ":", "ret", "[", "'message'", "]", "=", "'Uncaught exception during get API call - please '", "'report: {0}'", ".", "format", "(", "six", ".", "text_type", "(", "err", ")", ")", "ret", "[", "'out'", "]", "=", "False", "return", "ret", "except", "ConnectClosedError", ":", "ret", "[", "'message'", "]", "=", "'Got ConnectClosedError exception. Connection lost '", "'with {}'", ".", "format", "(", "conn", ")", "ret", "[", "'out'", "]", "=", "False", "return", "ret", "ret", "[", "'reply'", "]", "=", "json", ".", "loads", "(", "data", ".", "to_json", "(", ")", ")", "if", "data", ".", "__class__", ".", "__bases__", "[", "0", "]", "==", "OpTable", ":", "# Sets key value if not present in YAML. 
To be used by returner", "if", "ret", "[", "'table'", "]", "[", "table", "]", ".", "get", "(", "'key'", ")", "is", "None", ":", "ret", "[", "'table'", "]", "[", "table", "]", "[", "'key'", "]", "=", "data", ".", "ITEM_NAME_XPATH", "# If key is provided from salt state file.", "if", "key", "is", "not", "None", ":", "ret", "[", "'table'", "]", "[", "table", "]", "[", "'key'", "]", "=", "data", ".", "KEY", "else", ":", "if", "target", "is", "not", "None", ":", "ret", "[", "'table'", "]", "[", "table", "]", "[", "'target'", "]", "=", "data", ".", "TARGET", "if", "key", "is", "not", "None", ":", "ret", "[", "'table'", "]", "[", "table", "]", "[", "'key'", "]", "=", "data", ".", "KEY", "if", "key_items", "is", "not", "None", ":", "ret", "[", "'table'", "]", "[", "table", "]", "[", "'key_items'", "]", "=", "data", ".", "KEY_ITEMS", "if", "template_args", "is", "not", "None", ":", "ret", "[", "'table'", "]", "[", "table", "]", "[", "'args'", "]", "=", "data", ".", "CMD_ARGS", "ret", "[", "'table'", "]", "[", "table", "]", "[", "'command'", "]", "=", "data", ".", "GET_CMD", "except", "Exception", "as", "err", ":", "ret", "[", "'message'", "]", "=", "'Uncaught exception - please report: {0}'", ".", "format", "(", "str", "(", "err", ")", ")", "traceback", ".", "print_exc", "(", ")", "ret", "[", "'out'", "]", "=", "False", "return", "ret", "return", "ret" ]
Retrieve data from a Junos device using Tables/Views

    table (required)
        Name of PyEZ Table

    table_file (required)
        YAML file that has the table specified in the table parameter

    path:
        Path of the location of the YAML file.
        Defaults to the op directory in jnpr.junos.op

    target:
        If the command needs to run on an FPC, the FPC target can be specified here

    key:
        To overwrite the key provided in the YAML

    key_items:
        To select only the given key items

    filters:
        To select only the given filters for the dictionary from the columns

    template_args:
        Key/value pairs used to render the Jinja template command

    CLI Example:

    .. code-block:: bash

        salt 'device_name' junos.get_table
[ "Retrieve", "data", "from", "a", "Junos", "device", "using", "Tables", "/", "Views" ]
python
train
35.723214
ray-project/ray
python/ray/experimental/state.py
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/experimental/state.py#L747-L765
def cluster_resources(self): """Get the current total cluster resources. Note that this information can grow stale as nodes are added to or removed from the cluster. Returns: A dictionary mapping resource name to the total quantity of that resource in the cluster. """ resources = defaultdict(int) clients = self.client_table() for client in clients: # Only count resources from live clients. if client["IsInsertion"]: for key, value in client["Resources"].items(): resources[key] += value return dict(resources)
[ "def", "cluster_resources", "(", "self", ")", ":", "resources", "=", "defaultdict", "(", "int", ")", "clients", "=", "self", ".", "client_table", "(", ")", "for", "client", "in", "clients", ":", "# Only count resources from live clients.", "if", "client", "[", "\"IsInsertion\"", "]", ":", "for", "key", ",", "value", "in", "client", "[", "\"Resources\"", "]", ".", "items", "(", ")", ":", "resources", "[", "key", "]", "+=", "value", "return", "dict", "(", "resources", ")" ]
Get the current total cluster resources. Note that this information can grow stale as nodes are added to or removed from the cluster. Returns: A dictionary mapping resource name to the total quantity of that resource in the cluster.
[ "Get", "the", "current", "total", "cluster", "resources", "." ]
python
train
34.578947
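The aggregation is simple to mirror standalone; the client dicts below are made up but use the same keys the method reads:

from collections import defaultdict

clients = [  # stand-in for self.client_table()
    {"IsInsertion": True, "Resources": {"CPU": 8.0, "GPU": 1.0}},
    {"IsInsertion": False, "Resources": {"CPU": 4.0}},  # removed node: skipped
    {"IsInsertion": True, "Resources": {"CPU": 4.0}},
]

resources = defaultdict(int)
for client in clients:
    if client["IsInsertion"]:  # only count resources from live clients
        for key, value in client["Resources"].items():
            resources[key] += value

print(dict(resources))  # {'CPU': 12.0, 'GPU': 1.0}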
ronaldguillen/wave
wave/decorators.py
https://github.com/ronaldguillen/wave/blob/20bb979c917f7634d8257992e6d449dc751256a9/wave/decorators.py#L18-L76
def rest_verbs(http_method_names=None): """ Decorator that converts a function-based view into an RestView subclass. Takes a list of allowed methods for the view as an argument. """ http_method_names = ['GET'] if (http_method_names is None) else http_method_names def decorator(func): WrappedRestView = type( six.PY3 and 'WrappedRestView' or b'WrappedRestView', (RestView,), {'__doc__': func.__doc__} ) # Note, the above allows us to set the docstring. # It is the equivalent of: # # class WrappedRestView(RestView): # pass # WrappedRestView.__doc__ = func.doc <--- Not possible to do this # api_view applied without (method_names) assert not(isinstance(http_method_names, types.FunctionType)), \ '@api_view missing list of allowed HTTP methods' # api_view applied with eg. string instead of list of strings assert isinstance(http_method_names, (list, tuple)), \ '@api_view expected a list of strings, received %s' % type(http_method_names).__name__ allowed_methods = set(http_method_names) | set(('options',)) WrappedRestView.http_method_names = [method.lower() for method in allowed_methods] def handler(self, *args, **kwargs): return func(*args, **kwargs) for method in http_method_names: setattr(WrappedRestView, method.lower(), handler) WrappedRestView.__name__ = func.__name__ WrappedRestView.renderer_classes = getattr(func, 'renderer_classes', RestView.renderer_classes) WrappedRestView.parser_classes = getattr(func, 'parser_classes', RestView.parser_classes) WrappedRestView.authentication_classes = getattr(func, 'authentication_classes', RestView.authentication_classes) WrappedRestView.throttle_classes = getattr(func, 'throttle_classes', RestView.throttle_classes) WrappedRestView.permission_classes = getattr(func, 'permission_classes', RestView.permission_classes) return WrappedRestView.as_view() return decorator
[ "def", "rest_verbs", "(", "http_method_names", "=", "None", ")", ":", "http_method_names", "=", "[", "'GET'", "]", "if", "(", "http_method_names", "is", "None", ")", "else", "http_method_names", "def", "decorator", "(", "func", ")", ":", "WrappedRestView", "=", "type", "(", "six", ".", "PY3", "and", "'WrappedRestView'", "or", "b'WrappedRestView'", ",", "(", "RestView", ",", ")", ",", "{", "'__doc__'", ":", "func", ".", "__doc__", "}", ")", "# Note, the above allows us to set the docstring.", "# It is the equivalent of:", "#", "# class WrappedRestView(RestView):", "# pass", "# WrappedRestView.__doc__ = func.doc <--- Not possible to do this", "# api_view applied without (method_names)", "assert", "not", "(", "isinstance", "(", "http_method_names", ",", "types", ".", "FunctionType", ")", ")", ",", "'@api_view missing list of allowed HTTP methods'", "# api_view applied with eg. string instead of list of strings", "assert", "isinstance", "(", "http_method_names", ",", "(", "list", ",", "tuple", ")", ")", ",", "'@api_view expected a list of strings, received %s'", "%", "type", "(", "http_method_names", ")", ".", "__name__", "allowed_methods", "=", "set", "(", "http_method_names", ")", "|", "set", "(", "(", "'options'", ",", ")", ")", "WrappedRestView", ".", "http_method_names", "=", "[", "method", ".", "lower", "(", ")", "for", "method", "in", "allowed_methods", "]", "def", "handler", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "func", "(", "*", "args", ",", "*", "*", "kwargs", ")", "for", "method", "in", "http_method_names", ":", "setattr", "(", "WrappedRestView", ",", "method", ".", "lower", "(", ")", ",", "handler", ")", "WrappedRestView", ".", "__name__", "=", "func", ".", "__name__", "WrappedRestView", ".", "renderer_classes", "=", "getattr", "(", "func", ",", "'renderer_classes'", ",", "RestView", ".", "renderer_classes", ")", "WrappedRestView", ".", "parser_classes", "=", "getattr", "(", "func", ",", "'parser_classes'", ",", "RestView", ".", "parser_classes", ")", "WrappedRestView", ".", "authentication_classes", "=", "getattr", "(", "func", ",", "'authentication_classes'", ",", "RestView", ".", "authentication_classes", ")", "WrappedRestView", ".", "throttle_classes", "=", "getattr", "(", "func", ",", "'throttle_classes'", ",", "RestView", ".", "throttle_classes", ")", "WrappedRestView", ".", "permission_classes", "=", "getattr", "(", "func", ",", "'permission_classes'", ",", "RestView", ".", "permission_classes", ")", "return", "WrappedRestView", ".", "as_view", "(", ")", "return", "decorator" ]
Decorator that converts a function-based view into a RestView subclass. Takes a list of allowed methods for the view as an argument.
[ "Decorator", "that", "converts", "a", "function", "-", "based", "view", "into", "a", "RestView", "subclass", ".", "Takes", "a", "list", "of", "allowed", "methods", "for", "the", "view", "as", "an", "argument", "." ]
python
train
39.983051
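A hedged usage sketch; the import follows the record's path and the view body is hypothetical:

from wave.decorators import rest_verbs

@rest_verbs(['GET', 'POST'])
def ping(request):
    """Handles GET and POST; OPTIONS is added to the allowed set automatically."""
    return None  # a real view would return a framework Response object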
MIT-LCP/wfdb-python
wfdb/processing/evaluate.py
https://github.com/MIT-LCP/wfdb-python/blob/cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c/wfdb/processing/evaluate.py#L264-L331
def plot(self, sig_style='', title=None, figsize=None, return_fig=False): """ Plot the comparison of two sets of annotations, possibly overlaid on their original signal. Parameters ---------- sig_style : str, optional The matplotlib style of the signal title : str, optional The title of the plot figsize: tuple, optional Tuple pair specifying the width, and height of the figure. It is the'figsize' argument passed into matplotlib.pyplot's `figure` function. return_fig : bool, optional Whether the figure is to be returned as an output argument. """ fig = plt.figure(figsize=figsize) ax = fig.add_subplot(1, 1, 1) legend = ['Signal', 'Matched Reference Annotations (%d/%d)' % (self.tp, self.n_ref), 'Unmatched Reference Annotations (%d/%d)' % (self.fn, self.n_ref), 'Matched Test Annotations (%d/%d)' % (self.tp, self.n_test), 'Unmatched Test Annotations (%d/%d)' % (self.fp, self.n_test) ] # Plot the signal if any if self.signal is not None: ax.plot(self.signal, sig_style) # Plot reference annotations ax.plot(self.matched_ref_sample, self.signal[self.matched_ref_sample], 'ko') ax.plot(self.unmatched_ref_sample, self.signal[self.unmatched_ref_sample], 'ko', fillstyle='none') # Plot test annotations ax.plot(self.matched_test_sample, self.signal[self.matched_test_sample], 'g+') ax.plot(self.unmatched_test_sample, self.signal[self.unmatched_test_sample], 'rx') ax.legend(legend) # Just plot annotations else: # Plot reference annotations ax.plot(self.matched_ref_sample, np.ones(self.tp), 'ko') ax.plot(self.unmatched_ref_sample, np.ones(self.fn), 'ko', fillstyle='none') # Plot test annotations ax.plot(self.matched_test_sample, 0.5 * np.ones(self.tp), 'g+') ax.plot(self.unmatched_test_sample, 0.5 * np.ones(self.fp), 'rx') ax.legend(legend[1:]) if title: ax.set_title(title) ax.set_xlabel('time/sample') fig.show() if return_fig: return fig, ax
[ "def", "plot", "(", "self", ",", "sig_style", "=", "''", ",", "title", "=", "None", ",", "figsize", "=", "None", ",", "return_fig", "=", "False", ")", ":", "fig", "=", "plt", ".", "figure", "(", "figsize", "=", "figsize", ")", "ax", "=", "fig", ".", "add_subplot", "(", "1", ",", "1", ",", "1", ")", "legend", "=", "[", "'Signal'", ",", "'Matched Reference Annotations (%d/%d)'", "%", "(", "self", ".", "tp", ",", "self", ".", "n_ref", ")", ",", "'Unmatched Reference Annotations (%d/%d)'", "%", "(", "self", ".", "fn", ",", "self", ".", "n_ref", ")", ",", "'Matched Test Annotations (%d/%d)'", "%", "(", "self", ".", "tp", ",", "self", ".", "n_test", ")", ",", "'Unmatched Test Annotations (%d/%d)'", "%", "(", "self", ".", "fp", ",", "self", ".", "n_test", ")", "]", "# Plot the signal if any", "if", "self", ".", "signal", "is", "not", "None", ":", "ax", ".", "plot", "(", "self", ".", "signal", ",", "sig_style", ")", "# Plot reference annotations", "ax", ".", "plot", "(", "self", ".", "matched_ref_sample", ",", "self", ".", "signal", "[", "self", ".", "matched_ref_sample", "]", ",", "'ko'", ")", "ax", ".", "plot", "(", "self", ".", "unmatched_ref_sample", ",", "self", ".", "signal", "[", "self", ".", "unmatched_ref_sample", "]", ",", "'ko'", ",", "fillstyle", "=", "'none'", ")", "# Plot test annotations", "ax", ".", "plot", "(", "self", ".", "matched_test_sample", ",", "self", ".", "signal", "[", "self", ".", "matched_test_sample", "]", ",", "'g+'", ")", "ax", ".", "plot", "(", "self", ".", "unmatched_test_sample", ",", "self", ".", "signal", "[", "self", ".", "unmatched_test_sample", "]", ",", "'rx'", ")", "ax", ".", "legend", "(", "legend", ")", "# Just plot annotations", "else", ":", "# Plot reference annotations", "ax", ".", "plot", "(", "self", ".", "matched_ref_sample", ",", "np", ".", "ones", "(", "self", ".", "tp", ")", ",", "'ko'", ")", "ax", ".", "plot", "(", "self", ".", "unmatched_ref_sample", ",", "np", ".", "ones", "(", "self", ".", "fn", ")", ",", "'ko'", ",", "fillstyle", "=", "'none'", ")", "# Plot test annotations", "ax", ".", "plot", "(", "self", ".", "matched_test_sample", ",", "0.5", "*", "np", ".", "ones", "(", "self", ".", "tp", ")", ",", "'g+'", ")", "ax", ".", "plot", "(", "self", ".", "unmatched_test_sample", ",", "0.5", "*", "np", ".", "ones", "(", "self", ".", "fp", ")", ",", "'rx'", ")", "ax", ".", "legend", "(", "legend", "[", "1", ":", "]", ")", "if", "title", ":", "ax", ".", "set_title", "(", "title", ")", "ax", ".", "set_xlabel", "(", "'time/sample'", ")", "fig", ".", "show", "(", ")", "if", "return_fig", ":", "return", "fig", ",", "ax" ]
Plot the comparison of two sets of annotations, possibly
overlaid on their original signal.

Parameters
----------
sig_style : str, optional
    The matplotlib style of the signal
title : str, optional
    The title of the plot
figsize : tuple, optional
    Tuple pair specifying the width and height of the figure.
    It is the 'figsize' argument passed into matplotlib.pyplot's
    `figure` function.
return_fig : bool, optional
    Whether the figure is to be returned as an output argument.
[ "Plot", "the", "comparison", "of", "two", "sets", "of", "annotations", "possibly", "overlaid", "on", "their", "original", "signal", "." ]
python
train
36.044118
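The object is normally produced by wfdb's annotation comparison helper; compare_annotations is a real wfdb.processing function, though the exact signature used here is an assumption:

import numpy as np
from wfdb import processing

ref_sample = np.array([100, 500, 900])    # reference annotation locations
test_sample = np.array([104, 508, 1400])  # detector output
comparitor = processing.compare_annotations(ref_sample, test_sample, window_width=50)
comparitor.plot(title='Detections vs reference')  # no signal given, so annotations plot alone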
kgori/treeCl
treeCl/utils/misc.py
https://github.com/kgori/treeCl/blob/fed624b3db1c19cc07175ca04e3eda6905a8d305/treeCl/utils/misc.py#L160-L172
def alignment_to_partials(alignment, missing_data=None): """ Generate a partials dictionary from a treeCl.Alignment """ partials_dict = {} for (name, sequence) in alignment.get_sequences(): datatype = 'dna' if alignment.is_dna() else 'protein' partials_dict[name] = seq_to_partials(sequence, datatype) if missing_data is not None: l = len(alignment) for name in missing_data: if name not in partials_dict: partials_dict[name] = seq_to_partials('-'*l, datatype) return partials_dict
[ "def", "alignment_to_partials", "(", "alignment", ",", "missing_data", "=", "None", ")", ":", "partials_dict", "=", "{", "}", "for", "(", "name", ",", "sequence", ")", "in", "alignment", ".", "get_sequences", "(", ")", ":", "datatype", "=", "'dna'", "if", "alignment", ".", "is_dna", "(", ")", "else", "'protein'", "partials_dict", "[", "name", "]", "=", "seq_to_partials", "(", "sequence", ",", "datatype", ")", "if", "missing_data", "is", "not", "None", ":", "l", "=", "len", "(", "alignment", ")", "for", "name", "in", "missing_data", ":", "if", "name", "not", "in", "partials_dict", ":", "partials_dict", "[", "name", "]", "=", "seq_to_partials", "(", "'-'", "*", "l", ",", "datatype", ")", "return", "partials_dict" ]
Generate a partials dictionary from a treeCl.Alignment
[ "Generate", "a", "partials", "dictionary", "from", "a", "treeCl", ".", "Alignment" ]
python
train
42.307692
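A hedged usage sketch; the Alignment import and constructor arguments are assumptions about treeCl's API:

from treeCl import Alignment  # assumed top-level export
from treeCl.utils.misc import alignment_to_partials

aln = Alignment('genes.fas', 'fasta')  # file name and format argument are hypothetical
partials = alignment_to_partials(aln, missing_data=['taxon_x'])
# taxa listed in missing_data but absent from the alignment get all-gap partials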
sbg/sevenbridges-python
sevenbridges/models/task.py
https://github.com/sbg/sevenbridges-python/blob/f62640d1018d959f0b686f2dbe5e183085336607/sevenbridges/models/task.py#L407-L424
def wait(self=None, period=10, callback=None, *args, **kwargs): """Wait until task is complete :param period: Time in seconds between reloads :param callback: Function to call after the task has finished, arguments and keyword arguments can be provided for it :return: Return value of provided callback function or None if a callback function was not provided """ while self.status not in [ TaskStatus.COMPLETED, TaskStatus.FAILED, TaskStatus.ABORTED ]: self.reload() time.sleep(period) if callback: return callback(*args, **kwargs)
[ "def", "wait", "(", "self", "=", "None", ",", "period", "=", "10", ",", "callback", "=", "None", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "while", "self", ".", "status", "not", "in", "[", "TaskStatus", ".", "COMPLETED", ",", "TaskStatus", ".", "FAILED", ",", "TaskStatus", ".", "ABORTED", "]", ":", "self", ".", "reload", "(", ")", "time", ".", "sleep", "(", "period", ")", "if", "callback", ":", "return", "callback", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
Wait until task is complete :param period: Time in seconds between reloads :param callback: Function to call after the task has finished, arguments and keyword arguments can be provided for it :return: Return value of provided callback function or None if a callback function was not provided
[ "Wait", "until", "task", "is", "complete", ":", "param", "period", ":", "Time", "in", "seconds", "between", "reloads", ":", "param", "callback", ":", "Function", "to", "call", "after", "the", "task", "has", "finished", "arguments", "and", "keyword", "arguments", "can", "be", "provided", "for", "it", ":", "return", ":", "Return", "value", "of", "provided", "callback", "function", "or", "None", "if", "a", "callback", "function", "was", "not", "provided" ]
python
train
37.555556
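A hedged usage sketch following sevenbridges-python's documented client setup; the token, task id, and callback are placeholders:

import sevenbridges as sbg

api = sbg.Api(url='https://api.sbgenomics.com/v2', token='<auth-token>')
task = api.tasks.get('<task-id>')
# Reload every 30 seconds until COMPLETED/FAILED/ABORTED, then run the callback.
outcome = task.wait(period=30, callback=lambda: 'finished')
print(outcome)  # 'finished'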
AmesCornish/buttersink
buttersink/Store.py
https://github.com/AmesCornish/buttersink/blob/5cc37e30d9f8071fcf3497dca8b8a91b910321ea/buttersink/Store.py#L438-L441
def writeInfo(self, stream): """ Write information about diffs into a file stream for use later. """ for (fromUUID, size) in Diff.theKnownSizes[self.uuid].iteritems(): self.writeInfoLine(stream, fromUUID, size)
[ "def", "writeInfo", "(", "self", ",", "stream", ")", ":", "for", "(", "fromUUID", ",", "size", ")", "in", "Diff", ".", "theKnownSizes", "[", "self", ".", "uuid", "]", ".", "iteritems", "(", ")", ":", "self", ".", "writeInfoLine", "(", "stream", ",", "fromUUID", ",", "size", ")" ]
Write information about diffs into a file stream for use later.
[ "Write", "information", "about", "diffs", "into", "a", "file", "stream", "for", "use", "later", "." ]
python
train
58.75
lsbardel/python-stdnet
stdnet/odm/struct.py
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/odm/struct.py#L588-L592
def pop_back(self): '''Remove the last element from the :class:`Sequence`.''' backend = self.backend return backend.execute(backend.structure(self).pop_back(), self.value_pickler.loads)
[ "def", "pop_back", "(", "self", ")", ":", "backend", "=", "self", ".", "backend", "return", "backend", ".", "execute", "(", "backend", ".", "structure", "(", "self", ")", ".", "pop_back", "(", ")", ",", "self", ".", "value_pickler", ".", "loads", ")" ]
Remove the last element from the :class:`Sequence`.
[ "Remove", "the", "last", "element", "from", "the", ":", "class", ":", "Sequence", "." ]
python
train
48
DataONEorg/d1_python
lib_common/src/d1_common/bagit.py
https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/lib_common/src/d1_common/bagit.py#L183-L189
def _add_path(dir_name, payload_info_list): """Add a key with the path to each payload_info_dict.""" for payload_info_dict in payload_info_list: file_name = payload_info_dict['filename'] or payload_info_dict['pid'] payload_info_dict['path'] = d1_common.utils.filesystem.gen_safe_path( dir_name, 'data', file_name )
[ "def", "_add_path", "(", "dir_name", ",", "payload_info_list", ")", ":", "for", "payload_info_dict", "in", "payload_info_list", ":", "file_name", "=", "payload_info_dict", "[", "'filename'", "]", "or", "payload_info_dict", "[", "'pid'", "]", "payload_info_dict", "[", "'path'", "]", "=", "d1_common", ".", "utils", ".", "filesystem", ".", "gen_safe_path", "(", "dir_name", ",", "'data'", ",", "file_name", ")" ]
Add a key with the path to each payload_info_dict.
[ "Add", "a", "key", "with", "the", "path", "to", "each", "payload_info_dict", "." ]
python
train
50.285714
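The effect is easy to show standalone; gen_safe_path is package-specific, so this mirror substitutes a plain os.path.join (the path-sanitizing behaviour is lost):

import os

payload_info_list = [
    {'filename': 'data.csv', 'pid': 'doi:10.x/1'},
    {'filename': None, 'pid': 'doi:10.x/2'},  # no filename: falls back to the pid
]

for payload_info_dict in payload_info_list:
    file_name = payload_info_dict['filename'] or payload_info_dict['pid']
    payload_info_dict['path'] = os.path.join('mybag', 'data', file_name)

print([d['path'] for d in payload_info_list])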
JelteF/PyLaTeX
pylatex/utils.py
https://github.com/JelteF/PyLaTeX/blob/62d9d9912ce8445e6629cdbcb80ad86143a1ed23/pylatex/utils.py#L68-L100
def escape_latex(s): r"""Escape characters that are special in latex. Args ---- s : `str`, `NoEscape` or anything that can be converted to string The string to be escaped. If this is not a string, it will be converted to a string using `str`. If it is a `NoEscape` string, it will pass through unchanged. Returns ------- NoEscape The string, with special characters in latex escaped. Examples -------- >>> escape_latex("Total cost: $30,000") 'Total cost: \$30,000' >>> escape_latex("Issue #5 occurs in 30% of all cases") 'Issue \#5 occurs in 30\% of all cases' >>> print(escape_latex("Total cost: $30,000")) References ---------- * http://tex.stackexchange.com/a/34586/43228 * http://stackoverflow.com/a/16264094/2570866 """ if isinstance(s, NoEscape): return s return NoEscape(''.join(_latex_special_chars.get(c, c) for c in str(s)))
[ "def", "escape_latex", "(", "s", ")", ":", "if", "isinstance", "(", "s", ",", "NoEscape", ")", ":", "return", "s", "return", "NoEscape", "(", "''", ".", "join", "(", "_latex_special_chars", ".", "get", "(", "c", ",", "c", ")", "for", "c", "in", "str", "(", "s", ")", ")", ")" ]
r"""Escape characters that are special in latex. Args ---- s : `str`, `NoEscape` or anything that can be converted to string The string to be escaped. If this is not a string, it will be converted to a string using `str`. If it is a `NoEscape` string, it will pass through unchanged. Returns ------- NoEscape The string, with special characters in latex escaped. Examples -------- >>> escape_latex("Total cost: $30,000") 'Total cost: \$30,000' >>> escape_latex("Issue #5 occurs in 30% of all cases") 'Issue \#5 occurs in 30\% of all cases' >>> print(escape_latex("Total cost: $30,000")) References ---------- * http://tex.stackexchange.com/a/34586/43228 * http://stackoverflow.com/a/16264094/2570866
[ "r", "Escape", "characters", "that", "are", "special", "in", "latex", "." ]
python
train
28.484848
bukun/TorCMS
torcms/handlers/post_handler.py
https://github.com/bukun/TorCMS/blob/6567c7fe2604a1d646d4570c017840958630ed2b/torcms/handlers/post_handler.py#L503-L525
def fetch_post_data(self):
        '''
        Fetch the POSTed form data, returning post_data and ext_dic.
        '''
        post_data = {}
        ext_dic = {}
        for key in self.request.arguments:
            if key.startswith('ext_') or key.startswith('tag_'):
                ext_dic[key] = self.get_argument(key)
            else:
                post_data[key] = self.get_arguments(key)[0]
        post_data['user_name'] = self.userinfo.user_name
        post_data['kind'] = self.kind
        # append external info.
        if 'tags' in post_data:
            ext_dic['def_tag_arr'] = [x.strip() for x in
                                      post_data['tags'].strip().strip(',').split(',')]
        ext_dic = dict(ext_dic, **self.ext_post_data(postdata=post_data))
        return (post_data, ext_dic)
[ "def", "fetch_post_data", "(", "self", ")", ":", "post_data", "=", "{", "}", "ext_dic", "=", "{", "}", "for", "key", "in", "self", ".", "request", ".", "arguments", ":", "if", "key", ".", "startswith", "(", "'ext_'", ")", "or", "key", ".", "startswith", "(", "'tag_'", ")", ":", "ext_dic", "[", "key", "]", "=", "self", ".", "get_argument", "(", "key", ")", "else", ":", "post_data", "[", "key", "]", "=", "self", ".", "get_arguments", "(", "key", ")", "[", "0", "]", "post_data", "[", "'user_name'", "]", "=", "self", ".", "userinfo", ".", "user_name", "post_data", "[", "'kind'", "]", "=", "self", ".", "kind", "# append external info.", "if", "'tags'", "in", "post_data", ":", "ext_dic", "[", "'def_tag_arr'", "]", "=", "[", "x", ".", "strip", "(", ")", "for", "x", "in", "post_data", "[", "'tags'", "]", ".", "strip", "(", ")", ".", "strip", "(", "','", ")", ".", "split", "(", "','", ")", "]", "ext_dic", "=", "dict", "(", "ext_dic", ",", "*", "*", "self", ".", "ext_post_data", "(", "postdata", "=", "post_data", ")", ")", "return", "(", "post_data", ",", "ext_dic", ")" ]
Fetch the POSTed form data, returning post_data and ext_dic.
[ "Fetch", "the", "POSTed", "form", "data", ",", "returning", "post_data", "and", "ext_dic", "." ]
python
train
34.26087
YosaiProject/yosai
yosai/core/subject/subject.py
https://github.com/YosaiProject/yosai/blob/7f96aa6b837ceae9bf3d7387cd7e35f5ab032575/yosai/core/subject/subject.py#L566-L583
def pop_identity(self): """ :returns: SimpleIdentifierCollection """ popped = None stack = self.get_run_as_identifiers_stack() if (stack): popped = stack.pop() if (stack): # persist the changed stack to the session session = self.get_session() session.set_internal_attribute(self.run_as_identifiers_session_key, stack) else: # stack is empty, remove it from the session: self.clear_run_as_identities() return popped
[ "def", "pop_identity", "(", "self", ")", ":", "popped", "=", "None", "stack", "=", "self", ".", "get_run_as_identifiers_stack", "(", ")", "if", "(", "stack", ")", ":", "popped", "=", "stack", ".", "pop", "(", ")", "if", "(", "stack", ")", ":", "# persist the changed stack to the session", "session", "=", "self", ".", "get_session", "(", ")", "session", ".", "set_internal_attribute", "(", "self", ".", "run_as_identifiers_session_key", ",", "stack", ")", "else", ":", "# stack is empty, remove it from the session:", "self", ".", "clear_run_as_identities", "(", ")", "return", "popped" ]
:returns: SimpleIdentifierCollection
[ ":", "returns", ":", "SimpleIdentifierCollection" ]
python
train
31.777778
tjcsl/ion
intranet/apps/notifications/views.py
https://github.com/tjcsl/ion/blob/5d722b0725d572039bb0929fd5715a4070c82c72/intranet/apps/notifications/views.py#L53-L91
def chrome_getdata_view(request): """Get the data of the last notification sent to the current user. This is needed because Chrome, as of version 44, doesn't support sending a data payload to a notification. Thus, information on what the notification is actually for must be manually fetched. """ data = {} if request.user.is_authenticated: # authenticated session notifs = GCMNotification.objects.filter(sent_to__user=request.user).order_by("-time") if notifs.count() > 0: notif = notifs.first() ndata = notif.data if "title" in ndata and "text" in ndata: data = { "title": ndata['title'] if 'title' in ndata else '', "text": ndata['text'] if 'text' in ndata else '', "url": ndata['url'] if 'url' in ndata else '' } else: schedule_chk = chrome_getdata_check(request) if schedule_chk: data = schedule_chk else: schedule_chk = chrome_getdata_check(request) if schedule_chk: data = schedule_chk else: return HttpResponse("null", content_type="text/json") else: schedule_chk = chrome_getdata_check(request) if schedule_chk: data = schedule_chk else: data = {"title": "Check Intranet", "text": "You have a new notification that couldn't be loaded right now."} j = json.dumps(data) return HttpResponse(j, content_type="text/json")
[ "def", "chrome_getdata_view", "(", "request", ")", ":", "data", "=", "{", "}", "if", "request", ".", "user", ".", "is_authenticated", ":", "# authenticated session", "notifs", "=", "GCMNotification", ".", "objects", ".", "filter", "(", "sent_to__user", "=", "request", ".", "user", ")", ".", "order_by", "(", "\"-time\"", ")", "if", "notifs", ".", "count", "(", ")", ">", "0", ":", "notif", "=", "notifs", ".", "first", "(", ")", "ndata", "=", "notif", ".", "data", "if", "\"title\"", "in", "ndata", "and", "\"text\"", "in", "ndata", ":", "data", "=", "{", "\"title\"", ":", "ndata", "[", "'title'", "]", "if", "'title'", "in", "ndata", "else", "''", ",", "\"text\"", ":", "ndata", "[", "'text'", "]", "if", "'text'", "in", "ndata", "else", "''", ",", "\"url\"", ":", "ndata", "[", "'url'", "]", "if", "'url'", "in", "ndata", "else", "''", "}", "else", ":", "schedule_chk", "=", "chrome_getdata_check", "(", "request", ")", "if", "schedule_chk", ":", "data", "=", "schedule_chk", "else", ":", "schedule_chk", "=", "chrome_getdata_check", "(", "request", ")", "if", "schedule_chk", ":", "data", "=", "schedule_chk", "else", ":", "return", "HttpResponse", "(", "\"null\"", ",", "content_type", "=", "\"text/json\"", ")", "else", ":", "schedule_chk", "=", "chrome_getdata_check", "(", "request", ")", "if", "schedule_chk", ":", "data", "=", "schedule_chk", "else", ":", "data", "=", "{", "\"title\"", ":", "\"Check Intranet\"", ",", "\"text\"", ":", "\"You have a new notification that couldn't be loaded right now.\"", "}", "j", "=", "json", ".", "dumps", "(", "data", ")", "return", "HttpResponse", "(", "j", ",", "content_type", "=", "\"text/json\"", ")" ]
Get the data of the last notification sent to the current user. This is needed because Chrome, as of version 44, doesn't support sending a data payload to a notification. Thus, information on what the notification is actually for must be manually fetched.
[ "Get", "the", "data", "of", "the", "last", "notification", "sent", "to", "the", "current", "user", "." ]
python
train
40.230769
OpenHydrology/floodestimation
floodestimation/parsers.py
https://github.com/OpenHydrology/floodestimation/blob/782da7c5abd1348923129efe89fb70003ebb088c/floodestimation/parsers.py#L70-L93
def parse_str(self, s): """ Parse string and return relevant object :param s: string to parse :type s: str :return: Parsed object """ self.object = self.parsed_class() in_section = None # Holds name of FEH file section while traversing through file. for line in s.split('\n'): if line.lower().startswith('[end]'): # Leave section in_section = None elif line.startswith('['): # Enter section, sanitise `[Section Name]` to `section_name` in_section = line.strip().strip('[]').lower().replace(' ', '_') elif in_section: try: # Call method `_section_section_name(line)` getattr(self, '_section_' + in_section)(line.strip()) except AttributeError: pass # Skip unsupported section return self.object
[ "def", "parse_str", "(", "self", ",", "s", ")", ":", "self", ".", "object", "=", "self", ".", "parsed_class", "(", ")", "in_section", "=", "None", "# Holds name of FEH file section while traversing through file.", "for", "line", "in", "s", ".", "split", "(", "'\\n'", ")", ":", "if", "line", ".", "lower", "(", ")", ".", "startswith", "(", "'[end]'", ")", ":", "# Leave section", "in_section", "=", "None", "elif", "line", ".", "startswith", "(", "'['", ")", ":", "# Enter section, sanitise `[Section Name]` to `section_name`", "in_section", "=", "line", ".", "strip", "(", ")", ".", "strip", "(", "'[]'", ")", ".", "lower", "(", ")", ".", "replace", "(", "' '", ",", "'_'", ")", "elif", "in_section", ":", "try", ":", "# Call method `_section_section_name(line)`", "getattr", "(", "self", ",", "'_section_'", "+", "in_section", ")", "(", "line", ".", "strip", "(", ")", ")", "except", "AttributeError", ":", "pass", "# Skip unsupported section", "return", "self", ".", "object" ]
Parse string and return relevant object :param s: string to parse :type s: str :return: Parsed object
[ "Parse", "string", "and", "return", "relevant", "object" ]
python
train
39.375
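The dispatch-by-section-name trick is the core of the parser; here is a standalone mirror with one hypothetical section handler:

class MiniParser:
    def parse_str(self, s):
        in_section = None
        for line in s.split('\n'):
            if line.lower().startswith('[end]'):
                in_section = None
            elif line.startswith('['):
                # `[Station Details]` becomes `station_details`
                in_section = line.strip().strip('[]').lower().replace(' ', '_')
            elif in_section:
                try:
                    getattr(self, '_section_' + in_section)(line.strip())
                except AttributeError:
                    pass  # unsupported sections are skipped silently

    def _section_station_details(self, line):
        print('station details:', line)

MiniParser().parse_str('[Station Details]\nname, Bellever\n[End]')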
ktbyers/netmiko
netmiko/_textfsm/_clitable.py
https://github.com/ktbyers/netmiko/blob/54e6116c0b4664de2123081937e0a9a27bdfdfea/netmiko/_textfsm/_clitable.py#L344-L360
def AddKeys(self, key_list): """Mark additional columns as being part of the superkey. Supplements the Keys already extracted from the FSM template. Useful when adding new columns to existing tables. Note: This will impact attempts to further 'extend' the table as the superkey must be common between tables for successful extension. Args: key_list: list of header entries to be included in the superkey. Raises: KeyError: If any entry in list is not a valid header entry. """ for keyname in key_list: if keyname not in self.header: raise KeyError("'%s'" % keyname) self._keys = self._keys.union(set(key_list))
[ "def", "AddKeys", "(", "self", ",", "key_list", ")", ":", "for", "keyname", "in", "key_list", ":", "if", "keyname", "not", "in", "self", ".", "header", ":", "raise", "KeyError", "(", "\"'%s'\"", "%", "keyname", ")", "self", ".", "_keys", "=", "self", ".", "_keys", ".", "union", "(", "set", "(", "key_list", ")", ")" ]
Mark additional columns as being part of the superkey. Supplements the Keys already extracted from the FSM template. Useful when adding new columns to existing tables. Note: This will impact attempts to further 'extend' the table as the superkey must be common between tables for successful extension. Args: key_list: list of header entries to be included in the superkey. Raises: KeyError: If any entry in list is not a valid header entry.
[ "Mark", "additional", "columns", "as", "being", "part", "of", "the", "superkey", ".", "Supplements", "the", "Keys", "already", "extracted", "from", "the", "FSM", "template", ".", "Useful", "when", "adding", "new", "columns", "to", "existing", "tables", ".", "Note", ":", "This", "will", "impact", "attempts", "to", "further", "extend", "the", "table", "as", "the", "superkey", "must", "be", "common", "between", "tables", "for", "successful", "extension", ".", "Args", ":", "key_list", ":", "list", "of", "header", "entries", "to", "be", "included", "in", "the", "superkey", ".", "Raises", ":", "KeyError", ":", "If", "any", "entry", "in", "list", "is", "not", "a", "valid", "header", "entry", "." ]
python
train
40.470588
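A hedged usage sketch; CliTable lives in netmiko's vendored _textfsm package, and the constructor arguments and index-file layout follow TextFSM's clitable conventions (paths and attribute values here are hypothetical):

from netmiko._textfsm._clitable import CliTable

raw_cli_output = '...device output...'
table = CliTable('index', '/path/to/templates')  # constructor args assumed
table.ParseCmd(raw_cli_output, attributes={'Command': 'show version',
                                           'Platform': 'cisco_ios'})
table.AddKeys(['Hostname'])  # raises KeyError if 'Hostname' is not a header entry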
tornadoweb/tornado
tornado/httputil.py
https://github.com/tornadoweb/tornado/blob/b8b481770bcdb333a69afde5cce7eaa449128326/tornado/httputil.py#L1121-L1144
def parse_cookie(cookie: str) -> Dict[str, str]: """Parse a ``Cookie`` HTTP header into a dict of name/value pairs. This function attempts to mimic browser cookie parsing behavior; it specifically does not follow any of the cookie-related RFCs (because browsers don't either). The algorithm used is identical to that used by Django version 1.9.10. .. versionadded:: 4.4.2 """ cookiedict = {} for chunk in cookie.split(str(";")): if str("=") in chunk: key, val = chunk.split(str("="), 1) else: # Assume an empty name per # https://bugzilla.mozilla.org/show_bug.cgi?id=169091 key, val = str(""), chunk key, val = key.strip(), val.strip() if key or val: # unquote using Python's algorithm. cookiedict[key] = _unquote_cookie(val) return cookiedict
[ "def", "parse_cookie", "(", "cookie", ":", "str", ")", "->", "Dict", "[", "str", ",", "str", "]", ":", "cookiedict", "=", "{", "}", "for", "chunk", "in", "cookie", ".", "split", "(", "str", "(", "\";\"", ")", ")", ":", "if", "str", "(", "\"=\"", ")", "in", "chunk", ":", "key", ",", "val", "=", "chunk", ".", "split", "(", "str", "(", "\"=\"", ")", ",", "1", ")", "else", ":", "# Assume an empty name per", "# https://bugzilla.mozilla.org/show_bug.cgi?id=169091", "key", ",", "val", "=", "str", "(", "\"\"", ")", ",", "chunk", "key", ",", "val", "=", "key", ".", "strip", "(", ")", ",", "val", ".", "strip", "(", ")", "if", "key", "or", "val", ":", "# unquote using Python's algorithm.", "cookiedict", "[", "key", "]", "=", "_unquote_cookie", "(", "val", ")", "return", "cookiedict" ]
Parse a ``Cookie`` HTTP header into a dict of name/value pairs. This function attempts to mimic browser cookie parsing behavior; it specifically does not follow any of the cookie-related RFCs (because browsers don't either). The algorithm used is identical to that used by Django version 1.9.10. .. versionadded:: 4.4.2
[ "Parse", "a", "Cookie", "HTTP", "header", "into", "a", "dict", "of", "name", "/", "value", "pairs", "." ]
python
train
36.125
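Since the versionadded note marks this as public API, a direct call illustrates the lenient parsing:

from tornado.httputil import parse_cookie

print(parse_cookie('session=abc123; theme=dark; orphan'))
# {'session': 'abc123', 'theme': 'dark', '': 'orphan'}: nameless values keep an empty key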
Esri/ArcREST
src/arcrest/hostedservice/service.py
https://github.com/Esri/ArcREST/blob/ab240fde2b0200f61d4a5f6df033516e53f2f416/src/arcrest/hostedservice/service.py#L1025-L1054
def addToDefinition(self, json_dict):
        """
           The addToDefinition operation supports adding a definition
           property to a hosted feature service. The result of this
           operation is a response indicating success or failure with error
           code and description.

           This function allows users to add additional values
           to an already published service.

           Input:
              json_dict - part to add to the hosted service. The part format can
                          be derived from the asDictionary property. For
                          layer level modifications, run updates on each
                          individual feature service layer object.
           Output:
              JSON message as dictionary
        """
        params = {
            "f" : "json",
            "addToDefinition" : json.dumps(json_dict),
            "async" : False
        }
        uURL = self._url + "/addToDefinition"
        res = self._post(url=uURL, param_dict=params,
                             securityHandler=self._securityHandler,
                             proxy_port=self._proxy_port,
                             proxy_url=self._proxy_url)
        self.refresh()
        return res
[ "def", "addToDefinition", "(", "self", ",", "json_dict", ")", ":", "params", "=", "{", "\"f\"", ":", "\"json\"", ",", "\"addToDefinition\"", ":", "json", ".", "dumps", "(", "json_dict", ")", ",", "\"async\"", ":", "False", "}", "uURL", "=", "self", ".", "_url", "+", "\"/addToDefinition\"", "res", "=", "self", ".", "_post", "(", "url", "=", "uURL", ",", "param_dict", "=", "params", ",", "securityHandler", "=", "self", ".", "_securityHandler", ",", "proxy_port", "=", "self", ".", "_proxy_port", ",", "proxy_url", "=", "self", ".", "_proxy_url", ")", "self", ".", "refresh", "(", ")", "return", "res" ]
The addToDefinition operation supports adding a definition
property to a hosted feature service. The result of this
operation is a response indicating success or failure with error
code and description.

This function allows users to add additional values
to an already published service.

Input:
   json_dict - part to add to the hosted service. The part format can
               be derived from the asDictionary property. For
               layer level modifications, run updates on each
               individual feature service layer object.
Output:
   JSON message as dictionary
[ "The", "addToDefinition", "operation", "supports", "adding", "a", "definition", "property", "to", "a", "hosted", "feature", "service", ".", "The", "result", "of", "this", "operation", "is", "a", "response", "indicating", "success", "or", "failure", "with", "error", "code", "and", "description", "." ]
python
train
40.966667
aconrad/pycobertura
pycobertura/cli.py
https://github.com/aconrad/pycobertura/blob/26e472f1424f5cd499c42232dc5ee12e4042806f/pycobertura/cli.py#L141-L176
def diff( cobertura_file1, cobertura_file2, color, format, output, source1, source2, source_prefix1, source_prefix2, source): """compare coverage of two Cobertura reports""" cobertura1 = Cobertura( cobertura_file1, source=source1, source_prefix=source_prefix1 ) cobertura2 = Cobertura( cobertura_file2, source=source2, source_prefix=source_prefix2 ) Reporter = delta_reporters[format] reporter_args = [cobertura1, cobertura2] reporter_kwargs = {'show_source': source} isatty = True if output is None else output.isatty() if format == 'text': color = isatty if color is None else color is True reporter_kwargs['color'] = color reporter = Reporter(*reporter_args, **reporter_kwargs) report = reporter.generate() if not isinstance(report, bytes): report = report.encode('utf-8') click.echo(report, file=output, nl=isatty, color=color) exit_code = get_exit_code(reporter.differ, source) raise SystemExit(exit_code)
[ "def", "diff", "(", "cobertura_file1", ",", "cobertura_file2", ",", "color", ",", "format", ",", "output", ",", "source1", ",", "source2", ",", "source_prefix1", ",", "source_prefix2", ",", "source", ")", ":", "cobertura1", "=", "Cobertura", "(", "cobertura_file1", ",", "source", "=", "source1", ",", "source_prefix", "=", "source_prefix1", ")", "cobertura2", "=", "Cobertura", "(", "cobertura_file2", ",", "source", "=", "source2", ",", "source_prefix", "=", "source_prefix2", ")", "Reporter", "=", "delta_reporters", "[", "format", "]", "reporter_args", "=", "[", "cobertura1", ",", "cobertura2", "]", "reporter_kwargs", "=", "{", "'show_source'", ":", "source", "}", "isatty", "=", "True", "if", "output", "is", "None", "else", "output", ".", "isatty", "(", ")", "if", "format", "==", "'text'", ":", "color", "=", "isatty", "if", "color", "is", "None", "else", "color", "is", "True", "reporter_kwargs", "[", "'color'", "]", "=", "color", "reporter", "=", "Reporter", "(", "*", "reporter_args", ",", "*", "*", "reporter_kwargs", ")", "report", "=", "reporter", ".", "generate", "(", ")", "if", "not", "isinstance", "(", "report", ",", "bytes", ")", ":", "report", "=", "report", ".", "encode", "(", "'utf-8'", ")", "click", ".", "echo", "(", "report", ",", "file", "=", "output", ",", "nl", "=", "isatty", ",", "color", "=", "color", ")", "exit_code", "=", "get_exit_code", "(", "reporter", ".", "differ", ",", "source", ")", "raise", "SystemExit", "(", "exit_code", ")" ]
compare coverage of two Cobertura reports
[ "compare", "coverage", "of", "two", "Cobertura", "reports" ]
python
train
28.944444
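Because diff is a click command, it can be exercised from Python with click's test runner; the file names are hypothetical and the flag spellings are assumed to follow the parameter names:

from click.testing import CliRunner
from pycobertura.cli import diff

runner = CliRunner()
result = runner.invoke(diff, ['old-coverage.xml', 'new-coverage.xml', '--format', 'text'])
print(result.output)
print(result.exit_code)  # non-zero when coverage worsened, per get_exit_code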
datamachine/twx.botapi
twx/botapi/botapi.py
https://github.com/datamachine/twx.botapi/blob/c85184da738169e8f9d6d8e62970540f427c486e/twx/botapi/botapi.py#L4306-L4308
def send_document(self, *args, **kwargs): """See :func:`send_document`""" return send_document(*args, **self._merge_overrides(**kwargs)).run()
[ "def", "send_document", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "send_document", "(", "*", "args", ",", "*", "*", "self", ".", "_merge_overrides", "(", "*", "*", "kwargs", ")", ")", ".", "run", "(", ")" ]
See :func:`send_document`
[ "See", ":", "func", ":", "send_document" ]
python
train
52
common-workflow-language/cwltool
cwltool/cwlrdf.py
https://github.com/common-workflow-language/cwltool/blob/cb81b22abc52838823da9945f04d06739ab32fda/cwltool/cwlrdf.py#L25-L30
def printrdf(wflow, ctx, style): # type: (Process, ContextType, Text) -> Text """Serialize the CWL document into a string, ready for printing.""" rdf = gather(wflow, ctx).serialize(format=style, encoding='utf-8') if not rdf: return u"" return rdf.decode('utf-8')
[ "def", "printrdf", "(", "wflow", ",", "ctx", ",", "style", ")", ":", "# type: (Process, ContextType, Text) -> Text", "rdf", "=", "gather", "(", "wflow", ",", "ctx", ")", ".", "serialize", "(", "format", "=", "style", ",", "encoding", "=", "'utf-8'", ")", "if", "not", "rdf", ":", "return", "u\"\"", "return", "rdf", ".", "decode", "(", "'utf-8'", ")" ]
Serialize the CWL document into a string, ready for printing.
[ "Serialize", "the", "CWL", "document", "into", "a", "string", "ready", "for", "printing", "." ]
python
train
47
SuperCowPowers/bat
bat/utils/net_utils.py
https://github.com/SuperCowPowers/bat/blob/069e6bc52843dc07760969c531cc442ca7da8e0c/bat/utils/net_utils.py#L50-L62
def str_to_inet(address):
    """Convert a string IP address to an inet struct

        Args:
            address (str): String representation of address

        Returns:
            inet: Inet network address
    """
    # First try ipv4 and then ipv6
    try:
        return socket.inet_pton(socket.AF_INET, address)
    except socket.error:
        return socket.inet_pton(socket.AF_INET6, address)
[ "def", "str_to_inet", "(", "address", ")", ":", "# First try ipv4 and then ipv6", "try", ":", "return", "socket", ".", "inet_pton", "(", "socket", ".", "AF_INET", ",", "address", ")", "except", "socket", ".", "error", ":", "return", "socket", ".", "inet_pton", "(", "socket", ".", "AF_INET6", ",", "address", ")" ]
Convert a string IP address to an inet struct

Args:
    address (str): String representation of address

Returns:
    inet: Inet network address
[ "Convert", "a", "string", "IP", "address", "to", "an", "inet", "struct" ]
python
train
30.076923
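Two quick calls show the IPv4-first fall-through (import path taken from the record):

from bat.utils.net_utils import str_to_inet

print(len(str_to_inet('192.168.1.1')))  # 4: packed with AF_INET
print(len(str_to_inet('2001:db8::1')))  # 16: AF_INET failed, fell through to AF_INET6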
stevearc/dql
dql/util.py
https://github.com/stevearc/dql/blob/e9d3aa22873076dae5ebd02e35318aa996b1e56a/dql/util.py#L98-L110
def eval_function(value): """ Evaluate a timestamp function """ name, args = value[0], value[1:] if name == "NOW": return datetime.utcnow().replace(tzinfo=tzutc()) elif name in ["TIMESTAMP", "TS"]: return parse(unwrap(args[0])).replace(tzinfo=tzlocal()) elif name in ["UTCTIMESTAMP", "UTCTS"]: return parse(unwrap(args[0])).replace(tzinfo=tzutc()) elif name == "MS": return 1000 * resolve(args[0]) else: raise SyntaxError("Unrecognized function %r" % name)
[ "def", "eval_function", "(", "value", ")", ":", "name", ",", "args", "=", "value", "[", "0", "]", ",", "value", "[", "1", ":", "]", "if", "name", "==", "\"NOW\"", ":", "return", "datetime", ".", "utcnow", "(", ")", ".", "replace", "(", "tzinfo", "=", "tzutc", "(", ")", ")", "elif", "name", "in", "[", "\"TIMESTAMP\"", ",", "\"TS\"", "]", ":", "return", "parse", "(", "unwrap", "(", "args", "[", "0", "]", ")", ")", ".", "replace", "(", "tzinfo", "=", "tzlocal", "(", ")", ")", "elif", "name", "in", "[", "\"UTCTIMESTAMP\"", ",", "\"UTCTS\"", "]", ":", "return", "parse", "(", "unwrap", "(", "args", "[", "0", "]", ")", ")", ".", "replace", "(", "tzinfo", "=", "tzutc", "(", ")", ")", "elif", "name", "==", "\"MS\"", ":", "return", "1000", "*", "resolve", "(", "args", "[", "0", "]", ")", "else", ":", "raise", "SyntaxError", "(", "\"Unrecognized function %r\"", "%", "name", ")" ]
Evaluate a timestamp function
[ "Evaluate", "a", "timestamp", "function" ]
python
train
39.384615
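A hedged usage sketch; only the argument-free NOW branch is shown, because the other branches route through dql-internal unwrap/resolve helpers:

from dql.util import eval_function

now = eval_function(['NOW'])
print(now.tzinfo is not None)  # True: NOW returns a timezone-aware UTC datetime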
meejah/txtorcon
txtorcon/socks.py
https://github.com/meejah/txtorcon/blob/14053b95adf0b4bd9dd9c317bece912a26578a93/txtorcon/socks.py#L139-L153
def _parse_version_reply(self): "waiting for a version reply" if len(self._data) >= 2: reply = self._data[:2] self._data = self._data[2:] (version, method) = struct.unpack('BB', reply) if version == 5 and method in [0x00, 0x02]: self.version_reply(method) else: if version != 5: self.version_error(SocksError( "Expected version 5, got {}".format(version))) else: self.version_error(SocksError( "Wanted method 0 or 2, got {}".format(method)))
[ "def", "_parse_version_reply", "(", "self", ")", ":", "if", "len", "(", "self", ".", "_data", ")", ">=", "2", ":", "reply", "=", "self", ".", "_data", "[", ":", "2", "]", "self", ".", "_data", "=", "self", ".", "_data", "[", "2", ":", "]", "(", "version", ",", "method", ")", "=", "struct", ".", "unpack", "(", "'BB'", ",", "reply", ")", "if", "version", "==", "5", "and", "method", "in", "[", "0x00", ",", "0x02", "]", ":", "self", ".", "version_reply", "(", "method", ")", "else", ":", "if", "version", "!=", "5", ":", "self", ".", "version_error", "(", "SocksError", "(", "\"Expected version 5, got {}\"", ".", "format", "(", "version", ")", ")", ")", "else", ":", "self", ".", "version_error", "(", "SocksError", "(", "\"Wanted method 0 or 2, got {}\"", ".", "format", "(", "method", ")", ")", ")" ]
waiting for a version reply
[ "waiting", "for", "a", "version", "reply" ]
python
train
42.6
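A self-contained illustration of the two-byte SOCKS5 version/method reply the parser above consumes; the byte string is a typical "no authentication" answer, not taken from the record.

import struct

# A SOCKS5 server that accepts "no authentication" answers with these
# two bytes; the parser above unpacks them as (version, method).
reply = b"\x05\x00"
version, method = struct.unpack("BB", reply)
assert version == 5 and method in (0x00, 0x02)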
Kozea/pygal
pygal/graph/graph.py
https://github.com/Kozea/pygal/blob/5e25c98a59a0642eecd9fcc5dbfeeb2190fbb5e7/pygal/graph/graph.py#L555-L596
def _static_value( self, serie_node, value, x, y, metadata, align_text='left', classes=None ): """Write the print value""" label = metadata and metadata.get('label') classes = classes and [classes] or [] if self.print_labels and label: label_cls = classes + ['label'] if self.print_values: y -= self.style.value_font_size / 2 self.svg.node( serie_node['text_overlay'], 'text', class_=' '.join(label_cls), x=x, y=y + self.style.value_font_size / 3 ).text = label y += self.style.value_font_size if self.print_values or self.dynamic_print_values: val_cls = classes + ['value'] if self.dynamic_print_values: val_cls.append('showable') self.svg.node( serie_node['text_overlay'], 'text', class_=' '.join(val_cls), x=x, y=y + self.style.value_font_size / 3, attrib={ 'text-anchor': align_text } ).text = value if self.print_zeroes or value != '0' else ''
[ "def", "_static_value", "(", "self", ",", "serie_node", ",", "value", ",", "x", ",", "y", ",", "metadata", ",", "align_text", "=", "'left'", ",", "classes", "=", "None", ")", ":", "label", "=", "metadata", "and", "metadata", ".", "get", "(", "'label'", ")", "classes", "=", "classes", "and", "[", "classes", "]", "or", "[", "]", "if", "self", ".", "print_labels", "and", "label", ":", "label_cls", "=", "classes", "+", "[", "'label'", "]", "if", "self", ".", "print_values", ":", "y", "-=", "self", ".", "style", ".", "value_font_size", "/", "2", "self", ".", "svg", ".", "node", "(", "serie_node", "[", "'text_overlay'", "]", ",", "'text'", ",", "class_", "=", "' '", ".", "join", "(", "label_cls", ")", ",", "x", "=", "x", ",", "y", "=", "y", "+", "self", ".", "style", ".", "value_font_size", "/", "3", ")", ".", "text", "=", "label", "y", "+=", "self", ".", "style", ".", "value_font_size", "if", "self", ".", "print_values", "or", "self", ".", "dynamic_print_values", ":", "val_cls", "=", "classes", "+", "[", "'value'", "]", "if", "self", ".", "dynamic_print_values", ":", "val_cls", ".", "append", "(", "'showable'", ")", "self", ".", "svg", ".", "node", "(", "serie_node", "[", "'text_overlay'", "]", ",", "'text'", ",", "class_", "=", "' '", ".", "join", "(", "val_cls", ")", ",", "x", "=", "x", ",", "y", "=", "y", "+", "self", ".", "style", ".", "value_font_size", "/", "3", ",", "attrib", "=", "{", "'text-anchor'", ":", "align_text", "}", ")", ".", "text", "=", "value", "if", "self", ".", "print_zeroes", "or", "value", "!=", "'0'", "else", "''" ]
Write the print value
[ "Write", "the", "print", "value" ]
python
train
31
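A hedged sketch of how the private _static_value path above is normally reached through pygal's public config options; the chart type, series name, and values are assumptions, not from the record.

import pygal

# print_values / print_labels / print_zeroes are the switches the
# private _static_value method above reads.
chart = pygal.Bar(print_values=True, print_zeroes=False)
chart.add("hits", [3, 0, 7])
svg = chart.render()  # values are drawn; the zero renders with empty text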
bokeh/bokeh
bokeh/model.py
https://github.com/bokeh/bokeh/blob/dc8cf49e4e4302fd38537ad089ece81fbcca4737/bokeh/model.py#L838-L861
def _visit_value_and_its_immediate_references(obj, visitor):
    ''' Recurse down Models, HasProps, and Python containers

    The ordering in this function is to optimize performance.  We check the
    most common types (int, float, str) first so that we can quickly return in
    the common case.  We avoid isinstance and issubclass checks in a couple
    places with `type` checks because isinstance checks can be slow.
    '''
    typ = type(obj)
    if typ in _common_types: # short circuit on common base types
        return
    if typ is list or issubclass(typ, (list, tuple)): # check common containers
        for item in obj:
            _visit_value_and_its_immediate_references(item, visitor)
    elif issubclass(typ, dict):
        for key, value in iteritems(obj):
            _visit_value_and_its_immediate_references(key, visitor)
            _visit_value_and_its_immediate_references(value, visitor)
    elif issubclass(typ, HasProps):
        if issubclass(typ, Model):
            visitor(obj)
        else:
            # this isn't a Model, so recurse into it
            _visit_immediate_value_references(obj, visitor)
[ "def", "_visit_value_and_its_immediate_references", "(", "obj", ",", "visitor", ")", ":", "typ", "=", "type", "(", "obj", ")", "if", "typ", "in", "_common_types", ":", "# short circuit on common base types", "return", "if", "typ", "is", "list", "or", "issubclass", "(", "typ", ",", "(", "list", ",", "tuple", ")", ")", ":", "# check common containers", "for", "item", "in", "obj", ":", "_visit_value_and_its_immediate_references", "(", "item", ",", "visitor", ")", "elif", "issubclass", "(", "typ", ",", "dict", ")", ":", "for", "key", ",", "value", "in", "iteritems", "(", "obj", ")", ":", "_visit_value_and_its_immediate_references", "(", "key", ",", "visitor", ")", "_visit_value_and_its_immediate_references", "(", "value", ",", "visitor", ")", "elif", "issubclass", "(", "typ", ",", "HasProps", ")", ":", "if", "issubclass", "(", "typ", ",", "Model", ")", ":", "visitor", "(", "obj", ")", "else", ":", "# this isn't a Model, so recurse into it", "_visit_immediate_value_references", "(", "obj", ",", "visitor", ")" ]
Recurse down Models, HasProps, and Python containers

    The ordering in this function is to optimize performance.  We check the
    most common types (int, float, str) first so that we can quickly return in
    the common case.  We avoid isinstance and issubclass checks in a couple
    places with `type` checks because isinstance checks can be slow.
[ "Recurse", "down", "Models", "HasProps", "and", "Python", "containers" ]
python
train
46.625
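A self-contained sketch of the same type-dispatch traversal, with the HasProps/Model branch replaced by a generic visitor call so it runs without bokeh; the Node class is a hypothetical stand-in for a Model.

_leaf_types = (int, float, str, bytes, type(None))

def visit(obj, visitor):
    # Same dispatch shape as the bokeh helper above: short-circuit on
    # leaves, recurse into containers, hand anything else to the visitor.
    if type(obj) in _leaf_types:
        return
    if isinstance(obj, (list, tuple)):
        for item in obj:
            visit(item, visitor)
    elif isinstance(obj, dict):
        for key, value in obj.items():
            visit(key, visitor)
            visit(value, visitor)
    else:
        visitor(obj)

class Node:  # stands in for a bokeh Model
    pass

seen = []
visit({"roots": [Node(), {"nested": Node()}]}, seen.append)
assert len(seen) == 2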
saltstack/salt
salt/modules/lxd.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/lxd.py#L2151-L2201
def profile_config_get(name, config_key, remote_addr=None,
                       cert=None, key=None, verify_cert=True):
    '''
    Get a profile config item.

    name :
        The name of the profile to get the config item from.

    config_key :
        The key for the item to retrieve.

    remote_addr :
        A URL to a remote server, you also have to give cert and
        key if you provide remote_addr and it's a TCP address!

        Examples:
            https://myserver.lan:8443
            /var/lib/mysocket.sock

    cert :
        PEM Formatted SSL Certificate.

        Examples:
            ~/.config/lxc/client.crt

    key :
        PEM Formatted SSL Key.

        Examples:
            ~/.config/lxc/client.key

    verify_cert : True
        Whether to verify the cert, this is by default True
        but in most cases you want to set it off as LXD
        normally uses self-signed certificates.

    CLI Example:

    .. code-block:: bash

        $ salt '*' lxd.profile_config_get autostart boot.autostart
    '''
    profile = profile_get(
        name,
        remote_addr,
        cert,
        key,
        verify_cert,
        _raw=True
    )

    return _get_property_dict_item(profile, 'config', config_key)
[ "def", "profile_config_get", "(", "name", ",", "config_key", ",", "remote_addr", "=", "None", ",", "cert", "=", "None", ",", "key", "=", "None", ",", "verify_cert", "=", "True", ")", ":", "profile", "=", "profile_get", "(", "name", ",", "remote_addr", ",", "cert", ",", "key", ",", "verify_cert", ",", "_raw", "=", "True", ")", "return", "_get_property_dict_item", "(", "profile", ",", "'config'", ",", "config_key", ")" ]
Get a profile config item.

    name :
        The name of the profile to get the config item from.

    config_key :
        The key for the item to retrieve.

    remote_addr :
        A URL to a remote server, you also have to give cert and
        key if you provide remote_addr and it's a TCP address!

        Examples:
            https://myserver.lan:8443
            /var/lib/mysocket.sock

    cert :
        PEM Formatted SSL Certificate.

        Examples:
            ~/.config/lxc/client.crt

    key :
        PEM Formatted SSL Key.

        Examples:
            ~/.config/lxc/client.key

    verify_cert : True
        Whether to verify the cert, this is by default True
        but in most cases you want to set it off as LXD
        normally uses self-signed certificates.

    CLI Example:

    .. code-block:: bash

        $ salt '*' lxd.profile_config_get autostart boot.autostart
[ "Get", "a", "profile", "config", "item", "." ]
python
train
25.411765
pycontribs/pyrax
pyrax/utils.py
https://github.com/pycontribs/pyrax/blob/9ddfd5064b3a292d7337906f3b2d5dce95b50b99/pyrax/utils.py#L708-L712
def import_class(import_str): """Returns a class from a string including module and class.""" mod_str, _sep, class_str = import_str.rpartition(".") __import__(mod_str) return getattr(sys.modules[mod_str], class_str)
[ "def", "import_class", "(", "import_str", ")", ":", "mod_str", ",", "_sep", ",", "class_str", "=", "import_str", ".", "rpartition", "(", "\".\"", ")", "__import__", "(", "mod_str", ")", "return", "getattr", "(", "sys", ".", "modules", "[", "mod_str", "]", ",", "class_str", ")" ]
Returns a class from a string including module and class.
[ "Returns", "a", "class", "from", "a", "string", "including", "module", "and", "class", "." ]
python
train
45.4
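A minimal usage sketch, assuming the module path shown above (pyrax/utils.py) is importable in your environment; the dotted-path argument can be any "module.Class" string.

from pyrax.utils import import_class

OrderedDict = import_class("collections.OrderedDict")
assert OrderedDict(a=1)["a"] == 1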
DLR-RM/RAFCON
source/rafcon/gui/controllers/state_editor/semantic_data_editor.py
https://github.com/DLR-RM/RAFCON/blob/24942ef1a904531f49ab8830a1dbb604441be498/source/rafcon/gui/controllers/state_editor/semantic_data_editor.py#L292-L306
def cut_action_callback(self, *event):
    """Copy all selected row dict value pairs to the clipboard and remove them from the model"""
    if react_to_event(self.view, self.tree_view, event) and self.active_entry_widget is None:
        _, dict_paths = self.get_view_selection()
        stored_data_list = []
        for dict_path_as_list in dict_paths:
            if dict_path_as_list:
                value = self.model.state.semantic_data
                for path_element in dict_path_as_list:
                    value = value[path_element]
                stored_data_list.append((path_element, value))
                self.model.state.remove_semantic_data(dict_path_as_list)
        rafcon.gui.clipboard.global_clipboard.set_semantic_dictionary_list(stored_data_list)

        self.reload_tree_store_data()
[ "def", "cut_action_callback", "(", "self", ",", "*", "event", ")", ":", "if", "react_to_event", "(", "self", ".", "view", ",", "self", ".", "tree_view", ",", "event", ")", "and", "self", ".", "active_entry_widget", "is", "None", ":", "_", ",", "dict_paths", "=", "self", ".", "get_view_selection", "(", ")", "stored_data_list", "=", "[", "]", "for", "dict_path_as_list", "in", "dict_paths", ":", "if", "dict_path_as_list", ":", "value", "=", "self", ".", "model", ".", "state", ".", "semantic_data", "for", "path_element", "in", "dict_path_as_list", ":", "value", "=", "value", "[", "path_element", "]", "stored_data_list", ".", "append", "(", "(", "path_element", ",", "value", ")", ")", "self", ".", "model", ".", "state", ".", "remove_semantic_data", "(", "dict_path_as_list", ")", "rafcon", ".", "gui", ".", "clipboard", ".", "global_clipboard", ".", "set_semantic_dictionary_list", "(", "stored_data_list", ")", "self", ".", "reload_tree_store_data", "(", ")" ]
Copy all selected row dict value pairs to the clipboard and remove them from the model
[ "Add", "a", "copy", "and", "cut", "all", "selected", "row", "dict", "value", "pairs", "to", "the", "clipboard" ]
python
train
55.666667
IRC-SPHERE/HyperStream
hyperstream/workflow/workflow_manager.py
https://github.com/IRC-SPHERE/HyperStream/blob/98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780/hyperstream/workflow/workflow_manager.py#L193-L212
def add_workflow(self, workflow, commit=False): """ Add a new workflow and optionally commit it to the database :param workflow: The workflow :param commit: Whether to commit the workflow to the database :type workflow: Workflow :type commit: bool :return: None """ if workflow.workflow_id in self.workflows: raise KeyError("Workflow with id {} already exists".format(workflow.workflow_id)) self.workflows[workflow.workflow_id] = workflow logging.info("Added workflow {} to workflow manager".format(workflow.workflow_id)) # Optionally also save the workflow to database if commit: self.commit_workflow(workflow.workflow_id) else: self.uncommitted_workflows.add(workflow.workflow_id)
[ "def", "add_workflow", "(", "self", ",", "workflow", ",", "commit", "=", "False", ")", ":", "if", "workflow", ".", "workflow_id", "in", "self", ".", "workflows", ":", "raise", "KeyError", "(", "\"Workflow with id {} already exists\"", ".", "format", "(", "workflow", ".", "workflow_id", ")", ")", "self", ".", "workflows", "[", "workflow", ".", "workflow_id", "]", "=", "workflow", "logging", ".", "info", "(", "\"Added workflow {} to workflow manager\"", ".", "format", "(", "workflow", ".", "workflow_id", ")", ")", "# Optionally also save the workflow to database", "if", "commit", ":", "self", ".", "commit_workflow", "(", "workflow", ".", "workflow_id", ")", "else", ":", "self", ".", "uncommitted_workflows", ".", "add", "(", "workflow", ".", "workflow_id", ")" ]
Add a new workflow and optionally commit it to the database :param workflow: The workflow :param commit: Whether to commit the workflow to the database :type workflow: Workflow :type commit: bool :return: None
[ "Add", "a", "new", "workflow", "and", "optionally", "commit", "it", "to", "the", "database", ":", "param", "workflow", ":", "The", "workflow", ":", "param", "commit", ":", "Whether", "to", "commit", "the", "workflow", "to", "the", "database", ":", "type", "workflow", ":", "Workflow", ":", "type", "commit", ":", "bool", ":", "return", ":", "None" ]
python
train
40.65
annoviko/pyclustering
pyclustering/cluster/ga.py
https://github.com/annoviko/pyclustering/blob/98aa0dd89fd36f701668fb1eb29c8fb5662bf7d0/pyclustering/cluster/ga.py#L103-L116
def collect_population_best(self, best_chromosome, best_fitness_function):
    """!
    @brief Stores the best chromosome of the current iteration and its fitness function value.

    @param[in] best_chromosome (list): The best chromosome of the current iteration.
    @param[in] best_fitness_function (float): Fitness function value of the chromosome.

    """

    if not self._need_population_best:
        return

    self._best_population_result['chromosome'].append(best_chromosome)
    self._best_population_result['fitness_function'].append(best_fitness_function)
[ "def", "collect_population_best", "(", "self", ",", "best_chromosome", ",", "best_fitness_function", ")", ":", "if", "not", "self", ".", "_need_population_best", ":", "return", "self", ".", "_best_population_result", "[", "'chromosome'", "]", ".", "append", "(", "best_chromosome", ")", "self", ".", "_best_population_result", "[", "'fitness_function'", "]", ".", "append", "(", "best_fitness_function", ")" ]
!
@brief Stores the best chromosome of the current iteration and its fitness function value.

@param[in] best_chromosome (list): The best chromosome of the current iteration.

@param[in] best_fitness_function (float): Fitness function value of the chromosome.
[ "!" ]
python
valid
43.928571
yougov/pmxbot
pmxbot/logging.py
https://github.com/yougov/pmxbot/blob/5da84a3258a0fd73cb35b60e39769a5d7bfb2ba7/pmxbot/logging.py#L468-L488
def strike(channel, nick, rest):
    "Strike last <n> statements from the record"
    yield NoLog
    rest = rest.strip()
    if not rest:
        count = 1
    else:
        if not rest.isdigit():
            yield "Strike how many? Argument must be a positive integer."
            return  # PEP 479: raising StopIteration inside a generator is an error
        count = int(rest)
    try:
        struck = Logger.store.strike(channel, nick, count)
        tmpl = (
            "Isn't undo great? Last %d statement%s "
            "by %s were stricken from the record."
        )
        yield tmpl % (struck, 's' if struck > 1 else '', nick)
    except Exception:
        traceback.print_exc()
        yield "Hmm.. I didn't find anything of yours to strike!"
[ "def", "strike", "(", "channel", ",", "nick", ",", "rest", ")", ":", "yield", "NoLog", "rest", "=", "rest", ".", "strip", "(", ")", "if", "not", "rest", ":", "count", "=", "1", "else", ":", "if", "not", "rest", ".", "isdigit", "(", ")", ":", "yield", "\"Strike how many? Argument must be a positive integer.\"", "raise", "StopIteration", "count", "=", "int", "(", "rest", ")", "try", ":", "struck", "=", "Logger", ".", "store", ".", "strike", "(", "channel", ",", "nick", ",", "count", ")", "tmpl", "=", "(", "\"Isn't undo great? Last %d statement%s \"", "\"by %s were stricken from the record.\"", ")", "yield", "tmpl", "%", "(", "struck", ",", "'s'", "if", "struck", ">", "1", "else", "''", ",", "nick", ")", "except", "Exception", ":", "traceback", ".", "print_exc", "(", ")", "yield", "\"Hmm.. I didn't find anything of yours to strike!\"" ]
Strike last <n> statements from the record
[ "Strike", "last", "<n", ">", "statements", "from", "the", "record" ]
python
train
27.571429
nion-software/nionswift
nion/swift/Facade.py
https://github.com/nion-software/nionswift/blob/d43693eaf057b8683b9638e575000f055fede452/nion/swift/Facade.py#L2460-L2473
def get_library_value(self, key: str) -> typing.Any: """Get the library value for the given key. Please consult the developer documentation for a list of valid keys. .. versionadded:: 1.0 Scriptable: Yes """ desc = Metadata.session_key_map.get(key) if desc is not None: field_id = desc['path'][-1] return getattr(ApplicationData.get_session_metadata_model(), field_id) raise KeyError()
[ "def", "get_library_value", "(", "self", ",", "key", ":", "str", ")", "->", "typing", ".", "Any", ":", "desc", "=", "Metadata", ".", "session_key_map", ".", "get", "(", "key", ")", "if", "desc", "is", "not", "None", ":", "field_id", "=", "desc", "[", "'path'", "]", "[", "-", "1", "]", "return", "getattr", "(", "ApplicationData", ".", "get_session_metadata_model", "(", ")", ",", "field_id", ")", "raise", "KeyError", "(", ")" ]
Get the library value for the given key. Please consult the developer documentation for a list of valid keys. .. versionadded:: 1.0 Scriptable: Yes
[ "Get", "the", "library", "value", "for", "the", "given", "key", "." ]
python
train
33.071429
pedroburon/tbk
tbk/webpay/confirmation.py
https://github.com/pedroburon/tbk/blob/ecd6741e0bae06269eb4ac885c3ffcb7902ee40e/tbk/webpay/confirmation.py#L110-L122
def accountable_date(self): '''Accountable date of transaction, localized as America/Santiago ''' fecha_transaccion = self.data['TBK_FECHA_CONTABLE'] m = int(fecha_transaccion[:2]) d = int(fecha_transaccion[2:]) santiago = pytz.timezone('America/Santiago') today = santiago.localize(datetime.datetime.today()) year = today.year if self.paid_at.month == 12 and m == 1: year += 1 santiago_dt = santiago.localize(datetime.datetime(year, m, d)) return santiago_dt
[ "def", "accountable_date", "(", "self", ")", ":", "fecha_transaccion", "=", "self", ".", "data", "[", "'TBK_FECHA_CONTABLE'", "]", "m", "=", "int", "(", "fecha_transaccion", "[", ":", "2", "]", ")", "d", "=", "int", "(", "fecha_transaccion", "[", "2", ":", "]", ")", "santiago", "=", "pytz", ".", "timezone", "(", "'America/Santiago'", ")", "today", "=", "santiago", ".", "localize", "(", "datetime", ".", "datetime", ".", "today", "(", ")", ")", "year", "=", "today", ".", "year", "if", "self", ".", "paid_at", ".", "month", "==", "12", "and", "m", "==", "1", ":", "year", "+=", "1", "santiago_dt", "=", "santiago", ".", "localize", "(", "datetime", ".", "datetime", "(", "year", ",", "m", ",", "d", ")", ")", "return", "santiago_dt" ]
Accountable date of transaction, localized as America/Santiago
[ "Accountable", "date", "of", "transaction", "localized", "as", "America", "/", "Santiago" ]
python
train
42.076923
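A standalone sketch of the year-rollover rule above, using pytz; the sample TBK_FECHA_CONTABLE value and the December payment month are assumptions chosen to exercise the rollover branch.

import datetime
import pytz

santiago = pytz.timezone("America/Santiago")
fecha_contable = "0102"  # TBK_FECHA_CONTABLE is MMDD; here January 2nd
m, d = int(fecha_contable[:2]), int(fecha_contable[2:])
year = santiago.localize(datetime.datetime.today()).year
paid_month = 12  # a December payment with a January accountable date...
if paid_month == 12 and m == 1:
    year += 1    # ...belongs to the next calendar year
accountable = santiago.localize(datetime.datetime(year, m, d))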
mailgun/talon
talon/signature/learning/featurespace.py
https://github.com/mailgun/talon/blob/cdd84563dd329c4f887591807870d10015e0c7a7/talon/signature/learning/featurespace.py#L18-L47
def features(sender=''): '''Returns a list of signature features.''' return [ # This one isn't from paper. # Meant to match companies names, sender's names, address. many_capitalized_words, # This one is not from paper. # Line is too long. # This one is less aggressive than `Line is too short` lambda line: 1 if len(line) > TOO_LONG_SIGNATURE_LINE else 0, # Line contains email pattern. binary_regex_search(RE_EMAIL), # Line contains url. binary_regex_search(RE_URL), # Line contains phone number pattern. binary_regex_search(RE_RELAX_PHONE), # Line matches the regular expression "^[\s]*---*[\s]*$". binary_regex_match(RE_SEPARATOR), # Line has a sequence of 10 or more special characters. binary_regex_search(RE_SPECIAL_CHARS), # Line contains any typical signature words. binary_regex_search(RE_SIGNATURE_WORDS), # Line contains a pattern like Vitor R. Carvalho or William W. Cohen. binary_regex_search(RE_NAME), # Percentage of punctuation symbols in the line is larger than 50% lambda line: 1 if punctuation_percent(line) > 50 else 0, # Percentage of punctuation symbols in the line is larger than 90% lambda line: 1 if punctuation_percent(line) > 90 else 0, contains_sender_names(sender) ]
[ "def", "features", "(", "sender", "=", "''", ")", ":", "return", "[", "# This one isn't from paper.", "# Meant to match companies names, sender's names, address.", "many_capitalized_words", ",", "# This one is not from paper.", "# Line is too long.", "# This one is less aggressive than `Line is too short`", "lambda", "line", ":", "1", "if", "len", "(", "line", ")", ">", "TOO_LONG_SIGNATURE_LINE", "else", "0", ",", "# Line contains email pattern.", "binary_regex_search", "(", "RE_EMAIL", ")", ",", "# Line contains url.", "binary_regex_search", "(", "RE_URL", ")", ",", "# Line contains phone number pattern.", "binary_regex_search", "(", "RE_RELAX_PHONE", ")", ",", "# Line matches the regular expression \"^[\\s]*---*[\\s]*$\".", "binary_regex_match", "(", "RE_SEPARATOR", ")", ",", "# Line has a sequence of 10 or more special characters.", "binary_regex_search", "(", "RE_SPECIAL_CHARS", ")", ",", "# Line contains any typical signature words.", "binary_regex_search", "(", "RE_SIGNATURE_WORDS", ")", ",", "# Line contains a pattern like Vitor R. Carvalho or William W. Cohen.", "binary_regex_search", "(", "RE_NAME", ")", ",", "# Percentage of punctuation symbols in the line is larger than 50%", "lambda", "line", ":", "1", "if", "punctuation_percent", "(", "line", ")", ">", "50", "else", "0", ",", "# Percentage of punctuation symbols in the line is larger than 90%", "lambda", "line", ":", "1", "if", "punctuation_percent", "(", "line", ")", ">", "90", "else", "0", ",", "contains_sender_names", "(", "sender", ")", "]" ]
Returns a list of signature features.
[ "Returns", "a", "list", "of", "signature", "features", "." ]
python
train
46.366667
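A self-contained sketch of how the binary_regex_search helper assumed above turns a line into feature values; the patterns here are simplified stand-ins for talon's RE_* constants, not the real ones.

import re

def binary_regex_search(prog):
    # Mirrors the helper the record relies on: 1 if the compiled
    # pattern occurs anywhere in the line, else 0.
    return lambda line: 1 if prog.search(line) else 0

RE_EMAIL = re.compile(r"\S+@\S+")   # illustrative pattern only
RE_URL = re.compile(r"https?://\S+")

line = "John Doe - john@example.com"
vector = [f(line) for f in (binary_regex_search(RE_EMAIL),
                            binary_regex_search(RE_URL))]
assert vector == [1, 0]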
cthorey/pdsimage
pdsimage/PDS_Extractor.py
https://github.com/cthorey/pdsimage/blob/f71de6dfddd3d538d76da229b4b9605c40f3fbac/pdsimage/PDS_Extractor.py#L685-L707
def _define_case(self):
    ''' Identify case '''

    lonBool = self._map_center(
        'long', self.lonM) != self._map_center('long', self.lonm)
    latBool = self._map_center(
        'lat', self.latM) != self._map_center('lat', self.latm)

    if not lonBool and not latBool:
        print('No overlap - Processing should be quick')
        return self._cas_1()
    elif lonBool and not latBool:
        print('Longitude overlap - 2 images have to be processed \n \
            Processing could take a few seconds')
        return self._cas_2()
    elif not lonBool and latBool:
        print('Latitude overlap - 2 images have to be processed \n\
            Processing could take a few seconds')
        return self._cas_3()
    else:
        print('Latitude/Longitude overlaps - 4 images have to be processed \n\
            Processing could take a few seconds')
        return self._cas_4()
[ "def", "_define_case", "(", "self", ")", ":", "lonBool", "=", "self", ".", "_map_center", "(", "'long'", ",", "self", ".", "lonM", ")", "!=", "self", ".", "_map_center", "(", "'long'", ",", "self", ".", "lonm", ")", "latBool", "=", "self", ".", "_map_center", "(", "'lat'", ",", "self", ".", "latM", ")", "!=", "self", ".", "_map_center", "(", "'lat'", ",", "self", ".", "latm", ")", "if", "not", "lonBool", "and", "not", "latBool", ":", "print", "(", "'No overlap - Processing should be quick'", ")", "return", "self", ".", "_cas_1", "(", ")", "elif", "lonBool", "and", "not", "latBool", ":", "print", "(", "'Longitude overlap - 2 images have to be proceded \\n \\\n Processing could take a few seconds'", ")", "return", "self", ".", "_cas_2", "(", ")", "elif", "not", "lonBool", "and", "latBool", ":", "print", "(", "'Latitude overlap - 2 images have to be proceded \\n\\\n Processing could take a few seconds'", ")", "return", "self", ".", "_cas_3", "(", ")", "else", ":", "print", "(", "'Latitude/Longidude overlaps - 4 images have to be proceded \\n\\\n Processing could take a few seconds'", ")", "return", "self", ".", "_cas_4", "(", ")" ]
Identify case
[ "Identify", "case" ]
python
train
41.73913
gmr/rejected
rejected/process.py
https://github.com/gmr/rejected/blob/610a3e1401122ecb98d891b6795cca0255e5b044/rejected/process.py#L793-L818
def submit_influxdb_measurement(self): """Submit a measurement for a message to InfluxDB""" measurement = influxdb.Measurement(*self.influxdb) measurement.set_timestamp(time.time()) for key, value in self.measurement.counters.items(): measurement.set_field(key, value) for key, value in self.measurement.tags.items(): measurement.set_tag(key, value) for key, value in self.measurement.values.items(): measurement.set_field(key, value) for key, values in self.measurement.durations.items(): if len(values) == 1: measurement.set_field(key, values[0]) elif len(values) > 1: measurement.set_field('{}-average'.format(key), sum(values) / len(values)) measurement.set_field('{}-max'.format(key), max(values)) measurement.set_field('{}-min'.format(key), min(values)) measurement.set_field('{}-median'.format(key), utils.percentile(values, 50)) measurement.set_field('{}-95th'.format(key), utils.percentile(values, 95)) influxdb.add_measurement(measurement) LOGGER.debug('InfluxDB Measurement: %r', measurement.marshall())
[ "def", "submit_influxdb_measurement", "(", "self", ")", ":", "measurement", "=", "influxdb", ".", "Measurement", "(", "*", "self", ".", "influxdb", ")", "measurement", ".", "set_timestamp", "(", "time", ".", "time", "(", ")", ")", "for", "key", ",", "value", "in", "self", ".", "measurement", ".", "counters", ".", "items", "(", ")", ":", "measurement", ".", "set_field", "(", "key", ",", "value", ")", "for", "key", ",", "value", "in", "self", ".", "measurement", ".", "tags", ".", "items", "(", ")", ":", "measurement", ".", "set_tag", "(", "key", ",", "value", ")", "for", "key", ",", "value", "in", "self", ".", "measurement", ".", "values", ".", "items", "(", ")", ":", "measurement", ".", "set_field", "(", "key", ",", "value", ")", "for", "key", ",", "values", "in", "self", ".", "measurement", ".", "durations", ".", "items", "(", ")", ":", "if", "len", "(", "values", ")", "==", "1", ":", "measurement", ".", "set_field", "(", "key", ",", "values", "[", "0", "]", ")", "elif", "len", "(", "values", ")", ">", "1", ":", "measurement", ".", "set_field", "(", "'{}-average'", ".", "format", "(", "key", ")", ",", "sum", "(", "values", ")", "/", "len", "(", "values", ")", ")", "measurement", ".", "set_field", "(", "'{}-max'", ".", "format", "(", "key", ")", ",", "max", "(", "values", ")", ")", "measurement", ".", "set_field", "(", "'{}-min'", ".", "format", "(", "key", ")", ",", "min", "(", "values", ")", ")", "measurement", ".", "set_field", "(", "'{}-median'", ".", "format", "(", "key", ")", ",", "utils", ".", "percentile", "(", "values", ",", "50", ")", ")", "measurement", ".", "set_field", "(", "'{}-95th'", ".", "format", "(", "key", ")", ",", "utils", ".", "percentile", "(", "values", ",", "95", ")", ")", "influxdb", ".", "add_measurement", "(", "measurement", ")", "LOGGER", ".", "debug", "(", "'InfluxDB Measurement: %r'", ",", "measurement", ".", "marshall", "(", ")", ")" ]
Submit a measurement for a message to InfluxDB
[ "Submit", "a", "measurement", "for", "a", "message", "to", "InfluxDB" ]
python
train
51.269231
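The duration aggregation above leans on utils.percentile; this is a self-contained nearest-rank sketch of that idea — the real rejected implementation may interpolate differently.

def percentile(values, pct):
    # Nearest-rank percentile: sort, then pick the value at rank
    # ceil(pct% of N), clamped to a valid index.
    ordered = sorted(values)
    rank = max(1, int(round(pct / 100.0 * len(ordered))))
    return ordered[rank - 1]

durations = [0.2, 0.4, 0.9, 1.5]
assert percentile(durations, 50) == 0.4
assert percentile(durations, 95) == 1.5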
noahbenson/pimms
pimms/util.py
https://github.com/noahbenson/pimms/blob/9051b86d6b858a7a13511b72c48dc21bc903dab2/pimms/util.py#L664-L673
def is_memoized(self, k): ''' lmap.is_memoized(k) yields True if k is a key in the given lazy map lmap that is both lazy and already memoized. ''' v = ps.PMap.__getitem__(self, k) if not isinstance(v, (types.FunctionType, partial)): return False else: return id(v) in self._memoized
[ "def", "is_memoized", "(", "self", ",", "k", ")", ":", "v", "=", "ps", ".", "PMap", ".", "__getitem__", "(", "self", ",", "k", ")", "if", "not", "isinstance", "(", "v", ",", "(", "types", ".", "FunctionType", ",", "partial", ")", ")", ":", "return", "False", "else", ":", "return", "id", "(", "v", ")", "in", "self", ".", "_memoized" ]
lmap.is_memoized(k) yields True if k is a key in the given lazy map lmap that is both lazy and already memoized.
[ "lmap", ".", "is_memoized", "(", "k", ")", "yields", "True", "if", "k", "is", "a", "key", "in", "the", "given", "lazy", "map", "lmap", "that", "is", "both", "lazy", "and", "already", "memoized", "." ]
python
train
35.3
inasafe/inasafe
safe/gui/widgets/dock.py
https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/safe/gui/widgets/dock.py#L1596-L1657
def set_provenance_to_project_variables(provenances): """Helper method to update / create provenance in project variables. :param provenances: Keys and values from provenances. :type provenances: dict """ def write_project_variable(key, value): """Helper to write project variable for base_key and value. The key will be: - base_key__KEY: value for dictionary. - base_key__INDEX: value for list, tuple, set. - date will be converted to ISO. - None will be converted to ''. :param key: The key. :type key: basestring :param value: A list of dictionary. :type value: dict, list, tuple, set """ if key in list(duplicated_global_variables.keys()): return if isinstance(value, (list, tuple, set)): # Skip if the type is too complex (list of note, actions) return elif isinstance(value, dict): for dict_key, dict_value in list(value.items()): write_project_variable( '%s__%s' % (key, dict_key), dict_value) elif isinstance(value, (bool, str, Number)): # Don't use get_name for field if 'field' in key: pretty_value = get_name(value) QgsExpressionContextUtils.setProjectVariable( QgsProject.instance(), key, pretty_value) else: QgsExpressionContextUtils.setProjectVariable( QgsProject.instance(), key, value) elif isinstance(value, type(None)): QgsExpressionContextUtils.setProjectVariable( QgsProject.instance(), key, '') elif isinstance(value, datetime): QgsExpressionContextUtils.setProjectVariable( QgsProject.instance(), key, value.isoformat()) elif isinstance(value, QUrl): QgsExpressionContextUtils.setProjectVariable( QgsProject.instance(), key, value.toString()) else: LOGGER.warning('Not handled provenance') LOGGER.warning('Key: %s, Type: %s, Value: %s' % ( key, type(value), value)) # Remove old provenance data first remove_provenance_project_variables() for key, value in list(provenances.items()): if QgsExpressionContextUtils.globalScope().hasVariable(key): continue write_project_variable(key, value)
[ "def", "set_provenance_to_project_variables", "(", "provenances", ")", ":", "def", "write_project_variable", "(", "key", ",", "value", ")", ":", "\"\"\"Helper to write project variable for base_key and value.\n\n The key will be:\n - base_key__KEY: value for dictionary.\n - base_key__INDEX: value for list, tuple, set.\n - date will be converted to ISO.\n - None will be converted to ''.\n\n :param key: The key.\n :type key: basestring\n\n :param value: A list of dictionary.\n :type value: dict, list, tuple, set\n \"\"\"", "if", "key", "in", "list", "(", "duplicated_global_variables", ".", "keys", "(", ")", ")", ":", "return", "if", "isinstance", "(", "value", ",", "(", "list", ",", "tuple", ",", "set", ")", ")", ":", "# Skip if the type is too complex (list of note, actions)", "return", "elif", "isinstance", "(", "value", ",", "dict", ")", ":", "for", "dict_key", ",", "dict_value", "in", "list", "(", "value", ".", "items", "(", ")", ")", ":", "write_project_variable", "(", "'%s__%s'", "%", "(", "key", ",", "dict_key", ")", ",", "dict_value", ")", "elif", "isinstance", "(", "value", ",", "(", "bool", ",", "str", ",", "Number", ")", ")", ":", "# Don't use get_name for field", "if", "'field'", "in", "key", ":", "pretty_value", "=", "get_name", "(", "value", ")", "QgsExpressionContextUtils", ".", "setProjectVariable", "(", "QgsProject", ".", "instance", "(", ")", ",", "key", ",", "pretty_value", ")", "else", ":", "QgsExpressionContextUtils", ".", "setProjectVariable", "(", "QgsProject", ".", "instance", "(", ")", ",", "key", ",", "value", ")", "elif", "isinstance", "(", "value", ",", "type", "(", "None", ")", ")", ":", "QgsExpressionContextUtils", ".", "setProjectVariable", "(", "QgsProject", ".", "instance", "(", ")", ",", "key", ",", "''", ")", "elif", "isinstance", "(", "value", ",", "datetime", ")", ":", "QgsExpressionContextUtils", ".", "setProjectVariable", "(", "QgsProject", ".", "instance", "(", ")", ",", "key", ",", "value", ".", "isoformat", "(", ")", ")", "elif", "isinstance", "(", "value", ",", "QUrl", ")", ":", "QgsExpressionContextUtils", ".", "setProjectVariable", "(", "QgsProject", ".", "instance", "(", ")", ",", "key", ",", "value", ".", "toString", "(", ")", ")", "else", ":", "LOGGER", ".", "warning", "(", "'Not handled provenance'", ")", "LOGGER", ".", "warning", "(", "'Key: %s, Type: %s, Value: %s'", "%", "(", "key", ",", "type", "(", "value", ")", ",", "value", ")", ")", "# Remove old provenance data first", "remove_provenance_project_variables", "(", ")", "for", "key", ",", "value", "in", "list", "(", "provenances", ".", "items", "(", ")", ")", ":", "if", "QgsExpressionContextUtils", ".", "globalScope", "(", ")", ".", "hasVariable", "(", "key", ")", ":", "continue", "write_project_variable", "(", "key", ",", "value", ")" ]
Helper method to update / create provenance in project variables. :param provenances: Keys and values from provenances. :type provenances: dict
[ "Helper", "method", "to", "update", "/", "create", "provenance", "in", "project", "variables", "." ]
python
train
39.516129
rueckstiess/mtools
mtools/mlogfilter/mlogfilter.py
https://github.com/rueckstiess/mtools/blob/a6a22910c3569c0c8a3908660ca218a4557e4249/mtools/mlogfilter/mlogfilter.py#L141-L172
def _formatNumbers(self, line): """ Format the numbers so that there are commas inserted. For example: 1200300 becomes 1,200,300. """ # below thousands separator syntax only works for # python 2.7, skip for 2.6 if sys.version_info < (2, 7): return line last_index = 0 try: # find the index of the last } character last_index = (line.rindex('}') + 1) end = line[last_index:] except ValueError: return line else: # split the string on numbers to isolate them splitted = re.split("(\d+)", end) for index, val in enumerate(splitted): converted = 0 try: converted = int(val) # if it's not an int pass and don't change the string except ValueError: pass else: if converted > 1000: splitted[index] = format(converted, ",d") return line[:last_index] + ("").join(splitted)
[ "def", "_formatNumbers", "(", "self", ",", "line", ")", ":", "# below thousands separator syntax only works for", "# python 2.7, skip for 2.6", "if", "sys", ".", "version_info", "<", "(", "2", ",", "7", ")", ":", "return", "line", "last_index", "=", "0", "try", ":", "# find the index of the last } character", "last_index", "=", "(", "line", ".", "rindex", "(", "'}'", ")", "+", "1", ")", "end", "=", "line", "[", "last_index", ":", "]", "except", "ValueError", ":", "return", "line", "else", ":", "# split the string on numbers to isolate them", "splitted", "=", "re", ".", "split", "(", "\"(\\d+)\"", ",", "end", ")", "for", "index", ",", "val", "in", "enumerate", "(", "splitted", ")", ":", "converted", "=", "0", "try", ":", "converted", "=", "int", "(", "val", ")", "# if it's not an int pass and don't change the string", "except", "ValueError", ":", "pass", "else", ":", "if", "converted", ">", "1000", ":", "splitted", "[", "index", "]", "=", "format", "(", "converted", ",", "\",d\"", ")", "return", "line", "[", ":", "last_index", "]", "+", "(", "\"\"", ")", ".", "join", "(", "splitted", ")" ]
Format the numbers so that there are commas inserted. For example: 1200300 becomes 1,200,300.
[ "Format", "the", "numbers", "so", "that", "there", "are", "commas", "inserted", "." ]
python
train
34.1875
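The core of the reformatting above is Python's thousands-separator format spec plus an rindex split; both are shown standalone here with an illustrative log line.

# The ',' format spec (Python 2.7+) is what inserts the separators:
assert format(1200300, ",d") == "1,200,300"

# Only digits after the last '}' are touched, so BSON payloads stay intact:
line = "update { ok: 1 } 1200300 docs"
last = line.rindex("}") + 1
assert line[last:] == " 1200300 docs"  # only this tail gets separators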
guaix-ucm/numina
numina/drps/drpsystem.py
https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/drps/drpsystem.py#L36-L43
def load_drp(self, name, entry_point='numina.pipeline.1'):
    """Load the DRP named 'name' from all available DRPs in 'entry_point'."""
    for drpins in self.iload(entry_point):
        if drpins.name == name:
            return drpins
    else:
        raise KeyError('{}'.format(name))
[ "def", "load_drp", "(", "self", ",", "name", ",", "entry_point", "=", "'numina.pipeline.1'", ")", ":", "for", "drpins", "in", "self", ".", "iload", "(", "entry_point", ")", ":", "if", "drpins", ".", "name", "==", "name", ":", "return", "drpins", "else", ":", "raise", "KeyError", "(", "'{}'", ".", "format", "(", "name", ")", ")" ]
Load the DRP named 'name' from all available DRPs in 'entry_point'.
[ "Load", "all", "available", "DRPs", "in", "entry_point", "." ]
python
train
35.125
hotdoc/hotdoc
hotdoc/core/formatter.py
https://github.com/hotdoc/hotdoc/blob/1067cdc8482b585b364a38fb52ca5d904e486280/hotdoc/core/formatter.py#L462-L487
def write_out(self, page, xml_subpages, output): """Banana banana """ # pylint: disable=missing-docstring def subpages(_): return xml_subpages namespace = etree.FunctionNamespace('uri:hotdoc') namespace['subpages'] = subpages html_output = os.path.join(output, 'html') rel_path = os.path.join(self.get_output_folder(page), page.link.ref) cached_path = os.path.join(self.__cache_dir, rel_path) full_path = os.path.join(html_output, rel_path) if not os.path.exists(os.path.dirname(full_path)): os.makedirs(os.path.dirname(full_path)) with open(cached_path, 'r', encoding='utf-8') as _: doc_root = etree.HTML(_.read()) self.__validate_html(self.extension.project, page, doc_root) self.writing_page_signal(self, page, full_path, doc_root) with open(full_path, 'w', encoding='utf-8') as _: transformed = str(self.__page_transform(doc_root)) _.write('<!DOCTYPE html>\n%s' % transformed)
[ "def", "write_out", "(", "self", ",", "page", ",", "xml_subpages", ",", "output", ")", ":", "# pylint: disable=missing-docstring", "def", "subpages", "(", "_", ")", ":", "return", "xml_subpages", "namespace", "=", "etree", ".", "FunctionNamespace", "(", "'uri:hotdoc'", ")", "namespace", "[", "'subpages'", "]", "=", "subpages", "html_output", "=", "os", ".", "path", ".", "join", "(", "output", ",", "'html'", ")", "rel_path", "=", "os", ".", "path", ".", "join", "(", "self", ".", "get_output_folder", "(", "page", ")", ",", "page", ".", "link", ".", "ref", ")", "cached_path", "=", "os", ".", "path", ".", "join", "(", "self", ".", "__cache_dir", ",", "rel_path", ")", "full_path", "=", "os", ".", "path", ".", "join", "(", "html_output", ",", "rel_path", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "os", ".", "path", ".", "dirname", "(", "full_path", ")", ")", ":", "os", ".", "makedirs", "(", "os", ".", "path", ".", "dirname", "(", "full_path", ")", ")", "with", "open", "(", "cached_path", ",", "'r'", ",", "encoding", "=", "'utf-8'", ")", "as", "_", ":", "doc_root", "=", "etree", ".", "HTML", "(", "_", ".", "read", "(", ")", ")", "self", ".", "__validate_html", "(", "self", ".", "extension", ".", "project", ",", "page", ",", "doc_root", ")", "self", ".", "writing_page_signal", "(", "self", ",", "page", ",", "full_path", ",", "doc_root", ")", "with", "open", "(", "full_path", ",", "'w'", ",", "encoding", "=", "'utf-8'", ")", "as", "_", ":", "transformed", "=", "str", "(", "self", ".", "__page_transform", "(", "doc_root", ")", ")", "_", ".", "write", "(", "'<!DOCTYPE html>\\n%s'", "%", "transformed", ")" ]
Banana banana
[ "Banana", "banana" ]
python
train
40
Microsoft/nni
src/sdk/pynni/nni/networkmorphism_tuner/graph.py
https://github.com/Microsoft/nni/blob/c7cc8db32da8d2ec77a382a55089f4e17247ce41/src/sdk/pynni/nni/networkmorphism_tuner/graph.py#L233-L255
def _redirect_edge(self, u_id, v_id, new_v_id): """Redirect the layer to a new node. Change the edge originally from `u_id` to `v_id` into an edge from `u_id` to `new_v_id` while keeping all other property of the edge the same. """ layer_id = None for index, edge_tuple in enumerate(self.adj_list[u_id]): if edge_tuple[0] == v_id: layer_id = edge_tuple[1] self.adj_list[u_id][index] = (new_v_id, layer_id) self.layer_list[layer_id].output = self.node_list[new_v_id] break for index, edge_tuple in enumerate(self.reverse_adj_list[v_id]): if edge_tuple[0] == u_id: layer_id = edge_tuple[1] self.reverse_adj_list[v_id].remove(edge_tuple) break self.reverse_adj_list[new_v_id].append((u_id, layer_id)) for index, value in enumerate(self.layer_id_to_output_node_ids[layer_id]): if value == v_id: self.layer_id_to_output_node_ids[layer_id][index] = new_v_id break
[ "def", "_redirect_edge", "(", "self", ",", "u_id", ",", "v_id", ",", "new_v_id", ")", ":", "layer_id", "=", "None", "for", "index", ",", "edge_tuple", "in", "enumerate", "(", "self", ".", "adj_list", "[", "u_id", "]", ")", ":", "if", "edge_tuple", "[", "0", "]", "==", "v_id", ":", "layer_id", "=", "edge_tuple", "[", "1", "]", "self", ".", "adj_list", "[", "u_id", "]", "[", "index", "]", "=", "(", "new_v_id", ",", "layer_id", ")", "self", ".", "layer_list", "[", "layer_id", "]", ".", "output", "=", "self", ".", "node_list", "[", "new_v_id", "]", "break", "for", "index", ",", "edge_tuple", "in", "enumerate", "(", "self", ".", "reverse_adj_list", "[", "v_id", "]", ")", ":", "if", "edge_tuple", "[", "0", "]", "==", "u_id", ":", "layer_id", "=", "edge_tuple", "[", "1", "]", "self", ".", "reverse_adj_list", "[", "v_id", "]", ".", "remove", "(", "edge_tuple", ")", "break", "self", ".", "reverse_adj_list", "[", "new_v_id", "]", ".", "append", "(", "(", "u_id", ",", "layer_id", ")", ")", "for", "index", ",", "value", "in", "enumerate", "(", "self", ".", "layer_id_to_output_node_ids", "[", "layer_id", "]", ")", ":", "if", "value", "==", "v_id", ":", "self", ".", "layer_id_to_output_node_ids", "[", "layer_id", "]", "[", "index", "]", "=", "new_v_id", "break" ]
Redirect the layer to a new node. Change the edge originally from `u_id` to `v_id` into an edge from `u_id` to `new_v_id` while keeping all other property of the edge the same.
[ "Redirect", "the", "layer", "to", "a", "new", "node", ".", "Change", "the", "edge", "originally", "from", "u_id", "to", "v_id", "into", "an", "edge", "from", "u_id", "to", "new_v_id", "while", "keeping", "all", "other", "property", "of", "the", "edge", "the", "same", "." ]
python
train
47.304348
openpaperwork/paperwork-backend
paperwork_backend/docimport.py
https://github.com/openpaperwork/paperwork-backend/blob/114b831e94e039e68b339751fd18250877abad76/paperwork_backend/docimport.py#L151-L161
def can_import(self, file_uris, current_doc=None): """ Check that the specified file looks like a PDF """ if len(file_uris) <= 0: return False for uri in file_uris: uri = self.fs.safe(uri) if not self.check_file_type(uri): return False return True
[ "def", "can_import", "(", "self", ",", "file_uris", ",", "current_doc", "=", "None", ")", ":", "if", "len", "(", "file_uris", ")", "<=", "0", ":", "return", "False", "for", "uri", "in", "file_uris", ":", "uri", "=", "self", ".", "fs", ".", "safe", "(", "uri", ")", "if", "not", "self", ".", "check_file_type", "(", "uri", ")", ":", "return", "False", "return", "True" ]
Check that the specified file looks like a PDF
[ "Check", "that", "the", "specified", "file", "looks", "like", "a", "PDF" ]
python
train
30.636364
minhhoit/yacms
yacms/pages/admin.py
https://github.com/minhhoit/yacms/blob/2921b706b7107c6e8c5f2bbf790ff11f85a2167f/yacms/pages/admin.py#L175-L182
def save_form(self, request, form, change): """ Don't show links in the sitemap. """ obj = form.save(commit=False) if not obj.id and "in_sitemap" not in form.fields: obj.in_sitemap = False return super(LinkAdmin, self).save_form(request, form, change)
[ "def", "save_form", "(", "self", ",", "request", ",", "form", ",", "change", ")", ":", "obj", "=", "form", ".", "save", "(", "commit", "=", "False", ")", "if", "not", "obj", ".", "id", "and", "\"in_sitemap\"", "not", "in", "form", ".", "fields", ":", "obj", ".", "in_sitemap", "=", "False", "return", "super", "(", "LinkAdmin", ",", "self", ")", ".", "save_form", "(", "request", ",", "form", ",", "change", ")" ]
Don't show links in the sitemap.
[ "Don", "t", "show", "links", "in", "the", "sitemap", "." ]
python
train
38
bfontaine/term2048
term2048/ui.py
https://github.com/bfontaine/term2048/blob/8b5ce8b65f44f20a7ad36022a34dce56184070af/term2048/ui.py#L30-L41
def parse_cli_args(): """parse args from the CLI and return a dict""" parser = argparse.ArgumentParser(description='2048 in your terminal') parser.add_argument('--mode', dest='mode', type=str, default=None, help='colors mode (dark or light)') parser.add_argument('--az', dest='azmode', action='store_true', help='Use the letters a-z instead of numbers') parser.add_argument('--resume', dest='resume', action='store_true', help='restart the game from where you left') parser.add_argument('-v', '--version', action='store_true') parser.add_argument('-r', '--rules', action='store_true') return vars(parser.parse_args())
[ "def", "parse_cli_args", "(", ")", ":", "parser", "=", "argparse", ".", "ArgumentParser", "(", "description", "=", "'2048 in your terminal'", ")", "parser", ".", "add_argument", "(", "'--mode'", ",", "dest", "=", "'mode'", ",", "type", "=", "str", ",", "default", "=", "None", ",", "help", "=", "'colors mode (dark or light)'", ")", "parser", ".", "add_argument", "(", "'--az'", ",", "dest", "=", "'azmode'", ",", "action", "=", "'store_true'", ",", "help", "=", "'Use the letters a-z instead of numbers'", ")", "parser", ".", "add_argument", "(", "'--resume'", ",", "dest", "=", "'resume'", ",", "action", "=", "'store_true'", ",", "help", "=", "'restart the game from where you left'", ")", "parser", ".", "add_argument", "(", "'-v'", ",", "'--version'", ",", "action", "=", "'store_true'", ")", "parser", ".", "add_argument", "(", "'-r'", ",", "'--rules'", ",", "action", "=", "'store_true'", ")", "return", "vars", "(", "parser", ".", "parse_args", "(", ")", ")" ]
parse args from the CLI and return a dict
[ "parse", "args", "from", "the", "CLI", "and", "return", "a", "dict" ]
python
train
59.166667
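A self-contained sketch of the same pattern with a trimmed-down parser; passing an explicit argv list shows the dict that vars(parser.parse_args()) returns.

import argparse

parser = argparse.ArgumentParser(description="2048 in your terminal")
parser.add_argument("--az", dest="azmode", action="store_true")
parser.add_argument("--mode", dest="mode", type=str, default=None)

# An explicit argv mirrors a CLI invocation of `term2048 --az`
args = vars(parser.parse_args(["--az"]))
assert args == {"azmode": True, "mode": None}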
apache/airflow
airflow/models/taskinstance.py
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/models/taskinstance.py#L470-L474
def key(self): """ Returns a tuple that identifies the task instance uniquely """ return self.dag_id, self.task_id, self.execution_date, self.try_number
[ "def", "key", "(", "self", ")", ":", "return", "self", ".", "dag_id", ",", "self", ".", "task_id", ",", "self", ".", "execution_date", ",", "self", ".", "try_number" ]
Returns a tuple that identifies the task instance uniquely
[ "Returns", "a", "tuple", "that", "identifies", "the", "task", "instance", "uniquely" ]
python
test
36
yougov/pmxbot
pmxbot/system.py
https://github.com/yougov/pmxbot/blob/5da84a3258a0fd73cb35b60e39769a5d7bfb2ba7/pmxbot/system.py#L17-L49
def help(rest): """Help (this command)""" rs = rest.strip() if rs: # give help for matching commands for handler in Handler._registry: if handler.name == rs.lower(): yield '!%s: %s' % (handler.name, handler.doc) break else: yield "command not found" return # give help for all commands def mk_entries(): handlers = ( handler for handler in Handler._registry if type(handler) is pmxbot.core.CommandHandler ) handlers = sorted(handlers, key=operator.attrgetter('name')) for handler in handlers: res = "!" + handler.name if handler.aliases: alias_names = (alias.name for alias in handler.aliases) res += " (%s)" % ', '.join(alias_names) yield res o = io.StringIO(" ".join(mk_entries())) more = o.read(160) while more: yield more time.sleep(0.3) more = o.read(160)
[ "def", "help", "(", "rest", ")", ":", "rs", "=", "rest", ".", "strip", "(", ")", "if", "rs", ":", "# give help for matching commands", "for", "handler", "in", "Handler", ".", "_registry", ":", "if", "handler", ".", "name", "==", "rs", ".", "lower", "(", ")", ":", "yield", "'!%s: %s'", "%", "(", "handler", ".", "name", ",", "handler", ".", "doc", ")", "break", "else", ":", "yield", "\"command not found\"", "return", "# give help for all commands", "def", "mk_entries", "(", ")", ":", "handlers", "=", "(", "handler", "for", "handler", "in", "Handler", ".", "_registry", "if", "type", "(", "handler", ")", "is", "pmxbot", ".", "core", ".", "CommandHandler", ")", "handlers", "=", "sorted", "(", "handlers", ",", "key", "=", "operator", ".", "attrgetter", "(", "'name'", ")", ")", "for", "handler", "in", "handlers", ":", "res", "=", "\"!\"", "+", "handler", ".", "name", "if", "handler", ".", "aliases", ":", "alias_names", "=", "(", "alias", ".", "name", "for", "alias", "in", "handler", ".", "aliases", ")", "res", "+=", "\" (%s)\"", "%", "', '", ".", "join", "(", "alias_names", ")", "yield", "res", "o", "=", "io", ".", "StringIO", "(", "\" \"", ".", "join", "(", "mk_entries", "(", ")", ")", ")", "more", "=", "o", ".", "read", "(", "160", ")", "while", "more", ":", "yield", "more", "time", ".", "sleep", "(", "0.3", ")", "more", "=", "o", ".", "read", "(", "160", ")" ]
Help (this command)
[ "Help", "(", "this", "command", ")" ]
python
train
24.212121
PayEx/pypayex
payex/utils.py
https://github.com/PayEx/pypayex/blob/549ba7cc47f112a7aa3417fcf87ff07bc74cd9ab/payex/utils.py#L12-L23
def normalize_value(val): """ Normalize strings with booleans into Python types. """ if val is not None: if val.lower() == 'false': val = False elif val.lower() == 'true': val = True return val
[ "def", "normalize_value", "(", "val", ")", ":", "if", "val", "is", "not", "None", ":", "if", "val", ".", "lower", "(", ")", "==", "'false'", ":", "val", "=", "False", "elif", "val", ".", "lower", "(", ")", "==", "'true'", ":", "val", "=", "True", "return", "val" ]
Normalize strings with booleans into Python types.
[ "Normalize", "strings", "with", "booleans", "into", "Python", "types", "." ]
python
train
21
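A usage sketch, assuming normalize_value is importable from the payex/utils.py path shown above; the inputs are illustrative.

from payex.utils import normalize_value

assert normalize_value("TRUE") is True    # case-insensitive booleans
assert normalize_value("false") is False
assert normalize_value("123") == "123"    # non-boolean strings pass through
assert normalize_value(None) is None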
PySimpleGUI/PySimpleGUI
DemoPrograms/Demo_Multithreaded_Queued.py
https://github.com/PySimpleGUI/PySimpleGUI/blob/08184197f5bd4580ab5e5aca28bdda30f87b86fc/DemoPrograms/Demo_Multithreaded_Queued.py#L68-L100
def the_gui(gui_queue): """ Starts and executes the GUI Reads data from a Queue and displays the data to the window Returns when the user exits / closes the window (that means it does NOT return until the user exits the window) :param gui_queue: Queue the GUI should read from :return: """ layout = [ [sg.Text('Multithreaded Window Example')], [sg.Text('', size=(15,1), key='_OUTPUT_')], [sg.Output(size=(40,6))], [sg.Button('Exit')],] window = sg.Window('Multithreaded Window').Layout(layout) # --------------------- EVENT LOOP --------------------- while True: event, values = window.Read(timeout=100) # wait for up to 100 ms for a GUI event if event is None or event == 'Exit': break #--------------- Loop through all messages coming in from threads --------------- while True: # loop executes until runs out of messages in Queue try: # see if something has been posted to Queue message = gui_queue.get_nowait() except queue.Empty: # get_nowait() will get exception when Queue is empty break # break from the loop if no more messages are queued up # if message received from queue, display the message in the Window if message: window.Element('_OUTPUT_').Update(message) window.Refresh() # do a refresh because could be showing multiple messages before next Read # if user exits the window, then close the window and exit the GUI func window.Close()
[ "def", "the_gui", "(", "gui_queue", ")", ":", "layout", "=", "[", "[", "sg", ".", "Text", "(", "'Multithreaded Window Example'", ")", "]", ",", "[", "sg", ".", "Text", "(", "''", ",", "size", "=", "(", "15", ",", "1", ")", ",", "key", "=", "'_OUTPUT_'", ")", "]", ",", "[", "sg", ".", "Output", "(", "size", "=", "(", "40", ",", "6", ")", ")", "]", ",", "[", "sg", ".", "Button", "(", "'Exit'", ")", "]", ",", "]", "window", "=", "sg", ".", "Window", "(", "'Multithreaded Window'", ")", ".", "Layout", "(", "layout", ")", "# --------------------- EVENT LOOP ---------------------", "while", "True", ":", "event", ",", "values", "=", "window", ".", "Read", "(", "timeout", "=", "100", ")", "# wait for up to 100 ms for a GUI event", "if", "event", "is", "None", "or", "event", "==", "'Exit'", ":", "break", "#--------------- Loop through all messages coming in from threads ---------------", "while", "True", ":", "# loop executes until runs out of messages in Queue", "try", ":", "# see if something has been posted to Queue", "message", "=", "gui_queue", ".", "get_nowait", "(", ")", "except", "queue", ".", "Empty", ":", "# get_nowait() will get exception when Queue is empty", "break", "# break from the loop if no more messages are queued up", "# if message received from queue, display the message in the Window", "if", "message", ":", "window", ".", "Element", "(", "'_OUTPUT_'", ")", ".", "Update", "(", "message", ")", "window", ".", "Refresh", "(", ")", "# do a refresh because could be showing multiple messages before next Read", "# if user exits the window, then close the window and exit the GUI func", "window", ".", "Close", "(", ")" ]
Starts and executes the GUI Reads data from a Queue and displays the data to the window Returns when the user exits / closes the window (that means it does NOT return until the user exits the window) :param gui_queue: Queue the GUI should read from :return:
[ "Starts", "and", "executes", "the", "GUI", "Reads", "data", "from", "a", "Queue", "and", "displays", "the", "data", "to", "the", "window", "Returns", "when", "the", "user", "exits", "/", "closes", "the", "window", "(", "that", "means", "it", "does", "NOT", "return", "until", "the", "user", "exits", "the", "window", ")", ":", "param", "gui_queue", ":", "Queue", "the", "GUI", "should", "read", "from", ":", "return", ":" ]
python
train
49.878788
O365/python-o365
O365/utils/utils.py
https://github.com/O365/python-o365/blob/02a71cf3775cc6a3c042e003365d6a07c8c75a73/O365/utils/utils.py#L259-L270
def get_first_recipient_with_address(self): """ Returns the first recipient found with a non blank address :return: First Recipient :rtype: Recipient """ recipients_with_address = [recipient for recipient in self._recipients if recipient.address] if recipients_with_address: return recipients_with_address[0] else: return None
[ "def", "get_first_recipient_with_address", "(", "self", ")", ":", "recipients_with_address", "=", "[", "recipient", "for", "recipient", "in", "self", ".", "_recipients", "if", "recipient", ".", "address", "]", "if", "recipients_with_address", ":", "return", "recipients_with_address", "[", "0", "]", "else", ":", "return", "None" ]
Returns the first recipient found with a non blank address :return: First Recipient :rtype: Recipient
[ "Returns", "the", "first", "recipient", "found", "with", "a", "non", "blank", "address" ]
python
train
35.916667