Dataset columns (each record below lists its values in this order):

repo              string, 7 to 54 characters; the source repository, e.g. "websauna/pyramid_notebook"
path              string, 4 to 192 characters; path of the source file within the repository
url               string, 87 to 284 characters; GitHub permalink to the function's line range
code              string, 78 to 104k characters; full source of the function
code_tokens       list of strings; the same code, tokenized
docstring         string, 1 to 46.9k characters; the function's docstring
docstring_tokens  list of strings; the docstring, tokenized
language          string, 1 distinct value ("python")
partition         string, 3 distinct values ("train", "valid", "test")
websauna/pyramid_notebook
pyramid_notebook/notebookmanager.py
https://github.com/websauna/pyramid_notebook/blob/8a7ecfa0259810de1a818e4b415a62811a7b077a/pyramid_notebook/notebookmanager.py#L128-L161
def start_notebook(self, name, context: dict, fg=False):
    """Start new IPython Notebook daemon.

    :param name: The owner of the Notebook will be *name*. He/she gets a new Notebook
        content folder created where all files are placed.

    :param context: Extra context information passed to the started Notebook. This must
        contain {context_hash:int} parameter used to identify the launch parameters for the notebook
    """
    assert context
    assert type(context) == dict
    assert "context_hash" in context
    assert type(context["context_hash"]) == int

    http_port = self.pick_port()
    assert http_port
    context = context.copy()
    context["http_port"] = http_port

    # We can't proxy websocket URLs, so let them go directly through localhost or have front end server to do proxying (nginx)
    if "websocket_url" not in context:
        context["websocket_url"] = "ws://localhost:{port}".format(port=http_port)

    if "{port}" in context["websocket_url"]:
        # Do port substitution for the websocket URL
        context["websocket_url"] = context["websocket_url"].format(port=http_port)

    pid = self.get_pid(name)

    assert "terminated" not in context
    comm.set_context(pid, context)

    if fg:
        self.exec_notebook_daemon_command(name, "fg", port=http_port)
    else:
        self.exec_notebook_daemon_command(name, "start", port=http_port)
[ "def", "start_notebook", "(", "self", ",", "name", ",", "context", ":", "dict", ",", "fg", "=", "False", ")", ":", "assert", "context", "assert", "type", "(", "context", ")", "==", "dict", "assert", "\"context_hash\"", "in", "context", "assert", "type", "(", "context", "[", "\"context_hash\"", "]", ")", "==", "int", "http_port", "=", "self", ".", "pick_port", "(", ")", "assert", "http_port", "context", "=", "context", ".", "copy", "(", ")", "context", "[", "\"http_port\"", "]", "=", "http_port", "# We can't proxy websocket URLs, so let them go directly through localhost or have front end server to do proxying (nginx)", "if", "\"websocket_url\"", "not", "in", "context", ":", "context", "[", "\"websocket_url\"", "]", "=", "\"ws://localhost:{port}\"", ".", "format", "(", "port", "=", "http_port", ")", "if", "\"{port}\"", "in", "context", "[", "\"websocket_url\"", "]", ":", "# Do port substitution for the websocket URL", "context", "[", "\"websocket_url\"", "]", "=", "context", "[", "\"websocket_url\"", "]", ".", "format", "(", "port", "=", "http_port", ")", "pid", "=", "self", ".", "get_pid", "(", "name", ")", "assert", "\"terminated\"", "not", "in", "context", "comm", ".", "set_context", "(", "pid", ",", "context", ")", "if", "fg", ":", "self", ".", "exec_notebook_daemon_command", "(", "name", ",", "\"fg\"", ",", "port", "=", "http_port", ")", "else", ":", "self", ".", "exec_notebook_daemon_command", "(", "name", ",", "\"start\"", ",", "port", "=", "http_port", ")" ]
Start new IPython Notebook daemon. :param name: The owner of the Notebook will be *name*. He/she gets a new Notebook content folder created where all files are placed. :param context: Extra context information passed to the started Notebook. This must contain {context_hash:int} parameter used to identify the launch parameters for the notebook
[ "Start", "new", "IPython", "Notebook", "daemon", "." ]
python
train
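A minimal usage sketch for the start_notebook record above. The NotebookManager constructor arguments and the context hash value are assumptions, not part of this record.

from pyramid_notebook.notebookmanager import NotebookManager

# Hypothetical: how the manager is configured is not shown in this record
manager = NotebookManager(notebook_folder="/tmp/notebooks")

# The context must carry an integer context_hash identifying the launch parameters
context = {"context_hash": 123456789}

# Start a background daemon for user "alice"; pass fg=True to run it in the foreground
manager.start_notebook("alice", context, fg=False)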
Kortemme-Lab/klab
klab/benchmarking/analysis/ddg_monomeric_stability_analysis.py
https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/benchmarking/analysis/ddg_monomeric_stability_analysis.py#L1759-L1833
def plot_optimum_prediction_fraction_correct_cutoffs_over_range(self, analysis_set, min_stability_classication_x_cutoff, max_stability_classication_x_cutoff, suppress_plot = False, analysis_file_prefix = None, verbose = True): '''Plots the optimum cutoff for the predictions to maximize the fraction correct metric over a range of experimental cutoffs. Returns the average scalar corresponding to the best value of fraction correct over a range of cutoff values for the experimental cutoffs.''' # Filenames analysis_set_prefix = '' #if analysis_set: # analysis_set_prefix = '_{0}'.format(analysis_set) plot_filename = None if not suppress_plot: output_filename_prefix = '{0}{1}optimum_fraction_correct_at_varying_kcal_mol'.format(analysis_file_prefix, analysis_set_prefix) plot_filename = output_filename_prefix + '.png' csv_filename = output_filename_prefix + '.txt' # Create CSV input lines = ['ExperimentalCutoff,BestPredictionCutoff'] x_cutoff = min_stability_classication_x_cutoff x_values = [] y_values = [] avg_scale = 0 plot_graph = self.generate_plots and not(suppress_plot) while x_cutoff < max_stability_classication_x_cutoff + 0.1: max_value_cutoff, max_value, fraction_correct_range = self.determine_optimum_fraction_correct_cutoffs(analysis_set, self.dataframe, x_cutoff) if plot_graph: lines.append(','.join(map(str, (x_cutoff, max_value_cutoff)))) x_values.append(x_cutoff) y_values.append(max_value_cutoff) avg_scale += max_value_cutoff / x_cutoff x_cutoff += 0.1 if plot_graph: write_file(csv_filename, '\n'.join(lines)) # Determine the average scalar needed to fit the plot avg_scale = avg_scale / len(x_values) x_values = numpy.array(x_values) y_values = numpy.array(y_values) scalars = y_values / x_values average_scalar = numpy.mean(scalars) plot_label_1 = 'Scalar == %0.2f' % average_scalar plot_label_2 = 'sigma == %0.2f' % numpy.std(scalars) # Create plot if plot_graph: if not(os.path.exists(plot_filename) and not(self.recreate_graphs)): if verbose: self.log('Saving scatterplot to %s.' % plot_filename) self.log('Saving plot of approximate optimal fraction correct cutoffs over varying experimental cutoffs to %s.' % plot_filename) title = 'Optimum cutoff for fraction correct metric at varying experimental cutoffs' if analysis_set: title += ' for {0}'.format(analysis_set) r_script = '''library(ggplot2) library(gridExtra) library(scales) library(qualV) png('%(plot_filename)s', height=4096, width=4096, bg="white", res=600) plot_data <- read.csv('%(csv_filename)s', header=T) max_y = max(plot_data$BestPredictionCutoff) p <- ggplot(data = plot_data, aes(x = ExperimentalCutoff, y = BestPredictionCutoff)) + xlab("Experimental cutoff (kcal/mol)") + ylab("Optimal prediction cutoff (energy units)") + ggtitle("%(title)s") + geom_point() + geom_line() + geom_smooth() + geom_text(hjust=0, size=4, color="black", aes(0.5, max_y, fontface="plain", family = "sans", label="%(plot_label_1)s"), parse = T) + geom_text(hjust=0, size=4, color="black", aes(0.5, max_y - 0.5, fontface="plain", family = "sans", label="%(plot_label_2)s"), parse = T) p dev.off()''' RInterface._runRScript(r_script % locals()) return average_scalar, plot_filename
[ "def", "plot_optimum_prediction_fraction_correct_cutoffs_over_range", "(", "self", ",", "analysis_set", ",", "min_stability_classication_x_cutoff", ",", "max_stability_classication_x_cutoff", ",", "suppress_plot", "=", "False", ",", "analysis_file_prefix", "=", "None", ",", "verbose", "=", "True", ")", ":", "# Filenames", "analysis_set_prefix", "=", "''", "#if analysis_set:", "# analysis_set_prefix = '_{0}'.format(analysis_set)", "plot_filename", "=", "None", "if", "not", "suppress_plot", ":", "output_filename_prefix", "=", "'{0}{1}optimum_fraction_correct_at_varying_kcal_mol'", ".", "format", "(", "analysis_file_prefix", ",", "analysis_set_prefix", ")", "plot_filename", "=", "output_filename_prefix", "+", "'.png'", "csv_filename", "=", "output_filename_prefix", "+", "'.txt'", "# Create CSV input", "lines", "=", "[", "'ExperimentalCutoff,BestPredictionCutoff'", "]", "x_cutoff", "=", "min_stability_classication_x_cutoff", "x_values", "=", "[", "]", "y_values", "=", "[", "]", "avg_scale", "=", "0", "plot_graph", "=", "self", ".", "generate_plots", "and", "not", "(", "suppress_plot", ")", "while", "x_cutoff", "<", "max_stability_classication_x_cutoff", "+", "0.1", ":", "max_value_cutoff", ",", "max_value", ",", "fraction_correct_range", "=", "self", ".", "determine_optimum_fraction_correct_cutoffs", "(", "analysis_set", ",", "self", ".", "dataframe", ",", "x_cutoff", ")", "if", "plot_graph", ":", "lines", ".", "append", "(", "','", ".", "join", "(", "map", "(", "str", ",", "(", "x_cutoff", ",", "max_value_cutoff", ")", ")", ")", ")", "x_values", ".", "append", "(", "x_cutoff", ")", "y_values", ".", "append", "(", "max_value_cutoff", ")", "avg_scale", "+=", "max_value_cutoff", "/", "x_cutoff", "x_cutoff", "+=", "0.1", "if", "plot_graph", ":", "write_file", "(", "csv_filename", ",", "'\\n'", ".", "join", "(", "lines", ")", ")", "# Determine the average scalar needed to fit the plot", "avg_scale", "=", "avg_scale", "/", "len", "(", "x_values", ")", "x_values", "=", "numpy", ".", "array", "(", "x_values", ")", "y_values", "=", "numpy", ".", "array", "(", "y_values", ")", "scalars", "=", "y_values", "/", "x_values", "average_scalar", "=", "numpy", ".", "mean", "(", "scalars", ")", "plot_label_1", "=", "'Scalar == %0.2f'", "%", "average_scalar", "plot_label_2", "=", "'sigma == %0.2f'", "%", "numpy", ".", "std", "(", "scalars", ")", "# Create plot", "if", "plot_graph", ":", "if", "not", "(", "os", ".", "path", ".", "exists", "(", "plot_filename", ")", "and", "not", "(", "self", ".", "recreate_graphs", ")", ")", ":", "if", "verbose", ":", "self", ".", "log", "(", "'Saving scatterplot to %s.'", "%", "plot_filename", ")", "self", ".", "log", "(", "'Saving plot of approximate optimal fraction correct cutoffs over varying experimental cutoffs to %s.'", "%", "plot_filename", ")", "title", "=", "'Optimum cutoff for fraction correct metric at varying experimental cutoffs'", "if", "analysis_set", ":", "title", "+=", "' for {0}'", ".", "format", "(", "analysis_set", ")", "r_script", "=", "'''library(ggplot2)\nlibrary(gridExtra)\nlibrary(scales)\nlibrary(qualV)\n\npng('%(plot_filename)s', height=4096, width=4096, bg=\"white\", res=600)\nplot_data <- read.csv('%(csv_filename)s', header=T)\n\nmax_y = max(plot_data$BestPredictionCutoff)\np <- ggplot(data = plot_data, aes(x = ExperimentalCutoff, y = BestPredictionCutoff)) +\n xlab(\"Experimental cutoff (kcal/mol)\") +\n ylab(\"Optimal prediction cutoff (energy units)\") +\n ggtitle(\"%(title)s\") +\n geom_point() +\n geom_line() +\n geom_smooth() +\n 
geom_text(hjust=0, size=4, color=\"black\", aes(0.5, max_y, fontface=\"plain\", family = \"sans\", label=\"%(plot_label_1)s\"), parse = T) +\n geom_text(hjust=0, size=4, color=\"black\", aes(0.5, max_y - 0.5, fontface=\"plain\", family = \"sans\", label=\"%(plot_label_2)s\"), parse = T)\np\ndev.off()'''", "RInterface", ".", "_runRScript", "(", "r_script", "%", "locals", "(", ")", ")", "return", "average_scalar", ",", "plot_filename" ]
Plots the optimum cutoff for the predictions to maximize the fraction correct metric over a range of experimental cutoffs. Returns the average scalar corresponding to the best value of fraction correct over a range of cutoff values for the experimental cutoffs.
[ "Plots", "the", "optimum", "cutoff", "for", "the", "predictions", "to", "maximize", "the", "fraction", "correct", "metric", "over", "a", "range", "of", "experimental", "cutoffs", ".", "Returns", "the", "average", "scalar", "corresponding", "to", "the", "best", "value", "of", "fraction", "correct", "over", "a", "range", "of", "cutoff", "values", "for", "the", "experimental", "cutoffs", "." ]
python
train
swharden/SWHLab
doc/oldcode/swhlab/core/common.py
https://github.com/swharden/SWHLab/blob/a86c3c65323cec809a4bd4f81919644927094bf5/doc/oldcode/swhlab/core/common.py#L113-L123
def dictAvg(listOfDicts,key,stdErr=False):
    """Given a list (l) of dicts (d), return AV and SD."""
    vals=dictVals(listOfDicts,key)
    if len(vals) and np.any(vals):
        av=np.nanmean(vals)
        er=np.nanstd(vals)
        if stdErr:
            er=er/np.sqrt(np.count_nonzero(~np.isnan(er)))
    else:
        av,er=np.nan,np.nan
    return av,er
[ "def", "dictAvg", "(", "listOfDicts", ",", "key", ",", "stdErr", "=", "False", ")", ":", "vals", "=", "dictVals", "(", "listOfDicts", ",", "key", ")", "if", "len", "(", "vals", ")", "and", "np", ".", "any", "(", "vals", ")", ":", "av", "=", "np", ".", "nanmean", "(", "vals", ")", "er", "=", "np", ".", "nanstd", "(", "vals", ")", "if", "stdErr", ":", "er", "=", "er", "/", "np", ".", "sqrt", "(", "np", ".", "count_nonzero", "(", "~", "np", ".", "isnan", "(", "er", ")", ")", ")", "else", ":", "av", ",", "er", "=", "np", ".", "nan", ",", "np", ".", "nan", "return", "av", ",", "er" ]
Given a list (l) of dicts (d), return AV and SD.
[ "Given", "a", "list", "(", "l", ")", "of", "dicts", "(", "d", ")", "return", "AV", "and", "SD", "." ]
python
valid
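A short, hedged usage sketch of dictAvg above. It assumes dictAvg and its helper dictVals (which collects the values stored under the key) are in scope, as they live in the same module; the data is invented.

# Hypothetical data: three sweeps, each summarized as a dict
sweeps = [{"freq": 10.0}, {"freq": 12.0}, {"freq": 11.0}]

# Mean and standard deviation of the "freq" values across the sweeps
av, sd = dictAvg(sweeps, "freq")

# Ask for the standard error instead (stdErr=True)
av, se = dictAvg(sweeps, "freq", stdErr=True)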
WTRMQDev/lnoise
lnoise/noisetypes.py
https://github.com/WTRMQDev/lnoise/blob/7f8d9faf135025a6aac50131d14a34d1009e8cdd/lnoise/noisetypes.py#L113-L125
def append(self, val):
    """Append byte string val to buffer

    If the result exceeds the length of the buffer, behavior depends on
    whether instance was initialized as strict. In strict mode, a ValueError
    is raised. In non-strict mode, the buffer is extended as necessary.
    """
    new_len = self.length + len(val)
    to_add = new_len - len(self.bfr)
    if self.strict and to_add > 0:
        raise ValueError("Cannot resize buffer")
    self.bfr[self.length:new_len] = val
    self.length = new_len
[ "def", "append", "(", "self", ",", "val", ")", ":", "new_len", "=", "self", ".", "length", "+", "len", "(", "val", ")", "to_add", "=", "new_len", "-", "len", "(", "self", ".", "bfr", ")", "if", "self", ".", "strict", "and", "to_add", ">", "0", ":", "raise", "ValueError", "(", "\"Cannot resize buffer\"", ")", "self", ".", "bfr", "[", "self", ".", "length", ":", "new_len", "]", "=", "val", "self", ".", "length", "=", "new_len" ]
Append byte string val to buffer If the result exceeds the length of the buffer, behavior depends on whether instance was initialized as strict. In strict mode, a ValueError is raised. In non-strict mode, the buffer is extended as necessary.
[ "Append", "byte", "string", "val", "to", "buffer", "If", "the", "result", "exceeds", "the", "length", "of", "the", "buffer", "behavior", "depends", "on", "whether", "instance", "was", "initialized", "as", "strict", ".", "In", "strict", "mode", "a", "ValueError", "is", "raised", ".", "In", "non", "-", "strict", "mode", "the", "buffer", "is", "extended", "as", "necessary", "." ]
python
train
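The record above shows only the append method; the owning class and its constructor are not included. The sketch below therefore uses a hypothetical Buffer class and constructor signature purely to illustrate the strict versus non-strict contract described in the docstring.

# Hypothetical instances; the real constructor is not part of this record
strict_buf = Buffer(size=4, strict=True)       # fixed-size buffer
growable_buf = Buffer(size=4, strict=False)    # may be extended as needed

growable_buf.append(b"hello")                  # buffer grows to hold 5 bytes

try:
    strict_buf.append(b"hello")                # 5 bytes > 4: refused
except ValueError:
    print("strict buffer cannot be resized")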
influxdata/influxdb-python
influxdb/influxdb08/client.py
https://github.com/influxdata/influxdb-python/blob/d5d12499f3755199d5eedd8b363450f1cf4073bd/influxdb/influxdb08/client.py#L321-L331
def write_points_with_precision(self, data, time_precision='s'):
    """Write to multiple time series names.

    DEPRECATED.
    """
    warnings.warn(
        "write_points_with_precision is deprecated, and will be removed "
        "in future versions. Please use "
        "``InfluxDBClient.write_points(time_precision='..')`` instead.",
        FutureWarning)
    return self._write_points(data=data, time_precision=time_precision)
[ "def", "write_points_with_precision", "(", "self", ",", "data", ",", "time_precision", "=", "'s'", ")", ":", "warnings", ".", "warn", "(", "\"write_points_with_precision is deprecated, and will be removed \"", "\"in future versions. Please use \"", "\"``InfluxDBClient.write_points(time_precision='..')`` instead.\"", ",", "FutureWarning", ")", "return", "self", ".", "_write_points", "(", "data", "=", "data", ",", "time_precision", "=", "time_precision", ")" ]
Write to multiple time series names. DEPRECATED.
[ "Write", "to", "multiple", "time", "series", "names", "." ]
python
train
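Since the method above is deprecated, here is a hedged sketch of the replacement path suggested by its own warning message. The connection details and the 0.8-style JSON payload are assumptions for illustration only.

import warnings
from influxdb.influxdb08 import InfluxDBClient

client = InfluxDBClient(host="localhost", port=8086)  # connection details are placeholders

data = [{"name": "cpu_load", "columns": ["value"], "points": [[0.64]]}]

# Old call: still works, but emits a FutureWarning
with warnings.catch_warnings(record=True):
    warnings.simplefilter("always")
    client.write_points_with_precision(data, time_precision="s")

# Preferred call per the deprecation message
client.write_points(data, time_precision="s")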
mitsei/dlkit
dlkit/json_/authorization/sessions.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/authorization/sessions.py#L3077-L3096
def is_parent_of_vault(self, id_, vault_id):
    """Tests if an ``Id`` is a direct parent of a vault.

    arg:    id (osid.id.Id): an ``Id``
    arg:    vault_id (osid.id.Id): the ``Id`` of a vault
    return: (boolean) - ``true`` if this ``id`` is a parent of
            ``vault_id,`` ``false`` otherwise
    raise:  NotFound - ``vault_id`` is not found
    raise:  NullArgument - ``id`` or ``vault_id`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure
    *compliance: mandatory -- This method must be implemented.*
    *implementation notes*: If ``id`` not found return ``false``.
    """
    # Implemented from template for
    # osid.resource.BinHierarchySession.is_parent_of_bin
    if self._catalog_session is not None:
        return self._catalog_session.is_parent_of_catalog(id_=id_, catalog_id=vault_id)
    return self._hierarchy_session.is_parent(id_=vault_id, parent_id=id_)
[ "def", "is_parent_of_vault", "(", "self", ",", "id_", ",", "vault_id", ")", ":", "# Implemented from template for", "# osid.resource.BinHierarchySession.is_parent_of_bin", "if", "self", ".", "_catalog_session", "is", "not", "None", ":", "return", "self", ".", "_catalog_session", ".", "is_parent_of_catalog", "(", "id_", "=", "id_", ",", "catalog_id", "=", "vault_id", ")", "return", "self", ".", "_hierarchy_session", ".", "is_parent", "(", "id_", "=", "vault_id", ",", "parent_id", "=", "id_", ")" ]
Tests if an ``Id`` is a direct parent of a vault. arg: id (osid.id.Id): an ``Id`` arg: vault_id (osid.id.Id): the ``Id`` of a vault return: (boolean) - ``true`` if this ``id`` is a parent of ``vault_id,`` ``false`` otherwise raise: NotFound - ``vault_id`` is not found raise: NullArgument - ``id`` or ``vault_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.* *implementation notes*: If ``id`` not found return ``false``.
[ "Tests", "if", "an", "Id", "is", "a", "direct", "parent", "of", "a", "vault", "." ]
python
train
bennylope/smartystreets.py
smartystreets/client.py
https://github.com/bennylope/smartystreets.py/blob/f45e37dd52ea7cec8ed43ce2b64724beb6dbbb69/smartystreets/client.py#L16-L34
def validate_args(f):
    """
    Ensures that *args consist of a consistent type

    :param f: any client method with *args parameter
    :return: function f
    """
    def wrapper(self, args):
        arg_types = set([type(arg) for arg in args])
        if len(arg_types) > 1:
            raise TypeError("Mixed input types are not allowed")
        elif list(arg_types)[0] not in (dict, str):
            raise TypeError("Only dict and str types accepted")
        return f(self, args)
    return wrapper
[ "def", "validate_args", "(", "f", ")", ":", "def", "wrapper", "(", "self", ",", "args", ")", ":", "arg_types", "=", "set", "(", "[", "type", "(", "arg", ")", "for", "arg", "in", "args", "]", ")", "if", "len", "(", "arg_types", ")", ">", "1", ":", "raise", "TypeError", "(", "\"Mixed input types are not allowed\"", ")", "elif", "list", "(", "arg_types", ")", "[", "0", "]", "not", "in", "(", "dict", ",", "str", ")", ":", "raise", "TypeError", "(", "\"Only dict and str types accepted\"", ")", "return", "f", "(", "self", ",", "args", ")", "return", "wrapper" ]
Ensures that *args consist of a consistent type :param f: any client method with *args parameter :return: function f
[ "Ensures", "that", "*", "args", "consist", "of", "a", "consistent", "type" ]
python
train
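A hedged sketch of how a decorator like validate_args above is applied. The client class and method below are stand-ins for illustration, not taken from the smartystreets.py record.

class DummyClient(object):
    """Stand-in for the real client class."""

    @validate_args
    def lookup(self, args):
        return ["processed: {0}".format(a) for a in args]

client = DummyClient()

client.lookup(["123 Main St", "456 Oak Ave"])      # OK: all str
client.lookup([{"street": "123 Main St"}])         # OK: all dict

try:
    client.lookup(["123 Main St", {"street": "456 Oak Ave"}])  # mixed types
except TypeError as err:
    print(err)   # "Mixed input types are not allowed"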
CalebBell/thermo
thermo/viscosity.py
https://github.com/CalebBell/thermo/blob/3857ed023a3e64fd3039a32d53576c24990ef1c3/thermo/viscosity.py#L1454-L1501
def load_all_methods(self):
    r'''Method which picks out coefficients for the specified chemical from
    the various dictionaries and DataFrames storing it. All data is stored
    as attributes. This method also sets :obj:`Tmin`, :obj:`Tmax`,
    :obj:`all_methods` and obj:`all_methods_P` as a set of methods for
    which the data exists for.

    Called on initialization only. See the source code for the variables at
    which the coefficients are stored. The coefficients can safely be
    altered once the class is initialized. This method can be called again
    to reset the parameters.
    '''
    methods, methods_P = [], []
    Tmins, Tmaxs = [], []
    if self.CASRN in _VDISaturationDict:
        methods.append(VDI_TABULAR)
        Ts, props = VDI_tabular_data(self.CASRN, 'Mu (g)')
        self.VDI_Tmin = Ts[0]
        self.VDI_Tmax = Ts[-1]
        self.tabular_data[VDI_TABULAR] = (Ts, props)
        Tmins.append(self.VDI_Tmin); Tmaxs.append(self.VDI_Tmax)
    if has_CoolProp and self.CASRN in coolprop_dict:
        methods.append(COOLPROP); methods_P.append(COOLPROP)
        self.CP_f = coolprop_fluids[self.CASRN]
        Tmins.append(self.CP_f.Tmin); Tmaxs.append(self.CP_f.Tmax)
    if self.CASRN in Perrys2_312.index:
        methods.append(DIPPR_PERRY_8E)
        _, C1, C2, C3, C4, self.Perrys2_312_Tmin, self.Perrys2_312_Tmax = _Perrys2_312_values[Perrys2_312.index.get_loc(self.CASRN)].tolist()
        self.Perrys2_312_coeffs = [C1, C2, C3, C4]
        Tmins.append(self.Perrys2_312_Tmin); Tmaxs.append(self.Perrys2_312_Tmax)
    if self.CASRN in VDI_PPDS_8.index:
        methods.append(VDI_PPDS)
        self.VDI_PPDS_coeffs = _VDI_PPDS_8_values[VDI_PPDS_8.index.get_loc(self.CASRN)].tolist()[1:]
        self.VDI_PPDS_coeffs.reverse()  # in format for horner's scheme
    if all([self.Tc, self.Pc, self.MW]):
        methods.append(GHARAGHEIZI)
        methods.append(YOON_THODOS)
        methods.append(STIEL_THODOS)
        Tmins.append(0); Tmaxs.append(5E3)  # Intelligently set limit
        # GHARAGHEIZI turns nonsesical at ~15 K, YOON_THODOS fine to 0 K,
        # same as STIEL_THODOS
    if all([self.Tc, self.Pc, self.Zc, self.MW]):
        methods.append(LUCAS_GAS)
        Tmins.append(0); Tmaxs.append(1E3)
    self.all_methods = set(methods)
    self.all_methods_P = set(methods_P)
    if Tmins and Tmaxs:
        self.Tmin, self.Tmax = min(Tmins), max(Tmaxs)
[ "def", "load_all_methods", "(", "self", ")", ":", "methods", ",", "methods_P", "=", "[", "]", ",", "[", "]", "Tmins", ",", "Tmaxs", "=", "[", "]", ",", "[", "]", "if", "self", ".", "CASRN", "in", "_VDISaturationDict", ":", "methods", ".", "append", "(", "VDI_TABULAR", ")", "Ts", ",", "props", "=", "VDI_tabular_data", "(", "self", ".", "CASRN", ",", "'Mu (g)'", ")", "self", ".", "VDI_Tmin", "=", "Ts", "[", "0", "]", "self", ".", "VDI_Tmax", "=", "Ts", "[", "-", "1", "]", "self", ".", "tabular_data", "[", "VDI_TABULAR", "]", "=", "(", "Ts", ",", "props", ")", "Tmins", ".", "append", "(", "self", ".", "VDI_Tmin", ")", "Tmaxs", ".", "append", "(", "self", ".", "VDI_Tmax", ")", "if", "has_CoolProp", "and", "self", ".", "CASRN", "in", "coolprop_dict", ":", "methods", ".", "append", "(", "COOLPROP", ")", "methods_P", ".", "append", "(", "COOLPROP", ")", "self", ".", "CP_f", "=", "coolprop_fluids", "[", "self", ".", "CASRN", "]", "Tmins", ".", "append", "(", "self", ".", "CP_f", ".", "Tmin", ")", "Tmaxs", ".", "append", "(", "self", ".", "CP_f", ".", "Tmax", ")", "if", "self", ".", "CASRN", "in", "Perrys2_312", ".", "index", ":", "methods", ".", "append", "(", "DIPPR_PERRY_8E", ")", "_", ",", "C1", ",", "C2", ",", "C3", ",", "C4", ",", "self", ".", "Perrys2_312_Tmin", ",", "self", ".", "Perrys2_312_Tmax", "=", "_Perrys2_312_values", "[", "Perrys2_312", ".", "index", ".", "get_loc", "(", "self", ".", "CASRN", ")", "]", ".", "tolist", "(", ")", "self", ".", "Perrys2_312_coeffs", "=", "[", "C1", ",", "C2", ",", "C3", ",", "C4", "]", "Tmins", ".", "append", "(", "self", ".", "Perrys2_312_Tmin", ")", "Tmaxs", ".", "append", "(", "self", ".", "Perrys2_312_Tmax", ")", "if", "self", ".", "CASRN", "in", "VDI_PPDS_8", ".", "index", ":", "methods", ".", "append", "(", "VDI_PPDS", ")", "self", ".", "VDI_PPDS_coeffs", "=", "_VDI_PPDS_8_values", "[", "VDI_PPDS_8", ".", "index", ".", "get_loc", "(", "self", ".", "CASRN", ")", "]", ".", "tolist", "(", ")", "[", "1", ":", "]", "self", ".", "VDI_PPDS_coeffs", ".", "reverse", "(", ")", "# in format for horner's scheme", "if", "all", "(", "[", "self", ".", "Tc", ",", "self", ".", "Pc", ",", "self", ".", "MW", "]", ")", ":", "methods", ".", "append", "(", "GHARAGHEIZI", ")", "methods", ".", "append", "(", "YOON_THODOS", ")", "methods", ".", "append", "(", "STIEL_THODOS", ")", "Tmins", ".", "append", "(", "0", ")", "Tmaxs", ".", "append", "(", "5E3", ")", "# Intelligently set limit", "# GHARAGHEIZI turns nonsesical at ~15 K, YOON_THODOS fine to 0 K,", "# same as STIEL_THODOS", "if", "all", "(", "[", "self", ".", "Tc", ",", "self", ".", "Pc", ",", "self", ".", "Zc", ",", "self", ".", "MW", "]", ")", ":", "methods", ".", "append", "(", "LUCAS_GAS", ")", "Tmins", ".", "append", "(", "0", ")", "Tmaxs", ".", "append", "(", "1E3", ")", "self", ".", "all_methods", "=", "set", "(", "methods", ")", "self", ".", "all_methods_P", "=", "set", "(", "methods_P", ")", "if", "Tmins", "and", "Tmaxs", ":", "self", ".", "Tmin", ",", "self", ".", "Tmax", "=", "min", "(", "Tmins", ")", ",", "max", "(", "Tmaxs", ")" ]
r'''Method which picks out coefficients for the specified chemical from the various dictionaries and DataFrames storing it. All data is stored as attributes. This method also sets :obj:`Tmin`, :obj:`Tmax`, :obj:`all_methods` and obj:`all_methods_P` as a set of methods for which the data exists for. Called on initialization only. See the source code for the variables at which the coefficients are stored. The coefficients can safely be altered once the class is initialized. This method can be called again to reset the parameters.
[ "r", "Method", "which", "picks", "out", "coefficients", "for", "the", "specified", "chemical", "from", "the", "various", "dictionaries", "and", "DataFrames", "storing", "it", ".", "All", "data", "is", "stored", "as", "attributes", ".", "This", "method", "also", "sets", ":", "obj", ":", "Tmin", ":", "obj", ":", "Tmax", ":", "obj", ":", "all_methods", "and", "obj", ":", "all_methods_P", "as", "a", "set", "of", "methods", "for", "which", "the", "data", "exists", "for", "." ]
python
valid
python-openxml/python-docx
docx/enum/base.py
https://github.com/python-openxml/python-docx/blob/6756f6cd145511d3eb6d1d188beea391b1ddfd53/docx/enum/base.py#L15-L27
def alias(*aliases):
    """
    Decorating a class with @alias('FOO', 'BAR', ..) allows the class to be
    referenced by each of the names provided as arguments.
    """
    def decorator(cls):
        # alias must be set in globals from caller's frame
        caller = sys._getframe(1)
        globals_dict = caller.f_globals
        for alias in aliases:
            globals_dict[alias] = cls
        return cls
    return decorator
[ "def", "alias", "(", "*", "aliases", ")", ":", "def", "decorator", "(", "cls", ")", ":", "# alias must be set in globals from caller's frame", "caller", "=", "sys", ".", "_getframe", "(", "1", ")", "globals_dict", "=", "caller", ".", "f_globals", "for", "alias", "in", "aliases", ":", "globals_dict", "[", "alias", "]", "=", "cls", "return", "cls", "return", "decorator" ]
Decorating a class with @alias('FOO', 'BAR', ..) allows the class to be referenced by each of the names provided as arguments.
[ "Decorating", "a", "class", "with" ]
python
train
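A hedged sketch of the @alias decorator above. The class and alias names below are invented for illustration; python-docx itself applies @alias to its enumeration classes.

@alias('EXAMPLE_ENUM', 'EXAMPLE_ENUMERATION')   # hypothetical alias names
class _ExampleEnumeration(object):
    """Stand-in class used only for illustration."""

# Both aliases now point at the decorated class in this module's globals
assert EXAMPLE_ENUM is _ExampleEnumeration
assert EXAMPLE_ENUMERATION is _ExampleEnumeration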
QuantEcon/QuantEcon.py
quantecon/dle.py
https://github.com/QuantEcon/QuantEcon.py/blob/26a66c552f2a73967d7efb6e1f4b4c4985a12643/quantecon/dle.py#L299-L330
def canonical(self):
    """
    Compute canonical preference representation
    Uses auxiliary problem of 9.4.2, with the preference shock process
    reintroduced
    Calculates pihat, llambdahat and ubhat for the equivalent canonical
    household technology
    """
    Ac1 = np.hstack((self.deltah, np.zeros((self.nh, self.nz))))
    Ac2 = np.hstack((np.zeros((self.nz, self.nh)), self.a22))
    Ac = np.vstack((Ac1, Ac2))
    Bc = np.vstack((self.thetah, np.zeros((self.nz, self.nc))))
    Cc = np.vstack((np.zeros((self.nh, self.nw)), self.c2))
    Rc1 = np.hstack((self.llambda.T.dot(self.llambda), - self.llambda.T.dot(self.ub)))
    Rc2 = np.hstack((-self.ub.T.dot(self.llambda), self.ub.T.dot(self.ub)))
    Rc = np.vstack((Rc1, Rc2))
    Qc = self.pih.T.dot(self.pih)
    Nc = np.hstack(
        (self.pih.T.dot(self.llambda), -self.pih.T.dot(self.ub)))

    lq_aux = LQ(Qc, Rc, Ac, Bc, N=Nc, beta=self.beta)

    P1, F1, d1 = lq_aux.stationary_values()

    self.F_b = F1[:, 0:self.nh]
    self.F_f = F1[:, self.nh:]

    self.pihat = np.linalg.cholesky(
        self.pih.T.dot(self.pih) +
        self.beta.dot(self.thetah.T).dot(P1[0:self.nh, 0:self.nh]).dot(self.thetah)).T
    self.llambdahat = self.pihat.dot(self.F_b)
    self.ubhat = - self.pihat.dot(self.F_f)

    return
[ "def", "canonical", "(", "self", ")", ":", "Ac1", "=", "np", ".", "hstack", "(", "(", "self", ".", "deltah", ",", "np", ".", "zeros", "(", "(", "self", ".", "nh", ",", "self", ".", "nz", ")", ")", ")", ")", "Ac2", "=", "np", ".", "hstack", "(", "(", "np", ".", "zeros", "(", "(", "self", ".", "nz", ",", "self", ".", "nh", ")", ")", ",", "self", ".", "a22", ")", ")", "Ac", "=", "np", ".", "vstack", "(", "(", "Ac1", ",", "Ac2", ")", ")", "Bc", "=", "np", ".", "vstack", "(", "(", "self", ".", "thetah", ",", "np", ".", "zeros", "(", "(", "self", ".", "nz", ",", "self", ".", "nc", ")", ")", ")", ")", "Cc", "=", "np", ".", "vstack", "(", "(", "np", ".", "zeros", "(", "(", "self", ".", "nh", ",", "self", ".", "nw", ")", ")", ",", "self", ".", "c2", ")", ")", "Rc1", "=", "np", ".", "hstack", "(", "(", "self", ".", "llambda", ".", "T", ".", "dot", "(", "self", ".", "llambda", ")", ",", "-", "self", ".", "llambda", ".", "T", ".", "dot", "(", "self", ".", "ub", ")", ")", ")", "Rc2", "=", "np", ".", "hstack", "(", "(", "-", "self", ".", "ub", ".", "T", ".", "dot", "(", "self", ".", "llambda", ")", ",", "self", ".", "ub", ".", "T", ".", "dot", "(", "self", ".", "ub", ")", ")", ")", "Rc", "=", "np", ".", "vstack", "(", "(", "Rc1", ",", "Rc2", ")", ")", "Qc", "=", "self", ".", "pih", ".", "T", ".", "dot", "(", "self", ".", "pih", ")", "Nc", "=", "np", ".", "hstack", "(", "(", "self", ".", "pih", ".", "T", ".", "dot", "(", "self", ".", "llambda", ")", ",", "-", "self", ".", "pih", ".", "T", ".", "dot", "(", "self", ".", "ub", ")", ")", ")", "lq_aux", "=", "LQ", "(", "Qc", ",", "Rc", ",", "Ac", ",", "Bc", ",", "N", "=", "Nc", ",", "beta", "=", "self", ".", "beta", ")", "P1", ",", "F1", ",", "d1", "=", "lq_aux", ".", "stationary_values", "(", ")", "self", ".", "F_b", "=", "F1", "[", ":", ",", "0", ":", "self", ".", "nh", "]", "self", ".", "F_f", "=", "F1", "[", ":", ",", "self", ".", "nh", ":", "]", "self", ".", "pihat", "=", "np", ".", "linalg", ".", "cholesky", "(", "self", ".", "pih", ".", "T", ".", "dot", "(", "self", ".", "pih", ")", "+", "self", ".", "beta", ".", "dot", "(", "self", ".", "thetah", ".", "T", ")", ".", "dot", "(", "P1", "[", "0", ":", "self", ".", "nh", ",", "0", ":", "self", ".", "nh", "]", ")", ".", "dot", "(", "self", ".", "thetah", ")", ")", ".", "T", "self", ".", "llambdahat", "=", "self", ".", "pihat", ".", "dot", "(", "self", ".", "F_b", ")", "self", ".", "ubhat", "=", "-", "self", ".", "pihat", ".", "dot", "(", "self", ".", "F_f", ")", "return" ]
Compute canonical preference representation Uses auxiliary problem of 9.4.2, with the preference shock process reintroduced Calculates pihat, llambdahat and ubhat for the equivalent canonical household technology
[ "Compute", "canonical", "preference", "representation", "Uses", "auxiliary", "problem", "of", "9", ".", "4", ".", "2", "with", "the", "preference", "shock", "process", "reintroduced", "Calculates", "pihat", "llambdahat", "and", "ubhat", "for", "the", "equivalent", "canonical", "household", "technology" ]
python
train
mgoral/subconvert
src/subconvert/utils/SubFile.py
https://github.com/mgoral/subconvert/blob/59701e5e69ef1ca26ce7d1d766c936664aa2cb32/src/subconvert/utils/SubFile.py#L189-L194
def detectFps(self, movieFile = None, default = 23.976):
    """Fetch movie FPS from MPlayer output or return given default."""
    if movieFile is None:
        movieFile = self._searchForMovieFile()
    return File.detectFpsFromMovie(movieFile, default)
[ "def", "detectFps", "(", "self", ",", "movieFile", "=", "None", ",", "default", "=", "23.976", ")", ":", "if", "movieFile", "is", "None", ":", "movieFile", "=", "self", ".", "_searchForMovieFile", "(", ")", "return", "File", ".", "detectFpsFromMovie", "(", "movieFile", ",", "default", ")" ]
Fetch movie FPS from MPlayer output or return given default.
[ "Fetch", "movie", "FPS", "from", "MPlayer", "output", "or", "return", "given", "default", "." ]
python
train
econ-ark/HARK
HARK/ConsumptionSaving/ConsAggShockModel.py
https://github.com/econ-ark/HARK/blob/3d184153a189e618a87c9540df1cd12044039cc5/HARK/ConsumptionSaving/ConsAggShockModel.py#L937-L967
def update(self):
    '''
    Use primitive parameters (and perfect foresight calibrations) to make
    interest factor and wage rate functions (of capital to labor ratio),
    as well as discrete approximations to the aggregate shock distributions.

    Parameters
    ----------
    None

    Returns
    -------
    None
    '''
    self.kSS = ((self.getPermGroFacAggLR()**(self.CRRA)/self.DiscFac - (1.0-self.DeprFac))/self.CapShare)**(1.0/(self.CapShare-1.0))
    self.KtoYSS = self.kSS**(1.0-self.CapShare)
    self.wRteSS = (1.0-self.CapShare)*self.kSS**(self.CapShare)
    self.RfreeSS = (1.0 + self.CapShare*self.kSS**(self.CapShare-1.0) - self.DeprFac)
    self.MSS = self.kSS*self.RfreeSS + self.wRteSS
    self.convertKtoY = lambda KtoY : KtoY**(1.0/(1.0 - self.CapShare))  # converts K/Y to K/L
    self.Rfunc = lambda k : (1.0 + self.CapShare*k**(self.CapShare-1.0) - self.DeprFac)
    self.wFunc = lambda k : ((1.0-self.CapShare)*k**(self.CapShare))
    self.KtoLnow_init = self.kSS
    self.MaggNow_init = self.kSS
    self.AaggNow_init = self.kSS
    self.RfreeNow_init = self.Rfunc(self.kSS)
    self.wRteNow_init = self.wFunc(self.kSS)
    self.PermShkAggNow_init = 1.0
    self.TranShkAggNow_init = 1.0
    self.makeAggShkDstn()
    self.AFunc = AggregateSavingRule(self.intercept_prev,self.slope_prev)
[ "def", "update", "(", "self", ")", ":", "self", ".", "kSS", "=", "(", "(", "self", ".", "getPermGroFacAggLR", "(", ")", "**", "(", "self", ".", "CRRA", ")", "/", "self", ".", "DiscFac", "-", "(", "1.0", "-", "self", ".", "DeprFac", ")", ")", "/", "self", ".", "CapShare", ")", "**", "(", "1.0", "/", "(", "self", ".", "CapShare", "-", "1.0", ")", ")", "self", ".", "KtoYSS", "=", "self", ".", "kSS", "**", "(", "1.0", "-", "self", ".", "CapShare", ")", "self", ".", "wRteSS", "=", "(", "1.0", "-", "self", ".", "CapShare", ")", "*", "self", ".", "kSS", "**", "(", "self", ".", "CapShare", ")", "self", ".", "RfreeSS", "=", "(", "1.0", "+", "self", ".", "CapShare", "*", "self", ".", "kSS", "**", "(", "self", ".", "CapShare", "-", "1.0", ")", "-", "self", ".", "DeprFac", ")", "self", ".", "MSS", "=", "self", ".", "kSS", "*", "self", ".", "RfreeSS", "+", "self", ".", "wRteSS", "self", ".", "convertKtoY", "=", "lambda", "KtoY", ":", "KtoY", "**", "(", "1.0", "/", "(", "1.0", "-", "self", ".", "CapShare", ")", ")", "# converts K/Y to K/L", "self", ".", "Rfunc", "=", "lambda", "k", ":", "(", "1.0", "+", "self", ".", "CapShare", "*", "k", "**", "(", "self", ".", "CapShare", "-", "1.0", ")", "-", "self", ".", "DeprFac", ")", "self", ".", "wFunc", "=", "lambda", "k", ":", "(", "(", "1.0", "-", "self", ".", "CapShare", ")", "*", "k", "**", "(", "self", ".", "CapShare", ")", ")", "self", ".", "KtoLnow_init", "=", "self", ".", "kSS", "self", ".", "MaggNow_init", "=", "self", ".", "kSS", "self", ".", "AaggNow_init", "=", "self", ".", "kSS", "self", ".", "RfreeNow_init", "=", "self", ".", "Rfunc", "(", "self", ".", "kSS", ")", "self", ".", "wRteNow_init", "=", "self", ".", "wFunc", "(", "self", ".", "kSS", ")", "self", ".", "PermShkAggNow_init", "=", "1.0", "self", ".", "TranShkAggNow_init", "=", "1.0", "self", ".", "makeAggShkDstn", "(", ")", "self", ".", "AFunc", "=", "AggregateSavingRule", "(", "self", ".", "intercept_prev", ",", "self", ".", "slope_prev", ")" ]
Use primitive parameters (and perfect foresight calibrations) to make interest factor and wage rate functions (of capital to labor ratio), as well as discrete approximations to the aggregate shock distributions. Parameters ---------- None Returns ------- None
[ "Use", "primitive", "parameters", "(", "and", "perfect", "foresight", "calibrations", ")", "to", "make", "interest", "factor", "and", "wage", "rate", "functions", "(", "of", "capital", "to", "labor", "ratio", ")", "as", "well", "as", "discrete", "approximations", "to", "the", "aggregate", "shock", "distributions", "." ]
python
train
manahl/arctic
arctic/store/version_store.py
https://github.com/manahl/arctic/blob/57e110b6e182dbab00e7e214dc26f7d9ec47c120/arctic/store/version_store.py#L338-L383
def read(self, symbol, as_of=None, date_range=None, from_version=None, allow_secondary=None, **kwargs):
    """
    Read data for the named symbol.  Returns a VersionedItem object with
    a data and metdata element (as passed into write).

    Parameters
    ----------
    symbol : `str`
        symbol name for the item
    as_of : `str` or `int` or `datetime.datetime`
        Return the data as it was as_of the point in time.
        `int` : specific version number
        `str` : snapshot name which contains the version
        `datetime.datetime` : the version of the data that existed as_of the requested point in time
    date_range: `arctic.date.DateRange`
        DateRange to read data for.  Applies to Pandas data, with a DateTime index
        returns only the part of the data that falls in the DateRange.
    allow_secondary : `bool` or `None`
        Override the default behavior for allowing reads from secondary members of a cluster:
        `None` : use the settings from the top-level `Arctic` object used to query this version store.
        `True` : allow reads from secondary members
        `False` : only allow reads from primary members

    Returns
    -------
    VersionedItem namedtuple which contains a .data and .metadata element
    """
    try:
        read_preference = self._read_preference(allow_secondary)
        _version = self._read_metadata(symbol, as_of=as_of, read_preference=read_preference)
        return self._do_read(symbol, _version, from_version,
                             date_range=date_range, read_preference=read_preference, **kwargs)
    except (OperationFailure, AutoReconnect) as e:
        # Log the exception so we know how often this is happening
        log_exception('read', e, 1)
        # If we've failed to read from the secondary, then it's possible the
        # secondary has lagged. In this case direct the query to the primary.
        _version = mongo_retry(self._read_metadata)(symbol, as_of=as_of,
                                                    read_preference=ReadPreference.PRIMARY)
        return self._do_read_retry(symbol, _version, from_version,
                                   date_range=date_range,
                                   read_preference=ReadPreference.PRIMARY,
                                   **kwargs)
    except Exception as e:
        log_exception('read', e, 1)
        raise
[ "def", "read", "(", "self", ",", "symbol", ",", "as_of", "=", "None", ",", "date_range", "=", "None", ",", "from_version", "=", "None", ",", "allow_secondary", "=", "None", ",", "*", "*", "kwargs", ")", ":", "try", ":", "read_preference", "=", "self", ".", "_read_preference", "(", "allow_secondary", ")", "_version", "=", "self", ".", "_read_metadata", "(", "symbol", ",", "as_of", "=", "as_of", ",", "read_preference", "=", "read_preference", ")", "return", "self", ".", "_do_read", "(", "symbol", ",", "_version", ",", "from_version", ",", "date_range", "=", "date_range", ",", "read_preference", "=", "read_preference", ",", "*", "*", "kwargs", ")", "except", "(", "OperationFailure", ",", "AutoReconnect", ")", "as", "e", ":", "# Log the exception so we know how often this is happening", "log_exception", "(", "'read'", ",", "e", ",", "1", ")", "# If we've failed to read from the secondary, then it's possible the", "# secondary has lagged. In this case direct the query to the primary.", "_version", "=", "mongo_retry", "(", "self", ".", "_read_metadata", ")", "(", "symbol", ",", "as_of", "=", "as_of", ",", "read_preference", "=", "ReadPreference", ".", "PRIMARY", ")", "return", "self", ".", "_do_read_retry", "(", "symbol", ",", "_version", ",", "from_version", ",", "date_range", "=", "date_range", ",", "read_preference", "=", "ReadPreference", ".", "PRIMARY", ",", "*", "*", "kwargs", ")", "except", "Exception", "as", "e", ":", "log_exception", "(", "'read'", ",", "e", ",", "1", ")", "raise" ]
Read data for the named symbol. Returns a VersionedItem object with a data and metdata element (as passed into write). Parameters ---------- symbol : `str` symbol name for the item as_of : `str` or `int` or `datetime.datetime` Return the data as it was as_of the point in time. `int` : specific version number `str` : snapshot name which contains the version `datetime.datetime` : the version of the data that existed as_of the requested point in time date_range: `arctic.date.DateRange` DateRange to read data for. Applies to Pandas data, with a DateTime index returns only the part of the data that falls in the DateRange. allow_secondary : `bool` or `None` Override the default behavior for allowing reads from secondary members of a cluster: `None` : use the settings from the top-level `Arctic` object used to query this version store. `True` : allow reads from secondary members `False` : only allow reads from primary members Returns ------- VersionedItem namedtuple which contains a .data and .metadata element
[ "Read", "data", "for", "the", "named", "symbol", ".", "Returns", "a", "VersionedItem", "object", "with", "a", "data", "and", "metdata", "element", "(", "as", "passed", "into", "write", ")", "." ]
python
train
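A hedged sketch of the read call documented above. It assumes an Arctic version-store library already exists and the symbol has been written to it; the host, library name, symbol, and snapshot name are placeholders.

from arctic import Arctic

library = Arctic('localhost')['NASDAQ']          # host and library name are placeholders

item = library.read('AAPL')                      # latest version of the symbol
print(item.data)                                 # .data is whatever was written (e.g. a DataFrame)
print(item.metadata)

# As of a specific version number, or of a named snapshot
old = library.read('AAPL', as_of=3)
snapped = library.read('AAPL', as_of='eod-2015-01-02')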
pulumi/pulumi
sdk/python/lib/pulumi/resource.py
https://github.com/pulumi/pulumi/blob/95d51efe6ab9a533838b6d83aa240b5f912e72aa/sdk/python/lib/pulumi/resource.py#L321-L330
def export(name: str, value: Any):
    """
    Exports a named stack output.

    :param str name: The name to assign to this output.
    :param Any value: The value of this output.
    """
    stack = get_root_resource()
    if stack is not None:
        stack.output(name, value)
[ "def", "export", "(", "name", ":", "str", ",", "value", ":", "Any", ")", ":", "stack", "=", "get_root_resource", "(", ")", "if", "stack", "is", "not", "None", ":", "stack", ".", "output", "(", "name", ",", "value", ")" ]
Exports a named stack output. :param str name: The name to assign to this output. :param Any value: The value of this output.
[ "Exports", "a", "named", "stack", "output", "." ]
python
train
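A brief sketch of pulumi.export as documented above, assuming it runs inside a Pulumi program; outside one, get_root_resource() returns None and the call is a no-op, as the body above shows. The output names and values are illustrative.

import pulumi

# Values exported here become stack outputs, visible via `pulumi stack output`
pulumi.export("region", "us-west-2")
pulumi.export("instance_count", 3)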
nickpandolfi/Cyther
cyther/configuration.py
https://github.com/nickpandolfi/Cyther/blob/9fb0bd77af594008aa6ee8af460aa8c953abf5bc/cyther/configuration.py#L333-L353
def generate_configurations(*, guided=False, fresh_start=False, save=False):
    """
    If a config file is found in the standard locations, it will be loaded
    and the config data would be retuned. If not found, then generate the
    data on the fly, and return it
    """
    if fresh_start:
        purge_configs()

    loaded_status, loaded_data = get_config()
    if loaded_status != CONFIG_VALID:
        if save:
            make_config_file(guided=guided)
            status, config_data = get_config()
        else:
            config_data = make_config_data(guided=guided)
    else:
        config_data = loaded_data

    return config_data
[ "def", "generate_configurations", "(", "*", ",", "guided", "=", "False", ",", "fresh_start", "=", "False", ",", "save", "=", "False", ")", ":", "if", "fresh_start", ":", "purge_configs", "(", ")", "loaded_status", ",", "loaded_data", "=", "get_config", "(", ")", "if", "loaded_status", "!=", "CONFIG_VALID", ":", "if", "save", ":", "make_config_file", "(", "guided", "=", "guided", ")", "status", ",", "config_data", "=", "get_config", "(", ")", "else", ":", "config_data", "=", "make_config_data", "(", "guided", "=", "guided", ")", "else", ":", "config_data", "=", "loaded_data", "return", "config_data" ]
If a config file is found in the standard locations, it will be loaded and the config data would be retuned. If not found, then generate the data on the fly, and return it
[ "If", "a", "config", "file", "is", "found", "in", "the", "standard", "locations", "it", "will", "be", "loaded", "and", "the", "config", "data", "would", "be", "retuned", ".", "If", "not", "found", "then", "generate", "the", "data", "on", "the", "fly", "and", "return", "it" ]
python
train
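A hedged sketch of calling generate_configurations above; the flag combinations follow its keyword-only signature and docstring, and the import path is taken from this record's file path.

from cyther.configuration import generate_configurations

# Load an existing config file, or build the data on the fly without saving it
config = generate_configurations()

# Wipe any existing configs, prompt the user, and write a fresh config file
config = generate_configurations(guided=True, fresh_start=True, save=True)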
linkedin/pyexchange
pyexchange/exchange2010/__init__.py
https://github.com/linkedin/pyexchange/blob/d568f4edd326adb451b915ddf66cf1a37820e3ca/pyexchange/exchange2010/__init__.py#L154-L174
def load_all_details(self):
    """
    This function will execute all the event lookups for known events.

    This is intended for use when you want to have a completely populated
    event entry, including Organizer & Attendee details.
    """
    log.debug(u"Loading all details")
    if self.count > 0:
        # Now, empty out the events to prevent duplicates!
        del(self.events[:])

        # Send the SOAP request with the list of exchange ID values.
        log.debug(u"Requesting all event details for events: {event_list}".format(event_list=str(self.event_ids)))
        body = soap_request.get_item(exchange_id=self.event_ids, format=u'AllProperties')
        response_xml = self.service.send(body)

        # Re-parse the results for all the details!
        self._parse_response_for_all_events(response_xml)

    return self
[ "def", "load_all_details", "(", "self", ")", ":", "log", ".", "debug", "(", "u\"Loading all details\"", ")", "if", "self", ".", "count", ">", "0", ":", "# Now, empty out the events to prevent duplicates!", "del", "(", "self", ".", "events", "[", ":", "]", ")", "# Send the SOAP request with the list of exchange ID values.", "log", ".", "debug", "(", "u\"Requesting all event details for events: {event_list}\"", ".", "format", "(", "event_list", "=", "str", "(", "self", ".", "event_ids", ")", ")", ")", "body", "=", "soap_request", ".", "get_item", "(", "exchange_id", "=", "self", ".", "event_ids", ",", "format", "=", "u'AllProperties'", ")", "response_xml", "=", "self", ".", "service", ".", "send", "(", "body", ")", "# Re-parse the results for all the details!", "self", ".", "_parse_response_for_all_events", "(", "response_xml", ")", "return", "self" ]
This function will execute all the event lookups for known events. This is intended for use when you want to have a completely populated event entry, including Organizer & Attendee details.
[ "This", "function", "will", "execute", "all", "the", "event", "lookups", "for", "known", "events", "." ]
python
train
tradenity/python-sdk
tradenity/resources/tax_rate.py
https://github.com/tradenity/python-sdk/blob/d13fbe23f4d6ff22554c6d8d2deaf209371adaf1/tradenity/resources/tax_rate.py#L339-L353
def based_on(self, based_on):
    """Sets the based_on of this TaxRate.

    :param based_on: The based_on of this TaxRate.
    :type: str
    """
    allowed_values = ["shippingAddress", "billingAddress"]
    if based_on is not None and based_on not in allowed_values:
        raise ValueError(
            "Invalid value for `based_on` ({0}), must be one of {1}"
            .format(based_on, allowed_values)
        )

    self._based_on = based_on
[ "def", "based_on", "(", "self", ",", "based_on", ")", ":", "allowed_values", "=", "[", "\"shippingAddress\"", ",", "\"billingAddress\"", "]", "if", "based_on", "is", "not", "None", "and", "based_on", "not", "in", "allowed_values", ":", "raise", "ValueError", "(", "\"Invalid value for `based_on` ({0}), must be one of {1}\"", ".", "format", "(", "based_on", ",", "allowed_values", ")", ")", "self", ".", "_based_on", "=", "based_on" ]
Sets the based_on of this TaxRate. :param based_on: The based_on of this TaxRate. :type: str
[ "Sets", "the", "based_on", "of", "this", "TaxRate", "." ]
python
train
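A hedged sketch of the validation in the based_on setter above, assuming the method shown is the property setter it appears to be; the no-argument TaxRate construction is an assumption.

from tradenity.resources.tax_rate import TaxRate

rate = TaxRate()                     # constructor arguments are not part of this record
rate.based_on = "billingAddress"     # accepted value

try:
    rate.based_on = "originAddress"  # not in allowed_values
except ValueError as err:
    print(err)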
opinkerfi/nago
nago/settings/__init__.py
https://github.com/opinkerfi/nago/blob/85e1bdd1de0122f56868a483e7599e1b36a439b0/nago/settings/__init__.py#L96-L108
def generate_configfile(cfg_file,defaults=defaults):
    """ Write a new nago.ini config file from the defaults.

    Arguments:
        cfg_file    -- File that is written to like /etc/nago/nago.ini
        defaults    -- Dictionary with default values to use
    """
    # Create a directory if needed and write an empty file
    _mkdir_for_config(cfg_file=cfg_file)
    with open(cfg_file, 'w') as f:
        f.write('')
    for section in defaults.keys():
        set_option(section, cfg_file=cfg_file, **defaults[section])
[ "def", "generate_configfile", "(", "cfg_file", ",", "defaults", "=", "defaults", ")", ":", "# Create a directory if needed and write an empty file", "_mkdir_for_config", "(", "cfg_file", "=", "cfg_file", ")", "with", "open", "(", "cfg_file", ",", "'w'", ")", "as", "f", ":", "f", ".", "write", "(", "''", ")", "for", "section", "in", "defaults", ".", "keys", "(", ")", ":", "set_option", "(", "section", ",", "cfg_file", "=", "cfg_file", ",", "*", "*", "defaults", "[", "section", "]", ")" ]
Write a new nago.ini config file from the defaults. Arguments: cfg_file -- File that is written to like /etc/nago/nago.ini defaults -- Dictionary with default values to use
[ "Write", "a", "new", "nago", ".", "ini", "config", "file", "from", "the", "defaults", "." ]
python
train
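A hedged sketch of generate_configfile above. The target path and the section/option names in the defaults mapping are illustrative only; the mapping shape (sections of option/value pairs) follows how the function passes them to set_option.

from nago.settings import generate_configfile

# Write a fresh config using the module-level defaults
generate_configfile("/tmp/nago-test.ini")

# Or supply an explicit defaults mapping of sections to option/value pairs
my_defaults = {"main": {"host_name": "localhost", "log_level": "info"}}
generate_configfile("/tmp/nago-test.ini", defaults=my_defaults)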
gwastro/pycbc
pycbc/inference/sampler/base_mcmc.py
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/inference/sampler/base_mcmc.py#L241-L249
def niterations(self):
    """The current number of iterations."""
    itercounter = self._itercounter
    if itercounter is None:
        itercounter = 0
    lastclear = self._lastclear
    if lastclear is None:
        lastclear = 0
    return itercounter + lastclear
[ "def", "niterations", "(", "self", ")", ":", "itercounter", "=", "self", ".", "_itercounter", "if", "itercounter", "is", "None", ":", "itercounter", "=", "0", "lastclear", "=", "self", ".", "_lastclear", "if", "lastclear", "is", "None", ":", "lastclear", "=", "0", "return", "itercounter", "+", "lastclear" ]
The current number of iterations.
[ "The", "current", "number", "of", "iterations", "." ]
python
train
joeblackwaslike/pricing
pricing/babel_numbers.py
https://github.com/joeblackwaslike/pricing/blob/be988b0851b4313af81f1db475bc33248700e39c/pricing/babel_numbers.py#L185-L259
def apply( self, value, locale, currency=None, currency_digits=True, decimal_quantization=True): """Renders into a string a number following the defined pattern. Forced decimal quantization is active by default so we'll produce a number string that is strictly following CLDR pattern definitions. """ if not isinstance(value, decimal.Decimal): value = decimal.Decimal(str(value)) value = value.scaleb(self.scale) # Separate the absolute value from its sign. is_negative = int(value.is_signed()) value = abs(value).normalize() # Prepare scientific notation metadata. if self.exp_prec: value, exp, exp_sign = self.scientific_notation_elements( value, locale) # Adjust the precision of the fractionnal part and force it to the # currency's if neccessary. frac_prec = self.frac_prec if currency and currency_digits: frac_prec = (babel.numbers.get_currency_precision(currency), ) * 2 # Bump decimal precision to the natural precision of the number if it # exceeds the one we're about to use. This adaptative precision is only # triggered if the decimal quantization is disabled or if a scientific # notation pattern has a missing mandatory fractional part (as in the # default '#E0' pattern). This special case has been extensively # discussed at # https://github.com/python-babel/babel/pull/494#issuecomment-307649969 if not decimal_quantization or (self.exp_prec and frac_prec == (0, 0)): frac_prec = (frac_prec[0], max([frac_prec[1], get_decimal_precision(value)])) # Render scientific notation. if self.exp_prec: number = ''.join([ self._quantize_value(value, locale, frac_prec), babel.numbers.get_exponential_symbol(locale), exp_sign, self._format_int( str(exp), self.exp_prec[0], self.exp_prec[1], locale)]) # Is it a siginificant digits pattern? elif '@' in self.pattern: text = self._format_significant(value, self.int_prec[0], self.int_prec[1]) a, sep, b = text.partition(".") number = self._format_int(a, 0, 1000, locale) if sep: number += babel.numbers.get_decimal_symbol(locale) + b # A normal number pattern. else: number = self._quantize_value(value, locale, frac_prec) retval = ''.join([ self.prefix[is_negative], number, self.suffix[is_negative]]) if u'¤' in retval: retval = retval.replace(u'¤¤¤', babel.numbers.get_currency_name( currency, value, locale)) retval = retval.replace(u'¤¤', currency.upper()) retval = retval.replace(u'¤', babel.numbers.get_currency_symbol( currency, locale)) return retval
[ "def", "apply", "(", "self", ",", "value", ",", "locale", ",", "currency", "=", "None", ",", "currency_digits", "=", "True", ",", "decimal_quantization", "=", "True", ")", ":", "if", "not", "isinstance", "(", "value", ",", "decimal", ".", "Decimal", ")", ":", "value", "=", "decimal", ".", "Decimal", "(", "str", "(", "value", ")", ")", "value", "=", "value", ".", "scaleb", "(", "self", ".", "scale", ")", "# Separate the absolute value from its sign.", "is_negative", "=", "int", "(", "value", ".", "is_signed", "(", ")", ")", "value", "=", "abs", "(", "value", ")", ".", "normalize", "(", ")", "# Prepare scientific notation metadata.", "if", "self", ".", "exp_prec", ":", "value", ",", "exp", ",", "exp_sign", "=", "self", ".", "scientific_notation_elements", "(", "value", ",", "locale", ")", "# Adjust the precision of the fractionnal part and force it to the", "# currency's if neccessary.", "frac_prec", "=", "self", ".", "frac_prec", "if", "currency", "and", "currency_digits", ":", "frac_prec", "=", "(", "babel", ".", "numbers", ".", "get_currency_precision", "(", "currency", ")", ",", ")", "*", "2", "# Bump decimal precision to the natural precision of the number if it", "# exceeds the one we're about to use. This adaptative precision is only", "# triggered if the decimal quantization is disabled or if a scientific", "# notation pattern has a missing mandatory fractional part (as in the", "# default '#E0' pattern). This special case has been extensively", "# discussed at", "# https://github.com/python-babel/babel/pull/494#issuecomment-307649969", "if", "not", "decimal_quantization", "or", "(", "self", ".", "exp_prec", "and", "frac_prec", "==", "(", "0", ",", "0", ")", ")", ":", "frac_prec", "=", "(", "frac_prec", "[", "0", "]", ",", "max", "(", "[", "frac_prec", "[", "1", "]", ",", "get_decimal_precision", "(", "value", ")", "]", ")", ")", "# Render scientific notation.", "if", "self", ".", "exp_prec", ":", "number", "=", "''", ".", "join", "(", "[", "self", ".", "_quantize_value", "(", "value", ",", "locale", ",", "frac_prec", ")", ",", "babel", ".", "numbers", ".", "get_exponential_symbol", "(", "locale", ")", ",", "exp_sign", ",", "self", ".", "_format_int", "(", "str", "(", "exp", ")", ",", "self", ".", "exp_prec", "[", "0", "]", ",", "self", ".", "exp_prec", "[", "1", "]", ",", "locale", ")", "]", ")", "# Is it a siginificant digits pattern?", "elif", "'@'", "in", "self", ".", "pattern", ":", "text", "=", "self", ".", "_format_significant", "(", "value", ",", "self", ".", "int_prec", "[", "0", "]", ",", "self", ".", "int_prec", "[", "1", "]", ")", "a", ",", "sep", ",", "b", "=", "text", ".", "partition", "(", "\".\"", ")", "number", "=", "self", ".", "_format_int", "(", "a", ",", "0", ",", "1000", ",", "locale", ")", "if", "sep", ":", "number", "+=", "babel", ".", "numbers", ".", "get_decimal_symbol", "(", "locale", ")", "+", "b", "# A normal number pattern.", "else", ":", "number", "=", "self", ".", "_quantize_value", "(", "value", ",", "locale", ",", "frac_prec", ")", "retval", "=", "''", ".", "join", "(", "[", "self", ".", "prefix", "[", "is_negative", "]", ",", "number", ",", "self", ".", "suffix", "[", "is_negative", "]", "]", ")", "if", "u'¤' ", "n ", "etval:", "", "retval", "=", "retval", ".", "replace", "(", "u'¤¤¤',", "", "babel", ".", "numbers", ".", "get_currency_name", "(", "currency", ",", "value", ",", "locale", ")", ")", "retval", "=", "retval", ".", "replace", "(", "u'¤¤', ", "c", "rrency.u", "p", "per()", ")", "", "", "retval", "=", "retval", ".", "replace", "(", 
"u'¤',", " ", "abel.", "n", "umbers.", "g", "et_currency_symbol(", "", "currency", ",", "locale", ")", ")", "return", "retval" ]
Renders into a string a number following the defined pattern. Forced decimal quantization is active by default so we'll produce a number string that is strictly following CLDR pattern definitions.
[ "Renders", "into", "a", "string", "a", "number", "following", "the", "defined", "pattern", ".", "Forced", "decimal", "quantization", "is", "active", "by", "default", "so", "we", "ll", "produce", "a", "number", "string", "that", "is", "strictly", "following", "CLDR", "pattern", "definitions", "." ]
python
test
numenta/nupic
src/nupic/algorithms/sdr_classifier.py
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/algorithms/sdr_classifier.py#L478-L500
def _calculateError(self, recordNum, bucketIdxList):
    """
    Calculate error signal

    :param bucketIdxList: list of encoder buckets

    :return: dict containing error. The key is the number of steps
             The value is a numpy array of error at the output layer
    """
    error = dict()
    targetDist = numpy.zeros(self._maxBucketIdx + 1)
    numCategories = len(bucketIdxList)
    for bucketIdx in bucketIdxList:
        targetDist[bucketIdx] = 1.0/numCategories

    for (learnRecordNum, learnPatternNZ) in self._patternNZHistory:
        nSteps = recordNum - learnRecordNum
        if nSteps in self.steps:
            predictDist = self.inferSingleStep(learnPatternNZ,
                                               self._weightMatrix[nSteps])
            error[nSteps] = targetDist - predictDist

    return error
[ "def", "_calculateError", "(", "self", ",", "recordNum", ",", "bucketIdxList", ")", ":", "error", "=", "dict", "(", ")", "targetDist", "=", "numpy", ".", "zeros", "(", "self", ".", "_maxBucketIdx", "+", "1", ")", "numCategories", "=", "len", "(", "bucketIdxList", ")", "for", "bucketIdx", "in", "bucketIdxList", ":", "targetDist", "[", "bucketIdx", "]", "=", "1.0", "/", "numCategories", "for", "(", "learnRecordNum", ",", "learnPatternNZ", ")", "in", "self", ".", "_patternNZHistory", ":", "nSteps", "=", "recordNum", "-", "learnRecordNum", "if", "nSteps", "in", "self", ".", "steps", ":", "predictDist", "=", "self", ".", "inferSingleStep", "(", "learnPatternNZ", ",", "self", ".", "_weightMatrix", "[", "nSteps", "]", ")", "error", "[", "nSteps", "]", "=", "targetDist", "-", "predictDist", "return", "error" ]
Calculate error signal :param bucketIdxList: list of encoder buckets :return: dict containing error. The key is the number of steps The value is a numpy array of error at the output layer
[ "Calculate", "error", "signal" ]
python
valid
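A small numpy sketch of the error computation above: an even target distribution over the active buckets minus a predicted distribution. The bucket indices and predicted values are made up.

import numpy as np

max_bucket_idx = 4
bucket_idx_list = [1, 3]                                 # hypothetical active encoder buckets

target_dist = np.zeros(max_bucket_idx + 1)
target_dist[bucket_idx_list] = 1.0 / len(bucket_idx_list)

predict_dist = np.array([0.1, 0.4, 0.1, 0.2, 0.2])       # made-up inferred distribution
error = target_dist - predict_dist
print(error)                                             # -> [-0.1  0.1 -0.1  0.3 -0.2] (up to float rounding)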
pytroll/satpy
satpy/composites/viirs.py
https://github.com/pytroll/satpy/blob/1f21d20ac686b745fb0da9b4030d139893e066dd/satpy/composites/viirs.py#L594-L819
def local_histogram_equalization(data, mask_to_equalize, valid_data_mask=None, number_of_bins=1000, std_mult_cutoff=3.0, do_zerotoone_normalization=True, local_radius_px=300, clip_limit=60.0, # 20.0, slope_limit=3.0, # 0.5, do_log_scale=True, # can't take the log of zero, so the offset # may be needed; pass 0.0 if your data doesn't # need it log_offset=0.00001, out=None ): """Equalize the provided data (in the mask_to_equalize) using adaptive histogram equalization. tiles of width/height (2 * local_radius_px + 1) will be calculated and results for each pixel will be bilinerarly interpolated from the nearest 4 tiles when pixels fall near the edge of the image (there is no adjacent tile) the resultant interpolated sum from the available tiles will be multipled to account for the weight of any missing tiles:: pixel total interpolated value = pixel available interpolated value / (1 - missing interpolation weight) if ``do_zerotoone_normalization`` is True the data will be scaled so that all data in the mask_to_equalize falls between 0 and 1; otherwise the data in mask_to_equalize will all fall between 0 and number_of_bins Returns: The equalized data """ out = out if out is not None else np.zeros_like(data) # if we don't have a valid mask, use the mask of what we should be # equalizing if valid_data_mask is None: valid_data_mask = mask_to_equalize # calculate some useful numbers for our tile math total_rows = data.shape[0] total_cols = data.shape[1] tile_size = int((local_radius_px * 2.0) + 1.0) row_tiles = int(total_rows / tile_size) if ( (total_rows % tile_size) == 0) else int(total_rows / tile_size) + 1 col_tiles = int(total_cols / tile_size) if ( (total_cols % tile_size) == 0) else int(total_cols / tile_size) + 1 # an array of our distribution functions for equalization all_cumulative_dist_functions = [[]] # an array of our bin information for equalization all_bin_information = [[]] # loop through our tiles and create the histogram equalizations for each # one for num_row_tile in range(row_tiles): # make sure we have enough rows available to store info on this next # row of tiles if len(all_cumulative_dist_functions) <= num_row_tile: all_cumulative_dist_functions.append([]) if len(all_bin_information) <= num_row_tile: all_bin_information.append([]) # go through each tile in this row and calculate the equalization for num_col_tile in range(col_tiles): # calculate the range for this tile (min is inclusive, max is # exclusive) min_row = num_row_tile * tile_size max_row = min_row + tile_size min_col = num_col_tile * tile_size max_col = min_col + tile_size # for speed of calculation, pull out the mask of pixels that should # be used to calculate the histogram mask_valid_data_in_tile = valid_data_mask[min_row:max_row, min_col: max_col] # if we have any valid data in this tile, calculate a histogram equalization for this tile # (note: even if this tile does no fall in the mask_to_equalize, it's histogram may be used by other tiles) cumulative_dist_function, temp_bins = None, None if mask_valid_data_in_tile.any(): # use all valid data in the tile, so separate sections will # blend cleanly temp_valid_data = data[min_row:max_row, min_col:max_col][ mask_valid_data_in_tile] temp_valid_data = temp_valid_data[ temp_valid_data >= 0 ] # TEMP, testing to see if negative data is messing everything up # limit the contrast by only considering data within a certain # range of the average if std_mult_cutoff is not None: avg = np.mean(temp_valid_data) std = np.std(temp_valid_data) # limit our range to avg +/- 
std_mult_cutoff*std; e.g. the # default std_mult_cutoff is 4.0 so about 99.8% of the data concervative_mask = ( temp_valid_data < (avg + std * std_mult_cutoff)) & ( temp_valid_data > (avg - std * std_mult_cutoff)) temp_valid_data = temp_valid_data[concervative_mask] # if we are taking the log of our data, do so now if do_log_scale: temp_valid_data = np.log(temp_valid_data + log_offset) # do the histogram equalization and get the resulting # distribution function and bin information if temp_valid_data.size > 0: cumulative_dist_function, temp_bins = _histogram_equalization_helper( temp_valid_data, number_of_bins, clip_limit=clip_limit, slope_limit=slope_limit) # hang on to our equalization related information for use later all_cumulative_dist_functions[num_row_tile].append( cumulative_dist_function) all_bin_information[num_row_tile].append(temp_bins) # get the tile weight array so we can use it to interpolate our data tile_weights = _calculate_weights(tile_size) # now loop through our tiles and linearly interpolate the equalized # versions of the data for num_row_tile in range(row_tiles): for num_col_tile in range(col_tiles): # calculate the range for this tile (min is inclusive, max is # exclusive) min_row = num_row_tile * tile_size max_row = min_row + tile_size min_col = num_col_tile * tile_size max_col = min_col + tile_size # for convenience, pull some of these tile sized chunks out temp_all_data = data[min_row:max_row, min_col:max_col].copy() temp_mask_to_equalize = mask_to_equalize[min_row:max_row, min_col: max_col] temp_all_valid_data_mask = valid_data_mask[min_row:max_row, min_col:max_col] # if we have any data in this tile, calculate our weighted sum if temp_mask_to_equalize.any(): if do_log_scale: temp_all_data[temp_all_valid_data_mask] = np.log( temp_all_data[temp_all_valid_data_mask] + log_offset) temp_data_to_equalize = temp_all_data[temp_mask_to_equalize] temp_all_valid_data = temp_all_data[temp_all_valid_data_mask] # a place to hold our weighted sum that represents the interpolated contributions # of the histogram equalizations from the surrounding tiles temp_sum = np.zeros_like(temp_data_to_equalize) # how much weight were we unable to use because those tiles # fell off the edge of the image? 
unused_weight = np.zeros(temp_data_to_equalize.shape, dtype=tile_weights.dtype) # loop through all the surrounding tiles and process their # contributions to this tile for weight_row in range(3): for weight_col in range(3): # figure out which adjacent tile we're processing (in # overall tile coordinates instead of relative to our # current tile) calculated_row = num_row_tile - 1 + weight_row calculated_col = num_col_tile - 1 + weight_col tmp_tile_weights = tile_weights[ weight_row, weight_col][np.where( temp_mask_to_equalize)] # if we're inside the tile array and the tile we're # processing has a histogram equalization for us to # use, process it if ((calculated_row >= 0) and (calculated_row < row_tiles) and (calculated_col >= 0) and (calculated_col < col_tiles) and ( all_bin_information[calculated_row][ calculated_col] is not None) and (all_cumulative_dist_functions[calculated_row][ calculated_col] is not None)): # equalize our current tile using the histogram # equalization from the tile we're processing temp_equalized_data = np.interp( temp_all_valid_data, all_bin_information[ calculated_row][calculated_col][:-1], all_cumulative_dist_functions[calculated_row][ calculated_col]) temp_equalized_data = temp_equalized_data[np.where( temp_mask_to_equalize[ temp_all_valid_data_mask])] # add the contribution for the tile we're # processing to our weighted sum temp_sum += (temp_equalized_data * tmp_tile_weights) # if the tile we're processing doesn't exist, hang onto the weight we # would have used for it so we can correct that later else: unused_weight -= tmp_tile_weights # if we have unused weights, scale our values to correct for # that if unused_weight.any(): # TODO, if the mask masks everything out this will be a # zero! temp_sum /= unused_weight + 1 # now that we've calculated the weighted sum for this tile, set # it in our data array out[min_row:max_row, min_col:max_col][ temp_mask_to_equalize] = temp_sum # TEMP, test without using weights # data[min_row:max_row, min_col:max_col][temp_mask_to_equalize] = \ # np.interp(temp_data_to_equalize, all_bin_information[num_row_tile][num_col_tile][:-1], # all_cumulative_dist_functions[num_row_tile][num_col_tile]) # if we were asked to, normalize our data to be between zero and one, # rather than zero and number_of_bins if do_zerotoone_normalization: _linear_normalization_from_0to1(out, mask_to_equalize, number_of_bins) return out
[ "def", "local_histogram_equalization", "(", "data", ",", "mask_to_equalize", ",", "valid_data_mask", "=", "None", ",", "number_of_bins", "=", "1000", ",", "std_mult_cutoff", "=", "3.0", ",", "do_zerotoone_normalization", "=", "True", ",", "local_radius_px", "=", "300", ",", "clip_limit", "=", "60.0", ",", "# 20.0,", "slope_limit", "=", "3.0", ",", "# 0.5,", "do_log_scale", "=", "True", ",", "# can't take the log of zero, so the offset", "# may be needed; pass 0.0 if your data doesn't", "# need it", "log_offset", "=", "0.00001", ",", "out", "=", "None", ")", ":", "out", "=", "out", "if", "out", "is", "not", "None", "else", "np", ".", "zeros_like", "(", "data", ")", "# if we don't have a valid mask, use the mask of what we should be", "# equalizing", "if", "valid_data_mask", "is", "None", ":", "valid_data_mask", "=", "mask_to_equalize", "# calculate some useful numbers for our tile math", "total_rows", "=", "data", ".", "shape", "[", "0", "]", "total_cols", "=", "data", ".", "shape", "[", "1", "]", "tile_size", "=", "int", "(", "(", "local_radius_px", "*", "2.0", ")", "+", "1.0", ")", "row_tiles", "=", "int", "(", "total_rows", "/", "tile_size", ")", "if", "(", "(", "total_rows", "%", "tile_size", ")", "==", "0", ")", "else", "int", "(", "total_rows", "/", "tile_size", ")", "+", "1", "col_tiles", "=", "int", "(", "total_cols", "/", "tile_size", ")", "if", "(", "(", "total_cols", "%", "tile_size", ")", "==", "0", ")", "else", "int", "(", "total_cols", "/", "tile_size", ")", "+", "1", "# an array of our distribution functions for equalization", "all_cumulative_dist_functions", "=", "[", "[", "]", "]", "# an array of our bin information for equalization", "all_bin_information", "=", "[", "[", "]", "]", "# loop through our tiles and create the histogram equalizations for each", "# one", "for", "num_row_tile", "in", "range", "(", "row_tiles", ")", ":", "# make sure we have enough rows available to store info on this next", "# row of tiles", "if", "len", "(", "all_cumulative_dist_functions", ")", "<=", "num_row_tile", ":", "all_cumulative_dist_functions", ".", "append", "(", "[", "]", ")", "if", "len", "(", "all_bin_information", ")", "<=", "num_row_tile", ":", "all_bin_information", ".", "append", "(", "[", "]", ")", "# go through each tile in this row and calculate the equalization", "for", "num_col_tile", "in", "range", "(", "col_tiles", ")", ":", "# calculate the range for this tile (min is inclusive, max is", "# exclusive)", "min_row", "=", "num_row_tile", "*", "tile_size", "max_row", "=", "min_row", "+", "tile_size", "min_col", "=", "num_col_tile", "*", "tile_size", "max_col", "=", "min_col", "+", "tile_size", "# for speed of calculation, pull out the mask of pixels that should", "# be used to calculate the histogram", "mask_valid_data_in_tile", "=", "valid_data_mask", "[", "min_row", ":", "max_row", ",", "min_col", ":", "max_col", "]", "# if we have any valid data in this tile, calculate a histogram equalization for this tile", "# (note: even if this tile does no fall in the mask_to_equalize, it's histogram may be used by other tiles)", "cumulative_dist_function", ",", "temp_bins", "=", "None", ",", "None", "if", "mask_valid_data_in_tile", ".", "any", "(", ")", ":", "# use all valid data in the tile, so separate sections will", "# blend cleanly", "temp_valid_data", "=", "data", "[", "min_row", ":", "max_row", ",", "min_col", ":", "max_col", "]", "[", "mask_valid_data_in_tile", "]", "temp_valid_data", "=", "temp_valid_data", "[", "temp_valid_data", ">=", "0", "]", "# TEMP, testing to see 
if negative data is messing everything up", "# limit the contrast by only considering data within a certain", "# range of the average", "if", "std_mult_cutoff", "is", "not", "None", ":", "avg", "=", "np", ".", "mean", "(", "temp_valid_data", ")", "std", "=", "np", ".", "std", "(", "temp_valid_data", ")", "# limit our range to avg +/- std_mult_cutoff*std; e.g. the", "# default std_mult_cutoff is 4.0 so about 99.8% of the data", "concervative_mask", "=", "(", "temp_valid_data", "<", "(", "avg", "+", "std", "*", "std_mult_cutoff", ")", ")", "&", "(", "temp_valid_data", ">", "(", "avg", "-", "std", "*", "std_mult_cutoff", ")", ")", "temp_valid_data", "=", "temp_valid_data", "[", "concervative_mask", "]", "# if we are taking the log of our data, do so now", "if", "do_log_scale", ":", "temp_valid_data", "=", "np", ".", "log", "(", "temp_valid_data", "+", "log_offset", ")", "# do the histogram equalization and get the resulting", "# distribution function and bin information", "if", "temp_valid_data", ".", "size", ">", "0", ":", "cumulative_dist_function", ",", "temp_bins", "=", "_histogram_equalization_helper", "(", "temp_valid_data", ",", "number_of_bins", ",", "clip_limit", "=", "clip_limit", ",", "slope_limit", "=", "slope_limit", ")", "# hang on to our equalization related information for use later", "all_cumulative_dist_functions", "[", "num_row_tile", "]", ".", "append", "(", "cumulative_dist_function", ")", "all_bin_information", "[", "num_row_tile", "]", ".", "append", "(", "temp_bins", ")", "# get the tile weight array so we can use it to interpolate our data", "tile_weights", "=", "_calculate_weights", "(", "tile_size", ")", "# now loop through our tiles and linearly interpolate the equalized", "# versions of the data", "for", "num_row_tile", "in", "range", "(", "row_tiles", ")", ":", "for", "num_col_tile", "in", "range", "(", "col_tiles", ")", ":", "# calculate the range for this tile (min is inclusive, max is", "# exclusive)", "min_row", "=", "num_row_tile", "*", "tile_size", "max_row", "=", "min_row", "+", "tile_size", "min_col", "=", "num_col_tile", "*", "tile_size", "max_col", "=", "min_col", "+", "tile_size", "# for convenience, pull some of these tile sized chunks out", "temp_all_data", "=", "data", "[", "min_row", ":", "max_row", ",", "min_col", ":", "max_col", "]", ".", "copy", "(", ")", "temp_mask_to_equalize", "=", "mask_to_equalize", "[", "min_row", ":", "max_row", ",", "min_col", ":", "max_col", "]", "temp_all_valid_data_mask", "=", "valid_data_mask", "[", "min_row", ":", "max_row", ",", "min_col", ":", "max_col", "]", "# if we have any data in this tile, calculate our weighted sum", "if", "temp_mask_to_equalize", ".", "any", "(", ")", ":", "if", "do_log_scale", ":", "temp_all_data", "[", "temp_all_valid_data_mask", "]", "=", "np", ".", "log", "(", "temp_all_data", "[", "temp_all_valid_data_mask", "]", "+", "log_offset", ")", "temp_data_to_equalize", "=", "temp_all_data", "[", "temp_mask_to_equalize", "]", "temp_all_valid_data", "=", "temp_all_data", "[", "temp_all_valid_data_mask", "]", "# a place to hold our weighted sum that represents the interpolated contributions", "# of the histogram equalizations from the surrounding tiles", "temp_sum", "=", "np", ".", "zeros_like", "(", "temp_data_to_equalize", ")", "# how much weight were we unable to use because those tiles", "# fell off the edge of the image?", "unused_weight", "=", "np", ".", "zeros", "(", "temp_data_to_equalize", ".", "shape", ",", "dtype", "=", "tile_weights", ".", "dtype", ")", "# loop through all the 
surrounding tiles and process their", "# contributions to this tile", "for", "weight_row", "in", "range", "(", "3", ")", ":", "for", "weight_col", "in", "range", "(", "3", ")", ":", "# figure out which adjacent tile we're processing (in", "# overall tile coordinates instead of relative to our", "# current tile)", "calculated_row", "=", "num_row_tile", "-", "1", "+", "weight_row", "calculated_col", "=", "num_col_tile", "-", "1", "+", "weight_col", "tmp_tile_weights", "=", "tile_weights", "[", "weight_row", ",", "weight_col", "]", "[", "np", ".", "where", "(", "temp_mask_to_equalize", ")", "]", "# if we're inside the tile array and the tile we're", "# processing has a histogram equalization for us to", "# use, process it", "if", "(", "(", "calculated_row", ">=", "0", ")", "and", "(", "calculated_row", "<", "row_tiles", ")", "and", "(", "calculated_col", ">=", "0", ")", "and", "(", "calculated_col", "<", "col_tiles", ")", "and", "(", "all_bin_information", "[", "calculated_row", "]", "[", "calculated_col", "]", "is", "not", "None", ")", "and", "(", "all_cumulative_dist_functions", "[", "calculated_row", "]", "[", "calculated_col", "]", "is", "not", "None", ")", ")", ":", "# equalize our current tile using the histogram", "# equalization from the tile we're processing", "temp_equalized_data", "=", "np", ".", "interp", "(", "temp_all_valid_data", ",", "all_bin_information", "[", "calculated_row", "]", "[", "calculated_col", "]", "[", ":", "-", "1", "]", ",", "all_cumulative_dist_functions", "[", "calculated_row", "]", "[", "calculated_col", "]", ")", "temp_equalized_data", "=", "temp_equalized_data", "[", "np", ".", "where", "(", "temp_mask_to_equalize", "[", "temp_all_valid_data_mask", "]", ")", "]", "# add the contribution for the tile we're", "# processing to our weighted sum", "temp_sum", "+=", "(", "temp_equalized_data", "*", "tmp_tile_weights", ")", "# if the tile we're processing doesn't exist, hang onto the weight we", "# would have used for it so we can correct that later", "else", ":", "unused_weight", "-=", "tmp_tile_weights", "# if we have unused weights, scale our values to correct for", "# that", "if", "unused_weight", ".", "any", "(", ")", ":", "# TODO, if the mask masks everything out this will be a", "# zero!", "temp_sum", "/=", "unused_weight", "+", "1", "# now that we've calculated the weighted sum for this tile, set", "# it in our data array", "out", "[", "min_row", ":", "max_row", ",", "min_col", ":", "max_col", "]", "[", "temp_mask_to_equalize", "]", "=", "temp_sum", "# TEMP, test without using weights", "# data[min_row:max_row, min_col:max_col][temp_mask_to_equalize] = \\", "# np.interp(temp_data_to_equalize, all_bin_information[num_row_tile][num_col_tile][:-1],", "# all_cumulative_dist_functions[num_row_tile][num_col_tile])", "# if we were asked to, normalize our data to be between zero and one,", "# rather than zero and number_of_bins", "if", "do_zerotoone_normalization", ":", "_linear_normalization_from_0to1", "(", "out", ",", "mask_to_equalize", ",", "number_of_bins", ")", "return", "out" ]
Equalize the provided data (in the mask_to_equalize) using adaptive histogram equalization. tiles of width/height (2 * local_radius_px + 1) will be calculated and results for each pixel will be bilinerarly interpolated from the nearest 4 tiles when pixels fall near the edge of the image (there is no adjacent tile) the resultant interpolated sum from the available tiles will be multipled to account for the weight of any missing tiles:: pixel total interpolated value = pixel available interpolated value / (1 - missing interpolation weight) if ``do_zerotoone_normalization`` is True the data will be scaled so that all data in the mask_to_equalize falls between 0 and 1; otherwise the data in mask_to_equalize will all fall between 0 and number_of_bins Returns: The equalized data
[ "Equalize", "the", "provided", "data", "(", "in", "the", "mask_to_equalize", ")", "using", "adaptive", "histogram", "equalization", "." ]
python
train
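A quick standalone check of the tile bookkeeping the function above starts with: the tile width is 2 * local_radius_px + 1 and the row/column tile counts are ceiling divisions. The image dimensions here are made up.

local_radius_px = 300
total_rows, total_cols = 1500, 2048                      # hypothetical image shape

tile_size = int((local_radius_px * 2.0) + 1.0)           # 601
row_tiles = total_rows // tile_size + (0 if total_rows % tile_size == 0 else 1)
col_tiles = total_cols // tile_size + (0 if total_cols % tile_size == 0 else 1)
print(tile_size, row_tiles, col_tiles)                   # -> 601 3 4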
HttpRunner/HttpRunner
httprunner/cli.py
https://github.com/HttpRunner/HttpRunner/blob/f259551bf9c8ba905eae5c1afcf2efea20ae0871/httprunner/cli.py#L91-L180
def main_locust(): """ Performance test with locust: parse command line options and run commands. """ # monkey patch ssl at beginning to avoid RecursionError when running locust. from gevent import monkey; monkey.patch_ssl() import multiprocessing import sys from httprunner import logger try: from httprunner import locusts except ImportError: msg = "Locust is not installed, install first and try again.\n" msg += "install command: pip install locustio" print(msg) exit(1) sys.argv[0] = 'locust' if len(sys.argv) == 1: sys.argv.extend(["-h"]) if sys.argv[1] in ["-h", "--help", "-V", "--version"]: locusts.start_locust_main() sys.exit(0) # set logging level if "-L" in sys.argv: loglevel_index = sys.argv.index('-L') + 1 elif "--loglevel" in sys.argv: loglevel_index = sys.argv.index('--loglevel') + 1 else: loglevel_index = None if loglevel_index and loglevel_index < len(sys.argv): loglevel = sys.argv[loglevel_index] else: # default loglevel = "WARNING" logger.setup_logger(loglevel) # get testcase file path try: if "-f" in sys.argv: testcase_index = sys.argv.index('-f') + 1 elif "--locustfile" in sys.argv: testcase_index = sys.argv.index('--locustfile') + 1 else: testcase_index = None assert testcase_index and testcase_index < len(sys.argv) except AssertionError: print("Testcase file is not specified, exit.") sys.exit(1) testcase_file_path = sys.argv[testcase_index] sys.argv[testcase_index] = locusts.parse_locustfile(testcase_file_path) if "--processes" in sys.argv: """ locusts -f locustfile.py --processes 4 """ if "--no-web" in sys.argv: logger.log_error("conflict parameter args: --processes & --no-web. \nexit.") sys.exit(1) processes_index = sys.argv.index('--processes') processes_count_index = processes_index + 1 if processes_count_index >= len(sys.argv): """ do not specify processes count explicitly locusts -f locustfile.py --processes """ processes_count = multiprocessing.cpu_count() logger.log_warning("processes count not specified, use {} by default.".format(processes_count)) else: try: """ locusts -f locustfile.py --processes 4 """ processes_count = int(sys.argv[processes_count_index]) sys.argv.pop(processes_count_index) except ValueError: """ locusts -f locustfile.py --processes -P 8888 """ processes_count = multiprocessing.cpu_count() logger.log_warning("processes count not specified, use {} by default.".format(processes_count)) sys.argv.pop(processes_index) locusts.run_locusts_with_processes(sys.argv, processes_count) else: locusts.start_locust_main()
[ "def", "main_locust", "(", ")", ":", "# monkey patch ssl at beginning to avoid RecursionError when running locust.", "from", "gevent", "import", "monkey", "monkey", ".", "patch_ssl", "(", ")", "import", "multiprocessing", "import", "sys", "from", "httprunner", "import", "logger", "try", ":", "from", "httprunner", "import", "locusts", "except", "ImportError", ":", "msg", "=", "\"Locust is not installed, install first and try again.\\n\"", "msg", "+=", "\"install command: pip install locustio\"", "print", "(", "msg", ")", "exit", "(", "1", ")", "sys", ".", "argv", "[", "0", "]", "=", "'locust'", "if", "len", "(", "sys", ".", "argv", ")", "==", "1", ":", "sys", ".", "argv", ".", "extend", "(", "[", "\"-h\"", "]", ")", "if", "sys", ".", "argv", "[", "1", "]", "in", "[", "\"-h\"", ",", "\"--help\"", ",", "\"-V\"", ",", "\"--version\"", "]", ":", "locusts", ".", "start_locust_main", "(", ")", "sys", ".", "exit", "(", "0", ")", "# set logging level", "if", "\"-L\"", "in", "sys", ".", "argv", ":", "loglevel_index", "=", "sys", ".", "argv", ".", "index", "(", "'-L'", ")", "+", "1", "elif", "\"--loglevel\"", "in", "sys", ".", "argv", ":", "loglevel_index", "=", "sys", ".", "argv", ".", "index", "(", "'--loglevel'", ")", "+", "1", "else", ":", "loglevel_index", "=", "None", "if", "loglevel_index", "and", "loglevel_index", "<", "len", "(", "sys", ".", "argv", ")", ":", "loglevel", "=", "sys", ".", "argv", "[", "loglevel_index", "]", "else", ":", "# default", "loglevel", "=", "\"WARNING\"", "logger", ".", "setup_logger", "(", "loglevel", ")", "# get testcase file path", "try", ":", "if", "\"-f\"", "in", "sys", ".", "argv", ":", "testcase_index", "=", "sys", ".", "argv", ".", "index", "(", "'-f'", ")", "+", "1", "elif", "\"--locustfile\"", "in", "sys", ".", "argv", ":", "testcase_index", "=", "sys", ".", "argv", ".", "index", "(", "'--locustfile'", ")", "+", "1", "else", ":", "testcase_index", "=", "None", "assert", "testcase_index", "and", "testcase_index", "<", "len", "(", "sys", ".", "argv", ")", "except", "AssertionError", ":", "print", "(", "\"Testcase file is not specified, exit.\"", ")", "sys", ".", "exit", "(", "1", ")", "testcase_file_path", "=", "sys", ".", "argv", "[", "testcase_index", "]", "sys", ".", "argv", "[", "testcase_index", "]", "=", "locusts", ".", "parse_locustfile", "(", "testcase_file_path", ")", "if", "\"--processes\"", "in", "sys", ".", "argv", ":", "\"\"\" locusts -f locustfile.py --processes 4\n \"\"\"", "if", "\"--no-web\"", "in", "sys", ".", "argv", ":", "logger", ".", "log_error", "(", "\"conflict parameter args: --processes & --no-web. 
\\nexit.\"", ")", "sys", ".", "exit", "(", "1", ")", "processes_index", "=", "sys", ".", "argv", ".", "index", "(", "'--processes'", ")", "processes_count_index", "=", "processes_index", "+", "1", "if", "processes_count_index", ">=", "len", "(", "sys", ".", "argv", ")", ":", "\"\"\" do not specify processes count explicitly\n locusts -f locustfile.py --processes\n \"\"\"", "processes_count", "=", "multiprocessing", ".", "cpu_count", "(", ")", "logger", ".", "log_warning", "(", "\"processes count not specified, use {} by default.\"", ".", "format", "(", "processes_count", ")", ")", "else", ":", "try", ":", "\"\"\" locusts -f locustfile.py --processes 4 \"\"\"", "processes_count", "=", "int", "(", "sys", ".", "argv", "[", "processes_count_index", "]", ")", "sys", ".", "argv", ".", "pop", "(", "processes_count_index", ")", "except", "ValueError", ":", "\"\"\" locusts -f locustfile.py --processes -P 8888 \"\"\"", "processes_count", "=", "multiprocessing", ".", "cpu_count", "(", ")", "logger", ".", "log_warning", "(", "\"processes count not specified, use {} by default.\"", ".", "format", "(", "processes_count", ")", ")", "sys", ".", "argv", ".", "pop", "(", "processes_index", ")", "locusts", ".", "run_locusts_with_processes", "(", "sys", ".", "argv", ",", "processes_count", ")", "else", ":", "locusts", ".", "start_locust_main", "(", ")" ]
Performance test with locust: parse command line options and run commands.
[ "Performance", "test", "with", "locust", ":", "parse", "command", "line", "options", "and", "run", "commands", "." ]
python
train
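A standalone sketch of the argv scanning used above to pick up -L/--loglevel, run against a hand-written argument list rather than a real command line.

argv = ['locust', '-f', 'locustfile.py', '--loglevel', 'INFO']   # stand-in for sys.argv

if '-L' in argv:
    loglevel_index = argv.index('-L') + 1
elif '--loglevel' in argv:
    loglevel_index = argv.index('--loglevel') + 1
else:
    loglevel_index = None

loglevel = argv[loglevel_index] if loglevel_index and loglevel_index < len(argv) else 'WARNING'
print(loglevel)                                                  # -> INFO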
Aloomaio/python-sdk
alooma_pysdk/alooma_pysdk.py
https://github.com/Aloomaio/python-sdk/blob/e6e7322d0b23d90b1ff0320e9a9c431c82c0c277/alooma_pysdk/alooma_pysdk.py#L274-L287
def _notify(self, log_level, message): """ Calls the callback function and logs messages using the PySDK logger :param log_level: An integer representing the log level, as specified in the Python `logging` library :param message: The actual message to be sent to the logger and the `callback` function """ timestamp = datetime.datetime.utcnow() logger.log(log_level, str(message)) try: self._callback(log_level, message, timestamp) except Exception as ex: logger.warning(consts.LOG_MSG_CALLBACK_FAILURE % str(ex))
[ "def", "_notify", "(", "self", ",", "log_level", ",", "message", ")", ":", "timestamp", "=", "datetime", ".", "datetime", ".", "utcnow", "(", ")", "logger", ".", "log", "(", "log_level", ",", "str", "(", "message", ")", ")", "try", ":", "self", ".", "_callback", "(", "log_level", ",", "message", ",", "timestamp", ")", "except", "Exception", "as", "ex", ":", "logger", ".", "warning", "(", "consts", ".", "LOG_MSG_CALLBACK_FAILURE", "%", "str", "(", "ex", ")", ")" ]
Calls the callback function and logs messages using the PySDK logger :param log_level: An integer representing the log level, as specified in the Python `logging` library :param message: The actual message to be sent to the logger and the `callback` function
[ "Calls", "the", "callback", "function", "and", "logs", "messages", "using", "the", "PySDK", "logger", ":", "param", "log_level", ":", "An", "integer", "representing", "the", "log", "level", "as", "specified", "in", "the", "Python", "logging", "library", ":", "param", "message", ":", "The", "actual", "message", "to", "be", "sent", "to", "the", "logger", "and", "the", "callback", "function" ]
python
train
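A self-contained sketch of the log-then-callback pattern in _notify, using the stdlib logging module and a stand-in callback; the callback body and the message are invented.

import datetime
import logging

logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger('pysdk-sketch')

def callback(log_level, message, timestamp):
    # hypothetical user-supplied callback: just echo what it received
    print('callback got:', log_level, message, timestamp.isoformat())

def notify(log_level, message):
    timestamp = datetime.datetime.utcnow()
    logger.log(log_level, str(message))
    try:
        callback(log_level, message, timestamp)
    except Exception as ex:
        logger.warning('callback failure: %s', ex)

notify(logging.INFO, 'connection established')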
open-mmlab/mmcv
mmcv/runner/priority.py
https://github.com/open-mmlab/mmcv/blob/0d77f61450aab4dde8b8585a577cc496acb95d7f/mmcv/runner/priority.py#L35-L53
def get_priority(priority): """Get priority value. Args: priority (int or str or :obj:`Priority`): Priority. Returns: int: The priority value. """ if isinstance(priority, int): if priority < 0 or priority > 100: raise ValueError('priority must be between 0 and 100') return priority elif isinstance(priority, Priority): return priority.value elif isinstance(priority, str): return Priority[priority.upper()].value else: raise TypeError('priority must be an integer or Priority enum value')
[ "def", "get_priority", "(", "priority", ")", ":", "if", "isinstance", "(", "priority", ",", "int", ")", ":", "if", "priority", "<", "0", "or", "priority", ">", "100", ":", "raise", "ValueError", "(", "'priority must be between 0 and 100'", ")", "return", "priority", "elif", "isinstance", "(", "priority", ",", "Priority", ")", ":", "return", "priority", ".", "value", "elif", "isinstance", "(", "priority", ",", "str", ")", ":", "return", "Priority", "[", "priority", ".", "upper", "(", ")", "]", ".", "value", "else", ":", "raise", "TypeError", "(", "'priority must be an integer or Priority enum value'", ")" ]
Get priority value. Args: priority (int or str or :obj:`Priority`): Priority. Returns: int: The priority value.
[ "Get", "priority", "value", "." ]
python
test
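A runnable sketch of the same lookup with a stand-in IntEnum; the member names and values below are assumptions, not necessarily mmcv's real Priority enum.

from enum import IntEnum

class Priority(IntEnum):                 # stand-in, not the real mmcv enum
    HIGHEST = 0
    HIGH = 30
    NORMAL = 50
    LOW = 70
    LOWEST = 100

def get_priority(priority):
    # check the enum first because IntEnum members are also ints
    if isinstance(priority, Priority):
        return priority.value
    elif isinstance(priority, int):
        if priority < 0 or priority > 100:
            raise ValueError('priority must be between 0 and 100')
        return priority
    elif isinstance(priority, str):
        return Priority[priority.upper()].value
    raise TypeError('priority must be an integer or Priority enum value')

print(get_priority(42), get_priority('normal'), get_priority(Priority.HIGH))   # -> 42 50 30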
centralniak/py-raildriver
raildriver/library.py
https://github.com/centralniak/py-raildriver/blob/c7f5f551e0436451b9507fc63a62e49a229282b9/raildriver/library.py#L134-L141
def get_current_time(self): """ Get current time :return: datetime.time """ hms = [int(self.get_current_controller_value(i)) for i in range(406, 409)] return datetime.time(*hms)
[ "def", "get_current_time", "(", "self", ")", ":", "hms", "=", "[", "int", "(", "self", ".", "get_current_controller_value", "(", "i", ")", ")", "for", "i", "in", "range", "(", "406", ",", "409", ")", "]", "return", "datetime", ".", "time", "(", "*", "hms", ")" ]
Get current time :return: datetime.time
[ "Get", "current", "time" ]
python
train
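A tiny sketch of assembling datetime.time from three controller readings, with a fake reader in place of get_current_controller_value and made-up values.

import datetime

def fake_controller_value(index):
    # hypothetical stand-in for RailDriver.get_current_controller_value
    return {406: 13.0, 407: 37.0, 408: 5.0}[index]

hms = [int(fake_controller_value(i)) for i in range(406, 409)]
print(datetime.time(*hms))        # -> 13:37:05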
10gen/mongo-orchestration
mongo_orchestration/replica_sets.py
https://github.com/10gen/mongo-orchestration/blob/81fd2224205922ea2178b08190b53a33aec47261/mongo_orchestration/replica_sets.py#L522-L541
def wait_while_reachable(self, servers, timeout=60): """wait while all servers be reachable Args: servers - list of servers """ t_start = time.time() while True: try: for server in servers: # TODO: use state code to check if server is reachable server_info = self.connection( hostname=server, timeout=5).admin.command('ismaster') logger.debug("server_info: {server_info}".format(server_info=server_info)) if int(server_info['ok']) != 1: raise pymongo.errors.OperationFailure("{server} is not reachable".format(**locals)) return True except (KeyError, AttributeError, pymongo.errors.AutoReconnect, pymongo.errors.OperationFailure): if time.time() - t_start > timeout: return False time.sleep(0.1)
[ "def", "wait_while_reachable", "(", "self", ",", "servers", ",", "timeout", "=", "60", ")", ":", "t_start", "=", "time", ".", "time", "(", ")", "while", "True", ":", "try", ":", "for", "server", "in", "servers", ":", "# TODO: use state code to check if server is reachable", "server_info", "=", "self", ".", "connection", "(", "hostname", "=", "server", ",", "timeout", "=", "5", ")", ".", "admin", ".", "command", "(", "'ismaster'", ")", "logger", ".", "debug", "(", "\"server_info: {server_info}\"", ".", "format", "(", "server_info", "=", "server_info", ")", ")", "if", "int", "(", "server_info", "[", "'ok'", "]", ")", "!=", "1", ":", "raise", "pymongo", ".", "errors", ".", "OperationFailure", "(", "\"{server} is not reachable\"", ".", "format", "(", "*", "*", "locals", ")", ")", "return", "True", "except", "(", "KeyError", ",", "AttributeError", ",", "pymongo", ".", "errors", ".", "AutoReconnect", ",", "pymongo", ".", "errors", ".", "OperationFailure", ")", ":", "if", "time", ".", "time", "(", ")", "-", "t_start", ">", "timeout", ":", "return", "False", "time", ".", "sleep", "(", "0.1", ")" ]
wait while all servers be reachable Args: servers - list of servers
[ "wait", "while", "all", "servers", "be", "reachable", "Args", ":", "servers", "-", "list", "of", "servers" ]
python
train
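A generic retry-until-deadline loop in the same shape as the method above, with a dummy probe instead of an ismaster call; unlike the original it also retries on a falsy result, so treat it as a sketch only.

import time

def wait_until(probe, timeout=5.0, interval=0.1):
    # keep probing until it succeeds or the deadline passes
    t_start = time.time()
    while True:
        try:
            if probe():
                return True
        except Exception:
            pass
        if time.time() - t_start > timeout:
            return False
        time.sleep(interval)

print(wait_until(lambda: True))           # succeeds immediately -> True
print(wait_until(lambda: False, 0.3))     # never succeeds -> False after ~0.3s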
blockstack/blockstack-core
blockstack/blockstackd.py
https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/blockstackd.py#L1124-L1172
def rpc_getinfo(self, **con_info): """ Get information from the running server: * last_block_seen: the last block height seen * consensus: the consensus hash for that block * server_version: the server version * last_block_processed: the last block processed * server_alive: True * [optional] zonefile_count: the number of zonefiles known """ conf = get_blockstack_opts() info = self.get_bitcoind_info() cinfo = self.get_consensus_info() reply = {} reply['last_block_seen'] = info['blocks'] reply['consensus'] = cinfo['consensus_hash'] reply['server_version'] = "%s" % VERSION reply['last_block_processed'] = cinfo['block_height'] reply['server_alive'] = True reply['indexing'] = config.is_indexing(self.working_dir) # this is a bit janky, but the logic is as follows: # * BLOCKSTACK_TESTNET_ACTIVE means that we've explicitly set an alternative magic bytes, so we should report this. # * BLOCKSTACK_PUBLIC_TESTNET means that we're on the default hosted testnet (e.g. testnet.blockstack.org) # * BLOCKSTACK_TEST or BLOCKSTACK_TESTNET usually means we're running inside an integration test if BLOCKSTACK_TESTNET_ACTIVE: reply['testnet'] = MAGIC_BYTES elif BLOCKSTACK_PUBLIC_TESTNET: reply['testnet'] = 'hosted' elif BLOCKSTACK_TEST or BLOCKSTACK_TESTNET: reply['testnet'] = True else: reply['testnet'] = False reply['first_block'] = FIRST_BLOCK_MAINNET if conf.get('atlas', False): # return zonefile inv length reply['zonefile_count'] = atlas_get_num_zonefiles() if self.is_stale(): reply['stale'] = True reply['warning'] = 'Daemon is behind the chain tip. Do not rely on it for fresh information.' return reply
[ "def", "rpc_getinfo", "(", "self", ",", "*", "*", "con_info", ")", ":", "conf", "=", "get_blockstack_opts", "(", ")", "info", "=", "self", ".", "get_bitcoind_info", "(", ")", "cinfo", "=", "self", ".", "get_consensus_info", "(", ")", "reply", "=", "{", "}", "reply", "[", "'last_block_seen'", "]", "=", "info", "[", "'blocks'", "]", "reply", "[", "'consensus'", "]", "=", "cinfo", "[", "'consensus_hash'", "]", "reply", "[", "'server_version'", "]", "=", "\"%s\"", "%", "VERSION", "reply", "[", "'last_block_processed'", "]", "=", "cinfo", "[", "'block_height'", "]", "reply", "[", "'server_alive'", "]", "=", "True", "reply", "[", "'indexing'", "]", "=", "config", ".", "is_indexing", "(", "self", ".", "working_dir", ")", "# this is a bit janky, but the logic is as follows:", "# * BLOCKSTACK_TESTNET_ACTIVE means that we've explicitly set an alternative magic bytes, so we should report this.", "# * BLOCKSTACK_PUBLIC_TESTNET means that we're on the default hosted testnet (e.g. testnet.blockstack.org)", "# * BLOCKSTACK_TEST or BLOCKSTACK_TESTNET usually means we're running inside an integration test", "if", "BLOCKSTACK_TESTNET_ACTIVE", ":", "reply", "[", "'testnet'", "]", "=", "MAGIC_BYTES", "elif", "BLOCKSTACK_PUBLIC_TESTNET", ":", "reply", "[", "'testnet'", "]", "=", "'hosted'", "elif", "BLOCKSTACK_TEST", "or", "BLOCKSTACK_TESTNET", ":", "reply", "[", "'testnet'", "]", "=", "True", "else", ":", "reply", "[", "'testnet'", "]", "=", "False", "reply", "[", "'first_block'", "]", "=", "FIRST_BLOCK_MAINNET", "if", "conf", ".", "get", "(", "'atlas'", ",", "False", ")", ":", "# return zonefile inv length", "reply", "[", "'zonefile_count'", "]", "=", "atlas_get_num_zonefiles", "(", ")", "if", "self", ".", "is_stale", "(", ")", ":", "reply", "[", "'stale'", "]", "=", "True", "reply", "[", "'warning'", "]", "=", "'Daemon is behind the chain tip. Do not rely on it for fresh information.'", "return", "reply" ]
Get information from the running server: * last_block_seen: the last block height seen * consensus: the consensus hash for that block * server_version: the server version * last_block_processed: the last block processed * server_alive: True * [optional] zonefile_count: the number of zonefiles known
[ "Get", "information", "from", "the", "running", "server", ":", "*", "last_block_seen", ":", "the", "last", "block", "height", "seen", "*", "consensus", ":", "the", "consensus", "hash", "for", "that", "block", "*", "server_version", ":", "the", "server", "version", "*", "last_block_processed", ":", "the", "last", "block", "processed", "*", "server_alive", ":", "True", "*", "[", "optional", "]", "zonefile_count", ":", "the", "number", "of", "zonefiles", "known" ]
python
train
karan/TPB
tpb/utils.py
https://github.com/karan/TPB/blob/f424a73a10d4bcf4e363d7e7e8cb915a3a057671/tpb/utils.py#L56-L63
def _segment(cls, segment): """ Returns a property capable of setting and getting a segment. """ return property( fget=lambda x: cls._get_segment(x, segment), fset=lambda x, v: cls._set_segment(x, segment, v), )
[ "def", "_segment", "(", "cls", ",", "segment", ")", ":", "return", "property", "(", "fget", "=", "lambda", "x", ":", "cls", ".", "_get_segment", "(", "x", ",", "segment", ")", ",", "fset", "=", "lambda", "x", ",", "v", ":", "cls", ".", "_set_segment", "(", "x", ",", "segment", ",", "v", ")", ",", ")" ]
Returns a property capable of setting and getting a segment.
[ "Returns", "a", "property", "capable", "of", "setting", "and", "getting", "a", "segment", "." ]
python
train
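A self-contained sketch of the property-factory idea above: a classmethod builds a property from lambdas that delegate to per-segment getters and setters. The Path class and its segments are invented.

class Path(object):
    # hypothetical object whose attributes map onto numbered segments
    def __init__(self, *segments):
        self._segments = dict(enumerate(segments))

    def _get_segment(self, segment):
        return self._segments[segment]

    def _set_segment(self, segment, value):
        self._segments[segment] = value

    @classmethod
    def _segment(cls, segment):
        return property(
            fget=lambda self: cls._get_segment(self, segment),
            fset=lambda self, v: cls._set_segment(self, segment, v),
        )

Path.category = Path._segment(0)
Path.page = Path._segment(1)

p = Path('movies', 3)
p.page = 4
print(p.category, p.page)     # -> movies 4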
ZELLMECHANIK-DRESDEN/dclab
dclab/external/statsmodels/nonparametric/_kernel_base.py
https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/external/statsmodels/nonparametric/_kernel_base.py#L58-L66
def _set_defaults(self, defaults): """Sets the default values for the efficient estimation""" self.n_res = defaults.n_res self.n_sub = defaults.n_sub self.randomize = defaults.randomize self.return_median = defaults.return_median self.efficient = defaults.efficient self.return_only_bw = defaults.return_only_bw self.n_jobs = defaults.n_jobs
[ "def", "_set_defaults", "(", "self", ",", "defaults", ")", ":", "self", ".", "n_res", "=", "defaults", ".", "n_res", "self", ".", "n_sub", "=", "defaults", ".", "n_sub", "self", ".", "randomize", "=", "defaults", ".", "randomize", "self", ".", "return_median", "=", "defaults", ".", "return_median", "self", ".", "efficient", "=", "defaults", ".", "efficient", "self", ".", "return_only_bw", "=", "defaults", ".", "return_only_bw", "self", ".", "n_jobs", "=", "defaults", ".", "n_jobs" ]
Sets the default values for the efficient estimation
[ "Sets", "the", "default", "values", "for", "the", "efficient", "estimation" ]
python
train
Miserlou/Zappa
zappa/cli.py
https://github.com/Miserlou/Zappa/blob/3ccf7490a8d8b8fa74a61ee39bf44234f3567739/zappa/cli.py#L2744-L2757
def shamelessly_promote(): """ Shamelessly promote our little community. """ click.echo("Need " + click.style("help", fg='green', bold=True) + "? Found a " + click.style("bug", fg='green', bold=True) + "? Let us " + click.style("know", fg='green', bold=True) + "! :D") click.echo("File bug reports on " + click.style("GitHub", bold=True) + " here: " + click.style("https://github.com/Miserlou/Zappa", fg='cyan', bold=True)) click.echo("And join our " + click.style("Slack", bold=True) + " channel here: " + click.style("https://slack.zappa.io", fg='cyan', bold=True)) click.echo("Love!,") click.echo(" ~ Team " + click.style("Zappa", bold=True) + "!")
[ "def", "shamelessly_promote", "(", ")", ":", "click", ".", "echo", "(", "\"Need \"", "+", "click", ".", "style", "(", "\"help\"", ",", "fg", "=", "'green'", ",", "bold", "=", "True", ")", "+", "\"? Found a \"", "+", "click", ".", "style", "(", "\"bug\"", ",", "fg", "=", "'green'", ",", "bold", "=", "True", ")", "+", "\"? Let us \"", "+", "click", ".", "style", "(", "\"know\"", ",", "fg", "=", "'green'", ",", "bold", "=", "True", ")", "+", "\"! :D\"", ")", "click", ".", "echo", "(", "\"File bug reports on \"", "+", "click", ".", "style", "(", "\"GitHub\"", ",", "bold", "=", "True", ")", "+", "\" here: \"", "+", "click", ".", "style", "(", "\"https://github.com/Miserlou/Zappa\"", ",", "fg", "=", "'cyan'", ",", "bold", "=", "True", ")", ")", "click", ".", "echo", "(", "\"And join our \"", "+", "click", ".", "style", "(", "\"Slack\"", ",", "bold", "=", "True", ")", "+", "\" channel here: \"", "+", "click", ".", "style", "(", "\"https://slack.zappa.io\"", ",", "fg", "=", "'cyan'", ",", "bold", "=", "True", ")", ")", "click", ".", "echo", "(", "\"Love!,\"", ")", "click", ".", "echo", "(", "\" ~ Team \"", "+", "click", ".", "style", "(", "\"Zappa\"", ",", "bold", "=", "True", ")", "+", "\"!\"", ")" ]
Shamelessly promote our little community.
[ "Shamelessly", "promote", "our", "little", "community", "." ]
python
train
ConsenSys/mythril-classic
mythril/ethereum/interface/leveldb/accountindexing.py
https://github.com/ConsenSys/mythril-classic/blob/27af71c34b2ce94f4fae5613ec457f93df1a8f56/mythril/ethereum/interface/leveldb/accountindexing.py#L111-L177
def updateIfNeeded(self): """update address index.""" headBlock = self.db.reader._get_head_block() if headBlock is not None: # avoid restarting search if head block is same & we already initialized # this is required for fastSync handling if self.lastBlock is not None: self.lastBlock = max(self.lastBlock, headBlock.number) else: self.lastBlock = headBlock.number lastProcessed = self.db.reader._get_last_indexed_number() if lastProcessed is not None: self.lastProcessedBlock = utils.big_endian_to_int(lastProcessed) # in fast sync head block is at 0 (e.g. in fastSync), we can't use it to determine length if self.lastBlock is not None and self.lastBlock == 0: self.lastBlock = 2e9 if self.lastBlock is None or ( self.lastProcessedBlock is not None and self.lastBlock <= self.lastProcessedBlock ): return blockNum = 0 if self.lastProcessedBlock is not None: blockNum = self.lastProcessedBlock + 1 print( "Updating hash-to-address index from block " + str(self.lastProcessedBlock) ) else: print("Starting hash-to-address index") count = 0 processed = 0 while blockNum <= self.lastBlock: # leveldb cannot be accessed on multiple processes (not even readonly) # multithread version performs significantly worse than serial try: results = self._process(blockNum) except: break # store new mappings self.db.writer._start_writing() count += len(results) for addr in results: self.db.writer._store_account_address(addr) self.db.writer._commit_batch() processed += BATCH_SIZE blockNum = min(blockNum + BATCH_SIZE, self.lastBlock + 1) cost_time = time.time() - ethereum.start_time print( "%d blocks processed (in %d seconds), %d unique addresses found, next block: %d" % (processed, cost_time, count, min(self.lastBlock, blockNum)) ) self.lastProcessedBlock = blockNum - 1 self.db.writer._set_last_indexed_number(self.lastProcessedBlock) print("Finished indexing") self.lastBlock = self.lastProcessedBlock
[ "def", "updateIfNeeded", "(", "self", ")", ":", "headBlock", "=", "self", ".", "db", ".", "reader", ".", "_get_head_block", "(", ")", "if", "headBlock", "is", "not", "None", ":", "# avoid restarting search if head block is same & we already initialized", "# this is required for fastSync handling", "if", "self", ".", "lastBlock", "is", "not", "None", ":", "self", ".", "lastBlock", "=", "max", "(", "self", ".", "lastBlock", ",", "headBlock", ".", "number", ")", "else", ":", "self", ".", "lastBlock", "=", "headBlock", ".", "number", "lastProcessed", "=", "self", ".", "db", ".", "reader", ".", "_get_last_indexed_number", "(", ")", "if", "lastProcessed", "is", "not", "None", ":", "self", ".", "lastProcessedBlock", "=", "utils", ".", "big_endian_to_int", "(", "lastProcessed", ")", "# in fast sync head block is at 0 (e.g. in fastSync), we can't use it to determine length", "if", "self", ".", "lastBlock", "is", "not", "None", "and", "self", ".", "lastBlock", "==", "0", ":", "self", ".", "lastBlock", "=", "2e9", "if", "self", ".", "lastBlock", "is", "None", "or", "(", "self", ".", "lastProcessedBlock", "is", "not", "None", "and", "self", ".", "lastBlock", "<=", "self", ".", "lastProcessedBlock", ")", ":", "return", "blockNum", "=", "0", "if", "self", ".", "lastProcessedBlock", "is", "not", "None", ":", "blockNum", "=", "self", ".", "lastProcessedBlock", "+", "1", "print", "(", "\"Updating hash-to-address index from block \"", "+", "str", "(", "self", ".", "lastProcessedBlock", ")", ")", "else", ":", "print", "(", "\"Starting hash-to-address index\"", ")", "count", "=", "0", "processed", "=", "0", "while", "blockNum", "<=", "self", ".", "lastBlock", ":", "# leveldb cannot be accessed on multiple processes (not even readonly)", "# multithread version performs significantly worse than serial", "try", ":", "results", "=", "self", ".", "_process", "(", "blockNum", ")", "except", ":", "break", "# store new mappings", "self", ".", "db", ".", "writer", ".", "_start_writing", "(", ")", "count", "+=", "len", "(", "results", ")", "for", "addr", "in", "results", ":", "self", ".", "db", ".", "writer", ".", "_store_account_address", "(", "addr", ")", "self", ".", "db", ".", "writer", ".", "_commit_batch", "(", ")", "processed", "+=", "BATCH_SIZE", "blockNum", "=", "min", "(", "blockNum", "+", "BATCH_SIZE", ",", "self", ".", "lastBlock", "+", "1", ")", "cost_time", "=", "time", ".", "time", "(", ")", "-", "ethereum", ".", "start_time", "print", "(", "\"%d blocks processed (in %d seconds), %d unique addresses found, next block: %d\"", "%", "(", "processed", ",", "cost_time", ",", "count", ",", "min", "(", "self", ".", "lastBlock", ",", "blockNum", ")", ")", ")", "self", ".", "lastProcessedBlock", "=", "blockNum", "-", "1", "self", ".", "db", ".", "writer", ".", "_set_last_indexed_number", "(", "self", ".", "lastProcessedBlock", ")", "print", "(", "\"Finished indexing\"", ")", "self", ".", "lastBlock", "=", "self", ".", "lastProcessedBlock" ]
update address index.
[ "update", "address", "index", "." ]
python
train
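The batch-window advance used in the indexing loop above, reduced to plain arithmetic with made-up block numbers.

BATCH_SIZE = 8
last_block = 20

block_num = 0
windows = []
while block_num <= last_block:
    windows.append(block_num)                              # start of the next batch to process
    block_num = min(block_num + BATCH_SIZE, last_block + 1)
print(windows)                                             # -> [0, 8, 16]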
pyQode/pyqode.core
pyqode/core/widgets/splittable_tab_widget.py
https://github.com/pyQode/pyqode.core/blob/a99ec6cd22d519394f613309412f8329dc4e90cb/pyqode/core/widgets/splittable_tab_widget.py#L993-L1003
def get_filter(cls, mimetype): """ Returns a filter string for the file dialog. The filter is based on the mime type. :param mimetype: path from which the filter must be derived. :return: Filter string """ filters = ' '.join( ['*%s' % ext for ext in mimetypes.guess_all_extensions(mimetype)]) return '%s (%s)' % (mimetype, filters)
[ "def", "get_filter", "(", "cls", ",", "mimetype", ")", ":", "filters", "=", "' '", ".", "join", "(", "[", "'*%s'", "%", "ext", "for", "ext", "in", "mimetypes", ".", "guess_all_extensions", "(", "mimetype", ")", "]", ")", "return", "'%s (%s)'", "%", "(", "mimetype", ",", "filters", ")" ]
Returns a filter string for the file dialog. The filter is based on the mime type. :param mimetype: path from which the filter must be derived. :return: Filter string
[ "Returns", "a", "filter", "string", "for", "the", "file", "dialog", ".", "The", "filter", "is", "based", "on", "the", "mime", "type", "." ]
python
train
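What the mimetype-to-filter helper evaluates to; mimetypes is in the standard library so this runs as-is, though the exact extension list varies by platform.

import mimetypes

mimetype = 'text/x-python'
filters = ' '.join('*%s' % ext for ext in mimetypes.guess_all_extensions(mimetype))
print('%s (%s)' % (mimetype, filters))      # e.g. -> text/x-python (*.py *.pyw ...)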
tjcsl/cslbot
cslbot/helpers/handler.py
https://github.com/tjcsl/cslbot/blob/aebe07be47141f61d7c180706bddfb707f19b2b5/cslbot/helpers/handler.py#L259-L285
def do_part(self, cmdargs, nick, target, msgtype, send, c): """Leaves a channel. Prevent user from leaving the primary channel. """ channel = self.config['core']['channel'] botnick = self.config['core']['nick'] if not cmdargs: # don't leave the primary channel if target == channel: send("%s must have a home." % botnick) return else: cmdargs = target if not cmdargs.startswith(('#', '+', '@')): cmdargs = '#' + cmdargs # don't leave the primary channel if cmdargs == channel: send("%s must have a home." % botnick) return # don't leave the control channel if cmdargs == self.config['core']['ctrlchan']: send("%s must remain under control, or bad things will happen." % botnick) return self.send(cmdargs, nick, "Leaving at the request of %s" % nick, msgtype) c.part(cmdargs)
[ "def", "do_part", "(", "self", ",", "cmdargs", ",", "nick", ",", "target", ",", "msgtype", ",", "send", ",", "c", ")", ":", "channel", "=", "self", ".", "config", "[", "'core'", "]", "[", "'channel'", "]", "botnick", "=", "self", ".", "config", "[", "'core'", "]", "[", "'nick'", "]", "if", "not", "cmdargs", ":", "# don't leave the primary channel", "if", "target", "==", "channel", ":", "send", "(", "\"%s must have a home.\"", "%", "botnick", ")", "return", "else", ":", "cmdargs", "=", "target", "if", "not", "cmdargs", ".", "startswith", "(", "(", "'#'", ",", "'+'", ",", "'@'", ")", ")", ":", "cmdargs", "=", "'#'", "+", "cmdargs", "# don't leave the primary channel", "if", "cmdargs", "==", "channel", ":", "send", "(", "\"%s must have a home.\"", "%", "botnick", ")", "return", "# don't leave the control channel", "if", "cmdargs", "==", "self", ".", "config", "[", "'core'", "]", "[", "'ctrlchan'", "]", ":", "send", "(", "\"%s must remain under control, or bad things will happen.\"", "%", "botnick", ")", "return", "self", ".", "send", "(", "cmdargs", ",", "nick", ",", "\"Leaving at the request of %s\"", "%", "nick", ",", "msgtype", ")", "c", ".", "part", "(", "cmdargs", ")" ]
Leaves a channel. Prevent user from leaving the primary channel.
[ "Leaves", "a", "channel", "." ]
python
train
Duke-GCB/DukeDSClient
ddsc/core/fileuploader.py
https://github.com/Duke-GCB/DukeDSClient/blob/117f68fb9bae82e4c81ea487ad5d61ac350f3726/ddsc/core/fileuploader.py#L126-L138
def create_upload(self, project_id, path_data, hash_data, remote_filename=None, storage_provider_id=None): """ Create a chunked upload id to pass to create_file_chunk_url to create upload urls. :param project_id: str: uuid of the project :param path_data: PathData: holds file system data about the file we are uploading :param hash_data: HashData: contains hash alg and value for the file we are uploading :param remote_filename: str: name to use for our remote file (defaults to path_data basename otherwise) :param storage_provider_id: str: optional storage provider id :return: str: uuid for the upload """ upload_response = self._create_upload(project_id, path_data, hash_data, remote_filename=remote_filename, storage_provider_id=storage_provider_id, chunked=True) return upload_response['id']
[ "def", "create_upload", "(", "self", ",", "project_id", ",", "path_data", ",", "hash_data", ",", "remote_filename", "=", "None", ",", "storage_provider_id", "=", "None", ")", ":", "upload_response", "=", "self", ".", "_create_upload", "(", "project_id", ",", "path_data", ",", "hash_data", ",", "remote_filename", "=", "remote_filename", ",", "storage_provider_id", "=", "storage_provider_id", ",", "chunked", "=", "True", ")", "return", "upload_response", "[", "'id'", "]" ]
Create a chunked upload id to pass to create_file_chunk_url to create upload urls. :param project_id: str: uuid of the project :param path_data: PathData: holds file system data about the file we are uploading :param hash_data: HashData: contains hash alg and value for the file we are uploading :param remote_filename: str: name to use for our remote file (defaults to path_data basename otherwise) :param storage_provider_id: str: optional storage provider id :return: str: uuid for the upload
[ "Create", "a", "chunked", "upload", "id", "to", "pass", "to", "create_file_chunk_url", "to", "create", "upload", "urls", ".", ":", "param", "project_id", ":", "str", ":", "uuid", "of", "the", "project", ":", "param", "path_data", ":", "PathData", ":", "holds", "file", "system", "data", "about", "the", "file", "we", "are", "uploading", ":", "param", "hash_data", ":", "HashData", ":", "contains", "hash", "alg", "and", "value", "for", "the", "file", "we", "are", "uploading", ":", "param", "remote_filename", ":", "str", ":", "name", "to", "use", "for", "our", "remote", "file", "(", "defaults", "to", "path_data", "basename", "otherwise", ")", ":", "param", "storage_provider_id", ":", "str", ":", "optional", "storage", "provider", "id", ":", "return", ":", "str", ":", "uuid", "for", "the", "upload" ]
python
train
dstufft/storages
storages/core.py
https://github.com/dstufft/storages/blob/0d893afc1db32cd83eaf8e2ad4ed51b37933d5f0/storages/core.py#L79-L94
def get_available_name(self, name): """ Returns a filename that's free on the target storage system, and available for new content to be written to. """ dir_name, file_name = os.path.split(name) file_root, file_ext = os.path.splitext(file_name) # If the filename already exists, add an underscore and a number (before # the file extension, if one exists) to the filename until the generated # filename doesn't exist. count = itertools.count(1) while self.exists(name): # file_ext includes the dot. name = os.path.join(dir_name, "%s_%s%s" % (file_root, next(count), file_ext)) return name
[ "def", "get_available_name", "(", "self", ",", "name", ")", ":", "dir_name", ",", "file_name", "=", "os", ".", "path", ".", "split", "(", "name", ")", "file_root", ",", "file_ext", "=", "os", ".", "path", ".", "splitext", "(", "file_name", ")", "# If the filename already exists, add an underscore and a number (before", "# the file extension, if one exists) to the filename until the generated", "# filename doesn't exist.", "count", "=", "itertools", ".", "count", "(", "1", ")", "while", "self", ".", "exists", "(", "name", ")", ":", "# file_ext includes the dot.", "name", "=", "os", ".", "path", ".", "join", "(", "dir_name", ",", "\"%s_%s%s\"", "%", "(", "file_root", ",", "next", "(", "count", ")", ",", "file_ext", ")", ")", "return", "name" ]
Returns a filename that's free on the target storage system, and available for new content to be written to.
[ "Returns", "a", "filename", "that", "s", "free", "on", "the", "target", "storage", "system", "and", "available", "for", "new", "content", "to", "be", "written", "to", "." ]
python
train
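The underscore-and-counter renaming scheme above, exercised against an in-memory set instead of a real storage backend; the file names are made up.

import itertools
import os

existing = {'docs/report.pdf', 'docs/report_1.pdf'}        # pretend these names are taken

def get_available_name(name):
    dir_name, file_name = os.path.split(name)
    file_root, file_ext = os.path.splitext(file_name)
    count = itertools.count(1)
    while name in existing:
        name = os.path.join(dir_name, '%s_%s%s' % (file_root, next(count), file_ext))
    return name

print(get_available_name('docs/report.pdf'))               # -> docs/report_2.pdf (on POSIX)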
edibledinos/pwnypack
pwnypack/bytecode.py
https://github.com/edibledinos/pwnypack/blob/e0a5a8e6ef3f4f1f7e1b91ee379711f4a49cb0e6/pwnypack/bytecode.py#L574-L587
def from_function(cls, f, *args, **kwargs): """ Create a new instance from a function. Gets the code object from the function and passes it and any other specified parameters to :meth:`from_code`. Arguments: f(function): The function to get the code object from. Returns: CodeObject: A new :class:`CodeObject` instance. """ return cls.from_code(six.get_function_code(f), *args, **kwargs)
[ "def", "from_function", "(", "cls", ",", "f", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "cls", ".", "from_code", "(", "six", ".", "get_function_code", "(", "f", ")", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
Create a new instance from a function. Gets the code object from the function and passes it and any other specified parameters to :meth:`from_code`. Arguments: f(function): The function to get the code object from. Returns: CodeObject: A new :class:`CodeObject` instance.
[ "Create", "a", "new", "instance", "from", "a", "function", ".", "Gets", "the", "code", "object", "from", "the", "function", "and", "passes", "it", "and", "any", "other", "specified", "parameters", "to", ":", "meth", ":", "from_code", "." ]
python
train
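On Python 3, six.get_function_code(f) resolves to f.__code__; a quick look at what that code object exposes, using a throwaway function.

def greet(name):
    return 'hello %s' % name

code = greet.__code__                      # what six.get_function_code(greet) returns here
print(code.co_name, code.co_argcount)      # -> greet 1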
juicer/juicer
juicer/utils/__init__.py
https://github.com/juicer/juicer/blob/0c9f0fd59e293d45df6b46e81f675d33221c600d/juicer/utils/__init__.py#L337-L352
def find_pattern(search_base, pattern='*.rpm'): """ `search_base` - The directory to begin walking down. `pattern` - File pattern to match for. This is a generator which yields the full path to files (one at a time) which match the given glob (`pattern`). """ # Stolen from http://rosettacode.org/wiki/Walk_a_directory/Recursively#Python if (not os.path.isdir(search_base)) and os.path.exists(search_base): # Adapt the algorithm to gracefully handle non-directory search paths yield search_base else: for root, dirs, files in os.walk(search_base): for filename in fnmatch.filter(files, pattern): yield os.path.join(root, filename)
[ "def", "find_pattern", "(", "search_base", ",", "pattern", "=", "'*.rpm'", ")", ":", "# Stolen from http://rosettacode.org/wiki/Walk_a_directory/Recursively#Python", "if", "(", "not", "os", ".", "path", ".", "isdir", "(", "search_base", ")", ")", "and", "os", ".", "path", ".", "exists", "(", "search_base", ")", ":", "# Adapt the algorithm to gracefully handle non-directory search paths", "yield", "search_base", "else", ":", "for", "root", ",", "dirs", ",", "files", "in", "os", ".", "walk", "(", "search_base", ")", ":", "for", "filename", "in", "fnmatch", ".", "filter", "(", "files", ",", "pattern", ")", ":", "yield", "os", ".", "path", ".", "join", "(", "root", ",", "filename", ")" ]
`search_base` - The directory to begin walking down. `pattern` - File pattern to match for. This is a generator which yields the full path to files (one at a time) which match the given glob (`pattern`).
[ "search_base", "-", "The", "directory", "to", "begin", "walking", "down", ".", "pattern", "-", "File", "pattern", "to", "match", "for", "." ]
python
train
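A short usage sketch for the generator above, assuming juicer is importable; the search path is a hypothetical example:

from juicer.utils import find_pattern

# Stream matching paths one at a time instead of building a full list.
for rpm_path in find_pattern("/srv/repos", pattern="*.rpm"):
    print(rpm_path)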
InspectorMustache/base16-builder-python
pybase16_builder/updater.py
https://github.com/InspectorMustache/base16-builder-python/blob/586f1f87ee9f70696ab19c542af6ef55c6548a2e/pybase16_builder/updater.py#L34-L57
def git_clone(git_url, path): """Clone git repository at $git_url to $path.""" if os.path.exists(os.path.join(path, '.git')): # get rid of local repo if it already exists shutil.rmtree(path) os.makedirs(path, exist_ok=True) print('Start cloning from {}…'.format(git_url)) git_proc = subprocess.Popen(['git', 'clone', git_url, path], stderr=subprocess.PIPE, stdout=subprocess.PIPE, env={'GIT_TERMINAL_PROMPT': '0'}) try: stdoutmsg, stderrmsg = git_proc.communicate(timeout=120) except subprocess.TimeoutExpired: git_proc.kill() stderrmsg = b'Timed out.' if git_proc.returncode == 0: print('Cloned {}.'.format(git_url)) else: print('Error cloning from {}:\n{}'.format(git_url, stderrmsg.decode('utf-8')))
[ "def", "git_clone", "(", "git_url", ",", "path", ")", ":", "if", "os", ".", "path", ".", "exists", "(", "os", ".", "path", ".", "join", "(", "path", ",", "'.git'", ")", ")", ":", "# get rid of local repo if it already exists", "shutil", ".", "rmtree", "(", "path", ")", "os", ".", "makedirs", "(", "path", ",", "exist_ok", "=", "True", ")", "print", "(", "'Start cloning from {}…'.f", "o", "rmat(g", "i", "t_url))", "", "", "git_proc", "=", "subprocess", ".", "Popen", "(", "[", "'git'", ",", "'clone'", ",", "git_url", ",", "path", "]", ",", "stderr", "=", "subprocess", ".", "PIPE", ",", "stdout", "=", "subprocess", ".", "PIPE", ",", "env", "=", "{", "'GIT_TERMINAL_PROMPT'", ":", "'0'", "}", ")", "try", ":", "stdoutmsg", ",", "stderrmsg", "=", "git_proc", ".", "communicate", "(", "timeout", "=", "120", ")", "except", "subprocess", ".", "TimeoutExpired", ":", "git_proc", ".", "kill", "(", ")", "stderrmsg", "=", "b'Timed out.'", "if", "git_proc", ".", "returncode", "==", "0", ":", "print", "(", "'Cloned {}.'", ".", "format", "(", "git_url", ")", ")", "else", ":", "print", "(", "'Error cloning from {}:\\n{}'", ".", "format", "(", "git_url", ",", "stderrmsg", ".", "decode", "(", "'utf-8'", ")", ")", ")" ]
Clone git repository at $git_url to $path.
[ "Clone", "git", "repository", "at", "$git_url", "to", "$path", "." ]
python
train
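The timeout handling above (communicate with a deadline, kill on expiry) is reusable beyond git. A hedged, generic sketch of that pattern; the example command at the end is hypothetical:

import subprocess

def run_with_timeout(cmd, timeout=120):
    # Launch the child with captured output, as git_clone does.
    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    try:
        out, err = proc.communicate(timeout=timeout)
    except subprocess.TimeoutExpired:
        proc.kill()                    # stop the stalled child
        out, err = proc.communicate()  # collect whatever it already wrote
    return proc.returncode, out, err

# Hypothetical call:
# run_with_timeout(["git", "clone", "https://example.org/repo.git", "/tmp/repo"])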
NiklasRosenstein-Python/nr-deprecated
nr/ast/dynamic_eval.py
https://github.com/NiklasRosenstein-Python/nr-deprecated/blob/f9f8b89ea1b084841a8ab65784eaf68852686b2a/nr/ast/dynamic_eval.py#L114-L123
def __get_subscript(self, name, ctx=None): """ Returns `<data_var>["<name>"]` """ assert isinstance(name, string_types), name return ast.Subscript( value=ast.Name(id=self.data_var, ctx=ast.Load()), slice=ast.Index(value=ast.Str(s=name)), ctx=ctx)
[ "def", "__get_subscript", "(", "self", ",", "name", ",", "ctx", "=", "None", ")", ":", "assert", "isinstance", "(", "name", ",", "string_types", ")", ",", "name", "return", "ast", ".", "Subscript", "(", "value", "=", "ast", ".", "Name", "(", "id", "=", "self", ".", "data_var", ",", "ctx", "=", "ast", ".", "Load", "(", ")", ")", ",", "slice", "=", "ast", ".", "Index", "(", "value", "=", "ast", ".", "Str", "(", "s", "=", "name", ")", ")", ",", "ctx", "=", "ctx", ")" ]
Returns `<data_var>["<name>"]`
[ "Returns", "<data_var", ">", "[", "<name", ">", "]" ]
python
train
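A minimal sketch showing how such a subscript node can be compiled and evaluated, assuming Python 3.9 or newer, where the slice is the value node itself and ast.Index is no longer needed:

import ast

def subscript_load(data_var, name):
    # Build an AST node equivalent to: <data_var>["<name>"]
    node = ast.Subscript(
        value=ast.Name(id=data_var, ctx=ast.Load()),
        slice=ast.Constant(value=name),
        ctx=ast.Load(),
    )
    return ast.fix_missing_locations(ast.Expression(body=node))

tree = subscript_load("data", "answer")
code = compile(tree, "<generated>", "eval")
print(eval(code, {"data": {"answer": 42}}))   # -> 42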
hyperledger/indy-plenum
plenum/common/signer_did.py
https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/plenum/common/signer_did.py#L122-L129
def sign(self, msg: Dict) -> Dict: """ Return a signature for the given message. """ ser = serialize_msg_for_signing(msg, topLevelKeysToIgnore=[f.SIG.nm]) bsig = self.naclSigner.signature(ser) sig = base58.b58encode(bsig).decode("utf-8") return sig
[ "def", "sign", "(", "self", ",", "msg", ":", "Dict", ")", "->", "Dict", ":", "ser", "=", "serialize_msg_for_signing", "(", "msg", ",", "topLevelKeysToIgnore", "=", "[", "f", ".", "SIG", ".", "nm", "]", ")", "bsig", "=", "self", ".", "naclSigner", ".", "signature", "(", "ser", ")", "sig", "=", "base58", ".", "b58encode", "(", "bsig", ")", ".", "decode", "(", "\"utf-8\"", ")", "return", "sig" ]
Return a signature for the given message.
[ "Return", "a", "signature", "for", "the", "given", "message", "." ]
python
train
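A sketch of the same sign-then-base58-encode pattern, assuming PyNaCl in place of plenum's naclSigner and reducing the serialization step to plain utf-8 encoding of a hypothetical message:

import base58
from nacl.signing import SigningKey

signing_key = SigningKey.generate()                              # ad-hoc key for the sketch
serialized = '{"type": "NYM", "dest": "abc"}'.encode("utf-8")    # hypothetical payload
raw_signature = signing_key.sign(serialized).signature           # 64 raw signature bytes
signature = base58.b58encode(raw_signature).decode("utf-8")
print(signature)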
connectordb/connectordb-python
connectordb/_websocket.py
https://github.com/connectordb/connectordb-python/blob/2092b0cb30898139a247176bcf433d5a4abde7cb/connectordb/_websocket.py#L69-L86
def setauth(self,basic_auth): """ setauth can be used during runtime to make sure that authentication is reset. it can be used when changing passwords/apikeys to make sure reconnects succeed """ self.headers = [] # If we have auth if basic_auth is not None: # we use a cheap hack to get the basic auth header out of the auth object. # This snippet ends up with us having an array of the necessary headers # to perform authentication. class auth_extractor(): def __init__(self): self.headers = {} extractor = auth_extractor() basic_auth(extractor) for header in extractor.headers: self.headers.append("%s: %s" % (header, extractor.headers[header]))
[ "def", "setauth", "(", "self", ",", "basic_auth", ")", ":", "self", ".", "headers", "=", "[", "]", "# If we have auth", "if", "basic_auth", "is", "not", "None", ":", "# we use a cheap hack to get the basic auth header out of the auth object.", "# This snippet ends up with us having an array of the necessary headers", "# to perform authentication.", "class", "auth_extractor", "(", ")", ":", "def", "__init__", "(", "self", ")", ":", "self", ".", "headers", "=", "{", "}", "extractor", "=", "auth_extractor", "(", ")", "basic_auth", "(", "extractor", ")", "for", "header", "in", "extractor", ".", "headers", ":", "self", ".", "headers", ".", "append", "(", "\"%s: %s\"", "%", "(", "header", ",", "extractor", ".", "headers", "[", "header", "]", ")", ")" ]
setauth can be used during runtime to make sure that authentication is reset. it can be used when changing passwords/apikeys to make sure reconnects succeed
[ "setauth", "can", "be", "used", "during", "runtime", "to", "make", "sure", "that", "authentication", "is", "reset", ".", "it", "can", "be", "used", "when", "changing", "passwords", "/", "apikeys", "to", "make", "sure", "reconnects", "succeed" ]
python
test
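The extractor trick above also works against requests' own auth callables, since they only need an object with a headers mapping. A small sketch with placeholder credentials:

from requests.auth import HTTPBasicAuth

class AuthExtractor(object):
    # Minimal stand-in for a prepared request: auth callables only touch .headers.
    def __init__(self):
        self.headers = {}

extractor = AuthExtractor()
HTTPBasicAuth("user", "secret")(extractor)          # placeholder credentials
header_lines = ["%s: %s" % (k, v) for k, v in extractor.headers.items()]
print(header_lines)   # -> ['Authorization: Basic <base64 of user:secret>']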
pypa/pipenv
pipenv/vendor/pexpect/screen.py
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/pexpect/screen.py#L339-L345
def scroll_constrain (self): '''This keeps the scroll region within the screen region.''' if self.scroll_row_start <= 0: self.scroll_row_start = 1 if self.scroll_row_end > self.rows: self.scroll_row_end = self.rows
[ "def", "scroll_constrain", "(", "self", ")", ":", "if", "self", ".", "scroll_row_start", "<=", "0", ":", "self", ".", "scroll_row_start", "=", "1", "if", "self", ".", "scroll_row_end", ">", "self", ".", "rows", ":", "self", ".", "scroll_row_end", "=", "self", ".", "rows" ]
This keeps the scroll region within the screen region.
[ "This", "keeps", "the", "scroll", "region", "within", "the", "screen", "region", "." ]
python
train
pydata/xarray
xarray/coding/variables.py
https://github.com/pydata/xarray/blob/6d93a95d05bdbfc33fff24064f67d29dd891ab58/xarray/coding/variables.py#L116-L125
def pop_to(source, dest, key, name=None): """ A convenience function which pops a key k from source to dest. None values are not passed on. If k already exists in dest an error is raised. """ value = source.pop(key, None) if value is not None: safe_setitem(dest, key, value, name=name) return value
[ "def", "pop_to", "(", "source", ",", "dest", ",", "key", ",", "name", "=", "None", ")", ":", "value", "=", "source", ".", "pop", "(", "key", ",", "None", ")", "if", "value", "is", "not", "None", ":", "safe_setitem", "(", "dest", ",", "key", ",", "value", ",", "name", "=", "name", ")", "return", "value" ]
A convenience function which pops a key k from source to dest. None values are not passed on. If k already exists in dest an error is raised.
[ "A", "convenience", "function", "which", "pops", "a", "key", "k", "from", "source", "to", "dest", ".", "None", "values", "are", "not", "passed", "on", ".", "If", "k", "already", "exists", "in", "dest", "an", "error", "is", "raised", "." ]
python
train
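A usage sketch of the pop-and-move helper, with xarray's safe_setitem reduced to a plain dict assignment for illustration:

def pop_to(source, dest, key, name=None):
    value = source.pop(key, None)
    if value is not None:
        dest[key] = value   # safe_setitem additionally refuses to overwrite an existing key
    return value

attrs = {"_FillValue": -999, "units": "K"}
encoding = {}
pop_to(attrs, encoding, "_FillValue", name="temperature")
print(attrs)      # {'units': 'K'}
print(encoding)   # {'_FillValue': -999}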
DataBiosphere/toil
src/toil/batchSystems/lsfHelper.py
https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/batchSystems/lsfHelper.py#L108-L117
def apply_lsadmin(fn): """ apply fn to each line of lsadmin, returning the result """ cmd = ["lsadmin", "showconf", "lim"] try: output = subprocess.check_output(cmd).decode('utf-8') except: return None return fn(output.split("\n"))
[ "def", "apply_lsadmin", "(", "fn", ")", ":", "cmd", "=", "[", "\"lsadmin\"", ",", "\"showconf\"", ",", "\"lim\"", "]", "try", ":", "output", "=", "subprocess", ".", "check_output", "(", "cmd", ")", ".", "decode", "(", "'utf-8'", ")", "except", ":", "return", "None", "return", "fn", "(", "output", ".", "split", "(", "\"\\n\"", ")", ")" ]
apply fn to each line of lsadmin, returning the result
[ "apply", "fn", "to", "each", "line", "of", "lsadmin", "returning", "the", "result" ]
python
train
watchforstock/evohome-client
evohomeclient2/__init__.py
https://github.com/watchforstock/evohome-client/blob/f1cb9273e97946d79c0651f00a218abbf7ada53a/evohomeclient2/__init__.py#L236-L243
def gateway(self): """Return the detail of the gateway.""" url = 'https://tccna.honeywell.com/WebAPI/emea/api/v1/gateway' response = requests.get(url, headers=self._headers()) response.raise_for_status() return response.json()
[ "def", "gateway", "(", "self", ")", ":", "url", "=", "'https://tccna.honeywell.com/WebAPI/emea/api/v1/gateway'", "response", "=", "requests", ".", "get", "(", "url", ",", "headers", "=", "self", ".", "_headers", "(", ")", ")", "response", ".", "raise_for_status", "(", ")", "return", "response", ".", "json", "(", ")" ]
Return the detail of the gateway.
[ "Return", "the", "detail", "of", "the", "gateway", "." ]
python
train
greenbone/ospd
ospd/misc.py
https://github.com/greenbone/ospd/blob/cef773166b15a19c17764721d3fe404fa0e107bf/ospd/misc.py#L565-L594
def target_to_list(target): """ Attempt to return a list of single hosts from a target string. """ # Is it an IPv4 address ? new_list = target_to_ipv4(target) # Is it an IPv6 address ? if not new_list: new_list = target_to_ipv6(target) # Is it an IPv4 CIDR ? if not new_list: new_list = target_to_ipv4_cidr(target) # Is it an IPv6 CIDR ? if not new_list: new_list = target_to_ipv6_cidr(target) # Is it an IPv4 short-range ? if not new_list: new_list = target_to_ipv4_short(target) # Is it an IPv4 long-range ? if not new_list: new_list = target_to_ipv4_long(target) # Is it an IPv6 short-range ? if not new_list: new_list = target_to_ipv6_short(target) # Is it an IPv6 long-range ? if not new_list: new_list = target_to_ipv6_long(target) # Is it a hostname ? if not new_list: new_list = target_to_hostname(target) return new_list
[ "def", "target_to_list", "(", "target", ")", ":", "# Is it an IPv4 address ?", "new_list", "=", "target_to_ipv4", "(", "target", ")", "# Is it an IPv6 address ?", "if", "not", "new_list", ":", "new_list", "=", "target_to_ipv6", "(", "target", ")", "# Is it an IPv4 CIDR ?", "if", "not", "new_list", ":", "new_list", "=", "target_to_ipv4_cidr", "(", "target", ")", "# Is it an IPv6 CIDR ?", "if", "not", "new_list", ":", "new_list", "=", "target_to_ipv6_cidr", "(", "target", ")", "# Is it an IPv4 short-range ?", "if", "not", "new_list", ":", "new_list", "=", "target_to_ipv4_short", "(", "target", ")", "# Is it an IPv4 long-range ?", "if", "not", "new_list", ":", "new_list", "=", "target_to_ipv4_long", "(", "target", ")", "# Is it an IPv6 short-range ?", "if", "not", "new_list", ":", "new_list", "=", "target_to_ipv6_short", "(", "target", ")", "# Is it an IPv6 long-range ?", "if", "not", "new_list", ":", "new_list", "=", "target_to_ipv6_long", "(", "target", ")", "# Is it a hostname ?", "if", "not", "new_list", ":", "new_list", "=", "target_to_hostname", "(", "target", ")", "return", "new_list" ]
Attempt to return a list of single hosts from a target string.
[ "Attempt", "to", "return", "a", "list", "of", "single", "hosts", "from", "a", "target", "string", "." ]
python
train
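The same try-each-parser cascade can be expressed as a loop over parser callables. The two parsers below are simplified stand-ins for ospd's target_to_* helpers, not the real implementations:

import ipaddress

def parse_single_ip(target):
    try:
        return [str(ipaddress.ip_address(target))]
    except ValueError:
        return None

def parse_cidr(target):
    try:
        return [str(host) for host in ipaddress.ip_network(target, strict=False).hosts()]
    except ValueError:
        return None

def hosts_from_target(target, parsers=(parse_single_ip, parse_cidr)):
    # Try each parser in order of precedence and keep the first non-empty result.
    for parser in parsers:
        hosts = parser(target)
        if hosts:
            return hosts
    return None

print(hosts_from_target("192.168.0.0/30"))   # ['192.168.0.1', '192.168.0.2']

Keeping the parsers in a tuple makes the precedence order explicit and easy to extend.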
glitchassassin/lackey
lackey/RegionMatching.py
https://github.com/glitchassassin/lackey/blob/7adadfacd7f45d81186710be992f5668b15399fe/lackey/RegionMatching.py#L1506-L1515
def getEvents(self): """ Returns a list of all events that have occurred. Empties the internal queue. """ caught_events = self._observer.caught_events self._observer.caught_events = [] for event in caught_events: self._observer.activate_event(event["name"]) return caught_events
[ "def", "getEvents", "(", "self", ")", ":", "caught_events", "=", "self", ".", "_observer", ".", "caught_events", "self", ".", "_observer", ".", "caught_events", "=", "[", "]", "for", "event", "in", "caught_events", ":", "self", ".", "_observer", ".", "activate_event", "(", "event", "[", "\"name\"", "]", ")", "return", "caught_events" ]
Returns a list of all events that have occurred. Empties the internal queue.
[ "Returns", "a", "list", "of", "all", "events", "that", "have", "occurred", "." ]
python
train
wesm/feather
cpp/build-support/cpplint.py
https://github.com/wesm/feather/blob/99267b30461c46b9e437f95e1d9338a92a854270/cpp/build-support/cpplint.py#L6048-L6121
def ProcessConfigOverrides(filename): """ Loads the configuration files and processes the config overrides. Args: filename: The name of the file being processed by the linter. Returns: False if the current |filename| should not be processed further. """ abs_filename = os.path.abspath(filename) cfg_filters = [] keep_looking = True while keep_looking: abs_path, base_name = os.path.split(abs_filename) if not base_name: break # Reached the root directory. cfg_file = os.path.join(abs_path, "CPPLINT.cfg") abs_filename = abs_path if not os.path.isfile(cfg_file): continue try: with open(cfg_file) as file_handle: for line in file_handle: line, _, _ = line.partition('#') # Remove comments. if not line.strip(): continue name, _, val = line.partition('=') name = name.strip() val = val.strip() if name == 'set noparent': keep_looking = False elif name == 'filter': cfg_filters.append(val) elif name == 'exclude_files': # When matching exclude_files pattern, use the base_name of # the current file name or the directory name we are processing. # For example, if we are checking for lint errors in /foo/bar/baz.cc # and we found the .cfg file at /foo/CPPLINT.cfg, then the config # file's "exclude_files" filter is meant to be checked against "bar" # and not "baz" nor "bar/baz.cc". if base_name: pattern = re.compile(val) if pattern.match(base_name): sys.stderr.write('Ignoring "%s": file excluded by "%s". ' 'File path component "%s" matches ' 'pattern "%s"\n' % (filename, cfg_file, base_name, val)) return False elif name == 'linelength': global _line_length try: _line_length = int(val) except ValueError: sys.stderr.write('Line length must be numeric.') else: sys.stderr.write( 'Invalid configuration option (%s) in file %s\n' % (name, cfg_file)) except IOError: sys.stderr.write( "Skipping config file '%s': Can't open for reading\n" % cfg_file) keep_looking = False # Apply all the accumulated filters in reverse order (top-level directory # config options having the least priority). for filter in reversed(cfg_filters): _AddFilters(filter) return True
[ "def", "ProcessConfigOverrides", "(", "filename", ")", ":", "abs_filename", "=", "os", ".", "path", ".", "abspath", "(", "filename", ")", "cfg_filters", "=", "[", "]", "keep_looking", "=", "True", "while", "keep_looking", ":", "abs_path", ",", "base_name", "=", "os", ".", "path", ".", "split", "(", "abs_filename", ")", "if", "not", "base_name", ":", "break", "# Reached the root directory.", "cfg_file", "=", "os", ".", "path", ".", "join", "(", "abs_path", ",", "\"CPPLINT.cfg\"", ")", "abs_filename", "=", "abs_path", "if", "not", "os", ".", "path", ".", "isfile", "(", "cfg_file", ")", ":", "continue", "try", ":", "with", "open", "(", "cfg_file", ")", "as", "file_handle", ":", "for", "line", "in", "file_handle", ":", "line", ",", "_", ",", "_", "=", "line", ".", "partition", "(", "'#'", ")", "# Remove comments.", "if", "not", "line", ".", "strip", "(", ")", ":", "continue", "name", ",", "_", ",", "val", "=", "line", ".", "partition", "(", "'='", ")", "name", "=", "name", ".", "strip", "(", ")", "val", "=", "val", ".", "strip", "(", ")", "if", "name", "==", "'set noparent'", ":", "keep_looking", "=", "False", "elif", "name", "==", "'filter'", ":", "cfg_filters", ".", "append", "(", "val", ")", "elif", "name", "==", "'exclude_files'", ":", "# When matching exclude_files pattern, use the base_name of", "# the current file name or the directory name we are processing.", "# For example, if we are checking for lint errors in /foo/bar/baz.cc", "# and we found the .cfg file at /foo/CPPLINT.cfg, then the config", "# file's \"exclude_files\" filter is meant to be checked against \"bar\"", "# and not \"baz\" nor \"bar/baz.cc\".", "if", "base_name", ":", "pattern", "=", "re", ".", "compile", "(", "val", ")", "if", "pattern", ".", "match", "(", "base_name", ")", ":", "sys", ".", "stderr", ".", "write", "(", "'Ignoring \"%s\": file excluded by \"%s\". '", "'File path component \"%s\" matches '", "'pattern \"%s\"\\n'", "%", "(", "filename", ",", "cfg_file", ",", "base_name", ",", "val", ")", ")", "return", "False", "elif", "name", "==", "'linelength'", ":", "global", "_line_length", "try", ":", "_line_length", "=", "int", "(", "val", ")", "except", "ValueError", ":", "sys", ".", "stderr", ".", "write", "(", "'Line length must be numeric.'", ")", "else", ":", "sys", ".", "stderr", ".", "write", "(", "'Invalid configuration option (%s) in file %s\\n'", "%", "(", "name", ",", "cfg_file", ")", ")", "except", "IOError", ":", "sys", ".", "stderr", ".", "write", "(", "\"Skipping config file '%s': Can't open for reading\\n\"", "%", "cfg_file", ")", "keep_looking", "=", "False", "# Apply all the accumulated filters in reverse order (top-level directory", "# config options having the least priority).", "for", "filter", "in", "reversed", "(", "cfg_filters", ")", ":", "_AddFilters", "(", "filter", ")", "return", "True" ]
Loads the configuration files and processes the config overrides. Args: filename: The name of the file being processed by the linter. Returns: False if the current |filename| should not be processed further.
[ "Loads", "the", "configuration", "files", "and", "processes", "the", "config", "overrides", "." ]
python
train
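The core lookup above walks from the linted file's directory toward the filesystem root, honouring the closest CPPLINT.cfg first. A minimal sketch of that directory walk in isolation; the example paths in the comment are hypothetical:

import os

def find_config_files(filename, cfg_name="CPPLINT.cfg"):
    # Collect every config file between the file's directory and the root,
    # closest directory first, mirroring the lookup order used above.
    found = []
    abs_path = os.path.dirname(os.path.abspath(filename))
    while True:
        candidate = os.path.join(abs_path, cfg_name)
        if os.path.isfile(candidate):
            found.append(candidate)
        parent = os.path.dirname(abs_path)
        if parent == abs_path:      # reached the filesystem root
            break
        abs_path = parent
    return found

# e.g. find_config_files("src/foo/bar.cc") might return
# ['/repo/src/foo/CPPLINT.cfg', '/repo/CPPLINT.cfg']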
chemlab/chemlab
chemlab/utils/pbc.py
https://github.com/chemlab/chemlab/blob/c8730966316d101e24f39ac3b96b51282aba0abe/chemlab/utils/pbc.py#L123-L134
def geometric_center(coords, periodic): '''Geometric center taking into account periodic boundaries''' max_vals = periodic theta = 2 * np.pi * (coords / max_vals) eps = np.cos(theta) * max_vals / (2 * np.pi) zeta = np.sin(theta) * max_vals / (2 * np.pi) eps_avg = eps.sum(axis=0) zeta_avg = zeta.sum(axis=0) theta_avg = np.arctan2(-zeta_avg, -eps_avg) + np.pi return theta_avg * max_vals / (2 * np.pi)
[ "def", "geometric_center", "(", "coords", ",", "periodic", ")", ":", "max_vals", "=", "periodic", "theta", "=", "2", "*", "np", ".", "pi", "*", "(", "coords", "/", "max_vals", ")", "eps", "=", "np", ".", "cos", "(", "theta", ")", "*", "max_vals", "/", "(", "2", "*", "np", ".", "pi", ")", "zeta", "=", "np", ".", "sin", "(", "theta", ")", "*", "max_vals", "/", "(", "2", "*", "np", ".", "pi", ")", "eps_avg", "=", "eps", ".", "sum", "(", "axis", "=", "0", ")", "zeta_avg", "=", "zeta", ".", "sum", "(", "axis", "=", "0", ")", "theta_avg", "=", "np", ".", "arctan2", "(", "-", "zeta_avg", ",", "-", "eps_avg", ")", "+", "np", ".", "pi", "return", "theta_avg", "*", "max_vals", "/", "(", "2", "*", "np", ".", "pi", ")" ]
Geometric center taking into account periodic boundaries
[ "Geometric", "center", "taking", "into", "account", "periodic", "boundaries" ]
python
train
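The function maps each coordinate to an angle on a circle, averages the cosine and sine components, and maps the mean angle back with arctan2, so points near opposite box edges average onto the boundary instead of the box middle. A small self-contained check of that behaviour, reusing the same formula:

import numpy as np

def geometric_center(coords, periodic):
    max_vals = np.asarray(periodic, dtype=float)
    theta = 2 * np.pi * (coords / max_vals)
    eps = np.cos(theta) * max_vals / (2 * np.pi)
    zeta = np.sin(theta) * max_vals / (2 * np.pi)
    theta_avg = np.arctan2(-zeta.sum(axis=0), -eps.sum(axis=0)) + np.pi
    return theta_avg * max_vals / (2 * np.pi)

# Two points straddling the boundary of a 10-unit periodic box.
coords = np.array([[9.5], [0.5]])
print(np.mean(coords, axis=0))            # [5.] : naive mean, wrong across the boundary
print(geometric_center(coords, [10.0]))   # ~[0.] or ~[10.] : both mean "on the shared edge"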
gsi-upm/soil
soil/history.py
https://github.com/gsi-upm/soil/blob/a3ea434f237f039c3cadbc2e0a83ae626d77b818/soil/history.py#L79-L90
def save_record(self, agent_id, t_step, key, value): ''' Save a collection of records to the database. Database writes are cached. ''' value = self.convert(key, value) self._tups.append(Record(agent_id=agent_id, t_step=t_step, key=key, value=value)) if len(self._tups) > 100: self.flush_cache()
[ "def", "save_record", "(", "self", ",", "agent_id", ",", "t_step", ",", "key", ",", "value", ")", ":", "value", "=", "self", ".", "convert", "(", "key", ",", "value", ")", "self", ".", "_tups", ".", "append", "(", "Record", "(", "agent_id", "=", "agent_id", ",", "t_step", "=", "t_step", ",", "key", "=", "key", ",", "value", "=", "value", ")", ")", "if", "len", "(", "self", ".", "_tups", ")", ">", "100", ":", "self", ".", "flush_cache", "(", ")" ]
Save a collection of records to the database. Database writes are cached.
[ "Save", "a", "collection", "of", "records", "to", "the", "database", ".", "Database", "writes", "are", "cached", "." ]
python
train
aiogram/aiogram
aiogram/bot/bot.py
https://github.com/aiogram/aiogram/blob/2af930149ce2482547721e2c8755c10307295e48/aiogram/bot/bot.py#L1325-L1346
async def set_chat_sticker_set(self, chat_id: typing.Union[base.Integer, base.String], sticker_set_name: base.String) -> base.Boolean: """ Use this method to set a new group sticker set for a supergroup. The bot must be an administrator in the chat for this to work and must have the appropriate admin rights. Use the field can_set_sticker_set optionally returned in getChat requests to check if the bot can use this method. Source: https://core.telegram.org/bots/api#setchatstickerset :param chat_id: Unique identifier for the target chat or username of the target supergroup :type chat_id: :obj:`typing.Union[base.Integer, base.String]` :param sticker_set_name: Name of the sticker set to be set as the group sticker set :type sticker_set_name: :obj:`base.String` :return: Returns True on success :rtype: :obj:`base.Boolean` """ payload = generate_payload(**locals()) result = await self.request(api.Methods.SET_CHAT_STICKER_SET, payload) return result
[ "async", "def", "set_chat_sticker_set", "(", "self", ",", "chat_id", ":", "typing", ".", "Union", "[", "base", ".", "Integer", ",", "base", ".", "String", "]", ",", "sticker_set_name", ":", "base", ".", "String", ")", "->", "base", ".", "Boolean", ":", "payload", "=", "generate_payload", "(", "*", "*", "locals", "(", ")", ")", "result", "=", "await", "self", ".", "request", "(", "api", ".", "Methods", ".", "SET_CHAT_STICKER_SET", ",", "payload", ")", "return", "result" ]
Use this method to set a new group sticker set for a supergroup. The bot must be an administrator in the chat for this to work and must have the appropriate admin rights. Use the field can_set_sticker_set optionally returned in getChat requests to check if the bot can use this method. Source: https://core.telegram.org/bots/api#setchatstickerset :param chat_id: Unique identifier for the target chat or username of the target supergroup :type chat_id: :obj:`typing.Union[base.Integer, base.String]` :param sticker_set_name: Name of the sticker set to be set as the group sticker set :type sticker_set_name: :obj:`base.String` :return: Returns True on success :rtype: :obj:`base.Boolean`
[ "Use", "this", "method", "to", "set", "a", "new", "group", "sticker", "set", "for", "a", "supergroup", ".", "The", "bot", "must", "be", "an", "administrator", "in", "the", "chat", "for", "this", "to", "work", "and", "must", "have", "the", "appropriate", "admin", "rights", "." ]
python
train
raiden-network/raiden
raiden/network/rpc/client.py
https://github.com/raiden-network/raiden/blob/407ba15c72074e9de88771d6b9661ff4dc36bef5/raiden/network/rpc/client.py#L232-L262
def dependencies_order_of_build(target_contract, dependencies_map): """ Return an ordered list of contracts that is sufficient to successfully deploy the target contract. Note: This function assumes that the `dependencies_map` is an acyclic graph. """ if not dependencies_map: return [target_contract] if target_contract not in dependencies_map: raise ValueError('no dependencies defined for {}'.format(target_contract)) order = [target_contract] todo = list(dependencies_map[target_contract]) while todo: target_contract = todo.pop(0) target_pos = len(order) for dependency in dependencies_map[target_contract]: # we need to add the current contract before all its depedencies if dependency in order: target_pos = order.index(dependency) else: todo.append(dependency) order.insert(target_pos, target_contract) order.reverse() return order
[ "def", "dependencies_order_of_build", "(", "target_contract", ",", "dependencies_map", ")", ":", "if", "not", "dependencies_map", ":", "return", "[", "target_contract", "]", "if", "target_contract", "not", "in", "dependencies_map", ":", "raise", "ValueError", "(", "'no dependencies defined for {}'", ".", "format", "(", "target_contract", ")", ")", "order", "=", "[", "target_contract", "]", "todo", "=", "list", "(", "dependencies_map", "[", "target_contract", "]", ")", "while", "todo", ":", "target_contract", "=", "todo", ".", "pop", "(", "0", ")", "target_pos", "=", "len", "(", "order", ")", "for", "dependency", "in", "dependencies_map", "[", "target_contract", "]", ":", "# we need to add the current contract before all its depedencies", "if", "dependency", "in", "order", ":", "target_pos", "=", "order", ".", "index", "(", "dependency", ")", "else", ":", "todo", ".", "append", "(", "dependency", ")", "order", ".", "insert", "(", "target_pos", ",", "target_contract", ")", "order", ".", "reverse", "(", ")", "return", "order" ]
Return an ordered list of contracts that is sufficient to successfully deploy the target contract. Note: This function assumes that the `dependencies_map` is an acyclic graph.
[ "Return", "an", "ordered", "list", "of", "contracts", "that", "is", "sufficient", "to", "successfully", "deploy", "the", "target", "contract", "." ]
python
train
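For an acyclic dependency map like the one this routine assumes, the standard library offers an equivalent ordering via graphlib (Python 3.9+). A sketch with a hypothetical contract map:

from graphlib import TopologicalSorter

# The graph maps each contract to the contracts that must be deployed before it.
dependencies_map = {
    "Channel": {"Token", "Library"},
    "Token": {"Library"},
    "Library": set(),
}

order = list(TopologicalSorter(dependencies_map).static_order())
print(order)   # ['Library', 'Token', 'Channel'] : dependencies deploy first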
larsyencken/proj
proj/__init__.py
https://github.com/larsyencken/proj/blob/44fd72aeb9bbf72046d81c4e9e4306a23335dc0a/proj/__init__.py#L145-L161
def restore(folder): "Restore a project from the archive." if os.path.isdir(folder): bail('a folder of the same name already exists!') pattern = os.path.join(PROJ_ARCHIVE, '*', '*', folder) matches = glob.glob(pattern) if not matches: bail('no project matches: ' + folder) if len(matches) > 1: print('Warning: multiple matches, picking the most recent', file=sys.stderr) source = sorted(matches)[-1] print(source, '-->', folder) shutil.move(source, '.')
[ "def", "restore", "(", "folder", ")", ":", "if", "os", ".", "path", ".", "isdir", "(", "folder", ")", ":", "bail", "(", "'a folder of the same name already exists!'", ")", "pattern", "=", "os", ".", "path", ".", "join", "(", "PROJ_ARCHIVE", ",", "'*'", ",", "'*'", ",", "folder", ")", "matches", "=", "glob", ".", "glob", "(", "pattern", ")", "if", "not", "matches", ":", "bail", "(", "'no project matches: '", "+", "folder", ")", "if", "len", "(", "matches", ")", ">", "1", ":", "print", "(", "'Warning: multiple matches, picking the most recent'", ",", "file", "=", "sys", ".", "stderr", ")", "source", "=", "sorted", "(", "matches", ")", "[", "-", "1", "]", "print", "(", "source", ",", "'-->'", ",", "folder", ")", "shutil", ".", "move", "(", "source", ",", "'.'", ")" ]
Restore a project from the archive.
[ "Restore", "a", "project", "from", "the", "archive", "." ]
python
test
vecnet/vecnet.simulation
vecnet/simulation/__init__.py
https://github.com/vecnet/vecnet.simulation/blob/3a4b3df7b12418c6fa8a7d9cd49656a1c031fc0e/vecnet/simulation/__init__.py#L32-L39
def read_json_file(cls, path): """ Read an instance from a JSON-formatted file. :return: A new instance """ with open(path, 'r') as f: return cls.from_dict(json.load(f))
[ "def", "read_json_file", "(", "cls", ",", "path", ")", ":", "with", "open", "(", "path", ",", "'r'", ")", "as", "f", ":", "return", "cls", ".", "from_dict", "(", "json", ".", "load", "(", "f", ")", ")" ]
Read an instance from a JSON-formatted file. :return: A new instance
[ "Read", "an", "instance", "from", "a", "JSON", "-", "formatted", "file", "." ]
python
train
pybel/pybel
src/pybel/parser/parse_bel.py
https://github.com/pybel/pybel/blob/c8a7a1bdae4c475fa2a8c77f3a9a5f6d79556ca0/src/pybel/parser/parse_bel.py#L743-L772
def _handle_relation(self, tokens: ParseResults) -> str: """Handle a relation.""" subject_node_dsl = self.ensure_node(tokens[SUBJECT]) object_node_dsl = self.ensure_node(tokens[OBJECT]) subject_modifier = modifier_po_to_dict(tokens[SUBJECT]) object_modifier = modifier_po_to_dict(tokens[OBJECT]) annotations = { annotation_name: ( { ae: True for ae in annotation_entry } if isinstance(annotation_entry, set) else { annotation_entry: True } ) for annotation_name, annotation_entry in self.control_parser.annotations.items() } return self._add_qualified_edge( subject_node_dsl, object_node_dsl, relation=tokens[RELATION], annotations=annotations, subject_modifier=subject_modifier, object_modifier=object_modifier, )
[ "def", "_handle_relation", "(", "self", ",", "tokens", ":", "ParseResults", ")", "->", "str", ":", "subject_node_dsl", "=", "self", ".", "ensure_node", "(", "tokens", "[", "SUBJECT", "]", ")", "object_node_dsl", "=", "self", ".", "ensure_node", "(", "tokens", "[", "OBJECT", "]", ")", "subject_modifier", "=", "modifier_po_to_dict", "(", "tokens", "[", "SUBJECT", "]", ")", "object_modifier", "=", "modifier_po_to_dict", "(", "tokens", "[", "OBJECT", "]", ")", "annotations", "=", "{", "annotation_name", ":", "(", "{", "ae", ":", "True", "for", "ae", "in", "annotation_entry", "}", "if", "isinstance", "(", "annotation_entry", ",", "set", ")", "else", "{", "annotation_entry", ":", "True", "}", ")", "for", "annotation_name", ",", "annotation_entry", "in", "self", ".", "control_parser", ".", "annotations", ".", "items", "(", ")", "}", "return", "self", ".", "_add_qualified_edge", "(", "subject_node_dsl", ",", "object_node_dsl", ",", "relation", "=", "tokens", "[", "RELATION", "]", ",", "annotations", "=", "annotations", ",", "subject_modifier", "=", "subject_modifier", ",", "object_modifier", "=", "object_modifier", ",", ")" ]
Handle a relation.
[ "Handle", "a", "relation", "." ]
python
train
NJDFan/ctypes-bitfield
bitfield/walk.py
https://github.com/NJDFan/ctypes-bitfield/blob/ae76b1dcfef7ecc90bd1900735b94ddee41a6376/bitfield/walk.py#L361-L380
def _createbound(obj): """Create a new BoundNode representing a given object.""" # Start by allowing objects to define custom unbound reference hooks try: kls = obj._unboundreference_() except AttributeError: kls = type(obj) unbound = _createunbound(kls) def valueget(): return obj for t in (BoundBitfieldNode, BoundStructureNode, BoundArrayNode): if isinstance(unbound, t._unboundtype): kls = t break else: kls = BoundSimpleNode child = kls(unbound, valueget) return child
[ "def", "_createbound", "(", "obj", ")", ":", "# Start by allowing objects to define custom unbound reference hooks", "try", ":", "kls", "=", "obj", ".", "_unboundreference_", "(", ")", "except", "AttributeError", ":", "kls", "=", "type", "(", "obj", ")", "unbound", "=", "_createunbound", "(", "kls", ")", "def", "valueget", "(", ")", ":", "return", "obj", "for", "t", "in", "(", "BoundBitfieldNode", ",", "BoundStructureNode", ",", "BoundArrayNode", ")", ":", "if", "isinstance", "(", "unbound", ",", "t", ".", "_unboundtype", ")", ":", "kls", "=", "t", "break", "else", ":", "kls", "=", "BoundSimpleNode", "child", "=", "kls", "(", "unbound", ",", "valueget", ")", "return", "child" ]
Create a new BoundNode representing a given object.
[ "Create", "a", "new", "BoundNode", "representing", "a", "given", "object", "." ]
python
train
pygobject/pgi
pgi/overrides/Gtk.py
https://github.com/pygobject/pgi/blob/2090435df6241a15ec2a78379a36b738b728652c/pgi/overrides/Gtk.py#L257-L278
def insert_text(self, text, position): """insert_text(self, text, position) :param new_text: the text to append :type new_text: :obj:`str` :param position: location of the position text will be inserted at :type position: :obj:`int` :returns: location of the position text will be inserted at :rtype: :obj:`int` Inserts `new_text` into the contents of the widget, at position `position`. Note that the position is in characters, not in bytes. """ return super(Editable, self).insert_text(text, -1, position)
[ "def", "insert_text", "(", "self", ",", "text", ",", "position", ")", ":", "return", "super", "(", "Editable", ",", "self", ")", ".", "insert_text", "(", "text", ",", "-", "1", ",", "position", ")" ]
insert_text(self, text, position) :param new_text: the text to append :type new_text: :obj:`str` :param position: location of the position text will be inserted at :type position: :obj:`int` :returns: location of the position text will be inserted at :rtype: :obj:`int` Inserts `new_text` into the contents of the widget, at position `position`. Note that the position is in characters, not in bytes.
[ "insert_text", "(", "self", "text", "position", ")" ]
python
train
brocade/pynos
pynos/versions/ver_6/ver_6_0_1/yang/tailf_confd_monitoring.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/tailf_confd_monitoring.py#L201-L213
def confd_state_netconf_listen_tcp_port(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") confd_state = ET.SubElement(config, "confd-state", xmlns="http://tail-f.com/yang/confd-monitoring") netconf = ET.SubElement(confd_state, "netconf") listen = ET.SubElement(netconf, "listen") tcp = ET.SubElement(listen, "tcp") port = ET.SubElement(tcp, "port") port.text = kwargs.pop('port') callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "confd_state_netconf_listen_tcp_port", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "confd_state", "=", "ET", ".", "SubElement", "(", "config", ",", "\"confd-state\"", ",", "xmlns", "=", "\"http://tail-f.com/yang/confd-monitoring\"", ")", "netconf", "=", "ET", ".", "SubElement", "(", "confd_state", ",", "\"netconf\"", ")", "listen", "=", "ET", ".", "SubElement", "(", "netconf", ",", "\"listen\"", ")", "tcp", "=", "ET", ".", "SubElement", "(", "listen", ",", "\"tcp\"", ")", "port", "=", "ET", ".", "SubElement", "(", "tcp", ",", "\"port\"", ")", "port", ".", "text", "=", "kwargs", ".", "pop", "(", "'port'", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
ClericPy/torequests
torequests/dummy.py
https://github.com/ClericPy/torequests/blob/1793261688d7a47e1c3a0830d83f8552f5e3e5d9/torequests/dummy.py#L235-L262
def submit(self, coro, callback=None): """Submit a coro as NewTask to self.loop without loop.frequncy control. :: from torequests.dummy import Loop import asyncio loop = Loop() async def test(i): result = await asyncio.sleep(1) return (loop.frequency, i) coro = test(0) task = loop.submit(coro) print(task) # loop.x can be ignore loop.x print(task.x) # <NewTask pending coro=<test() running at torequests/temp_code.py:58>> # (Frequency(sem=<0/0>, interval=0, name=loop_sem), 0) """ callback = callback or self.default_callback if self.async_running: return self.run_coroutine_threadsafe(coro, callback=callback) else: return NewTask(coro, loop=self.loop, callback=callback)
[ "def", "submit", "(", "self", ",", "coro", ",", "callback", "=", "None", ")", ":", "callback", "=", "callback", "or", "self", ".", "default_callback", "if", "self", ".", "async_running", ":", "return", "self", ".", "run_coroutine_threadsafe", "(", "coro", ",", "callback", "=", "callback", ")", "else", ":", "return", "NewTask", "(", "coro", ",", "loop", "=", "self", ".", "loop", ",", "callback", "=", "callback", ")" ]
Submit a coro as NewTask to self.loop without loop.frequncy control. :: from torequests.dummy import Loop import asyncio loop = Loop() async def test(i): result = await asyncio.sleep(1) return (loop.frequency, i) coro = test(0) task = loop.submit(coro) print(task) # loop.x can be ignore loop.x print(task.x) # <NewTask pending coro=<test() running at torequests/temp_code.py:58>> # (Frequency(sem=<0/0>, interval=0, name=loop_sem), 0)
[ "Submit", "a", "coro", "as", "NewTask", "to", "self", ".", "loop", "without", "loop", ".", "frequncy", "control", "." ]
python
train
jbarlow83/OCRmyPDF
src/ocrmypdf/pdfinfo/__init__.py
https://github.com/jbarlow83/OCRmyPDF/blob/79c84eefa353632a3d7ccddbd398c6678c1c1777/src/ocrmypdf/pdfinfo/__init__.py#L401-L426
def _find_regular_images(container, contentsinfo): """Find images stored in the container's /Resources /XObject Usually the container is a page, but it could also be a Form XObject that contains images. Generates images with their DPI at time of drawing. """ for pdfimage, xobj in _image_xobjects(container): # For each image that is drawn on this, check if we drawing the # current image - yes this is O(n^2), but n == 1 almost always for draw in contentsinfo.xobject_settings: if draw.name != xobj: continue if draw.stack_depth == 0 and _is_unit_square(draw.shorthand): # At least one PDF in the wild (and test suite) draws an image # when the graphics stack depth is 0, meaning that the image # gets drawn into a square of 1x1 PDF units (or 1/72", # or 0.35 mm). The equivalent DPI will be >100,000. Exclude # these from our DPI calculation for the page. continue yield ImageInfo(name=draw.name, pdfimage=pdfimage, shorthand=draw.shorthand)
[ "def", "_find_regular_images", "(", "container", ",", "contentsinfo", ")", ":", "for", "pdfimage", ",", "xobj", "in", "_image_xobjects", "(", "container", ")", ":", "# For each image that is drawn on this, check if we drawing the", "# current image - yes this is O(n^2), but n == 1 almost always", "for", "draw", "in", "contentsinfo", ".", "xobject_settings", ":", "if", "draw", ".", "name", "!=", "xobj", ":", "continue", "if", "draw", ".", "stack_depth", "==", "0", "and", "_is_unit_square", "(", "draw", ".", "shorthand", ")", ":", "# At least one PDF in the wild (and test suite) draws an image", "# when the graphics stack depth is 0, meaning that the image", "# gets drawn into a square of 1x1 PDF units (or 1/72\",", "# or 0.35 mm). The equivalent DPI will be >100,000. Exclude", "# these from our DPI calculation for the page.", "continue", "yield", "ImageInfo", "(", "name", "=", "draw", ".", "name", ",", "pdfimage", "=", "pdfimage", ",", "shorthand", "=", "draw", ".", "shorthand", ")" ]
Find images stored in the container's /Resources /XObject Usually the container is a page, but it could also be a Form XObject that contains images. Generates images with their DPI at time of drawing.
[ "Find", "images", "stored", "in", "the", "container", "s", "/", "Resources", "/", "XObject" ]
python
train
cloud9ers/gurumate
environment/share/doc/ipython/examples/parallel/dagdeps.py
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/share/doc/ipython/examples/parallel/dagdeps.py#L48-L54
def make_bintree(levels): """Make a symmetrical binary tree with @levels""" G = nx.DiGraph() root = '0' G.add_node(root) add_children(G, root, levels, 2) return G
[ "def", "make_bintree", "(", "levels", ")", ":", "G", "=", "nx", ".", "DiGraph", "(", ")", "root", "=", "'0'", "G", ".", "add_node", "(", "root", ")", "add_children", "(", "G", ",", "root", ",", "levels", ",", "2", ")", "return", "G" ]
Make a symmetrical binary tree with @levels
[ "Make", "a", "symmetrical", "binary", "tree", "with" ]
python
test
mwhooker/jones
jones/jones.py
https://github.com/mwhooker/jones/blob/121e89572ca063f456b8e94cbb8cbee26c307a8f/jones/jones.py#L65-L68
def _set(self, data, version): """serialize and set data to self.path.""" self.zk.set(self.path, json.dumps(data), version)
[ "def", "_set", "(", "self", ",", "data", ",", "version", ")", ":", "self", ".", "zk", ".", "set", "(", "self", ".", "path", ",", "json", ".", "dumps", "(", "data", ")", ",", "version", ")" ]
serialize and set data to self.path.
[ "serialize", "and", "set", "data", "to", "self", ".", "path", "." ]
python
train
SmokinCaterpillar/pypet
pypet/naturalnaming.py
https://github.com/SmokinCaterpillar/pypet/blob/97ad3e80d46dbdea02deeb98ea41f05a19565826/pypet/naturalnaming.py#L3380-L3416
def f_load(self, recursive=True, load_data=pypetconstants.LOAD_DATA, max_depth=None): """Loads a group from disk. :param recursive: Default is ``True``. Whether recursively all nodes below the current node should be loaded, too. Note that links are never evaluated recursively. Only the linked node will be loaded if it does not exist in the tree, yet. Any nodes or links of this linked node are not loaded. :param load_data: Flag how to load the data. For how to choose 'load_data' see :ref:`more-on-loading`. :param max_depth: In case `recursive` is `True`, you can specify the maximum depth to load load data relative from current node. :returns: The node itself. """ traj = self._nn_interface._root_instance storage_service = traj.v_storage_service storage_service.load(pypetconstants.GROUP, self, trajectory_name=traj.v_name, load_data=load_data, recursive=recursive, max_depth=max_depth) return self
[ "def", "f_load", "(", "self", ",", "recursive", "=", "True", ",", "load_data", "=", "pypetconstants", ".", "LOAD_DATA", ",", "max_depth", "=", "None", ")", ":", "traj", "=", "self", ".", "_nn_interface", ".", "_root_instance", "storage_service", "=", "traj", ".", "v_storage_service", "storage_service", ".", "load", "(", "pypetconstants", ".", "GROUP", ",", "self", ",", "trajectory_name", "=", "traj", ".", "v_name", ",", "load_data", "=", "load_data", ",", "recursive", "=", "recursive", ",", "max_depth", "=", "max_depth", ")", "return", "self" ]
Loads a group from disk. :param recursive: Default is ``True``. Whether recursively all nodes below the current node should be loaded, too. Note that links are never evaluated recursively. Only the linked node will be loaded if it does not exist in the tree, yet. Any nodes or links of this linked node are not loaded. :param load_data: Flag how to load the data. For how to choose 'load_data' see :ref:`more-on-loading`. :param max_depth: In case `recursive` is `True`, you can specify the maximum depth to load load data relative from current node. :returns: The node itself.
[ "Loads", "a", "group", "from", "disk", "." ]
python
test
chrlie/shorten
shorten/__init__.py
https://github.com/chrlie/shorten/blob/fb762a199979aefaa28c88fa035e88ea8ce4d639/shorten/__init__.py#L23-L42
def make_store(name, min_length=4, **kwargs): """\ Creates a store with a reasonable keygen. .. deprecated:: 2.0.0 Instantiate stores directly e.g. ``shorten.MemoryStore(min_length=4)`` """ if name not in stores: raise ValueError('valid stores are {0}'.format(', '.join(stores))) if name == 'memcache': store = MemcacheStore elif name == 'memory': store = MemoryStore elif name == 'redis': store = RedisStore return store(min_length=min_length, **kwargs)
[ "def", "make_store", "(", "name", ",", "min_length", "=", "4", ",", "*", "*", "kwargs", ")", ":", "if", "name", "not", "in", "stores", ":", "raise", "ValueError", "(", "'valid stores are {0}'", ".", "format", "(", "', '", ".", "join", "(", "stores", ")", ")", ")", "if", "name", "==", "'memcache'", ":", "store", "=", "MemcacheStore", "elif", "name", "==", "'memory'", ":", "store", "=", "MemoryStore", "elif", "name", "==", "'redis'", ":", "store", "=", "RedisStore", "return", "store", "(", "min_length", "=", "min_length", ",", "*", "*", "kwargs", ")" ]
\ Creates a store with a reasonable keygen. .. deprecated:: 2.0.0 Instantiate stores directly e.g. ``shorten.MemoryStore(min_length=4)``
[ "\\", "Creates", "a", "store", "with", "a", "reasonable", "keygen", "." ]
python
train
jlmadurga/permabots
permabots/views/api/handler.py
https://github.com/jlmadurga/permabots/blob/781a91702529a23fe7bc2aa84c5d88e961412466/permabots/views/api/handler.py#L332-L341
def get(self, request, bot_id, id, format=None): """ Get list of source state of a handler --- serializer: StateSerializer responseMessages: - code: 401 message: Not authenticated """ return super(SourceStateList, self).get(request, bot_id, id, format)
[ "def", "get", "(", "self", ",", "request", ",", "bot_id", ",", "id", ",", "format", "=", "None", ")", ":", "return", "super", "(", "SourceStateList", ",", "self", ")", ".", "get", "(", "request", ",", "bot_id", ",", "id", ",", "format", ")" ]
Get list of source state of a handler --- serializer: StateSerializer responseMessages: - code: 401 message: Not authenticated
[ "Get", "list", "of", "source", "state", "of", "a", "handler", "---", "serializer", ":", "StateSerializer", "responseMessages", ":", "-", "code", ":", "401", "message", ":", "Not", "authenticated" ]
python
train
rossant/ipymd
ipymd/core/format_manager.py
https://github.com/rossant/ipymd/blob/d87c9ebc59d67fe78b0139ee00e0e5307682e303/ipymd/core/format_manager.py#L166-L181
def load(self, file, name=None): """Load a file. The format name can be specified explicitly or inferred from the file extension.""" if name is None: name = self.format_from_extension(op.splitext(file)[1]) file_format = self.file_type(name) if file_format == 'text': return _read_text(file) elif file_format == 'json': return _read_json(file) else: load_function = self._formats[name].get('load', None) if load_function is None: raise IOError("The format must declare a file type or " "load/save functions.") return load_function(file)
[ "def", "load", "(", "self", ",", "file", ",", "name", "=", "None", ")", ":", "if", "name", "is", "None", ":", "name", "=", "self", ".", "format_from_extension", "(", "op", ".", "splitext", "(", "file", ")", "[", "1", "]", ")", "file_format", "=", "self", ".", "file_type", "(", "name", ")", "if", "file_format", "==", "'text'", ":", "return", "_read_text", "(", "file", ")", "elif", "file_format", "==", "'json'", ":", "return", "_read_json", "(", "file", ")", "else", ":", "load_function", "=", "self", ".", "_formats", "[", "name", "]", ".", "get", "(", "'load'", ",", "None", ")", "if", "load_function", "is", "None", ":", "raise", "IOError", "(", "\"The format must declare a file type or \"", "\"load/save functions.\"", ")", "return", "load_function", "(", "file", ")" ]
Load a file. The format name can be specified explicitly or inferred from the file extension.
[ "Load", "a", "file", ".", "The", "format", "name", "can", "be", "specified", "explicitly", "or", "inferred", "from", "the", "file", "extension", "." ]
python
train
inveniosoftware/invenio-oauthclient
invenio_oauthclient/contrib/cern.py
https://github.com/inveniosoftware/invenio-oauthclient/blob/2500dc6935738107617aeade79e050d7608004bb/invenio_oauthclient/contrib/cern.py#L219-L229
def fetch_extra_data(resource): """Return a dict with extra data retrieved from cern oauth.""" person_id = resource.get('PersonID', [None])[0] identity_class = resource.get('IdentityClass', [None])[0] department = resource.get('Department', [None])[0] return dict( person_id=person_id, identity_class=identity_class, department=department )
[ "def", "fetch_extra_data", "(", "resource", ")", ":", "person_id", "=", "resource", ".", "get", "(", "'PersonID'", ",", "[", "None", "]", ")", "[", "0", "]", "identity_class", "=", "resource", ".", "get", "(", "'IdentityClass'", ",", "[", "None", "]", ")", "[", "0", "]", "department", "=", "resource", ".", "get", "(", "'Department'", ",", "[", "None", "]", ")", "[", "0", "]", "return", "dict", "(", "person_id", "=", "person_id", ",", "identity_class", "=", "identity_class", ",", "department", "=", "department", ")" ]
Return a dict with extra data retrieved from cern oauth.
[ "Return", "a", "dict", "with", "extra", "data", "retrieved", "from", "cern", "oauth", "." ]
python
train
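A usage sketch with a hypothetical CERN OAuth resource payload; each attribute arrives as a list, and .get(key, [None])[0] takes the first entry or falls back to None:

resource = {
    "PersonID": ["12345"],
    "IdentityClass": ["CERN Registered"],
    # "Department" intentionally missing to show the fallback
}

extra = dict(
    person_id=resource.get("PersonID", [None])[0],
    identity_class=resource.get("IdentityClass", [None])[0],
    department=resource.get("Department", [None])[0],
)
print(extra)   # {'person_id': '12345', 'identity_class': 'CERN Registered', 'department': None}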
koordinates/python-client
koordinates/sources.py
https://github.com/koordinates/python-client/blob/f3dc7cd164f5a9499b2454cd1d4516e9d4b3c252/koordinates/sources.py#L300-L307
def get_log_lines(self, source_id, scan_id): """ Get the log text for a scan object :rtype: Iterator over log lines. """ target_url = self.client.get_url('SCAN', 'GET', 'log', {'source_id': source_id, 'scan_id': scan_id}) r = self.client.request('GET', target_url, headers={'Accept': 'text/plain'}, stream=True) return r.iter_lines(decode_unicode=True)
[ "def", "get_log_lines", "(", "self", ",", "source_id", ",", "scan_id", ")", ":", "target_url", "=", "self", ".", "client", ".", "get_url", "(", "'SCAN'", ",", "'GET'", ",", "'log'", ",", "{", "'source_id'", ":", "source_id", ",", "'scan_id'", ":", "scan_id", "}", ")", "r", "=", "self", ".", "client", ".", "request", "(", "'GET'", ",", "target_url", ",", "headers", "=", "{", "'Accept'", ":", "'text/plain'", "}", ",", "stream", "=", "True", ")", "return", "r", ".", "iter_lines", "(", "decode_unicode", "=", "True", ")" ]
Get the log text for a scan object :rtype: Iterator over log lines.
[ "Get", "the", "log", "text", "for", "a", "scan", "object", ":", "rtype", ":", "Iterator", "over", "log", "lines", "." ]
python
train
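A sketch of the same streaming pattern against a placeholder URL; stream=True defers the body download and iter_lines() yields it lazily:

import requests

url = "https://example.com/services/api/v1/scans/1/log/"   # hypothetical endpoint
response = requests.get(url, headers={"Accept": "text/plain"}, stream=True)
for line in response.iter_lines(decode_unicode=True):
    if line:                  # iter_lines can yield empty keep-alive lines
        print(line)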
saltstack/salt
salt/modules/ps.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/ps.py#L50-L59
def _get_proc_cmdline(proc): ''' Returns the cmdline of a Process instance. It's backward compatible with < 2.0 versions of psutil. ''' try: return salt.utils.data.decode(proc.cmdline() if PSUTIL2 else proc.cmdline) except (psutil.NoSuchProcess, psutil.AccessDenied): return []
[ "def", "_get_proc_cmdline", "(", "proc", ")", ":", "try", ":", "return", "salt", ".", "utils", ".", "data", ".", "decode", "(", "proc", ".", "cmdline", "(", ")", "if", "PSUTIL2", "else", "proc", ".", "cmdline", ")", "except", "(", "psutil", ".", "NoSuchProcess", ",", "psutil", ".", "AccessDenied", ")", ":", "return", "[", "]" ]
Returns the cmdline of a Process instance. It's backward compatible with < 2.0 versions of psutil.
[ "Returns", "the", "cmdline", "of", "a", "Process", "instance", "." ]
python
train
johntruckenbrodt/spatialist
spatialist/auxil.py
https://github.com/johntruckenbrodt/spatialist/blob/007f49296a156de8d7168ad235b5a5b8e8d3633d/spatialist/auxil.py#L137-L171
def gdalbuildvrt(src, dst, options=None, void=True): """ a simple wrapper for :osgeo:func:`gdal.BuildVRT` Parameters ---------- src: str, list, :osgeo:class:`ogr.DataSource` or :osgeo:class:`gdal.Dataset` the input data set(s) dst: str the output data set options: dict additional parameters passed to gdal.BuildVRT; see :osgeo:func:`gdal.BuildVRTOptions` void: bool just write the results and don't return anything? If not, the spatial object is returned Returns ------- """ options = {} if options is None else options if 'outputBounds' in options.keys() and gdal.__version__ < '2.4.0': warnings.warn('\ncreating VRT files with subsetted extent is very likely to cause problems. ' 'Please use GDAL version >= 2.4.0, which fixed the problem.\n' 'see here for a description of the problem:\n' ' https://gis.stackexchange.com/questions/314333/' 'sampling-error-using-gdalwarp-on-a-subsetted-vrt\n' 'and here for the release note of GDAL 2.4.0:\n' ' https://trac.osgeo.org/gdal/wiki/Release/2.4.0-News') out = gdal.BuildVRT(dst, src, options=gdal.BuildVRTOptions(**options)) if void: out = None else: return out
[ "def", "gdalbuildvrt", "(", "src", ",", "dst", ",", "options", "=", "None", ",", "void", "=", "True", ")", ":", "options", "=", "{", "}", "if", "options", "is", "None", "else", "options", "if", "'outputBounds'", "in", "options", ".", "keys", "(", ")", "and", "gdal", ".", "__version__", "<", "'2.4.0'", ":", "warnings", ".", "warn", "(", "'\\ncreating VRT files with subsetted extent is very likely to cause problems. '", "'Please use GDAL version >= 2.4.0, which fixed the problem.\\n'", "'see here for a description of the problem:\\n'", "' https://gis.stackexchange.com/questions/314333/'", "'sampling-error-using-gdalwarp-on-a-subsetted-vrt\\n'", "'and here for the release note of GDAL 2.4.0:\\n'", "' https://trac.osgeo.org/gdal/wiki/Release/2.4.0-News'", ")", "out", "=", "gdal", ".", "BuildVRT", "(", "dst", ",", "src", ",", "options", "=", "gdal", ".", "BuildVRTOptions", "(", "*", "*", "options", ")", ")", "if", "void", ":", "out", "=", "None", "else", ":", "return", "out" ]
a simple wrapper for :osgeo:func:`gdal.BuildVRT` Parameters ---------- src: str, list, :osgeo:class:`ogr.DataSource` or :osgeo:class:`gdal.Dataset` the input data set(s) dst: str the output data set options: dict additional parameters passed to gdal.BuildVRT; see :osgeo:func:`gdal.BuildVRTOptions` void: bool just write the results and don't return anything? If not, the spatial object is returned Returns -------
[ "a", "simple", "wrapper", "for", ":", "osgeo", ":", "func", ":", "gdal", ".", "BuildVRT" ]
python
train
AASHE/python-membersuite-api-client
membersuite_api_client/client.py
https://github.com/AASHE/python-membersuite-api-client/blob/221f5ed8bc7d4424237a4669c5af9edc11819ee9/membersuite_api_client/client.py#L45-L63
def request_session(self): """ Performs initial request to initialize session and get session id necessary to construct all future requests. :return: Session ID to be placed in header of all other requests. """ concierge_request_header = self.construct_concierge_header( url="http://membersuite.com/contracts/IConciergeAPIService/WhoAmI") result = self.client.service.WhoAmI( _soapheaders=[concierge_request_header]) self.session_id = get_session_id(result=result) if not self.session_id: raise MembersuiteLoginError( result["body"]["WhoAmIResult"]["Errors"]) return self.session_id
[ "def", "request_session", "(", "self", ")", ":", "concierge_request_header", "=", "self", ".", "construct_concierge_header", "(", "url", "=", "\"http://membersuite.com/contracts/IConciergeAPIService/WhoAmI\"", ")", "result", "=", "self", ".", "client", ".", "service", ".", "WhoAmI", "(", "_soapheaders", "=", "[", "concierge_request_header", "]", ")", "self", ".", "session_id", "=", "get_session_id", "(", "result", "=", "result", ")", "if", "not", "self", ".", "session_id", ":", "raise", "MembersuiteLoginError", "(", "result", "[", "\"body\"", "]", "[", "\"WhoAmIResult\"", "]", "[", "\"Errors\"", "]", ")", "return", "self", ".", "session_id" ]
Performs initial request to initialize session and get session id necessary to construct all future requests. :return: Session ID to be placed in header of all other requests.
[ "Performs", "initial", "request", "to", "initialize", "session", "and", "get", "session", "id", "necessary", "to", "construct", "all", "future", "requests", ".", ":", "return", ":", "Session", "ID", "to", "be", "placed", "in", "header", "of", "all", "other", "requests", "." ]
python
train
notanumber/xapian-haystack
xapian_backend.py
https://github.com/notanumber/xapian-haystack/blob/2247b23d3cb6322ce477d45f84d52da47a940348/xapian_backend.py#L1009-L1101
def _do_date_facets(results, date_facets): """ Private method that facets a document by date ranges Required arguments: `results` -- A list SearchResults to facet `date_facets` -- A dictionary containing facet parameters: {'field': {'start_date': ..., 'end_date': ...: 'gap_by': '...', 'gap_amount': n}} nb., gap must be one of the following: year|month|day|hour|minute|second For each date facet field in `date_facets`, generates a list of date ranges (from `start_date` to `end_date` by `gap_by`) then iterates through `results` and tallies the count for each date_facet. Returns a dictionary of date facets (fields) containing a list with entries for each range and a count of documents matching the range. eg. { 'pub_date': [ (datetime.datetime(2009, 1, 1, 0, 0), 5), (datetime.datetime(2009, 2, 1, 0, 0), 0), (datetime.datetime(2009, 3, 1, 0, 0), 0), (datetime.datetime(2008, 4, 1, 0, 0), 1), (datetime.datetime(2008, 5, 1, 0, 0), 2), ], } """ def next_datetime(previous, gap_value, gap_type): year = previous.year month = previous.month if gap_type == 'year': next = previous.replace(year=year + gap_value) elif gap_type == 'month': if month + gap_value <= 12: next = previous.replace(month=month + gap_value) else: next = previous.replace( month=((month + gap_value) % 12), year=(year + (month + gap_value) // 12) ) elif gap_type == 'day': next = previous + datetime.timedelta(days=gap_value) elif gap_type == 'hour': return previous + datetime.timedelta(hours=gap_value) elif gap_type == 'minute': next = previous + datetime.timedelta(minutes=gap_value) elif gap_type == 'second': next = previous + datetime.timedelta(seconds=gap_value) else: raise TypeError('\'gap_by\' must be ' '{second, minute, day, month, year}') return next facet_dict = {} for date_facet, facet_params in list(date_facets.items()): gap_type = facet_params.get('gap_by') gap_value = facet_params.get('gap_amount', 1) date_range = facet_params['start_date'] # construct the bins of the histogram facet_list = [] while date_range < facet_params['end_date']: facet_list.append((date_range, 0)) date_range = next_datetime(date_range, gap_value, gap_type) facet_list = sorted(facet_list, key=lambda x: x[0], reverse=True) for result in results: result_date = getattr(result, date_facet) # convert date to datetime if not isinstance(result_date, datetime.datetime): result_date = datetime.datetime(result_date.year, result_date.month, result_date.day) # ignore results outside the boundaries. if facet_list[0][0] < result_date < facet_list[-1][0]: continue # populate the histogram by putting the result on the right bin. for n, facet_date in enumerate(facet_list): if result_date > facet_date[0]: # equal to facet_list[n][1] += 1, but for a tuple facet_list[n] = (facet_list[n][0], (facet_list[n][1] + 1)) break # bin found; go to next result facet_dict[date_facet] = facet_list return facet_dict
[ "def", "_do_date_facets", "(", "results", ",", "date_facets", ")", ":", "def", "next_datetime", "(", "previous", ",", "gap_value", ",", "gap_type", ")", ":", "year", "=", "previous", ".", "year", "month", "=", "previous", ".", "month", "if", "gap_type", "==", "'year'", ":", "next", "=", "previous", ".", "replace", "(", "year", "=", "year", "+", "gap_value", ")", "elif", "gap_type", "==", "'month'", ":", "if", "month", "+", "gap_value", "<=", "12", ":", "next", "=", "previous", ".", "replace", "(", "month", "=", "month", "+", "gap_value", ")", "else", ":", "next", "=", "previous", ".", "replace", "(", "month", "=", "(", "(", "month", "+", "gap_value", ")", "%", "12", ")", ",", "year", "=", "(", "year", "+", "(", "month", "+", "gap_value", ")", "//", "12", ")", ")", "elif", "gap_type", "==", "'day'", ":", "next", "=", "previous", "+", "datetime", ".", "timedelta", "(", "days", "=", "gap_value", ")", "elif", "gap_type", "==", "'hour'", ":", "return", "previous", "+", "datetime", ".", "timedelta", "(", "hours", "=", "gap_value", ")", "elif", "gap_type", "==", "'minute'", ":", "next", "=", "previous", "+", "datetime", ".", "timedelta", "(", "minutes", "=", "gap_value", ")", "elif", "gap_type", "==", "'second'", ":", "next", "=", "previous", "+", "datetime", ".", "timedelta", "(", "seconds", "=", "gap_value", ")", "else", ":", "raise", "TypeError", "(", "'\\'gap_by\\' must be '", "'{second, minute, day, month, year}'", ")", "return", "next", "facet_dict", "=", "{", "}", "for", "date_facet", ",", "facet_params", "in", "list", "(", "date_facets", ".", "items", "(", ")", ")", ":", "gap_type", "=", "facet_params", ".", "get", "(", "'gap_by'", ")", "gap_value", "=", "facet_params", ".", "get", "(", "'gap_amount'", ",", "1", ")", "date_range", "=", "facet_params", "[", "'start_date'", "]", "# construct the bins of the histogram", "facet_list", "=", "[", "]", "while", "date_range", "<", "facet_params", "[", "'end_date'", "]", ":", "facet_list", ".", "append", "(", "(", "date_range", ",", "0", ")", ")", "date_range", "=", "next_datetime", "(", "date_range", ",", "gap_value", ",", "gap_type", ")", "facet_list", "=", "sorted", "(", "facet_list", ",", "key", "=", "lambda", "x", ":", "x", "[", "0", "]", ",", "reverse", "=", "True", ")", "for", "result", "in", "results", ":", "result_date", "=", "getattr", "(", "result", ",", "date_facet", ")", "# convert date to datetime", "if", "not", "isinstance", "(", "result_date", ",", "datetime", ".", "datetime", ")", ":", "result_date", "=", "datetime", ".", "datetime", "(", "result_date", ".", "year", ",", "result_date", ".", "month", ",", "result_date", ".", "day", ")", "# ignore results outside the boundaries.", "if", "facet_list", "[", "0", "]", "[", "0", "]", "<", "result_date", "<", "facet_list", "[", "-", "1", "]", "[", "0", "]", ":", "continue", "# populate the histogram by putting the result on the right bin.", "for", "n", ",", "facet_date", "in", "enumerate", "(", "facet_list", ")", ":", "if", "result_date", ">", "facet_date", "[", "0", "]", ":", "# equal to facet_list[n][1] += 1, but for a tuple", "facet_list", "[", "n", "]", "=", "(", "facet_list", "[", "n", "]", "[", "0", "]", ",", "(", "facet_list", "[", "n", "]", "[", "1", "]", "+", "1", ")", ")", "break", "# bin found; go to next result", "facet_dict", "[", "date_facet", "]", "=", "facet_list", "return", "facet_dict" ]
Private method that facets a document by date ranges Required arguments: `results` -- A list SearchResults to facet `date_facets` -- A dictionary containing facet parameters: {'field': {'start_date': ..., 'end_date': ...: 'gap_by': '...', 'gap_amount': n}} nb., gap must be one of the following: year|month|day|hour|minute|second For each date facet field in `date_facets`, generates a list of date ranges (from `start_date` to `end_date` by `gap_by`) then iterates through `results` and tallies the count for each date_facet. Returns a dictionary of date facets (fields) containing a list with entries for each range and a count of documents matching the range. eg. { 'pub_date': [ (datetime.datetime(2009, 1, 1, 0, 0), 5), (datetime.datetime(2009, 2, 1, 0, 0), 0), (datetime.datetime(2009, 3, 1, 0, 0), 0), (datetime.datetime(2008, 4, 1, 0, 0), 1), (datetime.datetime(2008, 5, 1, 0, 0), 2), ], }
[ "Private", "method", "that", "facets", "a", "document", "by", "date", "ranges" ]
python
train
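A brief usage sketch for the date-faceting helper in the record above; the result objects and field values here are hypothetical stand-ins, and only the parameter layout is taken from the docstring:

import datetime
from types import SimpleNamespace

# stand-in SearchResult-like objects exposing the faceted attribute
results = [SimpleNamespace(pub_date=datetime.date(2009, 1, 15)),
           SimpleNamespace(pub_date=datetime.date(2009, 2, 3))]
date_facets = {'pub_date': {'start_date': datetime.datetime(2009, 1, 1),
                            'end_date': datetime.datetime(2009, 4, 1),
                            'gap_by': 'month', 'gap_amount': 1}}
facets = _do_date_facets(results, date_facets)
# facets['pub_date'] is a list of (bin start datetime, count) tuples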
LuminosoInsight/luminoso-api-client-python
luminoso_api/v4_client.py
https://github.com/LuminosoInsight/luminoso-api-client-python/blob/3bedf2a454aee39214c11fbf556ead3eecc27881/luminoso_api/v4_client.py#L360-L387
def change_path(self, path): """ Return a new LuminosoClient for a subpath of this one. For example, you might want to start with a LuminosoClient for `https://analytics.luminoso.com/api/v4/`, then get a new one for `https://analytics.luminoso.com/api/v4/projects/myaccount/myprojectid`. You accomplish that with the following call: newclient = client.change_path('projects/myaccount/myproject_id') If you start the path with `/`, it will start from the root_url instead of the current url: project_area = newclient.change_path('/projects/myaccount') The advantage of using `.change_path` is that you will not need to re-authenticate like you would if you ran `.connect` again. You can use `.change_path` to split off as many sub-clients as you want, and you don't have to stop using the old one just because you got a new one with `.change_path`. """ if path.startswith('/'): url = self.root_url + path else: url = self.url + path return self.__class__(self.session, url)
[ "def", "change_path", "(", "self", ",", "path", ")", ":", "if", "path", ".", "startswith", "(", "'/'", ")", ":", "url", "=", "self", ".", "root_url", "+", "path", "else", ":", "url", "=", "self", ".", "url", "+", "path", "return", "self", ".", "__class__", "(", "self", ".", "session", ",", "url", ")" ]
Return a new LuminosoClient for a subpath of this one. For example, you might want to start with a LuminosoClient for `https://analytics.luminoso.com/api/v4/`, then get a new one for `https://analytics.luminoso.com/api/v4/projects/myaccount/myprojectid`. You accomplish that with the following call: newclient = client.change_path('projects/myaccount/myproject_id') If you start the path with `/`, it will start from the root_url instead of the current url: project_area = newclient.change_path('/projects/myaccount') The advantage of using `.change_path` is that you will not need to re-authenticate like you would if you ran `.connect` again. You can use `.change_path` to split off as many sub-clients as you want, and you don't have to stop using the old one just because you got a new one with `.change_path`.
[ "Return", "a", "new", "LuminosoClient", "for", "a", "subpath", "of", "this", "one", "." ]
python
test
saltstack/salt
salt/modules/localemod.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/localemod.py#L166-L228
def set_locale(locale): ''' Sets the current system locale CLI Example: .. code-block:: bash salt '*' locale.set_locale 'en_US.UTF-8' ''' lc_ctl = salt.utils.systemd.booted(__context__) # localectl on SLE12 is installed but the integration is broken -- config is rewritten by YaST2 if lc_ctl and not (__grains__['os_family'] in ['Suse'] and __grains__['osmajorrelease'] in [12]): return _localectl_set(locale) if 'Suse' in __grains__['os_family']: # this block applies to all SUSE systems - also with systemd if not __salt__['file.file_exists']('/etc/sysconfig/language'): __salt__['file.touch']('/etc/sysconfig/language') __salt__['file.replace']( '/etc/sysconfig/language', '^RC_LANG=.*', 'RC_LANG="{0}"'.format(locale), append_if_not_found=True ) elif 'RedHat' in __grains__['os_family']: if not __salt__['file.file_exists']('/etc/sysconfig/i18n'): __salt__['file.touch']('/etc/sysconfig/i18n') __salt__['file.replace']( '/etc/sysconfig/i18n', '^LANG=.*', 'LANG="{0}"'.format(locale), append_if_not_found=True ) elif 'Debian' in __grains__['os_family']: # this block only applies to Debian without systemd update_locale = salt.utils.path.which('update-locale') if update_locale is None: raise CommandExecutionError( 'Cannot set locale: "update-locale" was not found.') __salt__['cmd.run'](update_locale) # (re)generate /etc/default/locale __salt__['file.replace']( '/etc/default/locale', '^LANG=.*', 'LANG="{0}"'.format(locale), append_if_not_found=True ) elif 'Gentoo' in __grains__['os_family']: cmd = 'eselect --brief locale set {0}'.format(locale) return __salt__['cmd.retcode'](cmd, python_shell=False) == 0 elif 'Solaris' in __grains__['os_family']: if locale not in __salt__['locale.list_avail'](): return False __salt__['file.replace']( '/etc/default/init', '^LANG=.*', 'LANG="{0}"'.format(locale), append_if_not_found=True ) else: raise CommandExecutionError('Error: Unsupported platform!') return True
[ "def", "set_locale", "(", "locale", ")", ":", "lc_ctl", "=", "salt", ".", "utils", ".", "systemd", ".", "booted", "(", "__context__", ")", "# localectl on SLE12 is installed but the integration is broken -- config is rewritten by YaST2", "if", "lc_ctl", "and", "not", "(", "__grains__", "[", "'os_family'", "]", "in", "[", "'Suse'", "]", "and", "__grains__", "[", "'osmajorrelease'", "]", "in", "[", "12", "]", ")", ":", "return", "_localectl_set", "(", "locale", ")", "if", "'Suse'", "in", "__grains__", "[", "'os_family'", "]", ":", "# this block applies to all SUSE systems - also with systemd", "if", "not", "__salt__", "[", "'file.file_exists'", "]", "(", "'/etc/sysconfig/language'", ")", ":", "__salt__", "[", "'file.touch'", "]", "(", "'/etc/sysconfig/language'", ")", "__salt__", "[", "'file.replace'", "]", "(", "'/etc/sysconfig/language'", ",", "'^RC_LANG=.*'", ",", "'RC_LANG=\"{0}\"'", ".", "format", "(", "locale", ")", ",", "append_if_not_found", "=", "True", ")", "elif", "'RedHat'", "in", "__grains__", "[", "'os_family'", "]", ":", "if", "not", "__salt__", "[", "'file.file_exists'", "]", "(", "'/etc/sysconfig/i18n'", ")", ":", "__salt__", "[", "'file.touch'", "]", "(", "'/etc/sysconfig/i18n'", ")", "__salt__", "[", "'file.replace'", "]", "(", "'/etc/sysconfig/i18n'", ",", "'^LANG=.*'", ",", "'LANG=\"{0}\"'", ".", "format", "(", "locale", ")", ",", "append_if_not_found", "=", "True", ")", "elif", "'Debian'", "in", "__grains__", "[", "'os_family'", "]", ":", "# this block only applies to Debian without systemd", "update_locale", "=", "salt", ".", "utils", ".", "path", ".", "which", "(", "'update-locale'", ")", "if", "update_locale", "is", "None", ":", "raise", "CommandExecutionError", "(", "'Cannot set locale: \"update-locale\" was not found.'", ")", "__salt__", "[", "'cmd.run'", "]", "(", "update_locale", ")", "# (re)generate /etc/default/locale", "__salt__", "[", "'file.replace'", "]", "(", "'/etc/default/locale'", ",", "'^LANG=.*'", ",", "'LANG=\"{0}\"'", ".", "format", "(", "locale", ")", ",", "append_if_not_found", "=", "True", ")", "elif", "'Gentoo'", "in", "__grains__", "[", "'os_family'", "]", ":", "cmd", "=", "'eselect --brief locale set {0}'", ".", "format", "(", "locale", ")", "return", "__salt__", "[", "'cmd.retcode'", "]", "(", "cmd", ",", "python_shell", "=", "False", ")", "==", "0", "elif", "'Solaris'", "in", "__grains__", "[", "'os_family'", "]", ":", "if", "locale", "not", "in", "__salt__", "[", "'locale.list_avail'", "]", "(", ")", ":", "return", "False", "__salt__", "[", "'file.replace'", "]", "(", "'/etc/default/init'", ",", "'^LANG=.*'", ",", "'LANG=\"{0}\"'", ".", "format", "(", "locale", ")", ",", "append_if_not_found", "=", "True", ")", "else", ":", "raise", "CommandExecutionError", "(", "'Error: Unsupported platform!'", ")", "return", "True" ]
Sets the current system locale CLI Example: .. code-block:: bash salt '*' locale.set_locale 'en_US.UTF-8'
[ "Sets", "the", "current", "system", "locale" ]
python
train
WhyNotHugo/django-afip
django_afip/models.py
https://github.com/WhyNotHugo/django-afip/blob/5fb73213f1fe86ca52b501ffd0737911ef26ddb3/django_afip/models.py#L367-L371
def get_ticket(self, service): """Return an existing AuthTicket for a given service.""" return self.auth_tickets \ .filter(expires__gt=datetime.now(timezone.utc), service=service) \ .last()
[ "def", "get_ticket", "(", "self", ",", "service", ")", ":", "return", "self", ".", "auth_tickets", ".", "filter", "(", "expires__gt", "=", "datetime", ".", "now", "(", "timezone", ".", "utc", ")", ",", "service", "=", "service", ")", ".", "last", "(", ")" ]
Return an existing AuthTicket for a given service.
[ "Return", "an", "existing", "AuthTicket", "for", "a", "given", "service", "." ]
python
train
davidwtbuxton/notrequests
notrequests.py
https://github.com/davidwtbuxton/notrequests/blob/e48ee6107a58c2f373c33f78e3302608edeba7f3/notrequests.py#L193-L209
def detect_encoding(value): """Returns the character encoding for a JSON string.""" # https://tools.ietf.org/html/rfc4627#section-3 if six.PY2: null_pattern = tuple(bool(ord(char)) for char in value[:4]) else: null_pattern = tuple(bool(char) for char in value[:4]) encodings = { # Zero is a null-byte, 1 is anything else. (0, 0, 0, 1): 'utf-32-be', (0, 1, 0, 1): 'utf-16-be', (1, 0, 0, 0): 'utf-32-le', (1, 0, 1, 0): 'utf-16-le', } return encodings.get(null_pattern, 'utf-8')
[ "def", "detect_encoding", "(", "value", ")", ":", "# https://tools.ietf.org/html/rfc4627#section-3", "if", "six", ".", "PY2", ":", "null_pattern", "=", "tuple", "(", "bool", "(", "ord", "(", "char", ")", ")", "for", "char", "in", "value", "[", ":", "4", "]", ")", "else", ":", "null_pattern", "=", "tuple", "(", "bool", "(", "char", ")", "for", "char", "in", "value", "[", ":", "4", "]", ")", "encodings", "=", "{", "# Zero is a null-byte, 1 is anything else.", "(", "0", ",", "0", ",", "0", ",", "1", ")", ":", "'utf-32-be'", ",", "(", "0", ",", "1", ",", "0", ",", "1", ")", ":", "'utf-16-be'", ",", "(", "1", ",", "0", ",", "0", ",", "0", ")", ":", "'utf-32-le'", ",", "(", "1", ",", "0", ",", "1", ",", "0", ")", ":", "'utf-16-le'", ",", "}", "return", "encodings", ".", "get", "(", "null_pattern", ",", "'utf-8'", ")" ]
Returns the character encoding for a JSON string.
[ "Returns", "the", "character", "encoding", "for", "a", "JSON", "string", "." ]
python
train
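A small sketch of how the null-byte heuristic in detect_encoding above behaves; the JSON snippet is an arbitrary illustration:

payload = '"a"'
detect_encoding(payload.encode('utf-8'))      # -> 'utf-8' (no recognised null-byte pattern, so the default)
detect_encoding(payload.encode('utf-16-le'))  # -> 'utf-16-le' (pattern 1, 0, 1, 0)
detect_encoding(payload.encode('utf-32-be'))  # -> 'utf-32-be' (pattern 0, 0, 0, 1)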
tensorflow/tensor2tensor
tensor2tensor/layers/common_attention.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_attention.py#L4302-L4365
def multihead_attention_2d(query_antecedent, memory_antecedent, total_key_depth, total_value_depth, output_depth, num_heads, attention_type="local_attention_2d", query_shape=(8, 16), memory_flange=(8, 16), name=None): """2d Multihead scaled-dot-product attention with inp/output transformations. Args: query_antecedent: a Tensor with shape [batch, h, w, depth_k] memory_antecedent: a Tensor with shape [batch, h, w, depth_k] total_key_depth: an integer total_value_depth: an integer output_depth: an integer num_heads: an integer dividing total_key_depth and total_value_depth attention_type: String, type of attention function to use. query_shape: an tuple indicating the height and width of each query block. memory_flange: an integer indicating how much to look in height and width name: an optional string Returns: A Tensor of shape [batch, h, w, output_depth] Raises: ValueError: if the key depth or value depth are not divisible by the number of attention heads. """ if total_key_depth % num_heads != 0: raise ValueError("Key depth (%d) must be divisible by the number of " "attention heads (%d)." % (total_key_depth, num_heads)) if total_value_depth % num_heads != 0: raise ValueError("Value depth (%d) must be divisible by the number of " "attention heads (%d)." % (total_value_depth, num_heads)) with tf.variable_scope( name, default_name="multihead_attention_2d", values=[query_antecedent, memory_antecedent]): q, k, v = compute_qkv(query_antecedent, memory_antecedent, total_key_depth, total_value_depth) # after splitting, shape is [batch, heads, h, w, depth] q = split_heads_2d(q, num_heads) k = split_heads_2d(k, num_heads) v = split_heads_2d(v, num_heads) key_depth_per_head = total_key_depth // num_heads q *= key_depth_per_head**-0.5 if attention_type == "local_attention_2d": x = local_attention_2d( q, k, v, query_shape=query_shape, memory_flange=memory_flange) elif attention_type == "masked_local_attention_2d": assert attention_type == "masked_local_attention_2d" x = masked_local_attention_2d( q, k, v, query_shape=query_shape, memory_flange=memory_flange) else: assert attention_type == "unmasked_local_attention_2d_tpu" x = dot_product_unmasked_attention_local_2d_tpu( q, k, v, None, max_relative_position=None, query_shape=query_shape) x = combine_heads_2d(x) x = common_layers.dense( x, output_depth, use_bias=False, name="output_transform") return x
[ "def", "multihead_attention_2d", "(", "query_antecedent", ",", "memory_antecedent", ",", "total_key_depth", ",", "total_value_depth", ",", "output_depth", ",", "num_heads", ",", "attention_type", "=", "\"local_attention_2d\"", ",", "query_shape", "=", "(", "8", ",", "16", ")", ",", "memory_flange", "=", "(", "8", ",", "16", ")", ",", "name", "=", "None", ")", ":", "if", "total_key_depth", "%", "num_heads", "!=", "0", ":", "raise", "ValueError", "(", "\"Key depth (%d) must be divisible by the number of \"", "\"attention heads (%d).\"", "%", "(", "total_key_depth", ",", "num_heads", ")", ")", "if", "total_value_depth", "%", "num_heads", "!=", "0", ":", "raise", "ValueError", "(", "\"Value depth (%d) must be divisible by the number of \"", "\"attention heads (%d).\"", "%", "(", "total_value_depth", ",", "num_heads", ")", ")", "with", "tf", ".", "variable_scope", "(", "name", ",", "default_name", "=", "\"multihead_attention_2d\"", ",", "values", "=", "[", "query_antecedent", ",", "memory_antecedent", "]", ")", ":", "q", ",", "k", ",", "v", "=", "compute_qkv", "(", "query_antecedent", ",", "memory_antecedent", ",", "total_key_depth", ",", "total_value_depth", ")", "# after splitting, shape is [batch, heads, h, w, depth]", "q", "=", "split_heads_2d", "(", "q", ",", "num_heads", ")", "k", "=", "split_heads_2d", "(", "k", ",", "num_heads", ")", "v", "=", "split_heads_2d", "(", "v", ",", "num_heads", ")", "key_depth_per_head", "=", "total_key_depth", "//", "num_heads", "q", "*=", "key_depth_per_head", "**", "-", "0.5", "if", "attention_type", "==", "\"local_attention_2d\"", ":", "x", "=", "local_attention_2d", "(", "q", ",", "k", ",", "v", ",", "query_shape", "=", "query_shape", ",", "memory_flange", "=", "memory_flange", ")", "elif", "attention_type", "==", "\"masked_local_attention_2d\"", ":", "assert", "attention_type", "==", "\"masked_local_attention_2d\"", "x", "=", "masked_local_attention_2d", "(", "q", ",", "k", ",", "v", ",", "query_shape", "=", "query_shape", ",", "memory_flange", "=", "memory_flange", ")", "else", ":", "assert", "attention_type", "==", "\"unmasked_local_attention_2d_tpu\"", "x", "=", "dot_product_unmasked_attention_local_2d_tpu", "(", "q", ",", "k", ",", "v", ",", "None", ",", "max_relative_position", "=", "None", ",", "query_shape", "=", "query_shape", ")", "x", "=", "combine_heads_2d", "(", "x", ")", "x", "=", "common_layers", ".", "dense", "(", "x", ",", "output_depth", ",", "use_bias", "=", "False", ",", "name", "=", "\"output_transform\"", ")", "return", "x" ]
2d Multihead scaled-dot-product attention with inp/output transformations. Args: query_antecedent: a Tensor with shape [batch, h, w, depth_k] memory_antecedent: a Tensor with shape [batch, h, w, depth_k] total_key_depth: an integer total_value_depth: an integer output_depth: an integer num_heads: an integer dividing total_key_depth and total_value_depth attention_type: String, type of attention function to use. query_shape: an tuple indicating the height and width of each query block. memory_flange: an integer indicating how much to look in height and width name: an optional string Returns: A Tensor of shape [batch, h, w, output_depth] Raises: ValueError: if the key depth or value depth are not divisible by the number of attention heads.
[ "2d", "Multihead", "scaled", "-", "dot", "-", "product", "attention", "with", "inp", "/", "output", "transformations", "." ]
python
train
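A hedged call sketch for the 2d multihead attention wrapper above, in TF 1.x style; the tensor shape and hyper-parameter values are illustrative assumptions, not values from the source:

import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 32, 32, 128])   # [batch, h, w, depth]
y = multihead_attention_2d(x, x,
                           total_key_depth=128, total_value_depth=128,
                           output_depth=128, num_heads=8,
                           attention_type="local_attention_2d",
                           query_shape=(8, 8), memory_flange=(8, 8))
# y keeps the spatial shape: [batch, 32, 32, 128]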
Alignak-monitoring-contrib/alignak-backend-client
alignak_backend_client/client.py
https://github.com/Alignak-monitoring-contrib/alignak-backend-client/blob/1e21f6ce703e66984d1f9b20fe7866460ab50b39/alignak_backend_client/client.py#L674-L694
def delete(self, endpoint, headers): """ Method to delete an item or all items headers['If-Match'] must contain the _etag identifier of the element to delete :param endpoint: endpoint (API URL) :type endpoint: str :param headers: headers (example: Content-Type) :type headers: dict :return: response (deletion information) :rtype: dict """ response = self.get_response(method='DELETE', endpoint=endpoint, headers=headers) logger.debug("delete, response: %s", response) if response.status_code != 204: # pragma: no cover - should not happen ... resp = self.decode(response=response) resp = {"_status": "OK"} return resp
[ "def", "delete", "(", "self", ",", "endpoint", ",", "headers", ")", ":", "response", "=", "self", ".", "get_response", "(", "method", "=", "'DELETE'", ",", "endpoint", "=", "endpoint", ",", "headers", "=", "headers", ")", "logger", ".", "debug", "(", "\"delete, response: %s\"", ",", "response", ")", "if", "response", ".", "status_code", "!=", "204", ":", "# pragma: no cover - should not happen ...", "resp", "=", "self", ".", "decode", "(", "response", "=", "response", ")", "resp", "=", "{", "\"_status\"", ":", "\"OK\"", "}", "return", "resp" ]
Method to delete an item or all items headers['If-Match'] must contain the _etag identifier of the element to delete :param endpoint: endpoint (API URL) :type endpoint: str :param headers: headers (example: Content-Type) :type headers: dict :return: response (deletion information) :rtype: dict
[ "Method", "to", "delete", "an", "item", "or", "all", "items" ]
python
test
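A hypothetical deletion call against the client method above; the endpoint, the item fields, and the `backend` variable name are assumptions for illustration:

headers = {'Content-Type': 'application/json', 'If-Match': item['_etag']}
resp = backend.delete('host/' + item['_id'], headers=headers)
# on a 204 response the method returns {'_status': 'OK'}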
dwavesystems/dwave_networkx
dwave_networkx/algorithms/tsp.py
https://github.com/dwavesystems/dwave_networkx/blob/9ea1223ddbc7e86db2f90b8b23e250e6642c3d68/dwave_networkx/algorithms/tsp.py#L30-L107
def traveling_salesman(G, sampler=None, lagrange=2, weight='weight', **sampler_args): """Returns an approximate minimum traveling salesperson route. Defines a QUBO with ground states corresponding to the minimum routes and uses the sampler to sample from it. A route is a cycle in the graph that reaches each node exactly once. A minimum route is a route with the smallest total edge weight. Parameters ---------- G : NetworkX graph The graph on which to find a minimum traveling salesperson route. This should be a complete graph with non-zero weights on every edge. sampler : A binary quadratic model sampler. A sampler is a process that samples from low energy states in models defined by an Ising equation or a Quadratic Unconstrained Binary Optimization Problem (QUBO). A sampler is expected to have a 'sample_qubo' and 'sample_ising' method. A sampler is expected to return an iterable of samples, in order of increasing energy. If no sampler is provided, one must be provided using the `set_default_sampler` function. lagrange : optional (default 2) Lagrange parameter to weight constraints (visit every city once) versus objective (shortest distance route). weight : optional (default 'weight') The name of the edge attribute containing the weight. sampler_args : Additional keyword parameters are passed to the sampler. Returns ------- route : list List of nodes in order to be visited on a route Examples -------- This example uses a `dimod <https://github.com/dwavesystems/dimod>`_ sampler to find a minimum route in a five-cities problem. >>> import dwave_networkx as dnx >>> import networkx as nx >>> import dimod ... >>> G = nx.complete_graph(4) >>> G.add_weighted_edges_from({(0, 1, 1), (0, 2, 2), (0, 3, 3), (1, 2, 3), ... (1, 3, 4), (2, 3, 5)}) >>> dnx.traveling_salesman(G, dimod.ExactSolver()) [2, 1, 0, 3] Notes ----- Samplers by their nature may not return the optimal solution. This function does not attempt to confirm the quality of the returned sample. """ # Get a QUBO representation of the problem Q = traveling_salesman_qubo(G, lagrange, weight) # use the sampler to find low energy states response = sampler.sample_qubo(Q, **sampler_args) # we want the lowest energy sample, in order by stop number sample = next(iter(response)) route = [] for entry in sample: if sample[entry] > 0: route.append(entry) route.sort(key=lambda x: x[1]) return list((x[0] for x in route))
[ "def", "traveling_salesman", "(", "G", ",", "sampler", "=", "None", ",", "lagrange", "=", "2", ",", "weight", "=", "'weight'", ",", "*", "*", "sampler_args", ")", ":", "# Get a QUBO representation of the problem", "Q", "=", "traveling_salesman_qubo", "(", "G", ",", "lagrange", ",", "weight", ")", "# use the sampler to find low energy states", "response", "=", "sampler", ".", "sample_qubo", "(", "Q", ",", "*", "*", "sampler_args", ")", "# we want the lowest energy sample, in order by stop number", "sample", "=", "next", "(", "iter", "(", "response", ")", ")", "route", "=", "[", "]", "for", "entry", "in", "sample", ":", "if", "sample", "[", "entry", "]", ">", "0", ":", "route", ".", "append", "(", "entry", ")", "route", ".", "sort", "(", "key", "=", "lambda", "x", ":", "x", "[", "1", "]", ")", "return", "list", "(", "(", "x", "[", "0", "]", "for", "x", "in", "route", ")", ")" ]
Returns an approximate minimum traveling salesperson route. Defines a QUBO with ground states corresponding to the minimum routes and uses the sampler to sample from it. A route is a cycle in the graph that reaches each node exactly once. A minimum route is a route with the smallest total edge weight. Parameters ---------- G : NetworkX graph The graph on which to find a minimum traveling salesperson route. This should be a complete graph with non-zero weights on every edge. sampler : A binary quadratic model sampler. A sampler is a process that samples from low energy states in models defined by an Ising equation or a Quadratic Unconstrained Binary Optimization Problem (QUBO). A sampler is expected to have a 'sample_qubo' and 'sample_ising' method. A sampler is expected to return an iterable of samples, in order of increasing energy. If no sampler is provided, one must be provided using the `set_default_sampler` function. lagrange : optional (default 2) Lagrange parameter to weight constraints (visit every city once) versus objective (shortest distance route). weight : optional (default 'weight') The name of the edge attribute containing the weight. sampler_args : Additional keyword parameters are passed to the sampler. Returns ------- route : list List of nodes in order to be visited on a route Examples -------- This example uses a `dimod <https://github.com/dwavesystems/dimod>`_ sampler to find a minimum route in a five-cities problem. >>> import dwave_networkx as dnx >>> import networkx as nx >>> import dimod ... >>> G = nx.complete_graph(4) >>> G.add_weighted_edges_from({(0, 1, 1), (0, 2, 2), (0, 3, 3), (1, 2, 3), ... (1, 3, 4), (2, 3, 5)}) >>> dnx.traveling_salesman(G, dimod.ExactSolver()) [2, 1, 0, 3] Notes ----- Samplers by their nature may not return the optimal solution. This function does not attempt to confirm the quality of the returned sample.
[ "Returns", "an", "approximate", "minimum", "traveling", "salesperson", "route", "." ]
python
train
opendatateam/udata
udata/core/dataset/rdf.py
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/core/dataset/rdf.py#L164-L214
def dataset_to_rdf(dataset, graph=None): ''' Map a dataset domain model to a DCAT/RDF graph ''' # Use the unlocalized permalink to the dataset as URI when available # unless there is already an upstream URI if 'uri' in dataset.extras: id = URIRef(dataset.extras['uri']) elif dataset.id: id = URIRef(url_for('datasets.show_redirect', dataset=dataset.id, _external=True)) else: id = BNode() # Expose upstream identifier if present if 'dct:identifier' in dataset.extras: identifier = dataset.extras['dct:identifier'] else: identifier = dataset.id graph = graph or Graph(namespace_manager=namespace_manager) d = graph.resource(id) d.set(RDF.type, DCAT.Dataset) d.set(DCT.identifier, Literal(identifier)) d.set(DCT.title, Literal(dataset.title)) d.set(DCT.description, Literal(dataset.description)) d.set(DCT.issued, Literal(dataset.created_at)) d.set(DCT.modified, Literal(dataset.last_modified)) if dataset.acronym: d.set(SKOS.altLabel, Literal(dataset.acronym)) for tag in dataset.tags: d.add(DCAT.keyword, Literal(tag)) for resource in dataset.resources: d.add(DCAT.distribution, resource_to_rdf(resource, dataset, graph)) if dataset.owner: d.add(DCT.publisher, user_to_rdf(dataset.owner, graph)) elif dataset.organization: d.add(DCT.publisher, organization_to_rdf(dataset.organization, graph)) if dataset.temporal_coverage: d.set(DCT.temporal, temporal_to_rdf(dataset.temporal_coverage, graph)) frequency = frequency_to_rdf(dataset.frequency) if frequency: d.set(DCT.accrualPeriodicity, frequency) return d
[ "def", "dataset_to_rdf", "(", "dataset", ",", "graph", "=", "None", ")", ":", "# Use the unlocalized permalink to the dataset as URI when available", "# unless there is already an upstream URI", "if", "'uri'", "in", "dataset", ".", "extras", ":", "id", "=", "URIRef", "(", "dataset", ".", "extras", "[", "'uri'", "]", ")", "elif", "dataset", ".", "id", ":", "id", "=", "URIRef", "(", "url_for", "(", "'datasets.show_redirect'", ",", "dataset", "=", "dataset", ".", "id", ",", "_external", "=", "True", ")", ")", "else", ":", "id", "=", "BNode", "(", ")", "# Expose upstream identifier if present", "if", "'dct:identifier'", "in", "dataset", ".", "extras", ":", "identifier", "=", "dataset", ".", "extras", "[", "'dct:identifier'", "]", "else", ":", "identifier", "=", "dataset", ".", "id", "graph", "=", "graph", "or", "Graph", "(", "namespace_manager", "=", "namespace_manager", ")", "d", "=", "graph", ".", "resource", "(", "id", ")", "d", ".", "set", "(", "RDF", ".", "type", ",", "DCAT", ".", "Dataset", ")", "d", ".", "set", "(", "DCT", ".", "identifier", ",", "Literal", "(", "identifier", ")", ")", "d", ".", "set", "(", "DCT", ".", "title", ",", "Literal", "(", "dataset", ".", "title", ")", ")", "d", ".", "set", "(", "DCT", ".", "description", ",", "Literal", "(", "dataset", ".", "description", ")", ")", "d", ".", "set", "(", "DCT", ".", "issued", ",", "Literal", "(", "dataset", ".", "created_at", ")", ")", "d", ".", "set", "(", "DCT", ".", "modified", ",", "Literal", "(", "dataset", ".", "last_modified", ")", ")", "if", "dataset", ".", "acronym", ":", "d", ".", "set", "(", "SKOS", ".", "altLabel", ",", "Literal", "(", "dataset", ".", "acronym", ")", ")", "for", "tag", "in", "dataset", ".", "tags", ":", "d", ".", "add", "(", "DCAT", ".", "keyword", ",", "Literal", "(", "tag", ")", ")", "for", "resource", "in", "dataset", ".", "resources", ":", "d", ".", "add", "(", "DCAT", ".", "distribution", ",", "resource_to_rdf", "(", "resource", ",", "dataset", ",", "graph", ")", ")", "if", "dataset", ".", "owner", ":", "d", ".", "add", "(", "DCT", ".", "publisher", ",", "user_to_rdf", "(", "dataset", ".", "owner", ",", "graph", ")", ")", "elif", "dataset", ".", "organization", ":", "d", ".", "add", "(", "DCT", ".", "publisher", ",", "organization_to_rdf", "(", "dataset", ".", "organization", ",", "graph", ")", ")", "if", "dataset", ".", "temporal_coverage", ":", "d", ".", "set", "(", "DCT", ".", "temporal", ",", "temporal_to_rdf", "(", "dataset", ".", "temporal_coverage", ",", "graph", ")", ")", "frequency", "=", "frequency_to_rdf", "(", "dataset", ".", "frequency", ")", "if", "frequency", ":", "d", ".", "set", "(", "DCT", ".", "accrualPeriodicity", ",", "frequency", ")", "return", "d" ]
Map a dataset domain model to a DCAT/RDF graph
[ "Map", "a", "dataset", "domain", "model", "to", "a", "DCAT", "/", "RDF", "graph" ]
python
train
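A short sketch of turning the resource returned above into Turtle output; `my_dataset` is a hypothetical udata Dataset instance, and the serialization call is plain rdflib rather than anything specific to this module:

resource = dataset_to_rdf(my_dataset)
turtle = resource.graph.serialize(format='turtle')   # bytes or str depending on rdflib version
print(turtle)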
keunwoochoi/kapre
kapre/backend_keras.py
https://github.com/keunwoochoi/kapre/blob/8517f45d3ccb9fff1ec0049b9c3e4389f81c20aa/kapre/backend_keras.py#L7-L25
def amplitude_to_decibel(x, amin=1e-10, dynamic_range=80.0): """[K] Convert (linear) amplitude to decibel (log10(x)). x: Keras *batch* tensor or variable. It has to be batch because of sample-wise `K.max()`. amin: minimum amplitude. amplitude smaller than `amin` is set to this. dynamic_range: dynamic_range in decibel """ log_spec = 10 * K.log(K.maximum(x, amin)) / np.log(10).astype(K.floatx()) if K.ndim(x) > 1: axis = tuple(range(K.ndim(x))[1:]) else: axis = None log_spec = log_spec - K.max(log_spec, axis=axis, keepdims=True) # [-?, 0] log_spec = K.maximum(log_spec, -1 * dynamic_range) # [-80, 0] return log_spec
[ "def", "amplitude_to_decibel", "(", "x", ",", "amin", "=", "1e-10", ",", "dynamic_range", "=", "80.0", ")", ":", "log_spec", "=", "10", "*", "K", ".", "log", "(", "K", ".", "maximum", "(", "x", ",", "amin", ")", ")", "/", "np", ".", "log", "(", "10", ")", ".", "astype", "(", "K", ".", "floatx", "(", ")", ")", "if", "K", ".", "ndim", "(", "x", ")", ">", "1", ":", "axis", "=", "tuple", "(", "range", "(", "K", ".", "ndim", "(", "x", ")", ")", "[", "1", ":", "]", ")", "else", ":", "axis", "=", "None", "log_spec", "=", "log_spec", "-", "K", ".", "max", "(", "log_spec", ",", "axis", "=", "axis", ",", "keepdims", "=", "True", ")", "# [-?, 0]", "log_spec", "=", "K", ".", "maximum", "(", "log_spec", ",", "-", "1", "*", "dynamic_range", ")", "# [-80, 0]", "return", "log_spec" ]
[K] Convert (linear) amplitude to decibel (log10(x)). x: Keras *batch* tensor or variable. It has to be batch because of sample-wise `K.max()`. amin: minimum amplitude. amplitude smaller than `amin` is set to this. dynamic_range: dynamic_range in decibel
[ "[", "K", "]", "Convert", "(", "linear", ")", "amplitude", "to", "decibel", "(", "log10", "(", "x", "))", "." ]
python
train
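A numeric sketch for the decibel conversion above, using the Keras backend directly; the sample values are arbitrary:

import numpy as np
from keras import backend as K

x = K.variable(np.array([[1.0, 0.1, 1e-3]]))
db = amplitude_to_decibel(x)
# K.eval(db) is approximately [[0., -10., -30.]]: each sample's maximum maps to 0 dB,
# and values are clipped from below at -dynamic_range (default -80 dB)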
elsampsa/valkka-live
valkka/live/filterchain.py
https://github.com/elsampsa/valkka-live/blob/218bb2ecf71c516c85b1b6e075454bba13090cd8/valkka/live/filterchain.py#L229-L244
def getDevice(self, **kwargs): """Like get, but returns a Device instance (RTSPCameraDevice, etc.) """ filterchain = self.get(**kwargs) if not filterchain: return None # get filterchain init parameters that are compatible with RTSPCameraDevice input parameters pars = filterchain.getParDic(DataModel.RTSPCameraDevice.parameter_defs) # .. and instantiate an RTSPCameraDevice with those parameters device = DataModel.RTSPCameraDevice(**pars) print(self.pre, "getDevice :", pars, device) return device
[ "def", "getDevice", "(", "self", ",", "*", "*", "kwargs", ")", ":", "filterchain", "=", "self", ".", "get", "(", "*", "*", "kwargs", ")", "if", "not", "filterchain", ":", "return", "None", "# get filterchain init parameters that are compatible with RTSPCameraDevice input parameters", "pars", "=", "filterchain", ".", "getParDic", "(", "DataModel", ".", "RTSPCameraDevice", ".", "parameter_defs", ")", "# .. and instantiate an RTSPCameraDevice with those parameters", "device", "=", "DataModel", ".", "RTSPCameraDevice", "(", "*", "*", "pars", ")", "print", "(", "self", ".", "pre", ",", "\"getDevice :\"", ",", "pars", ",", "device", ")", "return", "device" ]
Like get, but returns a Device instance (RTSPCameraDevice, etc.)
[ "Like", "get", "but", "returns", "a", "Device", "instance", "(", "RTSPCameraDevice", "etc", ".", ")" ]
python
train
watson-developer-cloud/python-sdk
ibm_watson/discovery_v1.py
https://github.com/watson-developer-cloud/python-sdk/blob/4c2c9df4466fcde88975da9ecd834e6ba95eb353/ibm_watson/discovery_v1.py#L12146-L12151
def _to_dict(self): """Return a json dictionary representing this model.""" _dict = {} if hasattr(self, 'xpaths') and self.xpaths is not None: _dict['xpaths'] = self.xpaths return _dict
[ "def", "_to_dict", "(", "self", ")", ":", "_dict", "=", "{", "}", "if", "hasattr", "(", "self", ",", "'xpaths'", ")", "and", "self", ".", "xpaths", "is", "not", "None", ":", "_dict", "[", "'xpaths'", "]", "=", "self", ".", "xpaths", "return", "_dict" ]
Return a json dictionary representing this model.
[ "Return", "a", "json", "dictionary", "representing", "this", "model", "." ]
python
train
monarch-initiative/dipper
dipper/sources/OMIA.py
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/OMIA.py#L236-L304
def find_omim_type(self): ''' This f(x) needs to be rehomed and shared. Use OMIM's discription of their identifiers to heuristically partition them into genes | phenotypes-diseases type could be - `obsolete` Check `omim_replaced` populated as side effect - 'Suspected' (phenotype) Ignoring thus far - 'gene' - 'Phenotype' - 'heritable_phenotypic_marker' Probable phenotype - 'has_affected_feature' Use as both a gene and a phenotype :return hash of omim_number to ontology_curie ''' src_key = 'mimtitles' myfile = '/'.join((self.rawdir, self.files[src_key]['file'])) # col = self.files[src_key]['columns'] omim_type = {} with open(myfile, 'r') as filereader: reader = csv.reader(filereader, delimiter='\t') # todo header check for row in reader: if row[0][0] == '#': # skip comments continue elif row[0] == 'Caret': # moved|removed|split -> moved twice # populating a dict from an omim to a set of omims # here as a side effect which is less than ideal (prefix, omim_id, destination, empty, empty) = row omim_type[omim_id] = self.globaltt['obsolete'] if row[2][:9] == 'MOVED TO ': token = row[2].split(' ') rep = token[2] if not re.match(r'^[0-9]{6}$', rep): LOG.error('Report malformed omim replacement %s', rep) # clean up one I know about if rep[0] == '{' and rep[7] == '}': rep = rep[1:6] LOG.info('cleaned up %s', rep) if len(rep) == 7 and rep[6] == ',': rep = rep[:5] LOG.info('cleaned up %s', rep) # asuming splits are typically to both gene & phenotype if len(token) > 3: self.omim_replaced[omim_id] = {rep, token[4]} else: self.omim_replaced[omim_id] = {rep} elif row[0] == 'Asterisk': # declared as gene (prefix, omim_id, pref_label, alt_label, inc_label) = row omim_type[omim_id] = self.globaltt['gene'] elif row[0] == 'NULL': # potential model of disease? (prefix, omim_id, pref_label, alt_label, inc_label) = row # omim_type[omim_id] = self.globaltt['Suspected'] # NCIT:C71458 elif row[0] == 'Number Sign': (prefix, omim_id, pref_label, alt_label, inc_label) = row omim_type[omim_id] = self.globaltt['Phenotype'] elif row[0] == 'Percent': (prefix, omim_id, pref_label, alt_label, inc_label) = row omim_type[omim_id] = self.globaltt['heritable_phenotypic_marker'] elif row[0] == 'Plus': (prefix, omim_id, pref_label, alt_label, inc_label) = row # to be interperted as a gene and/or a phenotype omim_type[omim_id] = self.globaltt['has_affected_feature'] else: LOG.error('Unlnown OMIM type line %s', reader.line_num) return omim_type
[ "def", "find_omim_type", "(", "self", ")", ":", "src_key", "=", "'mimtitles'", "myfile", "=", "'/'", ".", "join", "(", "(", "self", ".", "rawdir", ",", "self", ".", "files", "[", "src_key", "]", "[", "'file'", "]", ")", ")", "# col = self.files[src_key]['columns']", "omim_type", "=", "{", "}", "with", "open", "(", "myfile", ",", "'r'", ")", "as", "filereader", ":", "reader", "=", "csv", ".", "reader", "(", "filereader", ",", "delimiter", "=", "'\\t'", ")", "# todo header check", "for", "row", "in", "reader", ":", "if", "row", "[", "0", "]", "[", "0", "]", "==", "'#'", ":", "# skip comments", "continue", "elif", "row", "[", "0", "]", "==", "'Caret'", ":", "# moved|removed|split -> moved twice", "# populating a dict from an omim to a set of omims", "# here as a side effect which is less than ideal", "(", "prefix", ",", "omim_id", ",", "destination", ",", "empty", ",", "empty", ")", "=", "row", "omim_type", "[", "omim_id", "]", "=", "self", ".", "globaltt", "[", "'obsolete'", "]", "if", "row", "[", "2", "]", "[", ":", "9", "]", "==", "'MOVED TO '", ":", "token", "=", "row", "[", "2", "]", ".", "split", "(", "' '", ")", "rep", "=", "token", "[", "2", "]", "if", "not", "re", ".", "match", "(", "r'^[0-9]{6}$'", ",", "rep", ")", ":", "LOG", ".", "error", "(", "'Report malformed omim replacement %s'", ",", "rep", ")", "# clean up one I know about", "if", "rep", "[", "0", "]", "==", "'{'", "and", "rep", "[", "7", "]", "==", "'}'", ":", "rep", "=", "rep", "[", "1", ":", "6", "]", "LOG", ".", "info", "(", "'cleaned up %s'", ",", "rep", ")", "if", "len", "(", "rep", ")", "==", "7", "and", "rep", "[", "6", "]", "==", "','", ":", "rep", "=", "rep", "[", ":", "5", "]", "LOG", ".", "info", "(", "'cleaned up %s'", ",", "rep", ")", "# asuming splits are typically to both gene & phenotype", "if", "len", "(", "token", ")", ">", "3", ":", "self", ".", "omim_replaced", "[", "omim_id", "]", "=", "{", "rep", ",", "token", "[", "4", "]", "}", "else", ":", "self", ".", "omim_replaced", "[", "omim_id", "]", "=", "{", "rep", "}", "elif", "row", "[", "0", "]", "==", "'Asterisk'", ":", "# declared as gene", "(", "prefix", ",", "omim_id", ",", "pref_label", ",", "alt_label", ",", "inc_label", ")", "=", "row", "omim_type", "[", "omim_id", "]", "=", "self", ".", "globaltt", "[", "'gene'", "]", "elif", "row", "[", "0", "]", "==", "'NULL'", ":", "# potential model of disease?", "(", "prefix", ",", "omim_id", ",", "pref_label", ",", "alt_label", ",", "inc_label", ")", "=", "row", "#", "omim_type", "[", "omim_id", "]", "=", "self", ".", "globaltt", "[", "'Suspected'", "]", "# NCIT:C71458", "elif", "row", "[", "0", "]", "==", "'Number Sign'", ":", "(", "prefix", ",", "omim_id", ",", "pref_label", ",", "alt_label", ",", "inc_label", ")", "=", "row", "omim_type", "[", "omim_id", "]", "=", "self", ".", "globaltt", "[", "'Phenotype'", "]", "elif", "row", "[", "0", "]", "==", "'Percent'", ":", "(", "prefix", ",", "omim_id", ",", "pref_label", ",", "alt_label", ",", "inc_label", ")", "=", "row", "omim_type", "[", "omim_id", "]", "=", "self", ".", "globaltt", "[", "'heritable_phenotypic_marker'", "]", "elif", "row", "[", "0", "]", "==", "'Plus'", ":", "(", "prefix", ",", "omim_id", ",", "pref_label", ",", "alt_label", ",", "inc_label", ")", "=", "row", "# to be interperted as a gene and/or a phenotype", "omim_type", "[", "omim_id", "]", "=", "self", ".", "globaltt", "[", "'has_affected_feature'", "]", "else", ":", "LOG", ".", "error", "(", "'Unlnown OMIM type line %s'", ",", "reader", ".", "line_num", ")", "return", "omim_type" ]
This f(x) needs to be rehomed and shared. Use OMIM's discription of their identifiers to heuristically partition them into genes | phenotypes-diseases type could be - `obsolete` Check `omim_replaced` populated as side effect - 'Suspected' (phenotype) Ignoring thus far - 'gene' - 'Phenotype' - 'heritable_phenotypic_marker' Probable phenotype - 'has_affected_feature' Use as both a gene and a phenotype :return hash of omim_number to ontology_curie
[ "This", "f", "(", "x", ")", "needs", "to", "be", "rehomed", "and", "shared", ".", "Use", "OMIM", "s", "discription", "of", "their", "identifiers", "to", "heuristically", "partition", "them", "into", "genes", "|", "phenotypes", "-", "diseases", "type", "could", "be", "-", "obsolete", "Check", "omim_replaced", "populated", "as", "side", "effect", "-", "Suspected", "(", "phenotype", ")", "Ignoring", "thus", "far", "-", "gene", "-", "Phenotype", "-", "heritable_phenotypic_marker", "Probable", "phenotype", "-", "has_affected_feature", "Use", "as", "both", "a", "gene", "and", "a", "phenotype" ]
python
train
mastro35/flows
flows/Actions/Action.py
https://github.com/mastro35/flows/blob/05e488385673a69597b5b39c7728795aa4d5eb18/flows/Actions/Action.py#L107-L121
def send_message(self, output): """ Send a message to the socket """ file_system_event = None if self.my_action_input: file_system_event = self.my_action_input.file_system_event or None output_action = ActionInput(file_system_event, output, self.name, "*") Global.MESSAGE_DISPATCHER.send_message(output_action)
[ "def", "send_message", "(", "self", ",", "output", ")", ":", "file_system_event", "=", "None", "if", "self", ".", "my_action_input", ":", "file_system_event", "=", "self", ".", "my_action_input", ".", "file_system_event", "or", "None", "output_action", "=", "ActionInput", "(", "file_system_event", ",", "output", ",", "self", ".", "name", ",", "\"*\"", ")", "Global", ".", "MESSAGE_DISPATCHER", ".", "send_message", "(", "output_action", ")" ]
Send a message to the socket
[ "Send", "a", "message", "to", "the", "socket" ]
python
train
cloud9ers/gurumate
environment/lib/python2.7/site-packages/IPython/frontend/qt/console/console_widget.py
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/frontend/qt/console/console_widget.py#L514-L520
def cut(self): """ Copy the currently selected text to the clipboard and delete it if it's inside the input buffer. """ self.copy() if self.can_cut(): self._control.textCursor().removeSelectedText()
[ "def", "cut", "(", "self", ")", ":", "self", ".", "copy", "(", ")", "if", "self", ".", "can_cut", "(", ")", ":", "self", ".", "_control", ".", "textCursor", "(", ")", ".", "removeSelectedText", "(", ")" ]
Copy the currently selected text to the clipboard and delete it if it's inside the input buffer.
[ "Copy", "the", "currently", "selected", "text", "to", "the", "clipboard", "and", "delete", "it", "if", "it", "s", "inside", "the", "input", "buffer", "." ]
python
test
materialsvirtuallab/monty
monty/dev.py
https://github.com/materialsvirtuallab/monty/blob/d99d6f3c68372d83489d28ff515566c93cd569e2/monty/dev.py#L97-L195
def get_ncpus(): """ .. note:: If you are using Python >= 2.7, multiprocessing.cpu_count() already provides the number of CPUs. In fact, this is the first method tried. The purpose of this function is to cater to old Python versions that still exist on many Linux style clusters. Number of virtual or physical CPUs on this system, i.e. user/real as output by time(1) when called with an optimally scaling userspace-only program. Return -1 if ncpus cannot be detected. Taken from: http://stackoverflow.com/questions/1006289/how-to-find-out-the-number-of- cpus-in-python """ # Python 2.6+ # May raise NonImplementedError try: return multiprocessing.cpu_count() except (ImportError, NotImplementedError): pass # POSIX try: res = int(os.sysconf('SC_NPROCESSORS_ONLN')) if res > 0: return res except (AttributeError, ValueError): pass # Windows try: res = int(os.environ['NUMBER_OF_PROCESSORS']) if res > 0: return res except (KeyError, ValueError): pass # jython try: from java.lang import Runtime runtime = Runtime.getRuntime() res = runtime.availableProcessors() if res > 0: return res except ImportError: pass # BSD try: sysctl = subprocess.Popen(['sysctl', '-n', 'hw.ncpu'], stdout=subprocess.PIPE) scstdout = sysctl.communicate()[0] res = int(scstdout) if res > 0: return res except (OSError, ValueError): pass # Linux try: res = open('/proc/cpuinfo').read().count('processor\t:') if res > 0: return res except IOError: pass # Solaris try: pseudo_devices = os.listdir('/devices/pseudo/') expr = re.compile('^cpuid@[0-9]+$') res = 0 for pd in pseudo_devices: if expr.match(pd) is not None: res += 1 if res > 0: return res except OSError: pass # Other UNIXes (heuristic) try: try: dmesg = open('/var/run/dmesg.boot').read() except IOError: dmesg_process = subprocess.Popen(['dmesg'], stdout=subprocess.PIPE) dmesg = dmesg_process.communicate()[0] res = 0 while '\ncpu' + str(res) + ':' in dmesg: res += 1 if res > 0: return res except OSError: pass logger.warning('Cannot determine number of CPUs on this system!') return -1
[ "def", "get_ncpus", "(", ")", ":", "# Python 2.6+", "# May raise NonImplementedError", "try", ":", "return", "multiprocessing", ".", "cpu_count", "(", ")", "except", "(", "ImportError", ",", "NotImplementedError", ")", ":", "pass", "# POSIX", "try", ":", "res", "=", "int", "(", "os", ".", "sysconf", "(", "'SC_NPROCESSORS_ONLN'", ")", ")", "if", "res", ">", "0", ":", "return", "res", "except", "(", "AttributeError", ",", "ValueError", ")", ":", "pass", "# Windows", "try", ":", "res", "=", "int", "(", "os", ".", "environ", "[", "'NUMBER_OF_PROCESSORS'", "]", ")", "if", "res", ">", "0", ":", "return", "res", "except", "(", "KeyError", ",", "ValueError", ")", ":", "pass", "# jython", "try", ":", "from", "java", ".", "lang", "import", "Runtime", "runtime", "=", "Runtime", ".", "getRuntime", "(", ")", "res", "=", "runtime", ".", "availableProcessors", "(", ")", "if", "res", ">", "0", ":", "return", "res", "except", "ImportError", ":", "pass", "# BSD", "try", ":", "sysctl", "=", "subprocess", ".", "Popen", "(", "[", "'sysctl'", ",", "'-n'", ",", "'hw.ncpu'", "]", ",", "stdout", "=", "subprocess", ".", "PIPE", ")", "scstdout", "=", "sysctl", ".", "communicate", "(", ")", "[", "0", "]", "res", "=", "int", "(", "scstdout", ")", "if", "res", ">", "0", ":", "return", "res", "except", "(", "OSError", ",", "ValueError", ")", ":", "pass", "# Linux", "try", ":", "res", "=", "open", "(", "'/proc/cpuinfo'", ")", ".", "read", "(", ")", ".", "count", "(", "'processor\\t:'", ")", "if", "res", ">", "0", ":", "return", "res", "except", "IOError", ":", "pass", "# Solaris", "try", ":", "pseudo_devices", "=", "os", ".", "listdir", "(", "'/devices/pseudo/'", ")", "expr", "=", "re", ".", "compile", "(", "'^cpuid@[0-9]+$'", ")", "res", "=", "0", "for", "pd", "in", "pseudo_devices", ":", "if", "expr", ".", "match", "(", "pd", ")", "is", "not", "None", ":", "res", "+=", "1", "if", "res", ">", "0", ":", "return", "res", "except", "OSError", ":", "pass", "# Other UNIXes (heuristic)", "try", ":", "try", ":", "dmesg", "=", "open", "(", "'/var/run/dmesg.boot'", ")", ".", "read", "(", ")", "except", "IOError", ":", "dmesg_process", "=", "subprocess", ".", "Popen", "(", "[", "'dmesg'", "]", ",", "stdout", "=", "subprocess", ".", "PIPE", ")", "dmesg", "=", "dmesg_process", ".", "communicate", "(", ")", "[", "0", "]", "res", "=", "0", "while", "'\\ncpu'", "+", "str", "(", "res", ")", "+", "':'", "in", "dmesg", ":", "res", "+=", "1", "if", "res", ">", "0", ":", "return", "res", "except", "OSError", ":", "pass", "logger", ".", "warning", "(", "'Cannot determine number of CPUs on this system!'", ")", "return", "-", "1" ]
.. note:: If you are using Python >= 2.7, multiprocessing.cpu_count() already provides the number of CPUs. In fact, this is the first method tried. The purpose of this function is to cater to old Python versions that still exist on many Linux style clusters. Number of virtual or physical CPUs on this system, i.e. user/real as output by time(1) when called with an optimally scaling userspace-only program. Return -1 if ncpus cannot be detected. Taken from: http://stackoverflow.com/questions/1006289/how-to-find-out-the-number-of- cpus-in-python
[ "..", "note", "::" ]
python
train
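A tiny usage sketch for the CPU-count helper above, showing the fallback its -1 return value calls for:

ncpus = get_ncpus()
if ncpus == -1:
    ncpus = 1   # detection failed; assume a single worker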
kata198/AdvancedHTMLParser
AdvancedHTMLParser/Tags.py
https://github.com/kata198/AdvancedHTMLParser/blob/06aeea5d8e2ea86e155aae0fc237623d3e9b7f9d/AdvancedHTMLParser/Tags.py#L2267-L2283
def filterCollection(self, filterFunc): ''' filterCollection - Filters only the immediate objects contained within this Collection against a function, not including any children @param filterFunc <function> - A function or lambda expression that returns True to have that element match @return TagCollection<AdvancedTag> ''' ret = TagCollection() if len(self) == 0: return ret for tag in self: if filterFunc(tag) is True: ret.append(tag) return ret
[ "def", "filterCollection", "(", "self", ",", "filterFunc", ")", ":", "ret", "=", "TagCollection", "(", ")", "if", "len", "(", "self", ")", "==", "0", ":", "return", "ret", "for", "tag", "in", "self", ":", "if", "filterFunc", "(", "tag", ")", "is", "True", ":", "ret", ".", "append", "(", "tag", ")", "return", "ret" ]
filterCollection - Filters only the immediate objects contained within this Collection against a function, not including any children @param filterFunc <function> - A function or lambda expression that returns True to have that element match @return TagCollection<AdvancedTag>
[ "filterCollection", "-", "Filters", "only", "the", "immediate", "objects", "contained", "within", "this", "Collection", "against", "a", "function", "not", "including", "any", "children" ]
python
train
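A hypothetical filter over a TagCollection using the method above, assuming the usual `tagName` attribute on AdvancedTag; the `collection` variable is a stand-in:

divs = collection.filterCollection(lambda tag: tag.tagName == 'div')
# only the collection's immediate members are tested, never their children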
slawek87/yql-finance
examples/stock_price.py
https://github.com/slawek87/yql-finance/blob/52b1ac6720db09c4d8a9864b171506e90a8d3964/examples/stock_price.py#L17-L27
def fetch_googl(): """Returns stock prices for Google company.""" yql = YQL('GOOGL', '2014-01-01', '2014-01-10') for item in yql: print item.get('date'), item.get('price') yql.select('GOOGL', '2014-01-01', '2014-01-10') for item in yql: print item.get('date'), item.get('price')
[ "def", "fetch_googl", "(", ")", ":", "yql", "=", "YQL", "(", "'GOOGL'", ",", "'2014-01-01'", ",", "'2014-01-10'", ")", "for", "item", "in", "yql", ":", "print", "item", ".", "get", "(", "'date'", ")", ",", "item", ".", "get", "(", "'price'", ")", "yql", ".", "select", "(", "'GOOGL'", ",", "'2014-01-01'", ",", "'2014-01-10'", ")", "for", "item", "in", "yql", ":", "print", "item", ".", "get", "(", "'date'", ")", ",", "item", ".", "get", "(", "'price'", ")" ]
Returns stock prices for Google company.
[ "Returns", "stock", "prices", "for", "Google", "company", "." ]
python
train
cmbruns/pyopenvr
src/openvr/glframework/__init__.py
https://github.com/cmbruns/pyopenvr/blob/68395d26bb3df6ab1f0f059c38d441f962938be6/src/openvr/glframework/__init__.py#L26-L41
def shader_substring(body, stack_frame=1): """ Call this method from a function that defines a literal shader string as the "body" argument. Dresses up a shader string in two ways: 1) Insert #line number declaration 2) un-indents The line number information can help debug glsl compile errors. The unindenting allows you to type the shader code at a pleasing indent level in your python method, while still creating an unindented GLSL string at the end. """ line_count = len(body.splitlines(True)) line_number = inspect.stack()[stack_frame][2] + 1 - line_count return """\ #line %d %s """ % (line_number, textwrap.dedent(body))
[ "def", "shader_substring", "(", "body", ",", "stack_frame", "=", "1", ")", ":", "line_count", "=", "len", "(", "body", ".", "splitlines", "(", "True", ")", ")", "line_number", "=", "inspect", ".", "stack", "(", ")", "[", "stack_frame", "]", "[", "2", "]", "+", "1", "-", "line_count", "return", "\"\"\"\\\r\n#line %d\r\n%s\r\n\"\"\"", "%", "(", "line_number", ",", "textwrap", ".", "dedent", "(", "body", ")", ")" ]
Call this method from a function that defines a literal shader string as the "body" argument. Dresses up a shader string in two ways: 1) Insert #line number declaration 2) un-indents The line number information can help debug glsl compile errors. The unindenting allows you to type the shader code at a pleasing indent level in your python method, while still creating an unindented GLSL string at the end.
[ "Call", "this", "method", "from", "a", "function", "that", "defines", "a", "literal", "shader", "string", "as", "the", "body", "argument", ".", "Dresses", "up", "a", "shader", "string", "in", "two", "ways", ":", "1", ")", "Insert", "#line", "number", "declaration", "2", ")", "un", "-", "indents", "The", "line", "number", "information", "can", "help", "debug", "glsl", "compile", "errors", ".", "The", "unindenting", "allows", "you", "to", "type", "the", "shader", "code", "at", "a", "pleasing", "indent", "level", "in", "your", "python", "method", "while", "still", "creating", "an", "unindented", "GLSL", "string", "at", "the", "end", "." ]
python
train
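A sketch of how the helper above is typically called from a function that embeds GLSL; the shader body is an arbitrary example:

def vertex_shader_source():
    return shader_substring("""
        #version 450 core
        void main() {
            gl_Position = vec4(0.0);
        }
        """)

# the returned string is dedented and starts with a '#line <n>' directive
# pointing back at this Python file, so GLSL compile errors are easier to trace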
awacha/sastool
sastool/fitting/fitfunctions/sasbasic.py
https://github.com/awacha/sastool/blob/deaddfa3002f3f6818697e36139633b7e30427a3/sastool/fitting/fitfunctions/sasbasic.py#L157-L182
def GuinierPorodGuinier(q, G, Rg1, alpha, Rg2): """Empirical Guinier-Porod-Guinier scattering Inputs: ------- ``q``: independent variable ``G``: factor for the first Guinier-branch ``Rg1``: the first radius of gyration ``alpha``: the power-law exponent ``Rg2``: the second radius of gyration Formula: -------- ``G*exp(-q^2*Rg1^2/3)`` if ``q<q_sep1``. ``A*q^alpha`` if ``q_sep1 <= q <=q_sep2``. ``G2*exp(-q^2*Rg2^2/3)`` if ``q_sep2<q``. The parameters ``A``,``G2``, ``q_sep1``, ``q_sep2`` are determined from conditions of smoothness at the cross-overs. Literature: ----------- B. Hammouda: A new Guinier-Porod model. J. Appl. Crystallogr. (2010) 43, 716-719. """ return GuinierPorodMulti(q, G, Rg1, alpha, Rg2)
[ "def", "GuinierPorodGuinier", "(", "q", ",", "G", ",", "Rg1", ",", "alpha", ",", "Rg2", ")", ":", "return", "GuinierPorodMulti", "(", "q", ",", "G", ",", "Rg1", ",", "alpha", ",", "Rg2", ")" ]
Empirical Guinier-Porod-Guinier scattering Inputs: ------- ``q``: independent variable ``G``: factor for the first Guinier-branch ``Rg1``: the first radius of gyration ``alpha``: the power-law exponent ``Rg2``: the second radius of gyration Formula: -------- ``G*exp(-q^2*Rg1^2/3)`` if ``q<q_sep1``. ``A*q^alpha`` if ``q_sep1 <= q <=q_sep2``. ``G2*exp(-q^2*Rg2^2/3)`` if ``q_sep2<q``. The parameters ``A``,``G2``, ``q_sep1``, ``q_sep2`` are determined from conditions of smoothness at the cross-overs. Literature: ----------- B. Hammouda: A new Guinier-Porod model. J. Appl. Crystallogr. (2010) 43, 716-719.
[ "Empirical", "Guinier", "-", "Porod", "-", "Guinier", "scattering" ]
python
train
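Illustrative note (not part of the dataset): the wrapped GuinierPorodMulti is not shown in this record, so the sketch below implements the piecewise formula as the docstring states it, deriving A, G2, q_sep1 and q_sep2 from continuity plus slope matching at the cross-overs. It assumes a decaying power law (alpha < 0) and Rg1 > Rg2; sastool's own implementation and sign conventions may differ.

import numpy as np

def guinier_porod_guinier_sketch(q, G, Rg1, alpha, Rg2):
    # Slope matching -2*q**2*Rg**2/3 == alpha fixes the two cross-over points.
    q_sep1 = np.sqrt(-1.5 * alpha) / Rg1
    q_sep2 = np.sqrt(-1.5 * alpha) / Rg2
    # Continuity at each cross-over fixes the power-law prefactor A and G2.
    A = G * np.exp(-q_sep1 ** 2 * Rg1 ** 2 / 3) * q_sep1 ** (-alpha)
    G2 = A * q_sep2 ** alpha * np.exp(q_sep2 ** 2 * Rg2 ** 2 / 3)
    return np.piecewise(
        q,
        [q < q_sep1, (q >= q_sep1) & (q <= q_sep2), q > q_sep2],
        [lambda q: G * np.exp(-q ** 2 * Rg1 ** 2 / 3),
         lambda q: A * q ** alpha,
         lambda q: G2 * np.exp(-q ** 2 * Rg2 ** 2 / 3)])

q = np.logspace(-2, 0, 200)
intensity = guinier_porod_guinier_sketch(q, G=1.0, Rg1=50.0, alpha=-2.0, Rg2=10.0)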
eugene-eeo/graphlite
graphlite/query.py
https://github.com/eugene-eeo/graphlite/blob/8d17e9549ee8610570dcde1b427431a2584395b7/graphlite/query.py#L43-L50
def gen_query(self):
    """
    Generate an SQL query for the edge object.
    """
    return (
        SQL.forwards_relation(self.src, self.rel) if self.dst is None
        else SQL.inverse_relation(self.dst, self.rel)
    )
[ "def", "gen_query", "(", "self", ")", ":", "return", "(", "SQL", ".", "forwards_relation", "(", "self", ".", "src", ",", "self", ".", "rel", ")", "if", "self", ".", "dst", "is", "None", "else", "SQL", ".", "inverse_relation", "(", "self", ".", "dst", ",", "self", ".", "rel", ")", ")" ]
Generate an SQL query for the edge object.
[ "Generate", "an", "SQL", "query", "for", "the", "edge", "object", "." ]
python
train
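Illustrative note (not part of the dataset): graphlite's SQL module is not reproduced in this record, so the sketch below uses made-up query builders just to show the pattern -- a forward query when the destination is left open, an inverse query when the source is. The table and column names are assumptions.

from collections import namedtuple

def forwards_relation(src, rel):
    # Toy stand-in for graphlite's SQL.forwards_relation.
    return 'SELECT dst FROM %s WHERE src = ?' % rel, (src,)

def inverse_relation(dst, rel):
    # Toy stand-in for graphlite's SQL.inverse_relation.
    return 'SELECT src FROM %s WHERE dst = ?' % rel, (dst,)

class Edge(namedtuple('Edge', 'src rel dst')):
    def gen_query(self):
        return (forwards_relation(self.src, self.rel)
                if self.dst is None
                else inverse_relation(self.dst, self.rel))

print(Edge(src=1, rel='knows', dst=None).gen_query())   # forward: whom does 1 know?
print(Edge(src=None, rel='knows', dst=2).gen_query())   # inverse: who knows 2?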
nerdvegas/rez
src/rez/vendor/memcache/memcache.py
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/vendor/memcache/memcache.py#L550-L559
def replace(self, key, val, time=0, min_compress_len=0):
    '''Replace existing key with value.

    Like L{set}, but only stores in memcache if the key already exists.
    The opposite of L{add}.

    @return: Nonzero on success.
    @rtype: int
    '''
    return self._set("replace", key, val, time, min_compress_len)
[ "def", "replace", "(", "self", ",", "key", ",", "val", ",", "time", "=", "0", ",", "min_compress_len", "=", "0", ")", ":", "return", "self", ".", "_set", "(", "\"replace\"", ",", "key", ",", "val", ",", "time", ",", "min_compress_len", ")" ]
Replace existing key with value. Like L{set}, but only stores in memcache if the key already exists. The opposite of L{add}. @return: Nonzero on success. @rtype: int
[ "Replace", "existing", "key", "with", "value", "." ]
python
train
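Illustrative note (not part of the dataset): the add/replace/set contract is easy to mix up, so here is a tiny in-memory stand-in (not python-memcached, no server needed) that demonstrates it; the real client also takes time and min_compress_len arguments and returns nonzero on success.

class ToyCache(object):
    def __init__(self):
        self._store = {}

    def set(self, key, val):
        # Unconditional store.
        self._store[key] = val
        return 1

    def add(self, key, val):
        # Store only if the key does not exist yet.
        return 0 if key in self._store else self.set(key, val)

    def replace(self, key, val):
        # Store only if the key already exists -- the opposite of add().
        return self.set(key, val) if key in self._store else 0

cache = ToyCache()
assert cache.replace('greeting', 'hi') == 0    # nothing to replace yet
assert cache.add('greeting', 'hi') == 1
assert cache.add('greeting', 'hello') == 0     # add refuses to overwrite
assert cache.replace('greeting', 'hello') == 1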
materialsproject/pymatgen
pymatgen/analysis/pourbaix_diagram.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/analysis/pourbaix_diagram.py#L338-L342
def from_dict(cls, d):
    """
    Returns an IonEntry object from a dict.
    """
    return IonEntry(Ion.from_dict(d["ion"]), d["energy"],
                    d.get("name", None))
[ "def", "from_dict", "(", "cls", ",", "d", ")", ":", "return", "IonEntry", "(", "Ion", ".", "from_dict", "(", "d", "[", "\"ion\"", "]", ")", ",", "d", "[", "\"energy\"", "]", ",", "d", ".", "get", "(", "\"name\"", ",", "None", ")", ")" ]
Returns an IonEntry object from a dict.
[ "Returns", "an", "IonEntry", "object", "from", "a", "dict", "." ]
python
train
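Illustrative note (not part of the dataset): pymatgen's Ion and IonEntry classes are not reproduced in this record; the toy class below only mirrors the serialization pattern of the record -- a classmethod that rebuilds an object from a dict, reading optional keys with d.get(). The field layout is a simplified assumption.

class SimpleIonEntry(object):
    def __init__(self, ion_formula, energy, name=None):
        self.ion_formula = ion_formula
        self.energy = energy
        self.name = name if name is not None else ion_formula

    def as_dict(self):
        return {"ion": self.ion_formula, "energy": self.energy, "name": self.name}

    @classmethod
    def from_dict(cls, d):
        # Required keys are indexed directly; the optional one uses .get(),
        # mirroring d.get("name", None) in the record above.
        return cls(d["ion"], d["energy"], d.get("name", None))

entry = SimpleIonEntry.from_dict({"ion": "Ca[2+]", "energy": -1.23})
roundtrip = SimpleIonEntry.from_dict(entry.as_dict())
assert roundtrip.as_dict() == entry.as_dict()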
s0md3v/Photon
core/utils.py
https://github.com/s0md3v/Photon/blob/6a29f2c9782ea9b3dc090db1774a259033600e39/core/utils.py#L15-L23
def regxy(pattern, response, supress_regex, custom):
    """Extract a string based on regex pattern supplied by user."""
    try:
        matches = re.findall(r'%s' % pattern, response)
        for match in matches:
            verb('Custom regex', match)
            custom.add(match)
    except:
        supress_regex = True
[ "def", "regxy", "(", "pattern", ",", "response", ",", "supress_regex", ",", "custom", ")", ":", "try", ":", "matches", "=", "re", ".", "findall", "(", "r'%s'", "%", "pattern", ",", "response", ")", "for", "match", "in", "matches", ":", "verb", "(", "'Custom regex'", ",", "match", ")", "custom", ".", "add", "(", "match", ")", "except", ":", "supress_regex", "=", "True" ]
Extract a string based on regex pattern supplied by user.
[ "Extract", "a", "string", "based", "on", "regex", "pattern", "supplied", "by", "user", "." ]
python
train
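Illustrative note (not part of the dataset): a standalone variant of regxy, with Photon's verb() logger swapped for print(). One quirk worth seeing in isolation: the original rebinds its supress_regex parameter inside the except block, which never reaches the caller, so this sketch returns the flag instead.

import re

def regxy_sketch(pattern, response, custom):
    try:
        for match in re.findall(pattern, response):
            print('Custom regex', match)
            custom.add(match)
        return False                      # pattern worked; nothing to suppress
    except re.error:
        return True                       # bad pattern: tell the caller to stop trying

found = set()
suppress = regxy_sketch(r'api_key=(\w+)', 'GET /?api_key=abc123&page=2', found)
print(suppress, found)                    # False {'abc123'}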
mattupstate/flask-security
flask_security/views.py
https://github.com/mattupstate/flask-security/blob/a401fb47018fbbbe0b899ea55afadfd0e3cd847a/flask_security/views.py#L187-L210
def send_confirmation():
    """View function which sends confirmation instructions."""

    form_class = _security.send_confirmation_form

    if request.is_json:
        form = form_class(MultiDict(request.get_json()))
    else:
        form = form_class()

    if form.validate_on_submit():
        send_confirmation_instructions(form.user)
        if not request.is_json:
            do_flash(*get_message('CONFIRMATION_REQUEST',
                                  email=form.user.email))

    if request.is_json:
        return _render_json(form)

    return _security.render_template(
        config_value('SEND_CONFIRMATION_TEMPLATE'),
        send_confirmation_form=form,
        **_ctx('send_confirmation')
    )
[ "def", "send_confirmation", "(", ")", ":", "form_class", "=", "_security", ".", "send_confirmation_form", "if", "request", ".", "is_json", ":", "form", "=", "form_class", "(", "MultiDict", "(", "request", ".", "get_json", "(", ")", ")", ")", "else", ":", "form", "=", "form_class", "(", ")", "if", "form", ".", "validate_on_submit", "(", ")", ":", "send_confirmation_instructions", "(", "form", ".", "user", ")", "if", "not", "request", ".", "is_json", ":", "do_flash", "(", "*", "get_message", "(", "'CONFIRMATION_REQUEST'", ",", "email", "=", "form", ".", "user", ".", "email", ")", ")", "if", "request", ".", "is_json", ":", "return", "_render_json", "(", "form", ")", "return", "_security", ".", "render_template", "(", "config_value", "(", "'SEND_CONFIRMATION_TEMPLATE'", ")", ",", "send_confirmation_form", "=", "form", ",", "*", "*", "_ctx", "(", "'send_confirmation'", ")", ")" ]
View function which sends confirmation instructions.
[ "View", "function", "which", "sends", "confirmation", "instructions", "." ]
python
train
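Illustrative note (not part of the dataset): the view above serves both JSON clients and browsers from one endpoint. The sketch below shows only that content-negotiation shape in plain Flask (assuming Flask is installed); flask-security's form classes, flash messages and templates are replaced by made-up stand-ins.

from flask import Flask, jsonify, request

app = Flask(__name__)

@app.route('/confirm', methods=['POST'])
def send_confirmation_sketch():
    data = request.get_json(silent=True) or request.form
    email = data.get('email')
    if email:
        pass  # send_confirmation_instructions(...) would be called here
    if request.is_json:
        # JSON clients get a JSON body instead of a flash message.
        return jsonify({'sent': bool(email), 'email': email}), (200 if email else 400)
    if email:
        return 'Confirmation instructions sent to %s' % email
    return 'Missing email', 400

if __name__ == '__main__':
    app.run(debug=True)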
oscarbranson/latools
latools/latools.py
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/latools.py#L2308-L2347
def correlation_plots(self, x_analyte, y_analyte, window=15, filt=True,
                      recalc=False, samples=None, subset=None, outdir=None):
    """
    Plot the local correlation between two analytes.

    Parameters
    ----------
    x_analyte, y_analyte : str
        The names of the x and y analytes to correlate.
    window : int, None
        The rolling window used when calculating the correlation.
    filt : bool
        Whether or not to apply existing filters to the data before
        calculating this filter.
    recalc : bool
        If True, the correlation is re-calculated, even if it is already present.

    Returns
    -------
    None
    """
    if outdir is None:
        outdir = self.report_dir + '/correlations/'
    if not os.path.isdir(outdir):
        os.mkdir(outdir)

    if subset is not None:
        samples = self._get_samples(subset)
    elif samples is None:
        samples = self.subsets['All_Analyses']
    elif isinstance(samples, str):
        samples = [samples]

    with self.pbar.set(total=len(samples), desc='Drawing Plots') as prog:
        for s in samples:
            f, a = self.data[s].correlation_plot(x_analyte=x_analyte, y_analyte=y_analyte,
                                                 window=window, filt=filt, recalc=recalc)
            f.savefig('{}/{}_{}-{}.pdf'.format(outdir, s, x_analyte, y_analyte))
            plt.close(f)
            prog.update()

    return
[ "def", "correlation_plots", "(", "self", ",", "x_analyte", ",", "y_analyte", ",", "window", "=", "15", ",", "filt", "=", "True", ",", "recalc", "=", "False", ",", "samples", "=", "None", ",", "subset", "=", "None", ",", "outdir", "=", "None", ")", ":", "if", "outdir", "is", "None", ":", "outdir", "=", "self", ".", "report_dir", "+", "'/correlations/'", "if", "not", "os", ".", "path", ".", "isdir", "(", "outdir", ")", ":", "os", ".", "mkdir", "(", "outdir", ")", "if", "subset", "is", "not", "None", ":", "samples", "=", "self", ".", "_get_samples", "(", "subset", ")", "elif", "samples", "is", "None", ":", "samples", "=", "self", ".", "subsets", "[", "'All_Analyses'", "]", "elif", "isinstance", "(", "samples", ",", "str", ")", ":", "samples", "=", "[", "samples", "]", "with", "self", ".", "pbar", ".", "set", "(", "total", "=", "len", "(", "samples", ")", ",", "desc", "=", "'Drawing Plots'", ")", "as", "prog", ":", "for", "s", "in", "samples", ":", "f", ",", "a", "=", "self", ".", "data", "[", "s", "]", ".", "correlation_plot", "(", "x_analyte", "=", "x_analyte", ",", "y_analyte", "=", "y_analyte", ",", "window", "=", "window", ",", "filt", "=", "filt", ",", "recalc", "=", "recalc", ")", "f", ".", "savefig", "(", "'{}/{}_{}-{}.pdf'", ".", "format", "(", "outdir", ",", "s", ",", "x_analyte", ",", "y_analyte", ")", ")", "plt", ".", "close", "(", "f", ")", "prog", ".", "update", "(", ")", "return" ]
Plot the local correlation between two analytes.

Parameters
----------
x_analyte, y_analyte : str
    The names of the x and y analytes to correlate.
window : int, None
    The rolling window used when calculating the correlation.
filt : bool
    Whether or not to apply existing filters to the data before
    calculating this filter.
recalc : bool
    If True, the correlation is re-calculated, even if it is already present.

Returns
-------
None
[ "Plot", "the", "local", "correlation", "between", "two", "analytes", "." ]
python
test
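Illustrative note (not part of the dataset): the heavy lifting happens inside latools' per-sample correlation_plot, which needs real analysis data. The pandas sketch below shows just the underlying quantity -- a rolling-window correlation between two signals -- with synthetic data standing in for the two analytes.

import numpy as np
import pandas as pd

rng = np.random.default_rng(0)
n = 500
t = np.linspace(0, 20, n)
x = pd.Series(np.sin(t) + 0.1 * rng.standard_normal(n))          # stand-in for x_analyte
y = pd.Series(np.sin(t + 0.5) + 0.1 * rng.standard_normal(n))    # stand-in for y_analyte

window = 15  # same default as the record above
rolling_r = x.rolling(window, center=True).corr(y)
print(rolling_r.describe())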
openpermissions/perch
perch/model.py
https://github.com/openpermissions/perch/blob/36d78994133918f3c52c187f19e50132960a0156/perch/model.py#L469-L479
def get_parent(self):
    """
    Get the parent resource from the database

    The get, create & update methods will populate the parent for you.
    Use this method in the cases where parent has not been populated.
    """
    if not self._parent:
        self._parent = yield self.parent_resource.get(self.parent_id)

    raise Return(self._parent)
[ "def", "get_parent", "(", "self", ")", ":", "if", "not", "self", ".", "_parent", ":", "self", ".", "_parent", "=", "yield", "self", ".", "parent_resource", ".", "get", "(", "self", ".", "parent_id", ")", "raise", "Return", "(", "self", ".", "_parent", ")" ]
Get the parent resource from the database

The get, create & update methods will populate the parent for you.
Use this method in the cases where parent has not been populated.
[ "Get", "the", "parent", "resource", "from", "the", "database" ]
python
train
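Illustrative note (not part of the dataset): the record above is a Tornado-style coroutine (yield plus raise Return). The asyncio analogue below shows the same lazy caching of the parent lookup using only the standard library; the fake resource class is an assumption, not perch's API.

import asyncio

class Resource:
    def __init__(self, parent_id, parent_resource):
        self.parent_id = parent_id
        self.parent_resource = parent_resource
        self._parent = None

    async def get_parent(self):
        # Fetch once, then serve the cached parent on every later call.
        if not self._parent:
            self._parent = await self.parent_resource.get(self.parent_id)
        return self._parent

class FakeParentResource:
    async def get(self, parent_id):
        await asyncio.sleep(0)            # stands in for a database round trip
        return {'_id': parent_id, 'type': 'organisation'}

async def main():
    r = Resource('org-1', FakeParentResource())
    print(await r.get_parent())
    print(await r.get_parent())           # second call hits the cache

asyncio.run(main())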
wummel/linkchecker
third_party/dnspython/dns/tokenizer.py
https://github.com/wummel/linkchecker/blob/c2ce810c3fb00b895a841a7be6b2e78c64e7b042/third_party/dnspython/dns/tokenizer.py#L522-L531
def get_name(self, origin=None):
    """Read the next token and interpret it as a DNS name.

    @raises dns.exception.SyntaxError:
    @rtype: dns.name.Name object"""

    token = self.get()
    if not token.is_identifier():
        raise dns.exception.SyntaxError('expecting an identifier')
    return dns.name.from_text(token.value, origin)
[ "def", "get_name", "(", "self", ",", "origin", "=", "None", ")", ":", "token", "=", "self", ".", "get", "(", ")", "if", "not", "token", ".", "is_identifier", "(", ")", ":", "raise", "dns", ".", "exception", ".", "SyntaxError", "(", "'expecting an identifier'", ")", "return", "dns", ".", "name", ".", "from_text", "(", "token", ".", "value", ",", "origin", ")" ]
Read the next token and interpret it as a DNS name.

@raises dns.exception.SyntaxError:
@rtype: dns.name.Name object
[ "Read", "the", "next", "token", "and", "interpret", "it", "as", "a", "DNS", "name", "." ]
python
train
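Illustrative note (not part of the dataset): the record is linkchecker's bundled copy of dnspython. Assuming the standalone dnspython package is installed, a tokenizer round trip might look like the sketch below; the zone text and origin are made up.

import dns.name
import dns.tokenizer

tok = dns.tokenizer.Tokenizer('www 3600 IN A 192.0.2.1')
origin = dns.name.from_text('example.com.')

name = tok.get_name(origin)    # relative token 'www' is joined to the origin
ttl = tok.get_ttl()            # next token parsed as a TTL
print(name, ttl)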