Dataset schema:

repo              string   (length 7–55)
path              string   (length 4–223)
url               string   (length 87–315)
code              string   (length 75–104k)
code_tokens       list
docstring         string   (length 1–46.9k)
docstring_tokens  list
language          string   (1 class: python)
partition         string   (3 classes: train/valid/test)
avg_line_len      float64  (7.91–980)
adafruit/Adafruit_Python_GPIO
Adafruit_GPIO/FT232H.py
https://github.com/adafruit/Adafruit_Python_GPIO/blob/a92a23d6b5869663b2bc1ccf78bb11585076a9c4/Adafruit_GPIO/FT232H.py#L364-L370
def output(self, pin, value):
    """Set the specified pin to the provided high/low value.  Value should be
    either HIGH/LOW or a boolean (true = high).
    """
    if pin < 0 or pin > 15:
        raise ValueError('Pin must be between 0 and 15 (inclusive).')
    self._output_pin(pin, value)
    self.mpsse_write_gpio()
[ "def", "output", "(", "self", ",", "pin", ",", "value", ")", ":", "if", "pin", "<", "0", "or", "pin", ">", "15", ":", "raise", "ValueError", "(", "'Pin must be between 0 and 15 (inclusive).'", ")", "self", ".", "_output_pin", "(", "pin", ",", "value", ")", "self", ".", "mpsse_write_gpio", "(", ")" ]
Set the specified pin to the provided high/low value. Value should be either HIGH/LOW or a boolean (true = high).
[ "Set", "the", "specified", "pin", "the", "provided", "high", "/", "low", "value", ".", "Value", "should", "be", "either", "HIGH", "/", "LOW", "or", "a", "boolean", "(", "true", "=", "high", ")", "." ]
python
valid
47.428571
sorgerlab/indra
indra/util/statement_presentation.py
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/util/statement_presentation.py#L40-L109
def group_and_sort_statements(stmt_list, ev_totals=None):
    """Group statements by type and arguments, and sort by prevalence.

    Parameters
    ----------
    stmt_list : list[Statement]
        A list of INDRA statements.
    ev_totals : dict{int: int}
        A dictionary, keyed by statement hash (shallow) with counts of total
        evidence as the values. Including this will allow statements to be
        better sorted.

    Returns
    -------
    sorted_groups : list[tuple]
        A list of tuples containing a sort key, the statement type, and a list
        of statements, also sorted by evidence count, for that key and type.
        The sort key contains a count of statements with those arguments, the
        arguments (normalized strings), the count of statements with those
        arguments and type, and then the statement type.
    """
    def _count(stmt):
        if ev_totals is None:
            return len(stmt.evidence)
        else:
            return ev_totals[stmt.get_hash()]

    stmt_rows = defaultdict(list)
    stmt_counts = defaultdict(lambda: 0)
    arg_counts = defaultdict(lambda: 0)
    for key, s in _get_keyed_stmts(stmt_list):
        # Update the counts, and add key if needed.
        stmt_rows[key].append(s)

        # Keep track of the total evidence counts for this statement and the
        # arguments.
        stmt_counts[key] += _count(s)

        # Add up the counts for the arguments, pairwise for Complexes and
        # Conversions. This allows, for example, a complex between MEK, ERK,
        # and something else to lend weight to the interactions between MEK
        # and ERK.
        if key[0] == 'Conversion':
            subj = key[1]
            for obj in key[2] + key[3]:
                arg_counts[(subj, obj)] += _count(s)
        else:
            arg_counts[key[1:]] += _count(s)

    # Sort the rows by count and agent names.
    def process_rows(stmt_rows):
        for key, stmts in stmt_rows.items():
            verb = key[0]
            inps = key[1:]
            sub_count = stmt_counts[key]
            arg_count = arg_counts[inps]
            if verb == 'Complex' and sub_count == arg_count and len(inps) <= 2:
                if all([len(set(ag.name for ag in s.agent_list())) > 2
                        for s in stmts]):
                    continue
            new_key = (arg_count, inps, sub_count, verb)
            stmts = sorted(stmts,
                           key=lambda s: _count(s) + 1/(1+len(s.agent_list())),
                           reverse=True)
            yield new_key, verb, stmts

    sorted_groups = sorted(process_rows(stmt_rows), key=lambda tpl: tpl[0],
                           reverse=True)
    return sorted_groups
[ "def", "group_and_sort_statements", "(", "stmt_list", ",", "ev_totals", "=", "None", ")", ":", "def", "_count", "(", "stmt", ")", ":", "if", "ev_totals", "is", "None", ":", "return", "len", "(", "stmt", ".", "evidence", ")", "else", ":", "return", "ev_totals", "[", "stmt", ".", "get_hash", "(", ")", "]", "stmt_rows", "=", "defaultdict", "(", "list", ")", "stmt_counts", "=", "defaultdict", "(", "lambda", ":", "0", ")", "arg_counts", "=", "defaultdict", "(", "lambda", ":", "0", ")", "for", "key", ",", "s", "in", "_get_keyed_stmts", "(", "stmt_list", ")", ":", "# Update the counts, and add key if needed.", "stmt_rows", "[", "key", "]", ".", "append", "(", "s", ")", "# Keep track of the total evidence counts for this statement and the", "# arguments.", "stmt_counts", "[", "key", "]", "+=", "_count", "(", "s", ")", "# Add up the counts for the arguments, pairwise for Complexes and", "# Conversions. This allows, for example, a complex between MEK, ERK,", "# and something else to lend weight to the interactions between MEK", "# and ERK.", "if", "key", "[", "0", "]", "==", "'Conversion'", ":", "subj", "=", "key", "[", "1", "]", "for", "obj", "in", "key", "[", "2", "]", "+", "key", "[", "3", "]", ":", "arg_counts", "[", "(", "subj", ",", "obj", ")", "]", "+=", "_count", "(", "s", ")", "else", ":", "arg_counts", "[", "key", "[", "1", ":", "]", "]", "+=", "_count", "(", "s", ")", "# Sort the rows by count and agent names.", "def", "process_rows", "(", "stmt_rows", ")", ":", "for", "key", ",", "stmts", "in", "stmt_rows", ".", "items", "(", ")", ":", "verb", "=", "key", "[", "0", "]", "inps", "=", "key", "[", "1", ":", "]", "sub_count", "=", "stmt_counts", "[", "key", "]", "arg_count", "=", "arg_counts", "[", "inps", "]", "if", "verb", "==", "'Complex'", "and", "sub_count", "==", "arg_count", "and", "len", "(", "inps", ")", "<=", "2", ":", "if", "all", "(", "[", "len", "(", "set", "(", "ag", ".", "name", "for", "ag", "in", "s", ".", "agent_list", "(", ")", ")", ")", ">", "2", "for", "s", "in", "stmts", "]", ")", ":", "continue", "new_key", "=", "(", "arg_count", ",", "inps", ",", "sub_count", ",", "verb", ")", "stmts", "=", "sorted", "(", "stmts", ",", "key", "=", "lambda", "s", ":", "_count", "(", "s", ")", "+", "1", "/", "(", "1", "+", "len", "(", "s", ".", "agent_list", "(", ")", ")", ")", ",", "reverse", "=", "True", ")", "yield", "new_key", ",", "verb", ",", "stmts", "sorted_groups", "=", "sorted", "(", "process_rows", "(", "stmt_rows", ")", ",", "key", "=", "lambda", "tpl", ":", "tpl", "[", "0", "]", ",", "reverse", "=", "True", ")", "return", "sorted_groups" ]
Group statements by type and arguments, and sort by prevalence.

Parameters
----------
stmt_list : list[Statement]
    A list of INDRA statements.
ev_totals : dict{int: int}
    A dictionary, keyed by statement hash (shallow) with counts of total
    evidence as the values. Including this will allow statements to be
    better sorted.

Returns
-------
sorted_groups : list[tuple]
    A list of tuples containing a sort key, the statement type, and a list
    of statements, also sorted by evidence count, for that key and type.
    The sort key contains a count of statements with those arguments, the
    arguments (normalized strings), the count of statements with those
    arguments and type, and then the statement type.
[ "Group", "statements", "by", "type", "and", "arguments", "and", "sort", "by", "prevalence", "." ]
python
train
38.1
dcaune/perseus-lib-python-common
majormode/perseus/model/geolocation.py
https://github.com/dcaune/perseus-lib-python-common/blob/ba48fe0fd9bb4a75b53e7d10c41ada36a72d4496/majormode/perseus/model/geolocation.py#L205-L234
def great_circle_distance(self, other):
    """
    Return the great-circle distance, in meters, from these geographic
    coordinates to the specified other point, i.e., the shortest distance
    over the earth’s surface, ‘as-the-crow-flies’ distance between the
    points, ignoring any natural elevations of the ground.

    Haversine formula::

        R = earth’s radius (mean radius = 6,371km)
        Δlat = lat2 − lat1
        Δlong = long2 − long1
        a = sin²(Δlat / 2) + cos(lat1).cos(lat2).sin²(Δlong/2)
        c = 2.atan2(√a, √(1−a))
        d = R.c

    @param other: a ``GeoPoint`` instance.

    @return: the great-circle distance, in meters, from these geographic
        coordinates to the specified other point.
    """
    distance_latitude = math.radians(abs(self.latitude - other.latitude))
    distance_longitude = math.radians(abs(self.longitude - other.longitude))

    a = math.sin(distance_latitude / 2) * math.sin(distance_latitude / 2) \
        + math.cos(math.radians(self.latitude)) \
        * math.cos(math.radians(other.latitude)) \
        * math.sin(distance_longitude / 2) \
        * math.sin(distance_longitude / 2)

    c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))

    return GeoPoint.EARTH_RADIUS_METERS * c
[ "def", "great_circle_distance", "(", "self", ",", "other", ")", ":", "distance_latitude", "=", "math", ".", "radians", "(", "abs", "(", "self", ".", "latitude", "-", "other", ".", "latitude", ")", ")", "distance_longitude", "=", "math", ".", "radians", "(", "abs", "(", "self", ".", "longitude", "-", "other", ".", "longitude", ")", ")", "a", "=", "math", ".", "sin", "(", "distance_latitude", "/", "2", ")", "*", "math", ".", "sin", "(", "distance_latitude", "/", "2", ")", "+", "math", ".", "cos", "(", "math", ".", "radians", "(", "self", ".", "latitude", ")", ")", "*", "math", ".", "cos", "(", "math", ".", "radians", "(", "other", ".", "latitude", ")", ")", "*", "math", ".", "sin", "(", "distance_longitude", "/", "2", ")", "*", "math", ".", "sin", "(", "distance_longitude", "/", "2", ")", "c", "=", "2", "*", "math", ".", "atan2", "(", "math", ".", "sqrt", "(", "a", ")", ",", "math", ".", "sqrt", "(", "1", "-", "a", ")", ")", "return", "GeoPoint", ".", "EARTH_RADIUS_METERS", "*", "c" ]
Return the great-circle distance, in meters, from these geographic
coordinates to the specified other point, i.e., the shortest distance over
the earth’s surface, ‘as-the-crow-flies’ distance between the points,
ignoring any natural elevations of the ground.

Haversine formula::

    R = earth’s radius (mean radius = 6,371km)
    Δlat = lat2 − lat1
    Δlong = long2 − long1
    a = sin²(Δlat / 2) + cos(lat1).cos(lat2).sin²(Δlong/2)
    c = 2.atan2(√a, √(1−a))
    d = R.c

@param other: a ``GeoPoint`` instance.

@return: the great-circle distance, in meters, from these geographic
    coordinates to the specified other point.
[ "Return", "the", "great", "-", "circle", "distance", "in", "meters", "from", "this", "geographic", "coordinates", "to", "the", "specified", "other", "point", "i", ".", "e", ".", "the", "shortest", "distance", "over", "the", "earth’s", "surface", "‘as", "-", "the", "-", "crow", "-", "flies’", "distance", "between", "the", "points", "ignoring", "any", "natural", "elevations", "of", "the", "ground", "." ]
python
train
44.133333
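The haversine docstring above maps directly onto a few lines of standard-library Python. A minimal standalone sketch, assuming the record's mean earth radius of 6,371 km and taking plain (latitude, longitude) degree arguments instead of GeoPoint instances:

import math

EARTH_RADIUS_METERS = 6371000  # mean radius, as in the record's GeoPoint constant

def haversine(lat1, lon1, lat2, lon2):
    # a = sin^2(dlat/2) + cos(lat1) * cos(lat2) * sin^2(dlong/2)
    d_lat = math.radians(abs(lat1 - lat2))
    d_lon = math.radians(abs(lon1 - lon2))
    a = (math.sin(d_lat / 2) ** 2
         + math.cos(math.radians(lat1)) * math.cos(math.radians(lat2))
         * math.sin(d_lon / 2) ** 2)
    # c = 2 * atan2(sqrt(a), sqrt(1 - a)); d = R * c
    return EARTH_RADIUS_METERS * 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))

# Paris to London comes out around 340 km:
print(haversine(48.8566, 2.3522, 51.5074, -0.1278))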
mlperf/training
translation/tensorflow/transformer/utils/metrics.py
https://github.com/mlperf/training/blob/1c6ae725a81d15437a2b2df05cac0673fde5c3a4/translation/tensorflow/transformer/utils/metrics.py#L112-L130
def get_eval_metrics(logits, labels, params):
    """Return dictionary of model evaluation metrics."""
    metrics = {
        "accuracy": _convert_to_eval_metric(padded_accuracy)(logits, labels),
        "accuracy_top5": _convert_to_eval_metric(padded_accuracy_top5)(
            logits, labels),
        "accuracy_per_sequence": _convert_to_eval_metric(
            padded_sequence_accuracy)(logits, labels),
        "neg_log_perplexity": _convert_to_eval_metric(padded_neg_log_perplexity)(
            logits, labels, params.vocab_size),
        "approx_bleu_score": _convert_to_eval_metric(bleu_score)(logits, labels),
        "rouge_2_fscore": _convert_to_eval_metric(rouge_2_fscore)(logits, labels),
        "rouge_L_fscore": _convert_to_eval_metric(rouge_l_fscore)(logits, labels),
    }

    # Prefix each of the metric names with "metrics/". This allows the metric
    # graphs to display under the "metrics" category in TensorBoard.
    metrics = {"metrics/%s" % k: v for k, v in six.iteritems(metrics)}
    return metrics
[ "def", "get_eval_metrics", "(", "logits", ",", "labels", ",", "params", ")", ":", "metrics", "=", "{", "\"accuracy\"", ":", "_convert_to_eval_metric", "(", "padded_accuracy", ")", "(", "logits", ",", "labels", ")", ",", "\"accuracy_top5\"", ":", "_convert_to_eval_metric", "(", "padded_accuracy_top5", ")", "(", "logits", ",", "labels", ")", ",", "\"accuracy_per_sequence\"", ":", "_convert_to_eval_metric", "(", "padded_sequence_accuracy", ")", "(", "logits", ",", "labels", ")", ",", "\"neg_log_perplexity\"", ":", "_convert_to_eval_metric", "(", "padded_neg_log_perplexity", ")", "(", "logits", ",", "labels", ",", "params", ".", "vocab_size", ")", ",", "\"approx_bleu_score\"", ":", "_convert_to_eval_metric", "(", "bleu_score", ")", "(", "logits", ",", "labels", ")", ",", "\"rouge_2_fscore\"", ":", "_convert_to_eval_metric", "(", "rouge_2_fscore", ")", "(", "logits", ",", "labels", ")", ",", "\"rouge_L_fscore\"", ":", "_convert_to_eval_metric", "(", "rouge_l_fscore", ")", "(", "logits", ",", "labels", ")", ",", "}", "# Prefix each of the metric names with \"metrics/\". This allows the metric", "# graphs to display under the \"metrics\" category in TensorBoard.", "metrics", "=", "{", "\"metrics/%s\"", "%", "k", ":", "v", "for", "k", ",", "v", "in", "six", ".", "iteritems", "(", "metrics", ")", "}", "return", "metrics" ]
Return dictionary of model evaluation metrics.
[ "Return", "dictionary", "of", "model", "evaluation", "metrics", "." ]
python
train
51.578947
cokelaer/spectrum
src/spectrum/window.py
https://github.com/cokelaer/spectrum/blob/bad6c32e3f10e185098748f67bb421b378b06afe/src/spectrum/window.py#L464-L506
def enbw(data):
    r"""Computes the equivalent noise bandwidth

    .. math:: ENBW = N \frac{\sum_{n=1}^{N} w_n^2}{\left(\sum_{n=1}^{N} w_n \right)^2}

    .. doctest::

        >>> from spectrum import create_window, enbw
        >>> w = create_window(64, 'rectangular')
        >>> enbw(w)
        1.0

    The following table contains the ENBW values for some of the
    implemented windows in this module (with N=16384). They have been
    double checked against literature (Source: [Harris]_, [Marple]_).

    If not present, it means that it has not been checked.

    =================== ============ =============
    name                ENBW         literature
    =================== ============ =============
    rectangular         1.           1.
    triangle            1.3334       1.33
    Hann                1.5001       1.5
    Hamming             1.3629       1.36
    blackman            1.7268       1.73
    kaiser              1.7
    blackmanharris,4    2.004        2.
    riesz               1.2000       1.2
    riemann             1.32         1.3
    parzen              1.917        1.92
    tukey 0.25          1.102        1.1
    bohman              1.7858       1.79
    poisson 2           1.3130       1.3
    hanningpoisson 0.5  1.609        1.61
    cauchy              1.489        1.48
    lanczos             1.3
    =================== ============ =============
    """
    N = len(data)
    return N * np.sum(data**2) / np.sum(data)**2
[ "def", "enbw", "(", "data", ")", ":", "N", "=", "len", "(", "data", ")", "return", "N", "*", "np", ".", "sum", "(", "data", "**", "2", ")", "/", "np", ".", "sum", "(", "data", ")", "**", "2" ]
r"""Computes the equivalent noise bandwidth .. math:: ENBW = N \frac{\sum_{n=1}^{N} w_n^2}{\left(\sum_{n=1}^{N} w_n \right)^2} .. doctest:: >>> from spectrum import create_window, enbw >>> w = create_window(64, 'rectangular') >>> enbw(w) 1.0 The following table contains the ENBW values for some of the implemented windows in this module (with N=16384). They have been double checked against litterature (Source: [Harris]_, [Marple]_). If not present, it means that it has not been checked. =================== ============ ============= name ENBW litterature =================== ============ ============= rectangular 1. 1. triangle 1.3334 1.33 Hann 1.5001 1.5 Hamming 1.3629 1.36 blackman 1.7268 1.73 kaiser 1.7 blackmanharris,4 2.004 2. riesz 1.2000 1.2 riemann 1.32 1.3 parzen 1.917 1.92 tukey 0.25 1.102 1.1 bohman 1.7858 1.79 poisson 2 1.3130 1.3 hanningpoisson 0.5 1.609 1.61 cauchy 1.489 1.48 lanczos 1.3 =================== ============ =============
[ "r", "Computes", "the", "equivalent", "noise", "bandwidth" ]
python
valid
33.581395
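The ENBW table above is easy to spot-check without the spectrum package. A minimal sketch using numpy's built-in Hann window in place of the module's create_window (an assumption; the package's own windows may differ slightly at the endpoints):

import numpy as np

def enbw(w):
    # ENBW = N * sum(w_n^2) / (sum(w_n))^2
    n = len(w)
    return n * np.sum(w ** 2) / np.sum(w) ** 2

# A Hann window of length 16384 should land close to the table's 1.5001.
print(enbw(np.hanning(16384)))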
janpipek/physt
physt/special.py
https://github.com/janpipek/physt/blob/6dd441b073514e7728235f50b2352d56aacf38d4/physt/special.py#L380-L412
def spherical_histogram(data=None, radial_bins="numpy", theta_bins=16,
                        phi_bins=16, transformed=False, *args, **kwargs):
    """Facade construction function for the SphericalHistogram.
    """
    dropna = kwargs.pop("dropna", True)
    data = _prepare_data(data, transformed=transformed,
                         klass=SphericalHistogram, dropna=dropna)

    if isinstance(theta_bins, int):
        theta_range = (0, np.pi)
        if "theta_range" in kwargs:
            theta_range = kwargs["theta_range"]
        elif "range" in kwargs:
            theta_range = kwargs["range"][1]
        theta_range = list(theta_range) + [theta_bins + 1]
        theta_bins = np.linspace(*theta_range)

    if isinstance(phi_bins, int):
        phi_range = (0, 2 * np.pi)
        if "phi_range" in kwargs:
            phi_range = kwargs["phi_range"]
        elif "range" in kwargs:
            phi_range = kwargs["range"][2]
        phi_range = list(phi_range) + [phi_bins + 1]
        phi_bins = np.linspace(*phi_range)

    bin_schemas = binnings.calculate_bins_nd(data,
                                             [radial_bins, theta_bins, phi_bins],
                                             *args, check_nan=not dropna,
                                             **kwargs)
    weights = kwargs.pop("weights", None)
    frequencies, errors2, missed = histogram_nd.calculate_frequencies(
        data, ndim=3, binnings=bin_schemas, weights=weights)

    return SphericalHistogram(binnings=bin_schemas, frequencies=frequencies,
                              errors2=errors2, missed=missed)
[ "def", "spherical_histogram", "(", "data", "=", "None", ",", "radial_bins", "=", "\"numpy\"", ",", "theta_bins", "=", "16", ",", "phi_bins", "=", "16", ",", "transformed", "=", "False", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "dropna", "=", "kwargs", ".", "pop", "(", "\"dropna\"", ",", "True", ")", "data", "=", "_prepare_data", "(", "data", ",", "transformed", "=", "transformed", ",", "klass", "=", "SphericalHistogram", ",", "dropna", "=", "dropna", ")", "if", "isinstance", "(", "theta_bins", ",", "int", ")", ":", "theta_range", "=", "(", "0", ",", "np", ".", "pi", ")", "if", "\"theta_range\"", "in", "\"kwargs\"", ":", "theta_range", "=", "kwargs", "[", "\"theta_range\"", "]", "elif", "\"range\"", "in", "\"kwargs\"", ":", "theta_range", "=", "kwargs", "[", "\"range\"", "]", "[", "1", "]", "theta_range", "=", "list", "(", "theta_range", ")", "+", "[", "theta_bins", "+", "1", "]", "theta_bins", "=", "np", ".", "linspace", "(", "*", "theta_range", ")", "if", "isinstance", "(", "phi_bins", ",", "int", ")", ":", "phi_range", "=", "(", "0", ",", "2", "*", "np", ".", "pi", ")", "if", "\"phi_range\"", "in", "\"kwargs\"", ":", "phi_range", "=", "kwargs", "[", "\"phi_range\"", "]", "elif", "\"range\"", "in", "\"kwargs\"", ":", "phi_range", "=", "kwargs", "[", "\"range\"", "]", "[", "2", "]", "phi_range", "=", "list", "(", "phi_range", ")", "+", "[", "phi_bins", "+", "1", "]", "phi_bins", "=", "np", ".", "linspace", "(", "*", "phi_range", ")", "bin_schemas", "=", "binnings", ".", "calculate_bins_nd", "(", "data", ",", "[", "radial_bins", ",", "theta_bins", ",", "phi_bins", "]", ",", "*", "args", ",", "check_nan", "=", "not", "dropna", ",", "*", "*", "kwargs", ")", "weights", "=", "kwargs", ".", "pop", "(", "\"weights\"", ",", "None", ")", "frequencies", ",", "errors2", ",", "missed", "=", "histogram_nd", ".", "calculate_frequencies", "(", "data", ",", "ndim", "=", "3", ",", "binnings", "=", "bin_schemas", ",", "weights", "=", "weights", ")", "return", "SphericalHistogram", "(", "binnings", "=", "bin_schemas", ",", "frequencies", "=", "frequencies", ",", "errors2", "=", "errors2", ",", "missed", "=", "missed", ")" ]
Facade construction function for the SphericalHistogram.
[ "Facade", "construction", "function", "for", "the", "SphericalHistogram", "." ]
python
train
46.727273
apache/incubator-mxnet
python/mxnet/contrib/onnx/onnx2mx/import_onnx.py
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/contrib/onnx/onnx2mx/import_onnx.py#L41-L74
def _convert_operator(self, node_name, op_name, attrs, inputs):
    """Convert from onnx operator to mxnet operator.
    The converter must specify conversions explicitly for
    incompatible names, and apply handlers to operator attributes.

    Parameters
    ----------
    :param node_name : str
        name of the node to be translated.
    :param op_name : str
        Operator name, such as Convolution, FullyConnected
    :param attrs : dict
        Dict of operator attributes
    :param inputs: list
        list of inputs to the operator

    Returns
    -------
    :return mxnet_sym
        Converted mxnet symbol
    """
    if op_name in convert_map:
        op_name, new_attrs, inputs = convert_map[op_name](attrs, inputs, self)
    else:
        raise NotImplementedError("Operator {} not implemented.".format(op_name))
    if isinstance(op_name, string_types):
        new_op = getattr(symbol, op_name, None)
        if not new_op:
            raise RuntimeError("Unable to map op_name {} to sym".format(op_name))
        if node_name is None:
            mxnet_sym = new_op(*inputs, **new_attrs)
        else:
            mxnet_sym = new_op(name=node_name, *inputs, **new_attrs)
        return mxnet_sym
    return op_name
[ "def", "_convert_operator", "(", "self", ",", "node_name", ",", "op_name", ",", "attrs", ",", "inputs", ")", ":", "if", "op_name", "in", "convert_map", ":", "op_name", ",", "new_attrs", ",", "inputs", "=", "convert_map", "[", "op_name", "]", "(", "attrs", ",", "inputs", ",", "self", ")", "else", ":", "raise", "NotImplementedError", "(", "\"Operator {} not implemented.\"", ".", "format", "(", "op_name", ")", ")", "if", "isinstance", "(", "op_name", ",", "string_types", ")", ":", "new_op", "=", "getattr", "(", "symbol", ",", "op_name", ",", "None", ")", "if", "not", "new_op", ":", "raise", "RuntimeError", "(", "\"Unable to map op_name {} to sym\"", ".", "format", "(", "op_name", ")", ")", "if", "node_name", "is", "None", ":", "mxnet_sym", "=", "new_op", "(", "*", "inputs", ",", "*", "*", "new_attrs", ")", "else", ":", "mxnet_sym", "=", "new_op", "(", "name", "=", "node_name", ",", "*", "inputs", ",", "*", "*", "new_attrs", ")", "return", "mxnet_sym", "return", "op_name" ]
Convert from onnx operator to mxnet operator.
The converter must specify conversions explicitly for
incompatible names, and apply handlers to operator attributes.

Parameters
----------
:param node_name : str
    name of the node to be translated.
:param op_name : str
    Operator name, such as Convolution, FullyConnected
:param attrs : dict
    Dict of operator attributes
:param inputs: list
    list of inputs to the operator

Returns
-------
:return mxnet_sym
    Converted mxnet symbol
[ "Convert", "from", "onnx", "operator", "to", "mxnet", "operator", ".", "The", "converter", "must", "specify", "conversions", "explicitly", "for", "incompatible", "name", "and", "apply", "handlers", "to", "operator", "attributes", "." ]
python
train
39.264706
mbedmicro/pyOCD
pyocd/coresight/cortex_m.py
https://github.com/mbedmicro/pyOCD/blob/41a174718a9739f3cbe785c2ba21cb7fd1310c6f/pyocd/coresight/cortex_m.py#L467-L476
def default_software_reset_type(self, reset_type):
    """! @brief Modify the default software reset method.
    @param self
    @param reset_type Must be one of the software reset types:
        Target.ResetType.SW_SYSRESETREQ, Target.ResetType.SW_VECTRESET, or
        Target.ResetType.SW_EMULATED.
    """
    assert isinstance(reset_type, Target.ResetType)
    assert reset_type in (Target.ResetType.SW_SYSRESETREQ,
                          Target.ResetType.SW_VECTRESET,
                          Target.ResetType.SW_EMULATED)
    self._default_software_reset_type = reset_type
[ "def", "default_software_reset_type", "(", "self", ",", "reset_type", ")", ":", "assert", "isinstance", "(", "reset_type", ",", "Target", ".", "ResetType", ")", "assert", "reset_type", "in", "(", "Target", ".", "ResetType", ".", "SW_SYSRESETREQ", ",", "Target", ".", "ResetType", ".", "SW_VECTRESET", ",", "Target", ".", "ResetType", ".", "SW_EMULATED", ")", "self", ".", "_default_software_reset_type", "=", "reset_type" ]
! @brief Modify the default software reset method.
@param self
@param reset_type Must be one of the software reset types:
    Target.ResetType.SW_SYSRESETREQ, Target.ResetType.SW_VECTRESET, or
    Target.ResetType.SW_EMULATED.
[ "!" ]
python
train
57.8
zetaops/pyoko
pyoko/model.py
https://github.com/zetaops/pyoko/blob/236c509ad85640933ac0f89ad8f7ed95f62adf07/pyoko/model.py#L169-L183
def get_choices_for(self, field):
    """
    Get the choices for the given field.

    Args:
        field (str): Name of field.

    Returns:
        List of tuples. [(value, name), ...]
    """
    choices = self._fields[field].choices
    if isinstance(choices, six.string_types):
        return [(d['value'], d['name'])
                for d in self._choices_manager.get_all(choices)]
    else:
        return choices
[ "def", "get_choices_for", "(", "self", ",", "field", ")", ":", "choices", "=", "self", ".", "_fields", "[", "field", "]", ".", "choices", "if", "isinstance", "(", "choices", ",", "six", ".", "string_types", ")", ":", "return", "[", "(", "d", "[", "'value'", "]", ",", "d", "[", "'name'", "]", ")", "for", "d", "in", "self", ".", "_choices_manager", ".", "get_all", "(", "choices", ")", "]", "else", ":", "return", "choices" ]
Get the choices for the given field.

Args:
    field (str): Name of field.

Returns:
    List of tuples. [(value, name), ...]
[ "Get", "the", "choices", "for", "the", "given", "fields", "." ]
python
train
29.333333
pyca/pyopenssl
src/OpenSSL/crypto.py
https://github.com/pyca/pyopenssl/blob/1fbe064c50fd030948141d7d630673761525b0d0/src/OpenSSL/crypto.py#L1628-L1648
def set_time(self, vfy_time):
    """
    Set the time against which the certificates are verified.

    Normally the current time is used.

    .. note::

        For example, you can determine if a certificate was valid at a given
        time.

    .. versionadded:: 17.0.0

    :param datetime vfy_time: The verification time to set on this store.
    :return: ``None`` if the verification time was successfully set.
    """
    param = _lib.X509_VERIFY_PARAM_new()
    param = _ffi.gc(param, _lib.X509_VERIFY_PARAM_free)
    _lib.X509_VERIFY_PARAM_set_time(param, int(vfy_time.strftime('%s')))
    _openssl_assert(_lib.X509_STORE_set1_param(self._store, param) != 0)
[ "def", "set_time", "(", "self", ",", "vfy_time", ")", ":", "param", "=", "_lib", ".", "X509_VERIFY_PARAM_new", "(", ")", "param", "=", "_ffi", ".", "gc", "(", "param", ",", "_lib", ".", "X509_VERIFY_PARAM_free", ")", "_lib", ".", "X509_VERIFY_PARAM_set_time", "(", "param", ",", "int", "(", "vfy_time", ".", "strftime", "(", "'%s'", ")", ")", ")", "_openssl_assert", "(", "_lib", ".", "X509_STORE_set1_param", "(", "self", ".", "_store", ",", "param", ")", "!=", "0", ")" ]
Set the time against which the certificates are verified.

Normally the current time is used.

.. note::

    For example, you can determine if a certificate was valid at a given
    time.

.. versionadded:: 17.0.0

:param datetime vfy_time: The verification time to set on this store.
:return: ``None`` if the verification time was successfully set.
[ "Set", "the", "time", "against", "which", "the", "certificates", "are", "verified", "." ]
python
test
33.52381
mozilla/mozilla-django-oidc
mozilla_django_oidc/auth.py
https://github.com/mozilla/mozilla-django-oidc/blob/e780130deacccbafc85a92f48d1407e042f5f955/mozilla_django_oidc/auth.py#L30-L46
def default_username_algo(email):
    """Generate username for the Django user.

    :arg str/unicode email: the email address to use to generate a username

    :returns: str/unicode

    """
    # bluntly stolen from django-browserid
    # store the username as a base64 encoded sha1 of the email address
    # this protects against data leakage because usernames are often
    # treated as public identifiers (so we can't use the email address).
    username = base64.urlsafe_b64encode(
        hashlib.sha1(force_bytes(email)).digest()
    ).rstrip(b'=')

    return smart_text(username)
[ "def", "default_username_algo", "(", "email", ")", ":", "# bluntly stolen from django-browserid", "# store the username as a base64 encoded sha224 of the email address", "# this protects against data leakage because usernames are often", "# treated as public identifiers (so we can't use the email address).", "username", "=", "base64", ".", "urlsafe_b64encode", "(", "hashlib", ".", "sha1", "(", "force_bytes", "(", "email", ")", ")", ".", "digest", "(", ")", ")", ".", "rstrip", "(", "b'='", ")", "return", "smart_text", "(", "username", ")" ]
Generate username for the Django user.

:arg str/unicode email: the email address to use to generate a username

:returns: str/unicode
[ "Generate", "username", "for", "the", "Django", "user", "." ]
python
train
33.941176
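Outside Django, the same transform needs only the standard library. A standalone sketch, with plain encode/decode standing in for Django's force_bytes and smart_text helpers:

import base64
import hashlib

def default_username_algo(email):
    # A sha1 digest is 20 bytes -> 28 base64 chars with one '=' of padding,
    # so the stripped username is always 27 characters.
    digest = hashlib.sha1(email.encode("utf-8")).digest()
    return base64.urlsafe_b64encode(digest).rstrip(b"=").decode("ascii")

print(default_username_algo("jane@example.com"))  # deterministic per email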
datosgobar/pydatajson
pydatajson/federation.py
https://github.com/datosgobar/pydatajson/blob/3141082ffbaa295e2deaf6ffbbc5a59f5859960e/pydatajson/federation.py#L347-L369
def push_new_themes(catalog, portal_url, apikey):
    """Takes a catalog and writes the taxonomy themes that are not yet
    present.

    Args:
        catalog (DataJson): The source catalog containing the taxonomy.
        portal_url (str): The URL of the target CKAN portal.
        apikey (str): The apikey of a user with permissions to create or
            update themes.
    Returns:
        str: The ids of the created themes.
    """
    ckan_portal = RemoteCKAN(portal_url, apikey=apikey)
    existing_themes = ckan_portal.call_action('group_list')
    new_themes = [theme['id'] for theme in catalog['themeTaxonomy']
                  if theme['id'] not in existing_themes]
    pushed_names = []
    for new_theme in new_themes:
        name = push_theme_to_ckan(
            catalog, portal_url, apikey, identifier=new_theme)
        pushed_names.append(name)
    return pushed_names
[ "def", "push_new_themes", "(", "catalog", ",", "portal_url", ",", "apikey", ")", ":", "ckan_portal", "=", "RemoteCKAN", "(", "portal_url", ",", "apikey", "=", "apikey", ")", "existing_themes", "=", "ckan_portal", ".", "call_action", "(", "'group_list'", ")", "new_themes", "=", "[", "theme", "[", "'id'", "]", "for", "theme", "in", "catalog", "[", "'themeTaxonomy'", "]", "if", "theme", "[", "'id'", "]", "not", "in", "existing_themes", "]", "pushed_names", "=", "[", "]", "for", "new_theme", "in", "new_themes", ":", "name", "=", "push_theme_to_ckan", "(", "catalog", ",", "portal_url", ",", "apikey", ",", "identifier", "=", "new_theme", ")", "pushed_names", ".", "append", "(", "name", ")", "return", "pushed_names" ]
Takes a catalog and writes the taxonomy themes that are not yet present.

Args:
    catalog (DataJson): The source catalog containing the taxonomy.
    portal_url (str): The URL of the target CKAN portal.
    apikey (str): The apikey of a user with permissions to create or
        update themes.
Returns:
    str: The ids of the created themes.
[ "Toma", "un", "catálogo", "y", "escribe", "los", "temas", "de", "la", "taxonomía", "que", "no", "están", "presentes", "." ]
python
train
40.695652
pymc-devs/pymc
pymc/gp/GPutils.py
https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/gp/GPutils.py#L150-L159
def vecs_to_datmesh(x, y):
    """
    Converts input arguments x and y to a 2d meshgrid,
    suitable for calling Means, Covariances and Realizations.
    """
    x, y = meshgrid(x, y)
    out = zeros(x.shape + (2,), dtype=float)
    out[:, :, 0] = x
    out[:, :, 1] = y
    return out
[ "def", "vecs_to_datmesh", "(", "x", ",", "y", ")", ":", "x", ",", "y", "=", "meshgrid", "(", "x", ",", "y", ")", "out", "=", "zeros", "(", "x", ".", "shape", "+", "(", "2", ",", ")", ",", "dtype", "=", "float", ")", "out", "[", ":", ",", ":", ",", "0", "]", "=", "x", "out", "[", ":", ",", ":", ",", "1", "]", "=", "y", "return", "out" ]
Converts input arguments x and y to a 2d meshgrid, suitable for calling Means, Covariances and Realizations.
[ "Converts", "input", "arguments", "x", "and", "y", "to", "a", "2d", "meshgrid", "suitable", "for", "calling", "Means", "Covariances", "and", "Realizations", "." ]
python
train
27.8
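The record above relies on names pulled in elsewhere (meshgrid and zeros presumably come from numpy). A minimal sketch making that assumption explicit and showing the output shape:

import numpy as np

x, y = np.meshgrid([0.0, 1.0, 2.0], [0.0, 1.0])
out = np.zeros(x.shape + (2,), dtype=float)
out[:, :, 0] = x
out[:, :, 1] = y
print(out.shape)  # (2, 3, 2): each grid point carries an (x, y) pair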
joestump/python-oauth2
oauth2/__init__.py
https://github.com/joestump/python-oauth2/blob/b94f69b1ad195513547924e380d9265133e995fa/oauth2/__init__.py#L388-L391
def get_nonoauth_parameters(self):
    """Get any non-OAuth parameters."""
    return dict([(k, v) for k, v in self.items()
                 if not k.startswith('oauth_')])
[ "def", "get_nonoauth_parameters", "(", "self", ")", ":", "return", "dict", "(", "[", "(", "k", ",", "v", ")", "for", "k", ",", "v", "in", "self", ".", "items", "(", ")", "if", "not", "k", ".", "startswith", "(", "'oauth_'", ")", "]", ")" ]
Get any non-OAuth parameters.
[ "Get", "any", "non", "-", "OAuth", "parameters", "." ]
python
train
45.25
elifesciences/proofreader-python
proofreader/utils/print_table.py
https://github.com/elifesciences/proofreader-python/blob/387b3c65ee7777e26b3a7340179dc4ed68f24f58/proofreader/utils/print_table.py#L64-L86
def _row_to_str(self, row):
    # type: (List[str]) -> str
    """Converts a list of strings to a correctly spaced
    and formatted row string.

    e.g. ['some', 'foo', 'bar'] --> '| some | foo | bar |'

    :param row: list
    :return: str
    """
    _row_text = ''

    for col, width in self.col_widths.items():
        _row_text += self.COLUMN_SEP
        l_pad, r_pad = self._split_int(width - len(row[col]))
        _row_text += '{0}{1}{2}'.format(' ' * (l_pad + self.PADDING),
                                        row[col],
                                        ' ' * (r_pad + self.PADDING))

    _row_text += self.COLUMN_SEP + '\n'

    return _row_text
[ "def", "_row_to_str", "(", "self", ",", "row", ")", ":", "# type: (List[str]) -> str", "_row_text", "=", "''", "for", "col", ",", "width", "in", "self", ".", "col_widths", ".", "items", "(", ")", ":", "_row_text", "+=", "self", ".", "COLUMN_SEP", "l_pad", ",", "r_pad", "=", "self", ".", "_split_int", "(", "width", "-", "len", "(", "row", "[", "col", "]", ")", ")", "_row_text", "+=", "'{0}{1}{2}'", ".", "format", "(", "' '", "*", "(", "l_pad", "+", "self", ".", "PADDING", ")", ",", "row", "[", "col", "]", ",", "' '", "*", "(", "r_pad", "+", "self", ".", "PADDING", ")", ")", "_row_text", "+=", "self", ".", "COLUMN_SEP", "+", "'\\n'", "return", "_row_text" ]
Converts a list of strings to a correctly spaced and formatted row string.

e.g. ['some', 'foo', 'bar'] --> '| some | foo | bar |'

:param row: list
:return: str
[ "Converts", "a", "list", "of", "strings", "to", "a", "correctly", "spaced", "and", "formatted", "row", "string", "." ]
python
train
31.391304
agoragames/haigha
haigha/classes/basic_class.py
https://github.com/agoragames/haigha/blob/7b004e1c0316ec14b94fec1c54554654c38b1a25/haigha/classes/basic_class.py#L96-L106
def qos(self, prefetch_size=0, prefetch_count=0, is_global=False):
    '''
    Set QoS on this channel.
    '''
    args = Writer()
    args.write_long(prefetch_size).\
        write_short(prefetch_count).\
        write_bit(is_global)
    self.send_frame(MethodFrame(self.channel_id, 60, 10, args))

    self.channel.add_synchronous_cb(self._recv_qos_ok)
[ "def", "qos", "(", "self", ",", "prefetch_size", "=", "0", ",", "prefetch_count", "=", "0", ",", "is_global", "=", "False", ")", ":", "args", "=", "Writer", "(", ")", "args", ".", "write_long", "(", "prefetch_size", ")", ".", "write_short", "(", "prefetch_count", ")", ".", "write_bit", "(", "is_global", ")", "self", ".", "send_frame", "(", "MethodFrame", "(", "self", ".", "channel_id", ",", "60", ",", "10", ",", "args", ")", ")", "self", ".", "channel", ".", "add_synchronous_cb", "(", "self", ".", "_recv_qos_ok", ")" ]
Set QoS on this channel.
[ "Set", "QoS", "on", "this", "channel", "." ]
python
train
34.636364
kajala/django-jutil
jutil/admin.py
https://github.com/kajala/django-jutil/blob/2abd93ebad51042744eaeb1ee1074ed0eb55ad0c/jutil/admin.py#L65-L73
def kw_changelist_view(self, request: HttpRequest, extra_context=None, **kw):
    """
    Changelist view which allows key-value arguments.

    :param request: HttpRequest
    :param extra_context: Extra context dict
    :param kw: Key-value dict
    :return: See changelist_view()
    """
    return self.changelist_view(request, extra_context)
[ "def", "kw_changelist_view", "(", "self", ",", "request", ":", "HttpRequest", ",", "extra_context", "=", "None", ",", "*", "*", "kw", ")", ":", "return", "self", ".", "changelist_view", "(", "request", ",", "extra_context", ")" ]
Changelist view which allows key-value arguments.

:param request: HttpRequest
:param extra_context: Extra context dict
:param kw: Key-value dict
:return: See changelist_view()
[ "Changelist", "view", "which", "allow", "key", "-", "value", "arguments", ".", ":", "param", "request", ":", "HttpRequest", ":", "param", "extra_context", ":", "Extra", "context", "dict", ":", "param", "kw", ":", "Key", "-", "value", "dict", ":", "return", ":", "See", "changelist_view", "()" ]
python
train
40.888889
bitesofcode/projexui
projexui/widgets/xviewwidget/xviewprofiletoolbar.py
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xviewwidget/xviewprofiletoolbar.py#L461-L493
def setCurrentProfile(self, prof):
    """
    Sets the current profile for this toolbar to the input profile.

    :param prof | <projexui.widgets.xviewwidget.XViewProfile> || <str>
    """
    if prof is None:
        self.clearActive()
        return

    # loop through the profiles looking for a match
    profile = None
    blocked = self.signalsBlocked()
    self.blockSignals(True)
    for act in self._profileGroup.actions():
        if prof in (act.profile(), act.profile().name()):
            act.setChecked(True)
            profile = act.profile()
        else:
            act.setChecked(False)
    self.blockSignals(blocked)

    # update the current profile
    if profile == self._currentProfile and not self._viewWidget.isEmpty():
        return

    self._currentProfile = profile
    if self._viewWidget and profile and not blocked:
        self._viewWidget.restoreProfile(profile)

    if not blocked:
        self.loadProfileFinished.emit(profile)
        self.currentProfileChanged.emit(profile)
[ "def", "setCurrentProfile", "(", "self", ",", "prof", ")", ":", "if", "prof", "is", "None", ":", "self", ".", "clearActive", "(", ")", "return", "# loop through the profiles looking for a match\r", "profile", "=", "None", "blocked", "=", "self", ".", "signalsBlocked", "(", ")", "self", ".", "blockSignals", "(", "True", ")", "for", "act", "in", "self", ".", "_profileGroup", ".", "actions", "(", ")", ":", "if", "prof", "in", "(", "act", ".", "profile", "(", ")", ",", "act", ".", "profile", "(", ")", ".", "name", "(", ")", ")", ":", "act", ".", "setChecked", "(", "True", ")", "profile", "=", "act", ".", "profile", "(", ")", "else", ":", "act", ".", "setChecked", "(", "False", ")", "self", ".", "blockSignals", "(", "blocked", ")", "# update the current profile\r", "if", "profile", "==", "self", ".", "_currentProfile", "and", "not", "self", ".", "_viewWidget", ".", "isEmpty", "(", ")", ":", "return", "self", ".", "_currentProfile", "=", "profile", "if", "self", ".", "_viewWidget", "and", "profile", "and", "not", "blocked", ":", "self", ".", "_viewWidget", ".", "restoreProfile", "(", "profile", ")", "if", "not", "blocked", ":", "self", ".", "loadProfileFinished", ".", "emit", "(", "profile", ")", "self", ".", "currentProfileChanged", ".", "emit", "(", "profile", ")" ]
Sets the current profile for this toolbar to the input profile.

:param prof | <projexui.widgets.xviewwidget.XViewProfile> || <str>
[ "Sets", "the", "current", "profile", "for", "this", "toolbar", "to", "the", "inputed", "profile", ".", ":", "param", "prof", "|", "<projexui", ".", "widgets", ".", "xviewwidget", ".", "XViewProfile", ">", "||", "<str", ">" ]
python
train
35.575758
flo-compbio/goparser
goparser/parser.py
https://github.com/flo-compbio/goparser/blob/5e27d7d04a26a70a1d9dc113357041abff72be3f/goparser/parser.py#L305-L318
def clear_annotation_data(self):
    """Clear annotation data.

    Parameters
    ----------

    Returns
    -------
    None
    """
    self.genes = set()
    self.annotations = []
    self.term_annotations = {}
    self.gene_annotations = {}
[ "def", "clear_annotation_data", "(", "self", ")", ":", "self", ".", "genes", "=", "set", "(", ")", "self", ".", "annotations", "=", "[", "]", "self", ".", "term_annotations", "=", "{", "}", "self", ".", "gene_annotations", "=", "{", "}" ]
Clear annotation data.

Parameters
----------

Returns
-------
None
[ "Clear", "annotation", "data", "." ]
python
train
19.785714
oscarlazoarjona/fast
fast/inhomo.py
https://github.com/oscarlazoarjona/fast/blob/3e5400672af2a7b7cc616e7f4aa10d7672720222/fast/inhomo.py#L442-L496
def fast_maxwell_boltzmann(mass, file_name=None, return_code=False):
    r"""Return a function that returns values of a Maxwell-Boltzmann
    distribution.

    >>> from fast import Atom
    >>> mass = Atom("Rb", 87).mass
    >>> f = fast_maxwell_boltzmann(mass)
    >>> print f(0, 273.15+20)
    0.00238221482739

    >>> import numpy as np
    >>> v = np.linspace(-600, 600, 101)
    >>> dist = f(v, 273.15+20)
    >>> dv = v[1]-v[0]
    >>> print sum(dist)*dv
    0.999704711134

    """
    # We get the mass of the atom.
    code = ""
    code = "def maxwell_boltzmann(v, T):\n"
    code += '    r"""A fast calculation of the'
    code += ' Maxwell-Boltzmann distribution."""\n'
    code += "    if hasattr(v, 'shape'):\n"
    code += "        d = 1\n"
    code += "        m = %s\n" % mass
    code += "        f = np.sqrt(m/2/np.pi/k_B_num/T)**d\n"
    code += "        f = f * np.exp(-m*v**2/2/k_B_num/T)\n"
    code += "        return f\n"
    code += "    elif hasattr(v, '__len__'):\n"
    code += "        d = len(v)\n"
    code += "        m = %s\n" % mass
    code += "        f = np.sqrt(m/2/np.pi/k_B_num/T)**d\n"
    code += "        vsquare = sum([v[i]**2 for i in range(d)])\n"
    code += "        f = f * np.exp(-m*vsquare/2/k_B_num/T)\n"
    code += "        return f\n"
    code += "    else:\n"
    code += "        d = 1\n"
    code += "        m = %s\n" % mass
    code += "        f = np.sqrt(m/2/np.pi/k_B_num/T)**d\n"
    code += "        f = f * np.exp(-m*v**2/2/k_B_num/T)\n"
    code += "        return f\n"

    # We write the code to file if provided, and execute it.
    if file_name is not None:
        f = file(file_name+".py", "w")
        f.write(code)
        f.close()

    maxwell_boltzmann = code
    if not return_code:
        exec maxwell_boltzmann
    return maxwell_boltzmann
[ "def", "fast_maxwell_boltzmann", "(", "mass", ",", "file_name", "=", "None", ",", "return_code", "=", "False", ")", ":", "# We get the mass of the atom.", "code", "=", "\"\"", "code", "=", "\"def maxwell_boltzmann(v, T):\\n\"", "code", "+=", "' r\"\"\"A fast calculation of the'", "code", "+=", "' Maxwell-Boltzmann distribution.\"\"\"\\n'", "code", "+=", "\" if hasattr(v, 'shape'):\\n\"", "code", "+=", "\" d = 1\\n\"", "code", "+=", "\" m = %s\\n\"", "%", "mass", "code", "+=", "\" f = np.sqrt(m/2/np.pi/k_B_num/T)**d\\n\"", "code", "+=", "\" f = f * np.exp(-m*v**2/2/k_B_num/T)\\n\"", "code", "+=", "\" return f\\n\"", "code", "+=", "\" elif hasattr(v, '__len__'):\\n\"", "code", "+=", "\" d = len(v)\\n\"", "code", "+=", "\" m = %s\\n\"", "%", "mass", "code", "+=", "\" f = np.sqrt(m/2/np.pi/k_B_num/T)**d\\n\"", "code", "+=", "\" vsquare = sum([v[i]**2 for i in range(d)])\\n\"", "code", "+=", "\" f = f * np.exp(-m*vsquare/2/k_B_num/T)\\n\"", "code", "+=", "\" return f\\n\"", "code", "+=", "\" else:\\n\"", "code", "+=", "\" d = 1\\n\"", "code", "+=", "\" m = %s\\n\"", "%", "mass", "code", "+=", "\" f = np.sqrt(m/2/np.pi/k_B_num/T)**d\\n\"", "code", "+=", "\" f = f * np.exp(-m*v**2/2/k_B_num/T)\\n\"", "code", "+=", "\" return f\\n\"", "# We write the code to file if provided, and execute it.", "if", "file_name", "is", "not", "None", ":", "f", "=", "file", "(", "file_name", "+", "\".py\"", ",", "\"w\"", ")", "f", ".", "write", "(", "code", ")", "f", ".", "close", "(", ")", "maxwell_boltzmann", "=", "code", "if", "not", "return_code", ":", "exec", "maxwell_boltzmann", "return", "maxwell_boltzmann" ]
r"""Return a function that returns values of a Maxwell-Boltzmann distribution. >>> from fast import Atom >>> mass = Atom("Rb", 87).mass >>> f = fast_maxwell_boltzmann(mass) >>> print f(0, 273.15+20) 0.00238221482739 >>> import numpy as np >>> v = np.linspace(-600, 600, 101) >>> dist = f(v, 273.15+20) >>> dv = v[1]-v[0] >>> print sum(dist)*dv 0.999704711134
[ "r", "Return", "a", "function", "that", "returns", "values", "of", "a", "Maxwell", "-", "Boltzmann", "distribution", "." ]
python
train
32.690909
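The record generates its function as a source string and exec's it (note the Python 2 syntax: exec as a statement, file() for writing). The underlying 1-D density is short enough to express directly; a Python 3 sketch, where the Rb-87 mass (about 1.443e-25 kg) and the Boltzmann constant are filled in by hand rather than taken from the fast package:

import math

K_B = 1.380649e-23  # Boltzmann constant, J/K

def maxwell_boltzmann_1d(v, mass, T):
    # f(v) = sqrt(m / (2*pi*k_B*T)) * exp(-m*v^2 / (2*k_B*T))
    return (math.sqrt(mass / (2 * math.pi * K_B * T))
            * math.exp(-mass * v ** 2 / (2 * K_B * T)))

# Matches the record's doctest value of ~0.00238 at v=0, T=293.15 K:
print(maxwell_boltzmann_1d(0.0, 1.443e-25, 293.15))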
scieloorg/porteira
porteira/porteira.py
https://github.com/scieloorg/porteira/blob/e61f7d248b16848e63b2f85f37125aa77aba0366/porteira/porteira.py#L75-L79
def deserialize(self, xml_input, *args, **kwargs):
    """
    Convert XML to dict object
    """
    return xmltodict.parse(xml_input, *args, **kwargs)
[ "def", "deserialize", "(", "self", ",", "xml_input", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "xmltodict", ".", "parse", "(", "xml_input", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
Convert XML to dict object
[ "Convert", "XML", "to", "dict", "object" ]
python
train
32.8
cohorte/cohorte-herald
python/snippets/herald_irc/bonus.py
https://github.com/cohorte/cohorte-herald/blob/bb3445d0031c8b3abad71e6219cc559b49faa3ee/python/snippets/herald_irc/bonus.py#L20-L27
def cmd_join(self, connection, sender, target, payload):
    """
    Asks the bot to join a channel
    """
    if payload:
        connection.join(payload)
    else:
        raise ValueError("No channel given")
[ "def", "cmd_join", "(", "self", ",", "connection", ",", "sender", ",", "target", ",", "payload", ")", ":", "if", "payload", ":", "connection", ".", "join", "(", "payload", ")", "else", ":", "raise", "ValueError", "(", "\"No channel given\"", ")" ]
Asks the bot to join a channel
[ "Asks", "the", "bot", "to", "join", "a", "channel" ]
python
train
29
ga4gh/ga4gh-server
ga4gh/server/sqlite_backend.py
https://github.com/ga4gh/ga4gh-server/blob/1aa18922ef136db8604f6f098cb1732cba6f2a76/ga4gh/server/sqlite_backend.py#L13-L21
def sqliteRowsToDicts(sqliteRows):
    """
    Unpacks sqlite rows as returned by fetchall
    into an array of simple dicts.

    :param sqliteRows: array of rows returned from fetchall DB call
    :return: array of dicts, keyed by the column names.
    """
    return map(lambda r: dict(zip(r.keys(), r)), sqliteRows)
[ "def", "sqliteRowsToDicts", "(", "sqliteRows", ")", ":", "return", "map", "(", "lambda", "r", ":", "dict", "(", "zip", "(", "r", ".", "keys", "(", ")", ",", "r", ")", ")", ",", "sqliteRows", ")" ]
Unpacks sqlite rows as returned by fetchall into an array of simple dicts.

:param sqliteRows: array of rows returned from fetchall DB call
:return: array of dicts, keyed by the column names.
[ "Unpacks", "sqlite", "rows", "as", "returned", "by", "fetchall", "into", "an", "array", "of", "simple", "dicts", "." ]
python
train
34.666667
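The one-liner above assumes rows that expose .keys(), i.e. a connection configured with sqlite3.Row; on Python 3, map() also returns a lazy iterator rather than a list. A self-contained sketch:

import sqlite3

conn = sqlite3.connect(":memory:")
conn.row_factory = sqlite3.Row  # rows gain .keys(), as the function expects
conn.execute("CREATE TABLE t (id INTEGER, name TEXT)")
conn.execute("INSERT INTO t VALUES (1, 'a')")

rows = conn.execute("SELECT * FROM t").fetchall()
dicts = [dict(zip(r.keys(), r)) for r in rows]  # a list, not a lazy map object
print(dicts)  # [{'id': 1, 'name': 'a'}]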
gwastro/pycbc
pycbc/inject/inject.py
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/inject/inject.py#L660-L699
def make_strain_from_inj_object(self, inj, delta_t, detector_name,
                                distance_scale=1):
    """Make a h(t) strain time-series from an injection object as read from
    an hdf file.

    Parameters
    ----------
    inj : injection object
        The injection object to turn into a strain h(t).
    delta_t : float
        Sample rate to make injection at.
    detector_name : string
        Name of the detector used for projecting injections.
    distance_scale: float, optional
        Factor to scale the distance of an injection with. The default (=1)
        is no scaling.

    Returns
    -------
    signal : float
        h(t) corresponding to the injection.
    """
    detector = Detector(detector_name)

    # compute the waveform time series
    hp, hc = ringdown_td_approximants[inj['approximant']](
        inj, delta_t=delta_t, **self.extra_args)
    hp._epoch += inj['tc']
    hc._epoch += inj['tc']

    if distance_scale != 1:
        hp /= distance_scale
        hc /= distance_scale

    # compute the detector response and add it to the strain
    signal = detector.project_wave(hp, hc, inj['ra'], inj['dec'],
                                   inj['polarization'])

    return signal
[ "def", "make_strain_from_inj_object", "(", "self", ",", "inj", ",", "delta_t", ",", "detector_name", ",", "distance_scale", "=", "1", ")", ":", "detector", "=", "Detector", "(", "detector_name", ")", "# compute the waveform time series", "hp", ",", "hc", "=", "ringdown_td_approximants", "[", "inj", "[", "'approximant'", "]", "]", "(", "inj", ",", "delta_t", "=", "delta_t", ",", "*", "*", "self", ".", "extra_args", ")", "hp", ".", "_epoch", "+=", "inj", "[", "'tc'", "]", "hc", ".", "_epoch", "+=", "inj", "[", "'tc'", "]", "if", "distance_scale", "!=", "1", ":", "hp", "/=", "distance_scale", "hc", "/=", "distance_scale", "# compute the detector response and add it to the strain", "signal", "=", "detector", ".", "project_wave", "(", "hp", ",", "hc", ",", "inj", "[", "'ra'", "]", ",", "inj", "[", "'dec'", "]", ",", "inj", "[", "'polarization'", "]", ")", "return", "signal" ]
Make a h(t) strain time-series from an injection object as read from an
hdf file.

Parameters
----------
inj : injection object
    The injection object to turn into a strain h(t).
delta_t : float
    Sample rate to make injection at.
detector_name : string
    Name of the detector used for projecting injections.
distance_scale: float, optional
    Factor to scale the distance of an injection with. The default (=1)
    is no scaling.

Returns
-------
signal : float
    h(t) corresponding to the injection.
[ "Make", "a", "h", "(", "t", ")", "strain", "time", "-", "series", "from", "an", "injection", "object", "as", "read", "from", "an", "hdf", "file", "." ]
python
train
33
mmcauliffe/Conch-sounds
conch/main.py
https://github.com/mmcauliffe/Conch-sounds/blob/e05535fd08e4b0e47e37a77ef521d05eff1d6bc5/conch/main.py#L76-L130
def acoustic_similarity_directories(directories, analysis_function,
                                    distance_function, stop_check=None,
                                    call_back=None, multiprocessing=True):
    """
    Analyze many directories.

    Parameters
    ----------
    directories : list of str
        List of fully specified paths to the directories to be analyzed
    """
    files = []
    if call_back is not None:
        call_back('Mapping directories...')
        call_back(0, len(directories))
        cur = 0
    for d in directories:
        if not os.path.isdir(d):
            continue
        if stop_check is not None and stop_check():
            return
        if call_back is not None:
            cur += 1
            if cur % 3 == 0:
                call_back(cur)
        files += [os.path.join(d, x) for x in os.listdir(d)
                  if x.lower().endswith('.wav')]
    if len(files) == 0:
        raise (ConchError("The directories specified do not contain any wav files"))

    if call_back is not None:
        call_back('Mapping directories...')
        call_back(0, len(files) * len(files))
        cur = 0
    path_mapping = list()
    for x in files:
        for y in files:
            if stop_check is not None and stop_check():
                return
            if call_back is not None:
                cur += 1
                if cur % 20 == 0:
                    call_back(cur)
            if not x.lower().endswith('.wav'):
                continue
            if not y.lower().endswith('.wav'):
                continue
            if x == y:
                continue
            path_mapping.append((x, y))

    result = acoustic_similarity_mapping(path_mapping, analysis_function,
                                         distance_function, stop_check,
                                         call_back, multiprocessing)
    return result
[ "def", "acoustic_similarity_directories", "(", "directories", ",", "analysis_function", ",", "distance_function", ",", "stop_check", "=", "None", ",", "call_back", "=", "None", ",", "multiprocessing", "=", "True", ")", ":", "files", "=", "[", "]", "if", "call_back", "is", "not", "None", ":", "call_back", "(", "'Mapping directories...'", ")", "call_back", "(", "0", ",", "len", "(", "directories", ")", ")", "cur", "=", "0", "for", "d", "in", "directories", ":", "if", "not", "os", ".", "path", ".", "isdir", "(", "d", ")", ":", "continue", "if", "stop_check", "is", "not", "None", "and", "stop_check", "(", ")", ":", "return", "if", "call_back", "is", "not", "None", ":", "cur", "+=", "1", "if", "cur", "%", "3", "==", "0", ":", "call_back", "(", "cur", ")", "files", "+=", "[", "os", ".", "path", ".", "join", "(", "d", ",", "x", ")", "for", "x", "in", "os", ".", "listdir", "(", "d", ")", "if", "x", ".", "lower", "(", ")", ".", "endswith", "(", "'.wav'", ")", "]", "if", "len", "(", "files", ")", "==", "0", ":", "raise", "(", "ConchError", "(", "\"The directories specified do not contain any wav files\"", ")", ")", "if", "call_back", "is", "not", "None", ":", "call_back", "(", "'Mapping directories...'", ")", "call_back", "(", "0", ",", "len", "(", "files", ")", "*", "len", "(", "files", ")", ")", "cur", "=", "0", "path_mapping", "=", "list", "(", ")", "for", "x", "in", "files", ":", "for", "y", "in", "files", ":", "if", "stop_check", "is", "not", "None", "and", "stop_check", "(", ")", ":", "return", "if", "call_back", "is", "not", "None", ":", "cur", "+=", "1", "if", "cur", "%", "20", "==", "0", ":", "call_back", "(", "cur", ")", "if", "not", "x", ".", "lower", "(", ")", ".", "endswith", "(", "'.wav'", ")", ":", "continue", "if", "not", "y", ".", "lower", "(", ")", ".", "endswith", "(", "'.wav'", ")", ":", "continue", "if", "x", "==", "y", ":", "continue", "path_mapping", ".", "append", "(", "(", "x", ",", "y", ")", ")", "result", "=", "acoustic_similarity_mapping", "(", "path_mapping", ",", "analysis_function", ",", "distance_function", ",", "stop_check", ",", "call_back", ",", "multiprocessing", ")", "return", "result" ]
Analyze many directories.

Parameters
----------
directories : list of str
    List of fully specified paths to the directories to be analyzed
[ "Analyze", "many", "directories", "." ]
python
train
30.618182
SuLab/WikidataIntegrator
wikidataintegrator/wdi_fastrun.py
https://github.com/SuLab/WikidataIntegrator/blob/8ceb2ed1c08fec070ec9edfcf7db7b8691481b62/wikidataintegrator/wdi_fastrun.py#L504-L510
def clear(self):
    """
    convenience function to empty this fastrun container
    """
    self.prop_dt_map = dict()
    self.prop_data = dict()
    self.rev_lookup = defaultdict(set)
[ "def", "clear", "(", "self", ")", ":", "self", ".", "prop_dt_map", "=", "dict", "(", ")", "self", ".", "prop_data", "=", "dict", "(", ")", "self", ".", "rev_lookup", "=", "defaultdict", "(", "set", ")" ]
convenience function to empty this fastrun container
[ "convinience", "function", "to", "empty", "this", "fastrun", "container" ]
python
train
29.142857
aio-libs/aiomcache
aiomcache/client.py
https://github.com/aio-libs/aiomcache/blob/75d44b201aea91bc2856b10940922d5ebfbfcd7b/aiomcache/client.py#L415-L422
def flush_all(self, conn):
    """Its effect is to invalidate all existing items immediately"""
    command = b'flush_all\r\n'
    response = yield from self._execute_simple_command(
        conn, command)

    if const.OK != response:
        raise ClientException('Memcached flush_all failed', response)
[ "def", "flush_all", "(", "self", ",", "conn", ")", ":", "command", "=", "b'flush_all\\r\\n'", "response", "=", "yield", "from", "self", ".", "_execute_simple_command", "(", "conn", ",", "command", ")", "if", "const", ".", "OK", "!=", "response", ":", "raise", "ClientException", "(", "'Memcached flush_all failed'", ",", "response", ")" ]
Its effect is to invalidate all existing items immediately
[ "Its", "effect", "is", "to", "invalidate", "all", "existing", "items", "immediately" ]
python
train
40.25
tanghaibao/jcvi
jcvi/algorithms/formula.py
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/algorithms/formula.py#L96-L114
def spearmanr(x, y):
    """
    Michiel de Hoon's library (available in BioPython or standalone as
    PyCluster) returns Spearman rsb which does include a tie correction.

    >>> x = [5.05, 6.75, 3.21, 2.66]
    >>> y = [1.65, 26.5, -5.93, 7.96]
    >>> z = [1.65, 2.64, 2.64, 6.95]
    >>> round(spearmanr(x, y), 4)
    0.4
    >>> round(spearmanr(x, z), 4)
    -0.6325
    """
    from scipy import stats

    if not x or not y:
        return 0
    corr, pvalue = stats.spearmanr(x, y)
    return corr
[ "def", "spearmanr", "(", "x", ",", "y", ")", ":", "from", "scipy", "import", "stats", "if", "not", "x", "or", "not", "y", ":", "return", "0", "corr", ",", "pvalue", "=", "stats", ".", "spearmanr", "(", "x", ",", "y", ")", "return", "corr" ]
Michiel de Hoon's library (available in BioPython or standalone as
PyCluster) returns Spearman rsb which does include a tie correction.

>>> x = [5.05, 6.75, 3.21, 2.66]
>>> y = [1.65, 26.5, -5.93, 7.96]
>>> z = [1.65, 2.64, 2.64, 6.95]
>>> round(spearmanr(x, y), 4)
0.4
>>> round(spearmanr(x, z), 4)
-0.6325
[ "Michiel", "de", "Hoon", "s", "library", "(", "available", "in", "BioPython", "or", "standalone", "as", "PyCluster", ")", "returns", "Spearman", "rsb", "which", "does", "include", "a", "tie", "correction", "." ]
python
train
25.736842
pyokagan/pyglreg
glreg.py
https://github.com/pyokagan/pyglreg/blob/68fa5a6c6cee8667879840fbbcc7d30f52852915/glreg.py#L355-L362
def get_profiles(self):
    """Returns set of profile names referenced in this Feature

    :returns: set of profile names
    """
    out = set(x.profile for x in self.requires if x.profile)
    out.update(x.profile for x in self.removes if x.profile)
    return out
[ "def", "get_profiles", "(", "self", ")", ":", "out", "=", "set", "(", "x", ".", "profile", "for", "x", "in", "self", ".", "requires", "if", "x", ".", "profile", ")", "out", ".", "update", "(", "x", ".", "profile", "for", "x", "in", "self", ".", "removes", "if", "x", ".", "profile", ")", "return", "out" ]
Returns set of profile names referenced in this Feature

:returns: set of profile names
[ "Returns", "set", "of", "profile", "names", "referenced", "in", "this", "Feature" ]
python
train
35.5
theirc/rapidsms-multitenancy
multitenancy/views.py
https://github.com/theirc/rapidsms-multitenancy/blob/121bd0a628e691a88aade2e10045cba43af2dfcb/multitenancy/views.py#L24-L40
def group_dashboard(request, group_slug):
    """Dashboard for managing a TenantGroup."""
    groups = get_user_groups(request.user)
    group = get_object_or_404(groups, slug=group_slug)
    tenants = get_user_tenants(request.user, group)
    can_edit_group = request.user.has_perm('multitenancy.change_tenantgroup', group)
    count = len(tenants)
    if count == 1:
        # Redirect to the detail page for this tenant
        return redirect(tenants[0])
    context = {
        'group': group,
        'tenants': tenants,
        'count': count,
        'can_edit_group': can_edit_group,
    }
    return render(request, 'multitenancy/group-detail.html', context)
[ "def", "group_dashboard", "(", "request", ",", "group_slug", ")", ":", "groups", "=", "get_user_groups", "(", "request", ".", "user", ")", "group", "=", "get_object_or_404", "(", "groups", ",", "slug", "=", "group_slug", ")", "tenants", "=", "get_user_tenants", "(", "request", ".", "user", ",", "group", ")", "can_edit_group", "=", "request", ".", "user", ".", "has_perm", "(", "'multitenancy.change_tenantgroup'", ",", "group", ")", "count", "=", "len", "(", "tenants", ")", "if", "count", "==", "1", ":", "# Redirect to the detail page for this tenant", "return", "redirect", "(", "tenants", "[", "0", "]", ")", "context", "=", "{", "'group'", ":", "group", ",", "'tenants'", ":", "tenants", ",", "'count'", ":", "count", ",", "'can_edit_group'", ":", "can_edit_group", ",", "}", "return", "render", "(", "request", ",", "'multitenancy/group-detail.html'", ",", "context", ")" ]
Dashboard for managing a TenantGroup.
[ "Dashboard", "for", "managing", "a", "TenantGroup", "." ]
python
train
38.352941
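A hedged sketch of wiring this view into a Django URLconf; the route regex and name are assumptions, not taken from the package.

from django.conf.urls import url
from multitenancy import views

urlpatterns = [
    url(r'^groups/(?P<group_slug>[\w-]+)/$', views.group_dashboard,
        name='group-dashboard'),  # hypothetical route name
]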
Hackerfleet/hfos
hfos/ui/clientmanager.py
https://github.com/Hackerfleet/hfos/blob/b6df14eacaffb6be5c844108873ff8763ec7f0c9/hfos/ui/clientmanager.py#L405-L443
def broadcast(self, event): """Broadcasts an event either to all users or clients, depending on event flag""" try: if event.broadcasttype == "users": if len(self._users) > 0: self.log("Broadcasting to all users:", event.content, lvl=network) for useruuid in self._users.keys(): self.fireEvent( send(useruuid, event.content, sendtype="user")) # else: # self.log("Not broadcasting, no users connected.", # lvl=debug) elif event.broadcasttype == "clients": if len(self._clients) > 0: self.log("Broadcasting to all clients: ", event.content, lvl=network) for client in self._clients.values(): self.fireEvent(write(client.sock, event.content), "wsserver") # else: # self.log("Not broadcasting, no clients # connected.", # lvl=debug) elif event.broadcasttype == "socks": if len(self._sockets) > 0: self.log("Emergency?! Broadcasting to all sockets: ", event.content) for sock in self._sockets: self.fireEvent(write(sock, event.content), "wsserver") # else: # self.log("Not broadcasting, no sockets # connected.", # lvl=debug) except Exception as e: self.log("Error during broadcast: ", e, type(e), lvl=critical)
[ "def", "broadcast", "(", "self", ",", "event", ")", ":", "try", ":", "if", "event", ".", "broadcasttype", "==", "\"users\"", ":", "if", "len", "(", "self", ".", "_users", ")", ">", "0", ":", "self", ".", "log", "(", "\"Broadcasting to all users:\"", ",", "event", ".", "content", ",", "lvl", "=", "network", ")", "for", "useruuid", "in", "self", ".", "_users", ".", "keys", "(", ")", ":", "self", ".", "fireEvent", "(", "send", "(", "useruuid", ",", "event", ".", "content", ",", "sendtype", "=", "\"user\"", ")", ")", "# else:", "# self.log(\"Not broadcasting, no users connected.\",", "# lvl=debug)", "elif", "event", ".", "broadcasttype", "==", "\"clients\"", ":", "if", "len", "(", "self", ".", "_clients", ")", ">", "0", ":", "self", ".", "log", "(", "\"Broadcasting to all clients: \"", ",", "event", ".", "content", ",", "lvl", "=", "network", ")", "for", "client", "in", "self", ".", "_clients", ".", "values", "(", ")", ":", "self", ".", "fireEvent", "(", "write", "(", "client", ".", "sock", ",", "event", ".", "content", ")", ",", "\"wsserver\"", ")", "# else:", "# self.log(\"Not broadcasting, no clients", "# connected.\",", "# lvl=debug)", "elif", "event", ".", "broadcasttype", "==", "\"socks\"", ":", "if", "len", "(", "self", ".", "_sockets", ")", ">", "0", ":", "self", ".", "log", "(", "\"Emergency?! Broadcasting to all sockets: \"", ",", "event", ".", "content", ")", "for", "sock", "in", "self", ".", "_sockets", ":", "self", ".", "fireEvent", "(", "write", "(", "sock", ",", "event", ".", "content", ")", ",", "\"wsserver\"", ")", "# else:", "# self.log(\"Not broadcasting, no sockets", "# connected.\",", "# lvl=debug)", "except", "Exception", "as", "e", ":", "self", ".", "log", "(", "\"Error during broadcast: \"", ",", "e", ",", "type", "(", "e", ")", ",", "lvl", "=", "critical", ")" ]
Broadcasts an event either to all users or clients, depending on event flag
[ "Broadcasts", "an", "event", "either", "to", "all", "users", "or", "clients", "depending", "on", "event", "flag" ]
python
train
47.410256
trailofbits/manticore
manticore/native/cpu/x86.py
https://github.com/trailofbits/manticore/blob/54c5a15b1119c523ae54c09972413e8b97f11629/manticore/native/cpu/x86.py#L4085-L4112
def BTC(cpu, dest, src): """ Bit test and complement. Selects the bit in a bit string (specified with the first operand, called the bit base) at the bit-position designated by the bit offset operand (second operand), stores the value of the bit in the CF flag, and complements the selected bit in the bit string. :param cpu: current CPU. :param dest: bit base operand. :param src: bit offset operand. """ if dest.type == 'register': value = dest.read() pos = src.read() % dest.size cpu.CF = value & (1 << pos) == 1 << pos dest.write(value ^ (1 << pos)) elif dest.type == 'memory': addr, pos = cpu._getMemoryBit(dest, src) base, size, ty = cpu.get_descriptor(cpu.DS) addr += base value = cpu.read_int(addr, 8) cpu.CF = value & (1 << pos) == 1 << pos value = value ^ (1 << pos) cpu.write_int(addr, value, 8) else: raise NotImplementedError(f"Unknown operand for BTC: {dest.type}")
[ "def", "BTC", "(", "cpu", ",", "dest", ",", "src", ")", ":", "if", "dest", ".", "type", "==", "'register'", ":", "value", "=", "dest", ".", "read", "(", ")", "pos", "=", "src", ".", "read", "(", ")", "%", "dest", ".", "size", "cpu", ".", "CF", "=", "value", "&", "(", "1", "<<", "pos", ")", "==", "1", "<<", "pos", "dest", ".", "write", "(", "value", "^", "(", "1", "<<", "pos", ")", ")", "elif", "dest", ".", "type", "==", "'memory'", ":", "addr", ",", "pos", "=", "cpu", ".", "_getMemoryBit", "(", "dest", ",", "src", ")", "base", ",", "size", ",", "ty", "=", "cpu", ".", "get_descriptor", "(", "cpu", ".", "DS", ")", "addr", "+=", "base", "value", "=", "cpu", ".", "read_int", "(", "addr", ",", "8", ")", "cpu", ".", "CF", "=", "value", "&", "(", "1", "<<", "pos", ")", "==", "1", "<<", "pos", "value", "=", "value", "^", "(", "1", "<<", "pos", ")", "cpu", ".", "write_int", "(", "addr", ",", "value", ",", "8", ")", "else", ":", "raise", "NotImplementedError", "(", "f\"Unknown operand for BTC: {dest.type}\"", ")" ]
Bit test and complement. Selects the bit in a bit string (specified with the first operand, called the bit base) at the bit-position designated by the bit offset operand (second operand), stores the value of the bit in the CF flag, and complements the selected bit in the bit string. :param cpu: current CPU. :param dest: bit base operand. :param src: bit offset operand.
[ "Bit", "test", "and", "complement", "." ]
python
valid
39.321429
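A small worked example of the test-and-complement arithmetic above, in plain Python:

value, pos = 0b1010, 1
cf = value & (1 << pos) == 1 << pos  # bit 1 is set, so CF is True
value ^= 1 << pos                    # complement that bit
print(cf, bin(value))                # True 0b1000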
trickvi/economics
economics/cpi.py
https://github.com/trickvi/economics/blob/18da5ce7169472ca1ba6022272a389b933f76edd/economics/cpi.py#L69-L86
def get(self, date=datetime.date.today(), country=None): """ Get the CPI value for a specific time. Defaults to today. This uses the closest method internally but sets limit to one day. """ if not country: country = self.country if country == "all": raise ValueError("You need to specify a country") if not isinstance(date, str) and not isinstance(date, int): date = date.year cpi = self.data.get(country.upper(), {}).get(str(date)) if not cpi: raise ValueError("Missing CPI data for {} for {}".format( country, date)) return CPIResult(date=date, value=cpi)
[ "def", "get", "(", "self", ",", "date", "=", "datetime", ".", "date", ".", "today", "(", ")", ",", "country", "=", "None", ")", ":", "if", "not", "country", ":", "country", "=", "self", ".", "country", "if", "country", "==", "\"all\"", ":", "raise", "ValueError", "(", "\"You need to specify a country\"", ")", "if", "not", "isinstance", "(", "date", ",", "str", ")", "and", "not", "isinstance", "(", "date", ",", "int", ")", ":", "date", "=", "date", ".", "year", "cpi", "=", "self", ".", "data", ".", "get", "(", "country", ".", "upper", "(", ")", ",", "{", "}", ")", ".", "get", "(", "str", "(", "date", ")", ")", "if", "not", "cpi", ":", "raise", "ValueError", "(", "\"Missing CPI data for {} for {}\"", ".", "format", "(", "country", ",", "date", ")", ")", "return", "CPIResult", "(", "date", "=", "date", ",", "value", "=", "cpi", ")" ]
Get the CPI value for a specific time. Defaults to today. This uses the closest method internally but sets limit to one day.
[ "Get", "the", "CPI", "value", "for", "a", "specific", "time", ".", "Defaults", "to", "today", ".", "This", "uses", "the", "closest", "method", "internally", "but", "sets", "limit", "to", "one", "day", "." ]
python
train
38.111111
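The lookup above reduces to a nested dict access keyed by country and year; a sketch with invented figures:

data = {'SWE': {'2014': 313.49}}  # hypothetical CPI table
country, date = 'swe', 2014
value = data.get(country.upper(), {}).get(str(date))
print(value)  # 313.49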
jhorman/pledge
pledge/__init__.py
https://github.com/jhorman/pledge/blob/062ba5b788aeb15e68c85a329374a50b4618544d/pledge/__init__.py#L171-L183
def check(f): """ Wraps the function with a decorator that runs all of the pre/post conditions. """ if hasattr(f, 'wrapped_fn'): return f else: @wraps(f) def decorated(*args, **kwargs): return check_conditions(f, args, kwargs) decorated.wrapped_fn = f return decorated
[ "def", "check", "(", "f", ")", ":", "if", "hasattr", "(", "f", ",", "'wrapped_fn'", ")", ":", "return", "f", "else", ":", "@", "wraps", "(", "f", ")", "def", "decorated", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "check_conditions", "(", "f", ",", "args", ",", "kwargs", ")", "decorated", ".", "wrapped_fn", "=", "f", "return", "decorated" ]
Wraps the function with a decorator that runs all of the pre/post conditions.
[ "Wraps", "the", "function", "with", "a", "decorator", "that", "runs", "all", "of", "the", "pre", "/", "post", "conditions", "." ]
python
train
25.538462
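A small sketch of the idempotency guard above; calling the wrapper itself would route through check_conditions, so only the stashed original is invoked here.

@check
def double(x):
    return 2 * x

assert check(double) is double    # wrapping twice is a no-op
assert double.wrapped_fn(3) == 6  # the raw function is kept on wrapped_fn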
HewlettPackard/python-hpOneView
hpOneView/resources/settings/firmware_bundles.py
https://github.com/HewlettPackard/python-hpOneView/blob/3c6219723ef25e6e0c83d44a89007f89bc325b89/hpOneView/resources/settings/firmware_bundles.py#L48-L62
def upload(self, file_path, timeout=-1): """ Upload an SPP ISO image file or a hotfix file to the appliance. The API supports upload of one hotfix at a time into the system. For the successful upload of a hotfix, ensure its original name and extension are not altered. Args: file_path: Full path to firmware. timeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation in OneView; it just stops waiting for its completion. Returns: dict: Information about the updated firmware bundle. """ return self._client.upload(file_path, timeout=timeout)
[ "def", "upload", "(", "self", ",", "file_path", ",", "timeout", "=", "-", "1", ")", ":", "return", "self", ".", "_client", ".", "upload", "(", "file_path", ",", "timeout", "=", "timeout", ")" ]
Upload an SPP ISO image file or a hotfix file to the appliance. The API supports upload of one hotfix at a time into the system. For the successful upload of a hotfix, ensure its original name and extension are not altered. Args: file_path: Full path to firmware. timeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation in OneView; it just stops waiting for its completion. Returns: dict: Information about the updated firmware bundle.
[ "Upload", "an", "SPP", "ISO", "image", "file", "or", "a", "hotfix", "file", "to", "the", "appliance", ".", "The", "API", "supports", "upload", "of", "one", "hotfix", "at", "a", "time", "into", "the", "system", ".", "For", "the", "successful", "upload", "of", "a", "hotfix", "ensure", "its", "original", "name", "and", "extension", "are", "not", "altered", "." ]
python
train
46.133333
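A hedged call-site sketch; the client construction is omitted and the ISO path is invented.

# oneview_client = OneViewClient(config)  # setup not shown in this record
bundle = oneview_client.firmware_bundles.upload(
    file_path='/tmp/SPP_2019.03.0.iso',  # hypothetical firmware bundle
    timeout=600)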
sorgerlab/indra
indra/preassembler/ontology_mapper.py
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/preassembler/ontology_mapper.py#L102-L215
def _load_wm_map(exclude_auto=None): """Load an ontology map for world models. exclude_auto : None or list[tuple] A list of ontology mappings for which automated mappings should be excluded, e.g. [(HUME, UN)] would result in not using mappings from HUME to UN. """ exclude_auto = [] if not exclude_auto else exclude_auto path_here = os.path.dirname(os.path.abspath(__file__)) ontomap_file = os.path.join(path_here, '../resources/wm_ontomap.tsv') mappings = {} def make_hume_prefix_map(): hume_ont = os.path.join(path_here, '../sources/hume/hume_ontology.rdf') graph = rdflib.Graph() graph.parse(os.path.abspath(hume_ont), format='nt') entry_map = {} for node in graph.all_nodes(): entry = node.split('#')[1] # Handle "event" and other top-level entries if '/' not in entry: entry_map[entry] = None continue parts = entry.split('/') prefix, real_entry = parts[0], '/'.join(parts[1:]) entry_map[real_entry] = prefix return entry_map hume_prefix_map = make_hume_prefix_map() def add_hume_prefix(hume_entry): """We need to do this because the HUME prefixes are missing""" prefix = hume_prefix_map[hume_entry] return '%s/%s' % (prefix, hume_entry) def map_entry(reader, entry): """Remap the readers and entries to match our internal standards.""" if reader == 'eidos': namespace = 'UN' entry = entry.replace(' ', '_') entry_id = entry elif reader == 'BBN': namespace = 'HUME' entry = entry.replace(' ', '_') entry_id = add_hume_prefix(entry) elif reader == 'sofia': namespace = 'SOFIA' # First chop off the Event/Entity prefix parts = entry.split('/')[1:] # Now we split each part by underscore and capitalize # each piece of each part parts = ['_'.join([p.capitalize() for p in part.split('_')]) for part in parts] # Finally we stick the entry back together separated by slashes entry_id = '/'.join(parts) else: return reader, entry return namespace, entry_id with open(ontomap_file, 'r') as fh: for line in fh.readlines(): # Get each entry from the line s, se, t, te, score = line.strip().split('\t') score = float(score) # Map the entries to our internal naming standards s, se = map_entry(s, se) t, te = map_entry(t, te) # Skip automated mappings when they should be excluded if (s, t) not in exclude_auto: # We first do the forward mapping if (s, se, t) in mappings: if mappings[(s, se, t)][1] < score: mappings[(s, se, t)] = ((t, te), score) else: mappings[(s, se, t)] = ((t, te), score) # Then we add the reverse mapping if (t, s) not in exclude_auto: if (t, te, s) in mappings: if mappings[(t, te, s)][1] < score: mappings[(t, te, s)] = ((s, se), score) else: mappings[(t, te, s)] = ((s, se), score) ontomap = [] for s, ts in mappings.items(): ontomap.append(((s[0], s[1]), ts[0], ts[1])) # Now apply the Hume -> Eidos override override_file = os.path.join(path_here, '../resources/wm_ontomap.bbn.tsv') override_mappings = [] with open(override_file, 'r') as fh: for row in fh.readlines(): if 'BBN' not in row: continue # Order is target first, source second _, te, _, se = row.strip().split('\t') # Map the entries to our internal naming standards s = 'HUME' t = 'UN' se = se.replace(' ', '_') te = te.replace(' ', '_') if se.startswith('/'): se = se[1:] override_mappings.append((s, se, t, te)) for s, se, t, te in override_mappings: found = False for idx, ((so, seo), (eo, teo), score) in enumerate(ontomap): if (s, se, t) == (so, seo, eo): # Override when a match is found ontomap[idx] = ((s, se), (t, te), 1.0) found = True if not found: ontomap.append(((s, se), (t, te), 1.0)) return ontomap
[ "def", "_load_wm_map", "(", "exclude_auto", "=", "None", ")", ":", "exclude_auto", "=", "[", "]", "if", "not", "exclude_auto", "else", "exclude_auto", "path_here", "=", "os", ".", "path", ".", "dirname", "(", "os", ".", "path", ".", "abspath", "(", "__file__", ")", ")", "ontomap_file", "=", "os", ".", "path", ".", "join", "(", "path_here", ",", "'../resources/wm_ontomap.tsv'", ")", "mappings", "=", "{", "}", "def", "make_hume_prefix_map", "(", ")", ":", "hume_ont", "=", "os", ".", "path", ".", "join", "(", "path_here", ",", "'../sources/hume/hume_ontology.rdf'", ")", "graph", "=", "rdflib", ".", "Graph", "(", ")", "graph", ".", "parse", "(", "os", ".", "path", ".", "abspath", "(", "hume_ont", ")", ",", "format", "=", "'nt'", ")", "entry_map", "=", "{", "}", "for", "node", "in", "graph", ".", "all_nodes", "(", ")", ":", "entry", "=", "node", ".", "split", "(", "'#'", ")", "[", "1", "]", "# Handle \"event\" and other top-level entries", "if", "'/'", "not", "in", "entry", ":", "entry_map", "[", "entry", "]", "=", "None", "continue", "parts", "=", "entry", ".", "split", "(", "'/'", ")", "prefix", ",", "real_entry", "=", "parts", "[", "0", "]", ",", "'/'", ".", "join", "(", "parts", "[", "1", ":", "]", ")", "entry_map", "[", "real_entry", "]", "=", "prefix", "return", "entry_map", "hume_prefix_map", "=", "make_hume_prefix_map", "(", ")", "def", "add_hume_prefix", "(", "hume_entry", ")", ":", "\"\"\"We need to do this because the HUME prefixes are missing\"\"\"", "prefix", "=", "hume_prefix_map", "[", "hume_entry", "]", "return", "'%s/%s'", "%", "(", "prefix", ",", "hume_entry", ")", "def", "map_entry", "(", "reader", ",", "entry", ")", ":", "\"\"\"Remap the readers and entries to match our internal standards.\"\"\"", "if", "reader", "==", "'eidos'", ":", "namespace", "=", "'UN'", "entry", "=", "entry", ".", "replace", "(", "' '", ",", "'_'", ")", "entry_id", "=", "entry", "elif", "reader", "==", "'BBN'", ":", "namespace", "=", "'HUME'", "entry", "=", "entry", ".", "replace", "(", "' '", ",", "'_'", ")", "entry_id", "=", "add_hume_prefix", "(", "entry", ")", "elif", "reader", "==", "'sofia'", ":", "namespace", "=", "'SOFIA'", "# First chop off the Event/Entity prefix", "parts", "=", "entry", ".", "split", "(", "'/'", ")", "[", "1", ":", "]", "# Now we split each part by underscore and capitalize", "# each piece of each part", "parts", "=", "[", "'_'", ".", "join", "(", "[", "p", ".", "capitalize", "(", ")", "for", "p", "in", "part", ".", "split", "(", "'_'", ")", "]", ")", "for", "part", "in", "parts", "]", "# Finally we stick the entry back together separated by slashes", "entry_id", "=", "'/'", ".", "join", "(", "parts", ")", "else", ":", "return", "reader", ",", "entry", "return", "namespace", ",", "entry_id", "with", "open", "(", "ontomap_file", ",", "'r'", ")", "as", "fh", ":", "for", "line", "in", "fh", ".", "readlines", "(", ")", ":", "# Get each entry from the line", "s", ",", "se", ",", "t", ",", "te", ",", "score", "=", "line", ".", "strip", "(", ")", ".", "split", "(", "'\\t'", ")", "score", "=", "float", "(", "score", ")", "# Map the entries to our internal naming standards", "s", ",", "se", "=", "map_entry", "(", "s", ",", "se", ")", "t", ",", "te", "=", "map_entry", "(", "t", ",", "te", ")", "# Skip automated mappings when they should be excluded", "if", "(", "s", ",", "t", ")", "not", "in", "exclude_auto", ":", "# We first do the forward mapping", "if", "(", "s", ",", "se", ",", "t", ")", "in", "mappings", ":", "if", "mappings", "[", "(", "s", ",", "se", ",", "t", ")", "]", "[", 
"1", "]", "<", "score", ":", "mappings", "[", "(", "s", ",", "se", ",", "t", ")", "]", "=", "(", "(", "t", ",", "te", ")", ",", "score", ")", "else", ":", "mappings", "[", "(", "s", ",", "se", ",", "t", ")", "]", "=", "(", "(", "t", ",", "te", ")", ",", "score", ")", "# Then we add the reverse mapping", "if", "(", "t", ",", "s", ")", "not", "in", "exclude_auto", ":", "if", "(", "t", ",", "te", ",", "s", ")", "in", "mappings", ":", "if", "mappings", "[", "(", "t", ",", "te", ",", "s", ")", "]", "[", "1", "]", "<", "score", ":", "mappings", "[", "(", "t", ",", "te", ",", "s", ")", "]", "=", "(", "(", "s", ",", "se", ")", ",", "score", ")", "else", ":", "mappings", "[", "(", "t", ",", "te", ",", "s", ")", "]", "=", "(", "(", "s", ",", "se", ")", ",", "score", ")", "ontomap", "=", "[", "]", "for", "s", ",", "ts", "in", "mappings", ".", "items", "(", ")", ":", "ontomap", ".", "append", "(", "(", "(", "s", "[", "0", "]", ",", "s", "[", "1", "]", ")", ",", "ts", "[", "0", "]", ",", "ts", "[", "1", "]", ")", ")", "# Now apply the Hume -> Eidos override", "override_file", "=", "os", ".", "path", ".", "join", "(", "path_here", ",", "'../resources/wm_ontomap.bbn.tsv'", ")", "override_mappings", "=", "[", "]", "with", "open", "(", "override_file", ",", "'r'", ")", "as", "fh", ":", "for", "row", "in", "fh", ".", "readlines", "(", ")", ":", "if", "'BBN'", "not", "in", "row", ":", "continue", "# Order is target first, source second", "_", ",", "te", ",", "_", ",", "se", "=", "row", ".", "strip", "(", ")", ".", "split", "(", "'\\t'", ")", "# Map the entries to our internal naming standards", "s", "=", "'HUME'", "t", "=", "'UN'", "se", "=", "se", ".", "replace", "(", "' '", ",", "'_'", ")", "te", "=", "te", ".", "replace", "(", "' '", ",", "'_'", ")", "if", "se", ".", "startswith", "(", "'/'", ")", ":", "se", "=", "se", "[", "1", ":", "]", "override_mappings", ".", "append", "(", "(", "s", ",", "se", ",", "t", ",", "te", ")", ")", "for", "s", ",", "se", ",", "t", ",", "te", "in", "override_mappings", ":", "found", "=", "False", "for", "idx", ",", "(", "(", "so", ",", "seo", ")", ",", "(", "eo", ",", "teo", ")", ",", "score", ")", "in", "enumerate", "(", "ontomap", ")", ":", "if", "(", "s", ",", "se", ",", "t", ")", "==", "(", "so", ",", "seo", ",", "eo", ")", ":", "# Override when a match is found", "ontomap", "[", "idx", "]", "=", "(", "(", "s", ",", "se", ")", ",", "(", "t", ",", "te", ")", ",", "1.0", ")", "found", "=", "True", "if", "not", "found", ":", "ontomap", ".", "append", "(", "(", "(", "s", ",", "se", ")", ",", "(", "t", ",", "te", ")", ",", "1.0", ")", ")", "return", "ontomap" ]
Load an ontology map for world models. exclude_auto : None or list[tuple] A list of ontology mappings for which automated mappings should be excluded, e.g. [(HUME, UN)] would result in not using mappings from HUME to UN.
[ "Load", "an", "ontology", "map", "for", "world", "models", "." ]
python
train
39.675439
PyCQA/astroid
astroid/transforms.py
https://github.com/PyCQA/astroid/blob/e0a298df55b15abcb77c2a93253f5ab7be52d0fb/astroid/transforms.py#L79-L81
def unregister_transform(self, node_class, transform, predicate=None): """Unregister the given transform.""" self.transforms[node_class].remove((transform, predicate))
[ "def", "unregister_transform", "(", "self", ",", "node_class", ",", "transform", ",", "predicate", "=", "None", ")", ":", "self", ".", "transforms", "[", "node_class", "]", ".", "remove", "(", "(", "transform", ",", "predicate", ")", ")" ]
Unregister the given transform.
[ "Unregister", "the", "given", "transform", "." ]
python
train
60.333333
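A minimal register/unregister pairing; the transform function here is hypothetical, and it must be removed with the exact (transform, predicate) pair it was registered with.

import astroid

def _tag(node):  # hypothetical transform
    node._tagged = True
    return node

astroid.MANAGER.register_transform(astroid.FunctionDef, _tag)
# ... later:
astroid.MANAGER.unregister_transform(astroid.FunctionDef, _tag)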
cloud9ers/gurumate
environment/lib/python2.7/site-packages/IPython/core/prompts.py
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/core/prompts.py#L219-L234
def cwd_filt2(depth): """Return the last depth elements of the current working directory. $HOME is always replaced with '~'. If depth==0, the full path is returned.""" full_cwd = os.getcwdu() cwd = full_cwd.replace(HOME,"~").split(os.sep) if '~' in cwd and len(cwd) == depth+1: depth += 1 drivepart = '' if sys.platform == 'win32' and len(cwd) > depth: drivepart = os.path.splitdrive(full_cwd)[0] out = drivepart + '/'.join(cwd[-depth:]) return out or os.sep
[ "def", "cwd_filt2", "(", "depth", ")", ":", "full_cwd", "=", "os", ".", "getcwdu", "(", ")", "cwd", "=", "full_cwd", ".", "replace", "(", "HOME", ",", "\"~\"", ")", ".", "split", "(", "os", ".", "sep", ")", "if", "'~'", "in", "cwd", "and", "len", "(", "cwd", ")", "==", "depth", "+", "1", ":", "depth", "+=", "1", "drivepart", "=", "''", "if", "sys", ".", "platform", "==", "'win32'", "and", "len", "(", "cwd", ")", ">", "depth", ":", "drivepart", "=", "os", ".", "path", ".", "splitdrive", "(", "full_cwd", ")", "[", "0", "]", "out", "=", "drivepart", "+", "'/'", ".", "join", "(", "cwd", "[", "-", "depth", ":", "]", ")", "return", "out", "or", "os", ".", "sep" ]
Return the last depth elements of the current working directory. $HOME is always replaced with '~'. If depth==0, the full path is returned.
[ "Return", "the", "last", "depth", "elements", "of", "the", "current", "working", "directory", "." ]
python
test
31.375
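The slicing at the heart of the function, in isolation (path elements invented):

cwd = ['~', 'projects', 'demo', 'src']
print('/'.join(cwd[-2:]))  # demo/src
print('/'.join(cwd[-0:]))  # ~/projects/demo/src  (depth 0 keeps the full path)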
linkedin/pyexchange
pyexchange/exchange2010/__init__.py
https://github.com/linkedin/pyexchange/blob/d568f4edd326adb451b915ddf66cf1a37820e3ca/pyexchange/exchange2010/__init__.py#L338-L353
def cancel(self): """ Cancels an event in Exchange. :: event = service.calendar().get_event(id='KEY HERE') event.cancel() This will send notifications to anyone who has not declined the meeting. """ if not self.id: raise TypeError(u"You can't delete an event that hasn't been created yet.") self.refresh_change_key() self.service.send(soap_request.delete_event(self)) # TODO rsanders high - check return status to make sure it was actually sent return None
[ "def", "cancel", "(", "self", ")", ":", "if", "not", "self", ".", "id", ":", "raise", "TypeError", "(", "u\"You can't delete an event that hasn't been created yet.\"", ")", "self", ".", "refresh_change_key", "(", ")", "self", ".", "service", ".", "send", "(", "soap_request", ".", "delete_event", "(", "self", ")", ")", "# TODO rsanders high - check return status to make sure it was actually sent", "return", "None" ]
Cancels an event in Exchange. :: event = service.calendar().get_event(id='KEY HERE') event.cancel() This will send notifications to anyone who has not declined the meeting.
[ "Cancels", "an", "event", "in", "Exchange", ".", "::" ]
python
train
31.4375
elastic/elasticsearch-dsl-py
elasticsearch_dsl/index.py
https://github.com/elastic/elasticsearch-dsl-py/blob/874b52472fc47b601de0e5fa0e4300e21aff0085/elasticsearch_dsl/index.py#L509-L517
def recovery(self, using=None, **kwargs): """ The indices recovery API provides insight into on-going shard recoveries for the index. Any additional keyword arguments will be passed to ``Elasticsearch.indices.recovery`` unchanged. """ return self._get_connection(using).indices.recovery(index=self._name, **kwargs)
[ "def", "recovery", "(", "self", ",", "using", "=", "None", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "_get_connection", "(", "using", ")", ".", "indices", ".", "recovery", "(", "index", "=", "self", ".", "_name", ",", "*", "*", "kwargs", ")" ]
The indices recovery API provides insight into on-going shard recoveries for the index. Any additional keyword arguments will be passed to ``Elasticsearch.indices.recovery`` unchanged.
[ "The", "indices", "recovery", "API", "provides", "insight", "into", "on", "-", "going", "shard", "recoveries", "for", "the", "index", "." ]
python
train
40.333333
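A hedged usage sketch; the host and index name are assumptions.

from elasticsearch_dsl import Index, connections

connections.create_connection(hosts=['localhost'])  # assumed local cluster
status = Index('my-index').recovery()                # hypothetical index name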
portfors-lab/sparkle
sparkle/gui/stim/component_label.py
https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/gui/stim/component_label.py#L37-L48
def getLabelByName(self, name):
    """Gets a label widget by its component name

    :param name: name of the AbstractStimulusComponent which this label is named after
    :type name: str
    :returns: :class:`DragLabel<sparkle.gui.drag_label.DragLabel>`
    """
    name = name.lower()
    if name in self.stimLabels:
        return self.stimLabels[name]
    else:
        return None
[ "def", "getLabelByName", "(", "self", ",", "name", ")", ":", "name", "=", "name", ".", "lower", "(", ")", "if", "name", "in", "self", ".", "stimLabels", ":", "return", "self", ".", "stimLabels", "[", "name", "]", "else", ":", "return", "None" ]
Gets a label widget by its component name

:param name: name of the AbstractStimulusComponent which this label is named after
:type name: str
:returns: :class:`DragLabel<sparkle.gui.drag_label.DragLabel>`
[ "Gets", "a", "label", "widget", "by", "it", "component", "name" ]
python
train
34.5
Chilipp/psyplot
psyplot/project.py
https://github.com/Chilipp/psyplot/blob/75a0a15a9a1dd018e79d2df270d56c4bf5f311d5/psyplot/project.py#L1976-L1995
def _gen_doc(cls, summary, full_name, identifier, example_call, doc_str, show_examples): """Generate the documentation docstring for a PlotMethod""" # leave out the first argument example_call = ', '.join(map(str.strip, example_call.split(',')[1:])) ret = docstrings.dedents(""" %s This plotting method adds data arrays and plots them via :class:`%s` plotters To plot a variable in this dataset, type:: >>> ds.psy.plot.%s(%s) %s""" % (summary, full_name, identifier, example_call, doc_str)) if show_examples: ret += '\n\n' + cls._gen_examples(identifier) return ret
[ "def", "_gen_doc", "(", "cls", ",", "summary", ",", "full_name", ",", "identifier", ",", "example_call", ",", "doc_str", ",", "show_examples", ")", ":", "# leave out the first argument", "example_call", "=", "', '", ".", "join", "(", "map", "(", "str", ".", "strip", ",", "example_call", ".", "split", "(", "','", ")", "[", "1", ":", "]", ")", ")", "ret", "=", "docstrings", ".", "dedents", "(", "\"\"\"\n %s\n\n This plotting method adds data arrays and plots them via\n :class:`%s` plotters\n\n To plot a variable in this dataset, type::\n\n >>> ds.psy.plot.%s(%s)\n\n %s\"\"\"", "%", "(", "summary", ",", "full_name", ",", "identifier", ",", "example_call", ",", "doc_str", ")", ")", "if", "show_examples", ":", "ret", "+=", "'\\n\\n'", "+", "cls", ".", "_gen_examples", "(", "identifier", ")", "return", "ret" ]
Generate the documentation docstring for a PlotMethod
[ "Generate", "the", "documentation", "docstring", "for", "a", "PlotMethod" ]
python
train
35.2
andy-z/ged4py
ged4py/detail/name.py
https://github.com/andy-z/ged4py/blob/d0e0cceaadf0a84cbf052705e3c27303b12e1757/ged4py/detail/name.py#L38-L85
def parse_name_altree(record):
    """Parse NAME structure assuming ALTREE dialect.

    In ALTREE dialect maiden name (if present) is saved as SURN sub-record
    and is also appended to family name in parens. Given name is saved in
    GIVN sub-record. A few examples:

    No maiden name:
        1 NAME John /Smith/
        2 GIVN John

    With maiden name:
        1 NAME Jane /Smith (Ivanova)/
        2 GIVN Jane
        2 SURN Ivanova

    No maiden name:
        1 NAME Mers /Daimler (-Benz)/
        2 GIVN Mers

    Because family name can also contain parens it's not enough to parse
    family name and guess maiden name from it; we also have to check for
    SURN record.

    ALTREE also replaces empty names with question mark; we undo that too.

    :param record: NAME record
    :return: tuple with 3 or 4 elements, first three elements of tuple are
        the same as returned from :py:meth:`split_name` method, fourth
        element (if present) denotes maiden name.
    """
    name_tuple = split_name(record.value)
    if name_tuple[1] == '?':
        name_tuple = (name_tuple[0], '', name_tuple[2])
    maiden = record.sub_tag_value("SURN")
    if maiden:
        # strip "(maiden)" from family name
        ending = '(' + maiden + ')'
        surname = name_tuple[1]
        if surname.endswith(ending):
            surname = surname[:-len(ending)].rstrip()
        if surname == '?':
            surname = ''
        name_tuple = (name_tuple[0], surname, name_tuple[2], maiden)
    return name_tuple
[ "def", "parse_name_altree", "(", "record", ")", ":", "name_tuple", "=", "split_name", "(", "record", ".", "value", ")", "if", "name_tuple", "[", "1", "]", "==", "'?'", ":", "name_tuple", "=", "(", "name_tuple", "[", "0", "]", ",", "''", ",", "name_tuple", "[", "2", "]", ")", "maiden", "=", "record", ".", "sub_tag_value", "(", "\"SURN\"", ")", "if", "maiden", ":", "# strip \"(maiden)\" from family name", "ending", "=", "'('", "+", "maiden", "+", "')'", "surname", "=", "name_tuple", "[", "1", "]", "if", "surname", ".", "endswith", "(", "ending", ")", ":", "surname", "=", "surname", "[", ":", "-", "len", "(", "ending", ")", "]", ".", "rstrip", "(", ")", "if", "surname", "==", "'?'", ":", "surname", "=", "''", "name_tuple", "=", "(", "name_tuple", "[", "0", "]", ",", "surname", ",", "name_tuple", "[", "2", "]", ",", "maiden", ")", "return", "name_tuple" ]
Parse NAME structure assuming ALTREE dialect.

In ALTREE dialect maiden name (if present) is saved as SURN sub-record
and is also appended to family name in parens. Given name is saved in
GIVN sub-record. A few examples:

No maiden name:
    1 NAME John /Smith/
    2 GIVN John

With maiden name:
    1 NAME Jane /Smith (Ivanova)/
    2 GIVN Jane
    2 SURN Ivanova

No maiden name:
    1 NAME Mers /Daimler (-Benz)/
    2 GIVN Mers

Because family name can also contain parens it's not enough to parse
family name and guess maiden name from it; we also have to check for
SURN record.

ALTREE also replaces empty names with question mark; we undo that too.

:param record: NAME record
:return: tuple with 3 or 4 elements, first three elements of tuple are
    the same as returned from :py:meth:`split_name` method, fourth
    element (if present) denotes maiden name.
[ "Parse", "NAME", "structure", "assuming", "ALTREE", "dialect", "." ]
python
train
31.020833
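The "(maiden)" stripping above, in isolation:

surname, maiden = 'Smith (Ivanova)', 'Ivanova'
ending = '(' + maiden + ')'
if surname.endswith(ending):
    surname = surname[:-len(ending)].rstrip()
print(surname)  # Smith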
OpenTreeOfLife/peyotl
peyotl/collections_store/collections_umbrella.py
https://github.com/OpenTreeOfLife/peyotl/blob/5e4e52a0fdbd17f490aa644ad79fda6ea2eda7c0/peyotl/collections_store/collections_umbrella.py#L269-L280
def _coerce_json_to_collection(self, json_repr): """Use to ensure that a JSON string (if found) is parsed to the equivalent dict in python. If the incoming value is already parsed, do nothing. If a string fails to parse, return None.""" if isinstance(json_repr, dict): collection = json_repr else: try: collection = anyjson.loads(json_repr) except: _LOG.warn('> invalid JSON (failed anyjson parsing)') return None return collection
[ "def", "_coerce_json_to_collection", "(", "self", ",", "json_repr", ")", ":", "if", "isinstance", "(", "json_repr", ",", "dict", ")", ":", "collection", "=", "json_repr", "else", ":", "try", ":", "collection", "=", "anyjson", ".", "loads", "(", "json_repr", ")", "except", ":", "_LOG", ".", "warn", "(", "'> invalid JSON (failed anyjson parsing)'", ")", "return", "None", "return", "collection" ]
Use to ensure that a JSON string (if found) is parsed to the equivalent dict in python. If the incoming value is already parsed, do nothing. If a string fails to parse, return None.
[ "Use", "to", "ensure", "that", "a", "JSON", "string", "(", "if", "found", ")", "is", "parsed", "to", "the", "equivalent", "dict", "in", "python", ".", "If", "the", "incoming", "value", "is", "already", "parsed", "do", "nothing", ".", "If", "a", "string", "fails", "to", "parse", "return", "None", "." ]
python
train
45.333333
bitesofcode/xqt
xqt/gui/xfiledialog.py
https://github.com/bitesofcode/xqt/blob/befa649a2f2104a20d49c8c78ffdba5907fd94d2/xqt/gui/xfiledialog.py#L24-L39
def getDirectory(*args): """ Normalizes the getDirectory method between the different Qt wrappers. :return (<str> filename, <bool> accepted) """ result = QtGui.QFileDialog.getDirectory(*args) # PyQt4 returns just a string if type(result) is not tuple: return result, bool(result) # PySide returns a tuple of str, bool else: return result
[ "def", "getDirectory", "(", "*", "args", ")", ":", "result", "=", "QtGui", ".", "QFileDialog", ".", "getDirectory", "(", "*", "args", ")", "# PyQt4 returns just a string\r", "if", "type", "(", "result", ")", "is", "not", "tuple", ":", "return", "result", ",", "bool", "(", "result", ")", "# PySide returns a tuple of str, bool\r", "else", ":", "return", "result" ]
Normalizes the getDirectory method between the different Qt wrappers. :return (<str> filename, <bool> accepted)
[ "Normalizes", "the", "getDirectory", "method", "between", "the", "different", "Qt", "wrappers", ".", ":", "return", "(", "<str", ">", "filename", "<bool", ">", "accepted", ")" ]
python
train
29.5
gbiggs/rtsprofile
rtsprofile/component.py
https://github.com/gbiggs/rtsprofile/blob/fded6eddcb0b25fe9808b1b12336a4413ea00905/rtsprofile/component.py#L529-L540
def get_configuration_set_by_id(self, id): '''Finds a configuration set in the component by its ID. @param id The ID of the configuration set to search for. @return The ConfigurationSet object for the set, or None if it was not found. ''' for cs in self.configuration_sets: if cs.id == id: return cs return None
[ "def", "get_configuration_set_by_id", "(", "self", ",", "id", ")", ":", "for", "cs", "in", "self", ".", "configuration_sets", ":", "if", "cs", ".", "id", "==", "id", ":", "return", "cs", "return", "None" ]
Finds a configuration set in the component by its ID. @param id The ID of the configuration set to search for. @return The ConfigurationSet object for the set, or None if it was not found.
[ "Finds", "a", "configuration", "set", "in", "the", "component", "by", "its", "ID", "." ]
python
train
32.166667
hyperledger/indy-plenum
plenum/server/node.py
https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/plenum/server/node.py#L3403-L3421
def onBatchRejected(self, ledger_id):
    """
    A batch of requests has been rejected; if stateRoot is None, reject
    the current batch.
    :param ledger_id:
    :param stateRoot: state root after the batch was created
    :return:
    """
    if ledger_id == POOL_LEDGER_ID:
        if isinstance(self.poolManager, TxnPoolManager):
            self.get_req_handler(POOL_LEDGER_ID).onBatchRejected()
    elif self.get_req_handler(ledger_id):
        self.get_req_handler(ledger_id).onBatchRejected()
    else:
        logger.debug('{} did not know how to handle for ledger {}'.format(self, ledger_id))
    self.audit_handler.post_batch_rejected(ledger_id)
    self.execute_hook(NodeHooks.POST_BATCH_REJECTED, ledger_id)
[ "def", "onBatchRejected", "(", "self", ",", "ledger_id", ")", ":", "if", "ledger_id", "==", "POOL_LEDGER_ID", ":", "if", "isinstance", "(", "self", ".", "poolManager", ",", "TxnPoolManager", ")", ":", "self", ".", "get_req_handler", "(", "POOL_LEDGER_ID", ")", ".", "onBatchRejected", "(", ")", "elif", "self", ".", "get_req_handler", "(", "ledger_id", ")", ":", "self", ".", "get_req_handler", "(", "ledger_id", ")", ".", "onBatchRejected", "(", ")", "else", ":", "logger", ".", "debug", "(", "'{} did not know how to handle for ledger {}'", ".", "format", "(", "self", ",", "ledger_id", ")", ")", "self", ".", "audit_handler", ".", "post_batch_rejected", "(", "ledger_id", ")", "self", ".", "execute_hook", "(", "NodeHooks", ".", "POST_BATCH_REJECTED", ",", "ledger_id", ")" ]
A batch of requests has been rejected; if stateRoot is None, reject
the current batch.
:param ledger_id:
:param stateRoot: state root after the batch was created
:return:
[ "A", "batch", "of", "requests", "has", "been", "rejected", "if", "stateRoot", "is", "None", "reject", "the", "current", "batch", ".", ":", "param", "ledger_id", ":", ":", "param", "stateRoot", ":", "state", "root", "after", "the", "batch", "was", "created", ":", "return", ":" ]
python
train
40.631579
archman/beamline
beamline/mathutils.py
https://github.com/archman/beamline/blob/417bc5dc13e754bc89d246427984590fced64d07/beamline/mathutils.py#L437-L447
def getR(self, i=5, j=6):
    """ return transport matrix element, indexed by i, j;
    by default, return dispersion value, i.e. getR(5,6) in [m]

    :param i: row index, with initial index of 1
    :param j: col index, with initial index of 1
    :return: transport matrix element
    """
    if self.refresh is True:
        self.getMatrix()
    return self.transM[i - 1, j - 1]
[ "def", "getR", "(", "self", ",", "i", "=", "5", ",", "j", "=", "6", ")", ":", "if", "self", ".", "refresh", "is", "True", ":", "self", ".", "getMatrix", "(", ")", "return", "self", ".", "transM", "[", "i", "-", "1", ",", "j", "-", "1", "]" ]
return transport matrix element, indexed by i, j;
by default, return dispersion value, i.e. getR(5,6) in [m]

:param i: row index, with initial index of 1
:param j: col index, with initial index of 1
:return: transport matrix element
[ "return", "transport", "matrix", "element", "indexed", "by", "i", "j", "be", "default", "return", "dispersion", "value", "i", ".", "e", ".", "getR", "(", "5", "6", ")", "in", "[", "m", "]" ]
python
train
37
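The 1-based indexing above maps onto 0-based numpy indexing; a quick sketch:

import numpy as np

transM = np.eye(6)           # stand-in transport matrix
i, j = 5, 6
print(transM[i - 1, j - 1])  # R56, which is 0.0 for the identity matrix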
fprimex/zdesk
zdesk/zdesk_api.py
https://github.com/fprimex/zdesk/blob/851611c13b4d530e9df31390b3ec709baf0a0188/zdesk/zdesk_api.py#L1150-L1154
def dynamic_content_item_variant_delete(self, item_id, id, **kwargs): "https://developer.zendesk.com/rest_api/docs/core/dynamic_content#delete-variant" api_path = "/api/v2/dynamic_content/items/{item_id}/variants/{id}.json" api_path = api_path.format(item_id=item_id, id=id) return self.call(api_path, method="DELETE", **kwargs)
[ "def", "dynamic_content_item_variant_delete", "(", "self", ",", "item_id", ",", "id", ",", "*", "*", "kwargs", ")", ":", "api_path", "=", "\"/api/v2/dynamic_content/items/{item_id}/variants/{id}.json\"", "api_path", "=", "api_path", ".", "format", "(", "item_id", "=", "item_id", ",", "id", "=", "id", ")", "return", "self", ".", "call", "(", "api_path", ",", "method", "=", "\"DELETE\"", ",", "*", "*", "kwargs", ")" ]
https://developer.zendesk.com/rest_api/docs/core/dynamic_content#delete-variant
[ "https", ":", "//", "developer", ".", "zendesk", ".", "com", "/", "rest_api", "/", "docs", "/", "core", "/", "dynamic_content#delete", "-", "variant" ]
python
train
71.2
iotile/coretools
iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Tool/GettextCommon.py
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Tool/GettextCommon.py#L131-L151
def _read_linguas_from_files(env, linguas_files=None): """ Parse `LINGUAS` file and return list of extracted languages """ import SCons.Util import SCons.Environment global _re_comment global _re_lang if not SCons.Util.is_List(linguas_files) \ and not SCons.Util.is_String(linguas_files) \ and not isinstance(linguas_files, SCons.Node.FS.Base) \ and linguas_files: # If, linguas_files==True or such, then read 'LINGUAS' file. linguas_files = ['LINGUAS'] if linguas_files is None: return [] fnodes = env.arg2nodes(linguas_files) linguas = [] for fnode in fnodes: contents = _re_comment.sub("", fnode.get_text_contents()) ls = [l for l in _re_lang.findall(contents) if l] linguas.extend(ls) return linguas
[ "def", "_read_linguas_from_files", "(", "env", ",", "linguas_files", "=", "None", ")", ":", "import", "SCons", ".", "Util", "import", "SCons", ".", "Environment", "global", "_re_comment", "global", "_re_lang", "if", "not", "SCons", ".", "Util", ".", "is_List", "(", "linguas_files", ")", "and", "not", "SCons", ".", "Util", ".", "is_String", "(", "linguas_files", ")", "and", "not", "isinstance", "(", "linguas_files", ",", "SCons", ".", "Node", ".", "FS", ".", "Base", ")", "and", "linguas_files", ":", "# If, linguas_files==True or such, then read 'LINGUAS' file.", "linguas_files", "=", "[", "'LINGUAS'", "]", "if", "linguas_files", "is", "None", ":", "return", "[", "]", "fnodes", "=", "env", ".", "arg2nodes", "(", "linguas_files", ")", "linguas", "=", "[", "]", "for", "fnode", "in", "fnodes", ":", "contents", "=", "_re_comment", ".", "sub", "(", "\"\"", ",", "fnode", ".", "get_text_contents", "(", ")", ")", "ls", "=", "[", "l", "for", "l", "in", "_re_lang", ".", "findall", "(", "contents", ")", "if", "l", "]", "linguas", ".", "extend", "(", "ls", ")", "return", "linguas" ]
Parse `LINGUAS` file and return list of extracted languages
[ "Parse", "LINGUAS", "file", "and", "return", "list", "of", "extracted", "languages" ]
python
train
38.619048
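A sketch of the comment stripping and language extraction; the two regexes are assumed shapes for the module's _re_comment/_re_lang globals, which are not shown in this record.

import re

_re_comment = re.compile(r'#[^\n]*')      # assumed shape
_re_lang = re.compile(r'([\w-]+)', re.M)  # assumed shape

contents = "# enabled translations\nen pl\nde\n"
print(_re_lang.findall(_re_comment.sub('', contents)))  # ['en', 'pl', 'de']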
pmorissette/ffn
ffn/core.py
https://github.com/pmorissette/ffn/blob/ef09f28b858b7ffcd2627ce6a4dc618183a6bc8a/ffn/core.py#L2084-L2095
def winsorize(x, axis=0, limits=0.01): """ `Winsorize <https://en.wikipedia.org/wiki/Winsorizing>`_ values based on limits """ # operate on copy x = x.copy() if isinstance(x, pd.DataFrame): return x.apply(_winsorize_wrapper, axis=axis, args=(limits, )) else: return pd.Series(_winsorize_wrapper(x, limits).values, index=x.index)
[ "def", "winsorize", "(", "x", ",", "axis", "=", "0", ",", "limits", "=", "0.01", ")", ":", "# operate on copy", "x", "=", "x", ".", "copy", "(", ")", "if", "isinstance", "(", "x", ",", "pd", ".", "DataFrame", ")", ":", "return", "x", ".", "apply", "(", "_winsorize_wrapper", ",", "axis", "=", "axis", ",", "args", "=", "(", "limits", ",", ")", ")", "else", ":", "return", "pd", ".", "Series", "(", "_winsorize_wrapper", "(", "x", ",", "limits", ")", ".", "values", ",", "index", "=", "x", ".", "index", ")" ]
`Winsorize <https://en.wikipedia.org/wiki/Winsorizing>`_ values based on limits
[ "Winsorize", "<https", ":", "//", "en", ".", "wikipedia", ".", "org", "/", "wiki", "/", "Winsorizing", ">", "_", "values", "based", "on", "limits" ]
python
train
32.25
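A hedged usage sketch; _winsorize_wrapper is assumed to delegate to scipy's winsorization, so the exact clipped values depend on that helper.

import numpy as np
import pandas as pd

s = pd.Series(np.arange(100, dtype=float))
w = winsorize(s, limits=0.05)
print(w.min(), w.max())  # extremes pulled in toward the 5th/95th percentiles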
google/grr
grr/core/grr_response_core/lib/rdfvalues/paths.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/core/grr_response_core/lib/rdfvalues/paths.py#L201-L261
def AFF4Path(self, client_urn): """Returns the AFF4 URN this pathspec will be stored under. Args: client_urn: A ClientURN. Returns: A urn that corresponds to this pathspec. Raises: ValueError: If pathspec is not of the correct type. """ # If the first level is OS and the second level is TSK its probably a mount # point resolution. We map it into the tsk branch. For example if we get: # path: \\\\.\\Volume{1234}\\ # pathtype: OS # mount_point: /c:/ # nested_path { # path: /windows/ # pathtype: TSK # } # We map this to aff4://client_id/fs/tsk/\\\\.\\Volume{1234}\\/windows/ if not self.HasField("pathtype"): raise ValueError("Can't determine AFF4 path without a valid pathtype.") first_component = self[0] dev = first_component.path if first_component.HasField("offset"): # We divide here just to get prettier numbers in the GUI dev += ":{}".format(first_component.offset // 512) if (len(self) > 1 and first_component.pathtype == PathSpec.PathType.OS and self[1].pathtype == PathSpec.PathType.TSK): result = [self.AFF4_PREFIXES[PathSpec.PathType.TSK], dev] # Skip the top level pathspec. start = 1 else: # For now just map the top level prefix based on the first pathtype result = [self.AFF4_PREFIXES[first_component.pathtype]] start = 0 for p in self[start]: component = p.path # The following encode different pathspec properties into the AFF4 path in # such a way that unique files on the client are mapped to unique URNs in # the AFF4 space. Note that this transformation does not need to be # reversible since we always use the PathSpec when accessing files on the # client. if p.HasField("offset"): component += ":{}".format(p.offset // 512) # Support ADS names. if p.HasField("stream_name"): component += ":" + p.stream_name result.append(component) return client_urn.Add("/".join(result))
[ "def", "AFF4Path", "(", "self", ",", "client_urn", ")", ":", "# If the first level is OS and the second level is TSK its probably a mount", "# point resolution. We map it into the tsk branch. For example if we get:", "# path: \\\\\\\\.\\\\Volume{1234}\\\\", "# pathtype: OS", "# mount_point: /c:/", "# nested_path {", "# path: /windows/", "# pathtype: TSK", "# }", "# We map this to aff4://client_id/fs/tsk/\\\\\\\\.\\\\Volume{1234}\\\\/windows/", "if", "not", "self", ".", "HasField", "(", "\"pathtype\"", ")", ":", "raise", "ValueError", "(", "\"Can't determine AFF4 path without a valid pathtype.\"", ")", "first_component", "=", "self", "[", "0", "]", "dev", "=", "first_component", ".", "path", "if", "first_component", ".", "HasField", "(", "\"offset\"", ")", ":", "# We divide here just to get prettier numbers in the GUI", "dev", "+=", "\":{}\"", ".", "format", "(", "first_component", ".", "offset", "//", "512", ")", "if", "(", "len", "(", "self", ")", ">", "1", "and", "first_component", ".", "pathtype", "==", "PathSpec", ".", "PathType", ".", "OS", "and", "self", "[", "1", "]", ".", "pathtype", "==", "PathSpec", ".", "PathType", ".", "TSK", ")", ":", "result", "=", "[", "self", ".", "AFF4_PREFIXES", "[", "PathSpec", ".", "PathType", ".", "TSK", "]", ",", "dev", "]", "# Skip the top level pathspec.", "start", "=", "1", "else", ":", "# For now just map the top level prefix based on the first pathtype", "result", "=", "[", "self", ".", "AFF4_PREFIXES", "[", "first_component", ".", "pathtype", "]", "]", "start", "=", "0", "for", "p", "in", "self", "[", "start", "]", ":", "component", "=", "p", ".", "path", "# The following encode different pathspec properties into the AFF4 path in", "# such a way that unique files on the client are mapped to unique URNs in", "# the AFF4 space. Note that this transformation does not need to be", "# reversible since we always use the PathSpec when accessing files on the", "# client.", "if", "p", ".", "HasField", "(", "\"offset\"", ")", ":", "component", "+=", "\":{}\"", ".", "format", "(", "p", ".", "offset", "//", "512", ")", "# Support ADS names.", "if", "p", ".", "HasField", "(", "\"stream_name\"", ")", ":", "component", "+=", "\":\"", "+", "p", ".", "stream_name", "result", ".", "append", "(", "component", ")", "return", "client_urn", ".", "Add", "(", "\"/\"", ".", "join", "(", "result", ")", ")" ]
Returns the AFF4 URN this pathspec will be stored under. Args: client_urn: A ClientURN. Returns: A urn that corresponds to this pathspec. Raises: ValueError: If pathspec is not of the correct type.
[ "Returns", "the", "AFF4", "URN", "this", "pathspec", "will", "be", "stored", "under", "." ]
python
train
32.885246
saltstack/salt
salt/states/boto_asg.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/boto_asg.py#L822-L892
def absent( name, force=False, region=None, key=None, keyid=None, profile=None, remove_lc=False): ''' Ensure the named autoscale group is deleted. name Name of the autoscale group. force Force deletion of autoscale group. remove_lc Delete the launch config as well. region The region to connect to. key Secret key to be used. keyid Access key to be used. profile A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} asg = __salt__['boto_asg.get_config'](name, region, key, keyid, profile) if asg is None: ret['result'] = False ret['comment'] = 'Failed to check autoscale group existence.' elif asg: if __opts__['test']: ret['comment'] = 'Autoscale group set to be deleted.' ret['result'] = None if remove_lc: msg = 'Launch configuration {0} is set to be deleted.'.format(asg['launch_config_name']) ret['comment'] = ' '.join([ret['comment'], msg]) return ret deleted = __salt__['boto_asg.delete'](name, force, region, key, keyid, profile) if deleted: if remove_lc: lc_deleted = __salt__['boto_asg.delete_launch_configuration'](asg['launch_config_name'], region, key, keyid, profile) if lc_deleted: if 'launch_config' not in ret['changes']: ret['changes']['launch_config'] = {} ret['changes']['launch_config']['deleted'] = asg['launch_config_name'] else: ret['result'] = False ret['comment'] = ' '.join([ret['comment'], 'Failed to delete launch configuration.']) ret['changes']['old'] = asg ret['changes']['new'] = None ret['comment'] = 'Deleted autoscale group.' else: ret['result'] = False ret['comment'] = 'Failed to delete autoscale group.' else: ret['comment'] = 'Autoscale group does not exist.' return ret
[ "def", "absent", "(", "name", ",", "force", "=", "False", ",", "region", "=", "None", ",", "key", "=", "None", ",", "keyid", "=", "None", ",", "profile", "=", "None", ",", "remove_lc", "=", "False", ")", ":", "ret", "=", "{", "'name'", ":", "name", ",", "'result'", ":", "True", ",", "'comment'", ":", "''", ",", "'changes'", ":", "{", "}", "}", "asg", "=", "__salt__", "[", "'boto_asg.get_config'", "]", "(", "name", ",", "region", ",", "key", ",", "keyid", ",", "profile", ")", "if", "asg", "is", "None", ":", "ret", "[", "'result'", "]", "=", "False", "ret", "[", "'comment'", "]", "=", "'Failed to check autoscale group existence.'", "elif", "asg", ":", "if", "__opts__", "[", "'test'", "]", ":", "ret", "[", "'comment'", "]", "=", "'Autoscale group set to be deleted.'", "ret", "[", "'result'", "]", "=", "None", "if", "remove_lc", ":", "msg", "=", "'Launch configuration {0} is set to be deleted.'", ".", "format", "(", "asg", "[", "'launch_config_name'", "]", ")", "ret", "[", "'comment'", "]", "=", "' '", ".", "join", "(", "[", "ret", "[", "'comment'", "]", ",", "msg", "]", ")", "return", "ret", "deleted", "=", "__salt__", "[", "'boto_asg.delete'", "]", "(", "name", ",", "force", ",", "region", ",", "key", ",", "keyid", ",", "profile", ")", "if", "deleted", ":", "if", "remove_lc", ":", "lc_deleted", "=", "__salt__", "[", "'boto_asg.delete_launch_configuration'", "]", "(", "asg", "[", "'launch_config_name'", "]", ",", "region", ",", "key", ",", "keyid", ",", "profile", ")", "if", "lc_deleted", ":", "if", "'launch_config'", "not", "in", "ret", "[", "'changes'", "]", ":", "ret", "[", "'changes'", "]", "[", "'launch_config'", "]", "=", "{", "}", "ret", "[", "'changes'", "]", "[", "'launch_config'", "]", "[", "'deleted'", "]", "=", "asg", "[", "'launch_config_name'", "]", "else", ":", "ret", "[", "'result'", "]", "=", "False", "ret", "[", "'comment'", "]", "=", "' '", ".", "join", "(", "[", "ret", "[", "'comment'", "]", ",", "'Failed to delete launch configuration.'", "]", ")", "ret", "[", "'changes'", "]", "[", "'old'", "]", "=", "asg", "ret", "[", "'changes'", "]", "[", "'new'", "]", "=", "None", "ret", "[", "'comment'", "]", "=", "'Deleted autoscale group.'", "else", ":", "ret", "[", "'result'", "]", "=", "False", "ret", "[", "'comment'", "]", "=", "'Failed to delete autoscale group.'", "else", ":", "ret", "[", "'comment'", "]", "=", "'Autoscale group does not exist.'", "return", "ret" ]
Ensure the named autoscale group is deleted. name Name of the autoscale group. force Force deletion of autoscale group. remove_lc Delete the launch config as well. region The region to connect to. key Secret key to be used. keyid Access key to be used. profile A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid.
[ "Ensure", "the", "named", "autoscale", "group", "is", "deleted", "." ]
python
train
36.28169
calston/tensor
tensor/utils.py
https://github.com/calston/tensor/blob/7c0c99708b5dbff97f3895f705e11996b608549d/tensor/utils.py#L254-L261
def getBody(self, url, method='GET', headers={}, data=None, socket=None): """Make an HTTP request and return the body """ if not 'User-Agent' in headers: headers['User-Agent'] = ['Tensor HTTP checker'] return self.request(url, method, headers, data, socket)
[ "def", "getBody", "(", "self", ",", "url", ",", "method", "=", "'GET'", ",", "headers", "=", "{", "}", ",", "data", "=", "None", ",", "socket", "=", "None", ")", ":", "if", "not", "'User-Agent'", "in", "headers", ":", "headers", "[", "'User-Agent'", "]", "=", "[", "'Tensor HTTP checker'", "]", "return", "self", ".", "request", "(", "url", ",", "method", ",", "headers", ",", "data", ",", "socket", ")" ]
Make an HTTP request and return the body
[ "Make", "an", "HTTP", "request", "and", "return", "the", "body" ]
python
test
37
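A hedged usage sketch; in Tensor's Twisted context the return value is a Deferred, so it is yielded from an inlineCallbacks method (the URL is invented).

from twisted.internet import defer

@defer.inlineCallbacks
def check_health(source):
    body = yield source.getBody('http://localhost:8080/health')  # hypothetical URL
    defer.returnValue(b'ok' in body)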
phoebe-project/phoebe2
phoebe/parameters/constraint.py
https://github.com/phoebe-project/phoebe2/blob/e64b8be683977064e2d55dd1b3ac400f64c3e379/phoebe/parameters/constraint.py#L989-L1026
def requiv_contact_min(b, component, solve_for=None, **kwargs):
    """
    Create a constraint to determine the critical (at L1) value of requiv at which a contact will underflow.  This will only be used for contacts for requiv_min

    :parameter b: the :class:`phoebe.frontend.bundle.Bundle`
    :parameter str component: the label of the star in which this constraint should be built
    :parameter str solve_for: if 'requiv_min' should not be the derived/constrained parameter, provide which other parameter should be derived
    :returns: lhs (Parameter), rhs (ConstraintParameter), args (list of arguments that were passed to this function)
    """
    hier = b.get_hierarchy()
    if not len(hier.get_value()):
        # TODO: change to custom error type to catch in bundle.add_component
        # TODO: check whether the problem is 0 hierarchies or more than 1
        raise NotImplementedError("constraint for requiv_contact_min requires hierarchy")

    component_ps = _get_system_ps(b, component)

    parentorbit = hier.get_parent_of(component)
    parentorbit_ps = _get_system_ps(b, parentorbit)

    requiv_min = component_ps.get_parameter(qualifier='requiv_min')
    q = parentorbit_ps.get_parameter(qualifier='q')
    sma = parentorbit_ps.get_parameter(qualifier='sma')

    if solve_for in [None, requiv_min]:
        lhs = requiv_min
        rhs = roche_requiv_contact_L1(q, sma, hier.get_primary_or_secondary(component, return_ind=True))
    else:
        raise NotImplementedError("requiv_contact_min can only be solved for requiv_min")

    return lhs, rhs, {'component': component}
[ "def", "requiv_contact_min", "(", "b", ",", "component", ",", "solve_for", "=", "None", ",", "*", "*", "kwargs", ")", ":", "hier", "=", "b", ".", "get_hierarchy", "(", ")", "if", "not", "len", "(", "hier", ".", "get_value", "(", ")", ")", ":", "# TODO: change to custom error type to catch in bundle.add_component", "# TODO: check whether the problem is 0 hierarchies or more than 1", "raise", "NotImplementedError", "(", "\"constraint for requiv_contact_min requires hierarchy\"", ")", "component_ps", "=", "_get_system_ps", "(", "b", ",", "component", ")", "parentorbit", "=", "hier", ".", "get_parent_of", "(", "component", ")", "parentorbit_ps", "=", "_get_system_ps", "(", "b", ",", "parentorbit", ")", "requiv_min", "=", "component_ps", ".", "get_parameter", "(", "qualifier", "=", "'requiv_min'", ")", "q", "=", "parentorbit_ps", ".", "get_parameter", "(", "qualifier", "=", "'q'", ")", "sma", "=", "parentorbit_ps", ".", "get_parameter", "(", "qualifier", "=", "'sma'", ")", "if", "solve_for", "in", "[", "None", ",", "requiv_min", "]", ":", "lhs", "=", "requiv_min", "rhs", "=", "roche_requiv_contact_L1", "(", "q", ",", "sma", ",", "hier", ".", "get_primary_or_secondary", "(", "component", ",", "return_ind", "=", "True", ")", ")", "else", ":", "raise", "NotImplementedError", "(", "\"requiv_contact_min can only be solved for requiv_min\"", ")", "return", "lhs", ",", "rhs", ",", "{", "'component'", ":", "component", "}" ]
Create a constraint to determine the critical (at L1) value of requiv at which a contact will underflow.  This will only be used for contacts for requiv_min

:parameter b: the :class:`phoebe.frontend.bundle.Bundle`
:parameter str component: the label of the star in which this constraint should be built
:parameter str solve_for: if 'requiv_min' should not be the derived/constrained parameter, provide which other parameter should be derived
:returns: lhs (Parameter), rhs (ConstraintParameter), args (list of arguments that were passed to this function)
[ "Create", "a", "constraint", "to", "determine", "the", "critical", "(", "at", "L1", ")", "value", "of", "requiv", "at", "which", "a", "constact", "will", "underflow", ".", "This", "will", "only", "be", "used", "for", "contacts", "for", "requiv_min" ]
python
train
42.184211
fr33jc/bang
bang/providers/hpcloud/__init__.py
https://github.com/fr33jc/bang/blob/8f000713f88d2a9a8c1193b63ca10a6578560c16/bang/providers/hpcloud/__init__.py#L136-L193
def authenticate(self): """ Authenticate against the HP Cloud Identity Service. This is the first step in any hpcloud.com session, although this method is automatically called when accessing higher-level methods/attributes. **Examples of Credentials Configuration** - Bare minimum for authentication using HP API keys: .. code-block:: yaml deployer_credentials: hpcloud: auth_url: https://region-a.geo-1.identity.hpcloudsvc.com:35357/v2.0/ tenant_name: farley.mowat-tenant1 access_key_id: MZOFIE9S83FOS248FIE3 secret_access_key: EU859vjksor73gkY378f9gkslbkrabcxwfyW2loo - With multiple *compute* availability zones activated, the region must also be specified (due to current limitations in the OpenStack client libraries): .. code-block:: yaml deployer_credentials: hpcloud: auth_url: https://region-a.geo-1.identity.hpcloudsvc.com:35357/v2.0/ tenant_name: farley.mowat-tenant1 access_key_id: MZOFIE9S83FOS248FIE3 secret_access_key: EU859vjksor73gkY378f9gkslbkrabcxwfyW2loo region_name: az-1.region-a.geo-1 - Using ``username`` and ``password`` is also allowed, but discouraged: .. code-block:: yaml deployer_credentials: hpcloud: auth_url: https://region-a.geo-1.identity.hpcloudsvc.com:35357/v2.0/ tenant_name: farley.mowat-tenant1 username: farley.mowat password: NeverCryW0lf When both API keys and ``username+password`` are specified, the API keys are used. """ log.info("Authenticating to HP Cloud...") creds = self.creds access_key_id = creds.get('access_key_id', '') secret_access_key = creds.get('secret_access_key', '') # prefer api key + secret key, but fallback to username + password if access_key_id and secret_access_key: self.nova_client.client.os_access_key_id = access_key_id self.nova_client.client.os_secret_key = secret_access_key self.nova_client.authenticate()
[ "def", "authenticate", "(", "self", ")", ":", "log", ".", "info", "(", "\"Authenticating to HP Cloud...\"", ")", "creds", "=", "self", ".", "creds", "access_key_id", "=", "creds", ".", "get", "(", "'access_key_id'", ",", "''", ")", "secret_access_key", "=", "creds", ".", "get", "(", "'secret_access_key'", ",", "''", ")", "# prefer api key + secret key, but fallback to username + password", "if", "access_key_id", "and", "secret_access_key", ":", "self", ".", "nova_client", ".", "client", ".", "os_access_key_id", "=", "access_key_id", "self", ".", "nova_client", ".", "client", ".", "os_secret_key", "=", "secret_access_key", "self", ".", "nova_client", ".", "authenticate", "(", ")" ]
Authenticate against the HP Cloud Identity Service. This is the first step in any hpcloud.com session, although this method is automatically called when accessing higher-level methods/attributes. **Examples of Credentials Configuration** - Bare minimum for authentication using HP API keys: .. code-block:: yaml deployer_credentials: hpcloud: auth_url: https://region-a.geo-1.identity.hpcloudsvc.com:35357/v2.0/ tenant_name: farley.mowat-tenant1 access_key_id: MZOFIE9S83FOS248FIE3 secret_access_key: EU859vjksor73gkY378f9gkslbkrabcxwfyW2loo - With multiple *compute* availability zones activated, the region must also be specified (due to current limitations in the OpenStack client libraries): .. code-block:: yaml deployer_credentials: hpcloud: auth_url: https://region-a.geo-1.identity.hpcloudsvc.com:35357/v2.0/ tenant_name: farley.mowat-tenant1 access_key_id: MZOFIE9S83FOS248FIE3 secret_access_key: EU859vjksor73gkY378f9gkslbkrabcxwfyW2loo region_name: az-1.region-a.geo-1 - Using ``username`` and ``password`` is also allowed, but discouraged: .. code-block:: yaml deployer_credentials: hpcloud: auth_url: https://region-a.geo-1.identity.hpcloudsvc.com:35357/v2.0/ tenant_name: farley.mowat-tenant1 username: farley.mowat password: NeverCryW0lf When both API keys and ``username+password`` are specified, the API keys are used.
[ "Authenticate", "against", "the", "HP", "Cloud", "Identity", "Service", ".", "This", "is", "the", "first", "step", "in", "any", "hpcloud", ".", "com", "session", "although", "this", "method", "is", "automatically", "called", "when", "accessing", "higher", "-", "level", "methods", "/", "attributes", "." ]
python
train
38.862069
tanghaibao/jcvi
jcvi/formats/contig.py
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/contig.py#L90-L146
def frombed(args): """ %prog frombed bedfile contigfasta readfasta Convert read placement to contig format. This is useful before running BAMBUS. """ from jcvi.formats.fasta import Fasta from jcvi.formats.bed import Bed from jcvi.utils.cbook import fill p = OptionParser(frombed.__doc__) opts, args = p.parse_args(args) if len(args) != 3: sys.exit(not p.print_help()) bedfile, contigfasta, readfasta = args prefix = bedfile.rsplit(".", 1)[0] contigfile = prefix + ".contig" idsfile = prefix + ".ids" contigfasta = Fasta(contigfasta) readfasta = Fasta(readfasta) bed = Bed(bedfile) checksum = "00000000 checksum." fw_ids = open(idsfile, "w") fw = open(contigfile, "w") for ctg, reads in bed.sub_beds(): ctgseq = contigfasta[ctg] ctgline = "##{0} {1} {2} bases, {3}".format(\ ctg, len(reads), len(ctgseq), checksum) print(ctg, file=fw_ids) print(ctgline, file=fw) print(fill(ctgseq.seq), file=fw) for b in reads: read = b.accn strand = b.strand readseq = readfasta[read] rc = " [RC]" if strand == "-" else "" readlen = len(readseq) rstart, rend = 1, readlen if strand == "-": rstart, rend = rend, rstart readrange = "{{{0} {1}}}".format(rstart, rend) conrange = "<{0} {1}>".format(b.start, b.end) readline = "#{0}(0){1} {2} bases, {3} {4} {5}".format(\ read, rc, readlen, checksum, readrange, conrange) print(readline, file=fw) print(fill(readseq.seq), file=fw) logging.debug("Mapped contigs written to `{0}`.".format(contigfile)) logging.debug("Contig IDs written to `{0}`.".format(idsfile))
[ "def", "frombed", "(", "args", ")", ":", "from", "jcvi", ".", "formats", ".", "fasta", "import", "Fasta", "from", "jcvi", ".", "formats", ".", "bed", "import", "Bed", "from", "jcvi", ".", "utils", ".", "cbook", "import", "fill", "p", "=", "OptionParser", "(", "frombed", ".", "__doc__", ")", "opts", ",", "args", "=", "p", ".", "parse_args", "(", "args", ")", "if", "len", "(", "args", ")", "!=", "3", ":", "sys", ".", "exit", "(", "not", "p", ".", "print_help", "(", ")", ")", "bedfile", ",", "contigfasta", ",", "readfasta", "=", "args", "prefix", "=", "bedfile", ".", "rsplit", "(", "\".\"", ",", "1", ")", "[", "0", "]", "contigfile", "=", "prefix", "+", "\".contig\"", "idsfile", "=", "prefix", "+", "\".ids\"", "contigfasta", "=", "Fasta", "(", "contigfasta", ")", "readfasta", "=", "Fasta", "(", "readfasta", ")", "bed", "=", "Bed", "(", "bedfile", ")", "checksum", "=", "\"00000000 checksum.\"", "fw_ids", "=", "open", "(", "idsfile", ",", "\"w\"", ")", "fw", "=", "open", "(", "contigfile", ",", "\"w\"", ")", "for", "ctg", ",", "reads", "in", "bed", ".", "sub_beds", "(", ")", ":", "ctgseq", "=", "contigfasta", "[", "ctg", "]", "ctgline", "=", "\"##{0} {1} {2} bases, {3}\"", ".", "format", "(", "ctg", ",", "len", "(", "reads", ")", ",", "len", "(", "ctgseq", ")", ",", "checksum", ")", "print", "(", "ctg", ",", "file", "=", "fw_ids", ")", "print", "(", "ctgline", ",", "file", "=", "fw", ")", "print", "(", "fill", "(", "ctgseq", ".", "seq", ")", ",", "file", "=", "fw", ")", "for", "b", "in", "reads", ":", "read", "=", "b", ".", "accn", "strand", "=", "b", ".", "strand", "readseq", "=", "readfasta", "[", "read", "]", "rc", "=", "\" [RC]\"", "if", "strand", "==", "\"-\"", "else", "\"\"", "readlen", "=", "len", "(", "readseq", ")", "rstart", ",", "rend", "=", "1", ",", "readlen", "if", "strand", "==", "\"-\"", ":", "rstart", ",", "rend", "=", "rend", ",", "rstart", "readrange", "=", "\"{{{0} {1}}}\"", ".", "format", "(", "rstart", ",", "rend", ")", "conrange", "=", "\"<{0} {1}>\"", ".", "format", "(", "b", ".", "start", ",", "b", ".", "end", ")", "readline", "=", "\"#{0}(0){1} {2} bases, {3} {4} {5}\"", ".", "format", "(", "read", ",", "rc", ",", "readlen", ",", "checksum", ",", "readrange", ",", "conrange", ")", "print", "(", "readline", ",", "file", "=", "fw", ")", "print", "(", "fill", "(", "readseq", ".", "seq", ")", ",", "file", "=", "fw", ")", "logging", ".", "debug", "(", "\"Mapped contigs written to `{0}`.\"", ".", "format", "(", "contigfile", ")", ")", "logging", ".", "debug", "(", "\"Contig IDs written to `{0}`.\"", ".", "format", "(", "idsfile", ")", ")" ]
%prog frombed bedfile contigfasta readfasta Convert read placement to contig format. This is useful before running BAMBUS.
[ "%prog", "frombed", "bedfile", "contigfasta", "readfasta" ]
python
train
31.385965
ActivisionGameScience/assertpy
assertpy/assertpy.py
https://github.com/ActivisionGameScience/assertpy/blob/08d799cdb01f9a25d3e20672efac991c7bc26d79/assertpy/assertpy.py#L231-L244
def is_instance_of(self, some_class): """Asserts that val is an instance of the given class.""" try: if not isinstance(self.val, some_class): if hasattr(self.val, '__name__'): t = self.val.__name__ elif hasattr(self.val, '__class__'): t = self.val.__class__.__name__ else: t = 'unknown' self._err('Expected <%s:%s> to be instance of class <%s>, but was not.' % (self.val, t, some_class.__name__)) except TypeError: raise TypeError('given arg must be a class') return self
[ "def", "is_instance_of", "(", "self", ",", "some_class", ")", ":", "try", ":", "if", "not", "isinstance", "(", "self", ".", "val", ",", "some_class", ")", ":", "if", "hasattr", "(", "self", ".", "val", ",", "'__name__'", ")", ":", "t", "=", "self", ".", "val", ".", "__name__", "elif", "hasattr", "(", "self", ".", "val", ",", "'__class__'", ")", ":", "t", "=", "self", ".", "val", ".", "__class__", ".", "__name__", "else", ":", "t", "=", "'unknown'", "self", ".", "_err", "(", "'Expected <%s:%s> to be instance of class <%s>, but was not.'", "%", "(", "self", ".", "val", ",", "t", ",", "some_class", ".", "__name__", ")", ")", "except", "TypeError", ":", "raise", "TypeError", "(", "'given arg must be a class'", ")", "return", "self" ]
Asserts that val is an instance of the given class.
[ "Asserts", "that", "val", "is", "an", "instance", "of", "the", "given", "class", "." ]
python
valid
45.571429
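A quick usage sketch for the assertion above (assertpy's public entry point is assert_that; the commented lines show the two failure modes this method produces):

from assertpy import assert_that

# passes: 'foo' is a str
assert_that('foo').is_instance_of(str)

# would fail with:
#   Expected <123:int> to be instance of class <str>, but was not.
# assert_that(123).is_instance_of(str)

# would raise TypeError('given arg must be a class'), because
# isinstance() rejects a non-class second argument
# assert_that(123).is_instance_of('str')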
StackStorm/pybind
pybind/slxos/v17r_1_01a/mpls_state/__init__.py
https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/slxos/v17r_1_01a/mpls_state/__init__.py#L1150-L1173
def _set_autobw_threshold_table_summary(self, v, load=False): """ Setter method for autobw_threshold_table_summary, mapped from YANG variable /mpls_state/autobw_threshold_table_summary (container) If this variable is read-only (config: false) in the source YANG file, then _set_autobw_threshold_table_summary is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_autobw_threshold_table_summary() directly. YANG Description: MPLS Auto Bandwidth Threshold TableSummary """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=autobw_threshold_table_summary.autobw_threshold_table_summary, is_container='container', presence=False, yang_name="autobw-threshold-table-summary", rest_name="autobw-threshold-table-summary", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-autobw-threshold-table-summary', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='container', is_config=False) except (TypeError, ValueError): raise ValueError({ 'error-string': """autobw_threshold_table_summary must be of a type compatible with container""", 'defined-type': "container", 'generated-type': """YANGDynClass(base=autobw_threshold_table_summary.autobw_threshold_table_summary, is_container='container', presence=False, yang_name="autobw-threshold-table-summary", rest_name="autobw-threshold-table-summary", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-autobw-threshold-table-summary', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='container', is_config=False)""", }) self.__autobw_threshold_table_summary = t if hasattr(self, '_set'): self._set()
[ "def", "_set_autobw_threshold_table_summary", "(", "self", ",", "v", ",", "load", "=", "False", ")", ":", "if", "hasattr", "(", "v", ",", "\"_utype\"", ")", ":", "v", "=", "v", ".", "_utype", "(", "v", ")", "try", ":", "t", "=", "YANGDynClass", "(", "v", ",", "base", "=", "autobw_threshold_table_summary", ".", "autobw_threshold_table_summary", ",", "is_container", "=", "'container'", ",", "presence", "=", "False", ",", "yang_name", "=", "\"autobw-threshold-table-summary\"", ",", "rest_name", "=", "\"autobw-threshold-table-summary\"", ",", "parent", "=", "self", ",", "path_helper", "=", "self", ".", "_path_helper", ",", "extmethods", "=", "self", ".", "_extmethods", ",", "register_paths", "=", "True", ",", "extensions", "=", "{", "u'tailf-common'", ":", "{", "u'callpoint'", ":", "u'mpls-autobw-threshold-table-summary'", ",", "u'cli-suppress-show-path'", ":", "None", "}", "}", ",", "namespace", "=", "'urn:brocade.com:mgmt:brocade-mpls-operational'", ",", "defining_module", "=", "'brocade-mpls-operational'", ",", "yang_type", "=", "'container'", ",", "is_config", "=", "False", ")", "except", "(", "TypeError", ",", "ValueError", ")", ":", "raise", "ValueError", "(", "{", "'error-string'", ":", "\"\"\"autobw_threshold_table_summary must be of a type compatible with container\"\"\"", ",", "'defined-type'", ":", "\"container\"", ",", "'generated-type'", ":", "\"\"\"YANGDynClass(base=autobw_threshold_table_summary.autobw_threshold_table_summary, is_container='container', presence=False, yang_name=\"autobw-threshold-table-summary\", rest_name=\"autobw-threshold-table-summary\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-autobw-threshold-table-summary', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='container', is_config=False)\"\"\"", ",", "}", ")", "self", ".", "__autobw_threshold_table_summary", "=", "t", "if", "hasattr", "(", "self", ",", "'_set'", ")", ":", "self", ".", "_set", "(", ")" ]
Setter method for autobw_threshold_table_summary, mapped from YANG variable /mpls_state/autobw_threshold_table_summary (container) If this variable is read-only (config: false) in the source YANG file, then _set_autobw_threshold_table_summary is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_autobw_threshold_table_summary() directly. YANG Description: MPLS Auto Bandwidth Threshold TableSummary
[ "Setter", "method", "for", "autobw_threshold_table_summary", "mapped", "from", "YANG", "variable", "/", "mpls_state", "/", "autobw_threshold_table_summary", "(", "container", ")", "If", "this", "variable", "is", "read", "-", "only", "(", "config", ":", "false", ")", "in", "the", "source", "YANG", "file", "then", "_set_autobw_threshold_table_summary", "is", "considered", "as", "a", "private", "method", ".", "Backends", "looking", "to", "populate", "this", "variable", "should", "do", "so", "via", "calling", "thisObj", ".", "_set_autobw_threshold_table_summary", "()", "directly", "." ]
python
train
87.375
CityOfZion/neo-python
neo/Implementations/Notifications/LevelDB/NotificationDB.py
https://github.com/CityOfZion/neo-python/blob/fe90f62e123d720d4281c79af0598d9df9e776fb/neo/Implementations/Notifications/LevelDB/NotificationDB.py#L302-L320
def get_token(self, hash): """ Looks up a token by hash Args: hash (UInt160): The token to look up Returns: SmartContractEvent: A smart contract event with a contract that is an NEP5 Token """ tokens_snapshot = self.db.prefixed_db(NotificationPrefix.PREFIX_TOKEN).snapshot() try: val = tokens_snapshot.get(hash.ToBytes()) if val: event = SmartContractEvent.FromByteArray(val) return event except Exception as e: logger.error("Smart contract event with contract hash %s not found: %s " % (hash.ToString(), e)) return None
[ "def", "get_token", "(", "self", ",", "hash", ")", ":", "tokens_snapshot", "=", "self", ".", "db", ".", "prefixed_db", "(", "NotificationPrefix", ".", "PREFIX_TOKEN", ")", ".", "snapshot", "(", ")", "try", ":", "val", "=", "tokens_snapshot", ".", "get", "(", "hash", ".", "ToBytes", "(", ")", ")", "if", "val", ":", "event", "=", "SmartContractEvent", ".", "FromByteArray", "(", "val", ")", "return", "event", "except", "Exception", "as", "e", ":", "logger", ".", "error", "(", "\"Smart contract event with contract hash %s not found: %s \"", "%", "(", "hash", ".", "ToString", "(", ")", ",", "e", ")", ")", "return", "None" ]
Looks up a token by hash Args: hash (UInt160): The token to look up Returns: SmartContractEvent: A smart contract event with a contract that is an NEP5 Token
[ "Looks", "up", "a", "token", "by", "hash", "Args", ":", "hash", "(", "UInt160", ")", ":", "The", "token", "to", "look", "up" ]
python
train
35.157895
log2timeline/plaso
plaso/storage/sqlite/merge_reader.py
https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/storage/sqlite/merge_reader.py#L99-L116
def _AddEvent(self, event): """Adds an event. Args: event (EventObject): event. """ if hasattr(event, 'event_data_row_identifier'): event_data_identifier = identifiers.SQLTableIdentifier( self._CONTAINER_TYPE_EVENT_DATA, event.event_data_row_identifier) lookup_key = event_data_identifier.CopyToString() event_data_identifier = self._event_data_identifier_mappings[lookup_key] event.SetEventDataIdentifier(event_data_identifier) # TODO: add event identifier mappings for event tags. self._storage_writer.AddEvent(event)
[ "def", "_AddEvent", "(", "self", ",", "event", ")", ":", "if", "hasattr", "(", "event", ",", "'event_data_row_identifier'", ")", ":", "event_data_identifier", "=", "identifiers", ".", "SQLTableIdentifier", "(", "self", ".", "_CONTAINER_TYPE_EVENT_DATA", ",", "event", ".", "event_data_row_identifier", ")", "lookup_key", "=", "event_data_identifier", ".", "CopyToString", "(", ")", "event_data_identifier", "=", "self", ".", "_event_data_identifier_mappings", "[", "lookup_key", "]", "event", ".", "SetEventDataIdentifier", "(", "event_data_identifier", ")", "# TODO: add event identifier mappings for event tags.", "self", ".", "_storage_writer", ".", "AddEvent", "(", "event", ")" ]
Adds an event. Args: event (EventObject): event.
[ "Adds", "an", "event", "." ]
python
train
32.222222
Gorialis/jishaku
jishaku/repl/compilation.py
https://github.com/Gorialis/jishaku/blob/fc7c479b9d510ede189a929c8aa6f7c8ef7f9a6e/jishaku/repl/compilation.py#L150-L164
async def traverse(self, func): """ Traverses an async function or generator, yielding each result. This function is private. The class should be used as an iterator instead of using this method. """ # this allows the reference to be stolen async_executor = self if inspect.isasyncgenfunction(func): async for result in func(*async_executor.args): yield result else: yield await func(*async_executor.args)
[ "async", "def", "traverse", "(", "self", ",", "func", ")", ":", "# this allows the reference to be stolen", "async_executor", "=", "self", "if", "inspect", ".", "isasyncgenfunction", "(", "func", ")", ":", "async", "for", "result", "in", "func", "(", "*", "async_executor", ".", "args", ")", ":", "yield", "result", "else", ":", "yield", "await", "func", "(", "*", "async_executor", ".", "args", ")" ]
Traverses an async function or generator, yielding each result. This function is private. The class should be used as an iterator instead of using this method.
[ "Traverses", "an", "async", "function", "or", "generator", "yielding", "each", "result", "." ]
python
train
33.2
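The interesting bit is the inspect.isasyncgenfunction dispatch; a self-contained sketch of the same pattern outside the executor class (an illustration, not jishaku's own API):

import asyncio
import inspect

async def traverse(func, *args):
    # async generator functions are iterated; plain coroutine
    # functions are awaited once and their result yielded
    if inspect.isasyncgenfunction(func):
        async for result in func(*args):
            yield result
    else:
        yield await func(*args)

async def gen():
    yield 1
    yield 2

async def coro():
    return 3

async def main():
    print([x async for x in traverse(gen)])   # [1, 2]
    print([x async for x in traverse(coro)])  # [3]

asyncio.run(main())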
bodylabs/lace
lace/texture.py
https://github.com/bodylabs/lace/blob/b68f4a60a4cac66c0607ffbae38ef9d07d37f459/lace/texture.py#L56-L69
def load_texture(self, texture_version):
    '''
    Expect a texture version number as an integer, load the texture
    version from /is/ps/shared/data/body/template/texture_coordinates/.
    Currently there are versions [0, 1, 2, 3] available.
    '''
    import numpy as np
    lowres_tex_template = 's3://bodylabs-korper-assets/is/ps/shared/data/body/template/texture_coordinates/textured_template_low_v%d.obj' % texture_version
    highres_tex_template = 's3://bodylabs-korper-assets/is/ps/shared/data/body/template/texture_coordinates/textured_template_high_v%d.obj' % texture_version
    from lace.mesh import Mesh
    from lace.cache import sc
    mesh_with_texture = Mesh(filename=sc(lowres_tex_template))
    if not np.all(mesh_with_texture.f.shape == self.f.shape):
        mesh_with_texture = Mesh(filename=sc(highres_tex_template))
    self.transfer_texture(mesh_with_texture)
[ "def", "load_texture", "(", "self", ",", "texture_version", ")", ":", "import", "numpy", "as", "np", "lowres_tex_template", "=", "'s3://bodylabs-korper-assets/is/ps/shared/data/body/template/texture_coordinates/textured_template_low_v%d.obj'", "%", "texture_version", "highres_tex_template", "=", "'s3://bodylabs-korper-assets/is/ps/shared/data/body/template/texture_coordinates/textured_template_high_v%d.obj'", "%", "texture_version", "from", "lace", ".", "mesh", "import", "Mesh", "from", "lace", ".", "cache", "import", "sc", "mesh_with_texture", "=", "Mesh", "(", "filename", "=", "sc", "(", "lowres_tex_template", ")", ")", "if", "not", "np", ".", "all", "(", "mesh_with_texture", ".", "f", ".", "shape", "==", "self", ".", "f", ".", "shape", ")", ":", "mesh_with_texture", "=", "Mesh", "(", "filename", "=", "sc", "(", "highres_tex_template", ")", ")", "self", ".", "transfer_texture", "(", "mesh_with_texture", ")" ]
Expect a texture version number as an integer, load the texture
version from /is/ps/shared/data/body/template/texture_coordinates/.
Currently there are versions [0, 1, 2, 3] available.
[ "Expect", "a", "texture", "version", "number", "as", "an", "integer", "load", "the", "texture", "version", "from", "/", "is", "/", "ps", "/", "shared", "/", "data", "/", "body", "/", "template", "/", "texture_coordinates", "/", ".", "Currently", "there", "are", "versions", "[", "0", "1", "2", "3", "]", "availiable", "." ]
python
train
66.071429
bioidiap/gridtk
gridtk/script/jman.py
https://github.com/bioidiap/gridtk/blob/9e3291b8b50388682908927231b2730db1da147d/gridtk/script/jman.py#L198-L201
def list(args): """Lists the jobs in the given database.""" jm = setup(args) jm.list(job_ids=get_ids(args.job_ids), print_array_jobs=args.print_array_jobs, print_dependencies=args.print_dependencies, status=args.status, long=args.long, print_times=args.print_times, ids_only=args.ids_only, names=args.names)
[ "def", "list", "(", "args", ")", ":", "jm", "=", "setup", "(", "args", ")", "jm", ".", "list", "(", "job_ids", "=", "get_ids", "(", "args", ".", "job_ids", ")", ",", "print_array_jobs", "=", "args", ".", "print_array_jobs", ",", "print_dependencies", "=", "args", ".", "print_dependencies", ",", "status", "=", "args", ".", "status", ",", "long", "=", "args", ".", "long", ",", "print_times", "=", "args", ".", "print_times", ",", "ids_only", "=", "args", ".", "ids_only", ",", "names", "=", "args", ".", "names", ")" ]
Lists the jobs in the given database.
[ "Lists", "the", "jobs", "in", "the", "given", "database", "." ]
python
train
77.5
wal-e/wal-e
wal_e/blobstore/file/file_util.py
https://github.com/wal-e/wal-e/blob/027263860e72a403bc0e1497bb3e67523138e7a2/wal_e/blobstore/file/file_util.py#L36-L59
def do_lzop_get(creds, url, path, decrypt, do_retry): """ Get and decompress a URL This streams the content directly to lzop; the compressed version is never stored on disk. """ assert url.endswith('.lzo'), 'Expect an lzop-compressed file' with files.DeleteOnError(path) as decomp_out: key = _uri_to_key(creds, url) with get_download_pipeline(PIPE, decomp_out.f, decrypt) as pl: g = gevent.spawn(write_and_return_error, key, pl.stdin) exc = g.get() if exc is not None: raise exc logger.info( msg='completed download and decompression', detail='Downloaded and decompressed "{url}" to "{path}"' .format(url=url, path=path)) return True
[ "def", "do_lzop_get", "(", "creds", ",", "url", ",", "path", ",", "decrypt", ",", "do_retry", ")", ":", "assert", "url", ".", "endswith", "(", "'.lzo'", ")", ",", "'Expect an lzop-compressed file'", "with", "files", ".", "DeleteOnError", "(", "path", ")", "as", "decomp_out", ":", "key", "=", "_uri_to_key", "(", "creds", ",", "url", ")", "with", "get_download_pipeline", "(", "PIPE", ",", "decomp_out", ".", "f", ",", "decrypt", ")", "as", "pl", ":", "g", "=", "gevent", ".", "spawn", "(", "write_and_return_error", ",", "key", ",", "pl", ".", "stdin", ")", "exc", "=", "g", ".", "get", "(", ")", "if", "exc", "is", "not", "None", ":", "raise", "exc", "logger", ".", "info", "(", "msg", "=", "'completed download and decompression'", ",", "detail", "=", "'Downloaded and decompressed \"{url}\" to \"{path}\"'", ".", "format", "(", "url", "=", "url", ",", "path", "=", "path", ")", ")", "return", "True" ]
Get and decompress a URL This streams the content directly to lzop; the compressed version is never stored on disk.
[ "Get", "and", "decompress", "a", "URL" ]
python
train
31.625
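The files.DeleteOnError wrapper is what keeps a failed transfer from leaving a partial file at path; a rough standalone sketch of that pattern (illustrative only, not wal-e's actual implementation):

import os

class DeleteOnError:
    """Open a file for writing and unlink it if the block raises."""

    def __init__(self, path):
        self.path = path
        self.f = open(path, 'wb')

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc, tb):
        self.f.close()
        if exc_type is not None:
            # don't leave a truncated download on disk
            os.unlink(self.path)
        return False  # re-raise the original exception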
estnltk/estnltk
estnltk/text.py
https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/text.py#L1296-L1315
def split_by(self, layer, sep=' '):
    """Split the text into multiple instances defined by elements of given layer.

    The spans for layer elements are extracted and fed to
    :py:meth:`~estnltk.text.Text.split_given_spans` method.

    Parameters
    ----------
    layer: str
        String determining the layer that is used to define the start and end positions of resulting splits.
    sep: str (default: ' ')
        The separator to use to join texts of multilayer elements.

    Returns
    -------
    list of Text
    """
    if not self.is_tagged(layer):
        self.tag(layer)
    return self.split_given_spans(self.spans(layer), sep=sep)
[ "def", "split_by", "(", "self", ",", "layer", ",", "sep", "=", "' '", ")", ":", "if", "not", "self", ".", "is_tagged", "(", "layer", ")", ":", "self", ".", "tag", "(", "layer", ")", "return", "self", ".", "split_given_spans", "(", "self", ".", "spans", "(", "layer", ")", ",", "sep", "=", "sep", ")" ]
Split the text into multiple instances defined by elements of given layer.

The spans for layer elements are extracted and fed to
:py:meth:`~estnltk.text.Text.split_given_spans` method.

Parameters
----------
layer: str
    String determining the layer that is used to define the start and end positions of resulting splits.
sep: str (default: ' ')
    The separator to use to join texts of multilayer elements.

Returns
-------
list of Text
[ "Split", "the", "text", "into", "multiple", "instances", "defined", "by", "elements", "of", "given", "layer", "." ]
python
train
35.1
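A short usage sketch against the estnltk 1.x API (layer names such as 'sentences' come from its built-in tokenization layers; tagging happens on demand inside split_by):

from estnltk import Text

text = Text('Esimene lause. Teine lause.')
for sentence in text.split_by('sentences'):
    print(sentence.text)
# Esimene lause.
# Teine lause.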
cloud9ers/gurumate
environment/lib/python2.7/site-packages/IPython/utils/path.py
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/utils/path.py#L322-L333
def locate_profile(profile='default'): """Find the path to the folder associated with a given profile. I.e. find $IPYTHONDIR/profile_whatever. """ from IPython.core.profiledir import ProfileDir, ProfileDirError try: pd = ProfileDir.find_profile_dir_by_name(get_ipython_dir(), profile) except ProfileDirError: # IOError makes more sense when people are expecting a path raise IOError("Couldn't find profile %r" % profile) return pd.location
[ "def", "locate_profile", "(", "profile", "=", "'default'", ")", ":", "from", "IPython", ".", "core", ".", "profiledir", "import", "ProfileDir", ",", "ProfileDirError", "try", ":", "pd", "=", "ProfileDir", ".", "find_profile_dir_by_name", "(", "get_ipython_dir", "(", ")", ",", "profile", ")", "except", "ProfileDirError", ":", "# IOError makes more sense when people are expecting a path", "raise", "IOError", "(", "\"Couldn't find profile %r\"", "%", "profile", ")", "return", "pd", ".", "location" ]
Find the path to the folder associated with a given profile. I.e. find $IPYTHONDIR/profile_whatever.
[ "Find", "the", "path", "to", "the", "folder", "associated", "with", "a", "given", "profile", ".", "I", ".", "e", ".", "find", "$IPYTHONDIR", "/", "profile_whatever", "." ]
python
test
40.416667
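Usage is straightforward; note the deliberate translation of ProfileDirError into IOError for callers expecting a path:

from IPython.utils.path import locate_profile

# resolves to something like ~/.ipython/profile_default
print(locate_profile('default'))

try:
    locate_profile('no_such_profile')
except IOError as e:
    print(e)  # Couldn't find profile 'no_such_profile'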
gabstopper/smc-python
smc/elements/helpers.py
https://github.com/gabstopper/smc-python/blob/e027b8a5dcfaf884eada32d113d41c1e56b32457/smc/elements/helpers.py#L45-L59
def zone_helper(zone): """ Zone finder by name. If zone doesn't exist, create it and return the href :param str zone: name of zone (if href, will be returned as is) :return str href: href of zone """ if zone is None: return None elif isinstance(zone, Zone): return zone.href elif zone.startswith('http'): return zone return Zone.get_or_create(name=zone).href
[ "def", "zone_helper", "(", "zone", ")", ":", "if", "zone", "is", "None", ":", "return", "None", "elif", "isinstance", "(", "zone", ",", "Zone", ")", ":", "return", "zone", ".", "href", "elif", "zone", ".", "startswith", "(", "'http'", ")", ":", "return", "zone", "return", "Zone", ".", "get_or_create", "(", "name", "=", "zone", ")", ".", "href" ]
Zone finder by name. If zone doesn't exist, create it and return the href :param str zone: name of zone (if href, will be returned as is) :return str href: href of zone
[ "Zone", "finder", "by", "name", ".", "If", "zone", "doesn", "t", "exist", "create", "it", "and", "return", "the", "href" ]
python
train
27.266667
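A sketch of the four input shapes this helper accepts (assumes an smc-python session is already logged in; 'Internal' and the href below are made-up values):

from smc.elements.helpers import zone_helper
from smc.elements.network import Zone

zone_helper(None)                          # -> None
zone_helper('http://smc/elements/zone/4')  # href passed through as-is
zone_helper(Zone('Internal'))              # -> href of the existing element
zone_helper('Internal')                    # fetched, or created if missing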
iotile/coretools
transport_plugins/native_ble/iotile_transport_native_ble/device_adapter.py
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/transport_plugins/native_ble/iotile_transport_native_ble/device_adapter.py#L218-L226
def stop_scan(self):
    """Stop scanning."""
    try:
        self.bable.stop_scan(sync=True)
    except bable_interface.BaBLEException:
        # If we errored out it is because we were not currently scanning
        pass

    self.scanning = False
[ "def", "stop_scan", "(", "self", ")", ":", "try", ":", "self", ".", "bable", ".", "stop_scan", "(", "sync", "=", "True", ")", "except", "bable_interface", ".", "BaBLEException", ":", "# If we errored our it is because we were not currently scanning", "pass", "self", ".", "scanning", "=", "False" ]
Stop scanning.
[ "Stop", "to", "scan", "." ]
python
train
29.888889
minhhoit/yacms
yacms/utils/docs.py
https://github.com/minhhoit/yacms/blob/2921b706b7107c6e8c5f2bbf790ff11f85a2167f/yacms/utils/docs.py#L288-L302
def build_requirements(docs_path, package_name="yacms"): """ Updates the requirements file with yacms's version number. """ mezz_string = "yacms==" project_path = os.path.join(docs_path, "..") requirements_file = os.path.join(project_path, package_name, "project_template", "requirements.txt") with open(requirements_file, "r") as f: requirements = f.readlines() with open(requirements_file, "w") as f: f.write("yacms==%s\n" % __version__) for requirement in requirements: if requirement.strip() and not requirement.startswith(mezz_string): f.write(requirement)
[ "def", "build_requirements", "(", "docs_path", ",", "package_name", "=", "\"yacms\"", ")", ":", "mezz_string", "=", "\"yacms==\"", "project_path", "=", "os", ".", "path", ".", "join", "(", "docs_path", ",", "\"..\"", ")", "requirements_file", "=", "os", ".", "path", ".", "join", "(", "project_path", ",", "package_name", ",", "\"project_template\"", ",", "\"requirements.txt\"", ")", "with", "open", "(", "requirements_file", ",", "\"r\"", ")", "as", "f", ":", "requirements", "=", "f", ".", "readlines", "(", ")", "with", "open", "(", "requirements_file", ",", "\"w\"", ")", "as", "f", ":", "f", ".", "write", "(", "\"yacms==%s\\n\"", "%", "__version__", ")", "for", "requirement", "in", "requirements", ":", "if", "requirement", ".", "strip", "(", ")", "and", "not", "requirement", ".", "startswith", "(", "mezz_string", ")", ":", "f", ".", "write", "(", "requirement", ")" ]
Updates the requirements file with yacms's version number.
[ "Updates", "the", "requirements", "file", "with", "yacms", "s", "version", "number", "." ]
python
train
44.533333
HazyResearch/fonduer
src/fonduer/learning/disc_models/sparse_lstm.py
https://github.com/HazyResearch/fonduer/blob/4520f86a716f03dcca458a9f4bddac75b4e7068f/src/fonduer/learning/disc_models/sparse_lstm.py#L25-L64
def forward(self, X):
    """Forward function.

    :param X: The input (batch) of the model contains word sequences for lstm,
        features and feature weights.
    :type X: For word sequences: a list of torch.Tensor pair (word sequence
        and word mask) of shape (batch_size, sequence_length).
        For features: torch.Tensor of shape (batch_size, sparse_feature_size).
        For feature weights: torch.Tensor of shape
        (batch_size, sparse_feature_size).
    :return: The output of LSTM layer.
    :rtype: torch.Tensor of shape (batch_size, num_classes)
    """
    s = X[:-2]
    f = X[-2]
    w = X[-1]

    batch_size = len(f)

    # Generate lstm weight indices
    x_idx = self._cuda(
        torch.as_tensor(np.arange(1, self.settings["lstm_dim"] + 1)).repeat(
            batch_size, 1
        )
    )

    outputs = self._cuda(torch.Tensor([]))

    # Calculate textual features from LSTMs
    for i in range(len(s)):
        state_word = self.lstms[0].init_hidden(batch_size)
        output = self.lstms[0].forward(s[i][0], s[i][1], state_word)
        outputs = torch.cat((outputs, output), 1)

    # Concatenate textual features with multi-modal features
    features = torch.cat((x_idx, f), 1)
    weights = torch.cat((outputs, w), 1)

    return self.sparse_linear(features, weights)
[ "def", "forward", "(", "self", ",", "X", ")", ":", "s", "=", "X", "[", ":", "-", "2", "]", "f", "=", "X", "[", "-", "2", "]", "w", "=", "X", "[", "-", "1", "]", "batch_size", "=", "len", "(", "f", ")", "# Generate lstm weight indices", "x_idx", "=", "self", ".", "_cuda", "(", "torch", ".", "as_tensor", "(", "np", ".", "arange", "(", "1", ",", "self", ".", "settings", "[", "\"lstm_dim\"", "]", "+", "1", ")", ")", ".", "repeat", "(", "batch_size", ",", "1", ")", ")", "outputs", "=", "self", ".", "_cuda", "(", "torch", ".", "Tensor", "(", "[", "]", ")", ")", "# Calculate textual features from LSTMs", "for", "i", "in", "range", "(", "len", "(", "s", ")", ")", ":", "state_word", "=", "self", ".", "lstms", "[", "0", "]", ".", "init_hidden", "(", "batch_size", ")", "output", "=", "self", ".", "lstms", "[", "0", "]", ".", "forward", "(", "s", "[", "i", "]", "[", "0", "]", ",", "s", "[", "i", "]", "[", "1", "]", ",", "state_word", ")", "outputs", "=", "torch", ".", "cat", "(", "(", "outputs", ",", "output", ")", ",", "1", ")", "# Concatenate textual features with multi-modal features", "feaures", "=", "torch", ".", "cat", "(", "(", "x_idx", ",", "f", ")", ",", "1", ")", "weights", "=", "torch", ".", "cat", "(", "(", "outputs", ",", "w", ")", ",", "1", ")", "return", "self", ".", "sparse_linear", "(", "feaures", ",", "weights", ")" ]
Forward function. :param X: The input (batch) of the model contains word sequences for lstm, features and feature weights. :type X: For word sequences: a list of torch.Tensor pair (word sequence and word mask) of shape (batch_size, sequence_length). For features: torch.Tensor of shape (batch_size, sparse_feature_size). For feature weights: torch.Tensor of shape (batch_size, sparse_feature_size). :return: The output of LSTM layer. :rtype: torch.Tensor of shape (batch_size, num_classes)
[ "Forward", "function", "." ]
python
train
35.05
UCBerkeleySETI/blimpy
blimpy/waterfall.py
https://github.com/UCBerkeleySETI/blimpy/blob/b8822d3e3e911944370d84371a91fa0c29e9772e/blimpy/waterfall.py#L195-L212
def info(self): """ Print header information and other derived information. """ print("\n--- File Info ---") for key, val in self.file_header.items(): if key == 'src_raj': val = val.to_string(unit=u.hour, sep=':') if key == 'src_dej': val = val.to_string(unit=u.deg, sep=':') print("%16s : %32s" % (key, val)) print("\n%16s : %32s" % ("Num ints in file", self.n_ints_in_file)) print("%16s : %32s" % ("File shape", self.file_shape)) print("--- Selection Info ---") print("%16s : %32s" % ("Data selection shape", self.selection_shape)) print("%16s : %32s" % ("Minimum freq (MHz)", self.container.f_start)) print("%16s : %32s" % ("Maximum freq (MHz)", self.container.f_stop))
[ "def", "info", "(", "self", ")", ":", "print", "(", "\"\\n--- File Info ---\"", ")", "for", "key", ",", "val", "in", "self", ".", "file_header", ".", "items", "(", ")", ":", "if", "key", "==", "'src_raj'", ":", "val", "=", "val", ".", "to_string", "(", "unit", "=", "u", ".", "hour", ",", "sep", "=", "':'", ")", "if", "key", "==", "'src_dej'", ":", "val", "=", "val", ".", "to_string", "(", "unit", "=", "u", ".", "deg", ",", "sep", "=", "':'", ")", "print", "(", "\"%16s : %32s\"", "%", "(", "key", ",", "val", ")", ")", "print", "(", "\"\\n%16s : %32s\"", "%", "(", "\"Num ints in file\"", ",", "self", ".", "n_ints_in_file", ")", ")", "print", "(", "\"%16s : %32s\"", "%", "(", "\"File shape\"", ",", "self", ".", "file_shape", ")", ")", "print", "(", "\"--- Selection Info ---\"", ")", "print", "(", "\"%16s : %32s\"", "%", "(", "\"Data selection shape\"", ",", "self", ".", "selection_shape", ")", ")", "print", "(", "\"%16s : %32s\"", "%", "(", "\"Minimum freq (MHz)\"", ",", "self", ".", "container", ".", "f_start", ")", ")", "print", "(", "\"%16s : %32s\"", "%", "(", "\"Maximum freq (MHz)\"", ",", "self", ".", "container", ".", "f_stop", ")", ")" ]
Print header information and other derived information.
[ "Print", "header", "information", "and", "other", "derived", "information", "." ]
python
test
44.333333
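Typical use (the filename and frequency bounds below are placeholders):

from blimpy import Waterfall

wf = Waterfall('observation.h5', f_start=8419.0, f_stop=8420.0)
wf.info()  # header fields, then the shape and freq range of the selection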
praekeltfoundation/seed-control-interface
ci/utils.py
https://github.com/praekeltfoundation/seed-control-interface/blob/32ddad88b5bc2f8f4d80b848361899da2e081636/ci/utils.py#L162-L185
def get_page_of_iterator(iterator, page_size, page_number):
    """
    Get a page from an iterator, handling invalid input from the page
    number by defaulting to the first page.
    """
    try:
        page_number = validate_page_number(page_number)
    except (PageNotAnInteger, EmptyPage):
        page_number = 1

    start = (page_number - 1) * page_size
    # End 1 more than we need, so that we can see if there's another page
    end = (page_number * page_size) + 1

    skipped_items = list(islice(iterator, start))
    items = list(islice(iterator, end))
    if len(items) == 0 and page_number != 1:
        items = skipped_items
        page_number = 1

    has_next = len(items) > page_size
    items = items[:page_size]

    return NoCountPage(items, page_number, page_size, has_next)
[ "def", "get_page_of_iterator", "(", "iterator", ",", "page_size", ",", "page_number", ")", ":", "try", ":", "page_number", "=", "validate_page_number", "(", "page_number", ")", "except", "(", "PageNotAnInteger", ",", "EmptyPage", ")", ":", "page_number", "=", "1", "start", "=", "(", "page_number", "-", "1", ")", "*", "page_size", "# End 1 more than we need, so that we can see if there's another page", "end", "=", "(", "page_number", "*", "page_size", ")", "+", "1", "skipped_items", "=", "list", "(", "islice", "(", "iterator", ",", "start", ")", ")", "items", "=", "list", "(", "islice", "(", "iterator", ",", "end", ")", ")", "if", "len", "(", "items", ")", "==", "0", "and", "page_number", "!=", "1", ":", "items", "=", "skipped_items", "page_number", "=", "1", "has_next", "=", "len", "(", "items", ")", ">", "page_size", "items", "=", "items", "[", ":", "page_size", "]", "return", "NoCountPage", "(", "items", ",", "page_number", ",", "page_size", ",", "has_next", ")" ]
Get a page from an iterator, handling invalid input from the page
number by defaulting to the first page.
[ "Get", "a", "page", "from", "an", "interator", "handling", "invalid", "input", "from", "the", "page", "number", "by", "defaulting", "to", "the", "first", "page", "." ]
python
test
32.458333
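The over-fetch by one item is what lets the pager report has_next without a COUNT query; a self-contained sketch of the trick (note only page_size + 1 items are needed after the skip, a slightly tighter bound than the end computed above):

from itertools import islice

def page_slice(iterator, page_size, page_number):
    # consume and discard the earlier pages
    list(islice(iterator, (page_number - 1) * page_size))
    # read one extra item so we can tell whether another page exists
    items = list(islice(iterator, page_size + 1))
    has_next = len(items) > page_size
    return items[:page_size], has_next

print(page_slice(iter(range(25)), 10, 2))
# ([10, 11, 12, 13, 14, 15, 16, 17, 18, 19], True)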
lucasmaystre/choix
choix/mm.py
https://github.com/lucasmaystre/choix/blob/05a57a10bb707338113a9d91601ca528ead7a881/choix/mm.py#L149-L183
def mm_top1( n_items, data, initial_params=None, alpha=0.0, max_iter=10000, tol=1e-8): """Compute the ML estimate of model parameters using the MM algorithm. This function computes the maximum-likelihood (ML) estimate of model parameters given top-1 data (see :ref:`data-top1`), using the minorization-maximization (MM) algorithm [Hun04]_, [CD12]_. If ``alpha > 0``, the function returns the maximum a-posteriori (MAP) estimate under a (peaked) Dirichlet prior. See :ref:`regularization` for details. Parameters ---------- n_items : int Number of distinct items. data : list of lists Top-1 data. initial_params : array_like, optional Parameters used to initialize the iterative procedure. alpha : float, optional Regularization parameter. max_iter : int, optional Maximum number of iterations allowed. tol : float, optional Maximum L1-norm of the difference between successive iterates to declare convergence. Returns ------- params : numpy.ndarray The ML estimate of model parameters. """ return _mm(n_items, data, initial_params, alpha, max_iter, tol, _mm_top1)
[ "def", "mm_top1", "(", "n_items", ",", "data", ",", "initial_params", "=", "None", ",", "alpha", "=", "0.0", ",", "max_iter", "=", "10000", ",", "tol", "=", "1e-8", ")", ":", "return", "_mm", "(", "n_items", ",", "data", ",", "initial_params", ",", "alpha", ",", "max_iter", ",", "tol", ",", "_mm_top1", ")" ]
Compute the ML estimate of model parameters using the MM algorithm. This function computes the maximum-likelihood (ML) estimate of model parameters given top-1 data (see :ref:`data-top1`), using the minorization-maximization (MM) algorithm [Hun04]_, [CD12]_. If ``alpha > 0``, the function returns the maximum a-posteriori (MAP) estimate under a (peaked) Dirichlet prior. See :ref:`regularization` for details. Parameters ---------- n_items : int Number of distinct items. data : list of lists Top-1 data. initial_params : array_like, optional Parameters used to initialize the iterative procedure. alpha : float, optional Regularization parameter. max_iter : int, optional Maximum number of iterations allowed. tol : float, optional Maximum L1-norm of the difference between successive iterates to declare convergence. Returns ------- params : numpy.ndarray The ML estimate of model parameters.
[ "Compute", "the", "ML", "estimate", "of", "model", "parameters", "using", "the", "MM", "algorithm", "." ]
python
train
34.085714
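A usage sketch (to my reading of choix's data format, each top-1 observation is a pair (winner, losers); the data below are made up, and the small alpha keeps the MM iteration well behaved on items that never lose):

import choix

data = [
    (0, [1, 2]),   # item 0 chosen over items 1 and 2
    (1, [2, 3]),
    (0, [3]),
    (2, [0, 3]),
]
params = choix.mm_top1(4, data, alpha=0.01)
print(params)  # one log-strength estimate per item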
atlassian-api/atlassian-python-api
atlassian/jira.py
https://github.com/atlassian-api/atlassian-python-api/blob/540d269905c3e7547b666fe30c647b2d512cf358/atlassian/jira.py#L652-L669
def update_issue_remote_link_by_id(self, issue_key, link_id, url, title, global_id=None, relationship=None): """ Update existing Remote Link on Issue :param issue_key: str :param link_id: str :param url: str :param title: str :param global_id: str, OPTIONAL: :param relationship: str, Optional. Default by built-in method: 'Web Link' """ data = {'object': {'url': url, 'title': title}} if global_id: data['globalId'] = global_id if relationship: data['relationship'] = relationship url = 'rest/api/2/issue/{issue_key}/remotelink/{link_id}'.format(issue_key=issue_key, link_id=link_id) return self.put(url, data=data)
[ "def", "update_issue_remote_link_by_id", "(", "self", ",", "issue_key", ",", "link_id", ",", "url", ",", "title", ",", "global_id", "=", "None", ",", "relationship", "=", "None", ")", ":", "data", "=", "{", "'object'", ":", "{", "'url'", ":", "url", ",", "'title'", ":", "title", "}", "}", "if", "global_id", ":", "data", "[", "'globalId'", "]", "=", "global_id", "if", "relationship", ":", "data", "[", "'relationship'", "]", "=", "relationship", "url", "=", "'rest/api/2/issue/{issue_key}/remotelink/{link_id}'", ".", "format", "(", "issue_key", "=", "issue_key", ",", "link_id", "=", "link_id", ")", "return", "self", ".", "put", "(", "url", ",", "data", "=", "data", ")" ]
Update existing Remote Link on Issue :param issue_key: str :param link_id: str :param url: str :param title: str :param global_id: str, OPTIONAL: :param relationship: str, Optional. Default by built-in method: 'Web Link'
[ "Update", "existing", "Remote", "Link", "on", "Issue", ":", "param", "issue_key", ":", "str", ":", "param", "link_id", ":", "str", ":", "param", "url", ":", "str", ":", "param", "title", ":", "str", ":", "param", "global_id", ":", "str", "OPTIONAL", ":", ":", "param", "relationship", ":", "str", "Optional", ".", "Default", "by", "built", "-", "in", "method", ":", "Web", "Link" ]
python
train
41.333333
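A usage sketch (instance URL, credentials, issue key and link id below are placeholders):

from atlassian import Jira

jira = Jira(url='https://jira.example.com', username='bot', password='secret')

# PUTs rest/api/2/issue/PROJ-1/remotelink/10000 with the new object payload
jira.update_issue_remote_link_by_id(
    'PROJ-1', '10000',
    url='https://wiki.example.com/design',
    title='Design notes',
    relationship='mentioned in')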
erdc/RAPIDpy
RAPIDpy/rapid.py
https://github.com/erdc/RAPIDpy/blob/50e14e130554b254a00ff23b226cd7e4c6cfe91a/RAPIDpy/rapid.py#L342-L427
def update_reach_number_data(self): """ Update the reach number data for the namelist based on input files. .. warning:: You need to make sure you set *rapid_connect_file* and *riv_bas_id_file* before running this function. Example: .. code:: python from RAPIDpy import RAPID rapid_manager = RAPID( rapid_connect_file='../rapid-io/input/rapid_connect.csv', riv_bas_id_file='../rapid-io/input/riv_bas_id.csv', ) rapid_manager.update_reach_number_data() Example with forcing data: .. code:: python from RAPIDpy import RAPID rapid_manager = RAPID( rapid_connect_file='../rapid-io/input/rapid_connect.csv', riv_bas_id_file='../rapid-io/input/riv_bas_id.csv', Qfor_file='../rapid-io/input/qfor_file.csv', for_tot_id_file='../rapid-io/input/for_tot_id_file.csv', for_use_id_file='../rapid-io/input/for_use_id_file.csv', ZS_dtF=3*60*60, BS_opt_for=True ) rapid_manager.update_reach_number_data() """ if not self.rapid_connect_file: log("Missing rapid_connect_file. " "Please set before running this function ...", "ERROR") if not self.riv_bas_id_file: log("Missing riv_bas_id_file. " "Please set before running this function ...", "ERROR") # get rapid connect info rapid_connect_table = np.loadtxt(self.rapid_connect_file, ndmin=2, delimiter=",", dtype=int) self.IS_riv_tot = int(rapid_connect_table.shape[0]) self.IS_max_up = int(rapid_connect_table[:, 2].max()) # get riv_bas_id info riv_bas_id_table = np.loadtxt(self.riv_bas_id_file, ndmin=1, delimiter=",", usecols=(0,), dtype=int) self.IS_riv_bas = int(riv_bas_id_table.size) # add the forcing files if not self.for_tot_id_file: self.IS_for_tot = 0 log("Missing for_tot_id_file. Skipping ...", "WARNING") else: # get riv_bas_id info for_tot_id_table = np.loadtxt(self.for_tot_id_file, ndmin=1, delimiter=",", usecols=(0,), dtype=int) self.IS_for_tot = int(for_tot_id_table.size) if not self.for_use_id_file: self.IS_for_use = 0 log("Missing for_use_id_file. Skipping ...", "WARNING") else: # get riv_bas_id info for_use_id_table = np.loadtxt(self.for_use_id_file, ndmin=1, delimiter=",", usecols=(0,), dtype=int) self.IS_for_use = int(for_use_id_table.size)
[ "def", "update_reach_number_data", "(", "self", ")", ":", "if", "not", "self", ".", "rapid_connect_file", ":", "log", "(", "\"Missing rapid_connect_file. \"", "\"Please set before running this function ...\"", ",", "\"ERROR\"", ")", "if", "not", "self", ".", "riv_bas_id_file", ":", "log", "(", "\"Missing riv_bas_id_file. \"", "\"Please set before running this function ...\"", ",", "\"ERROR\"", ")", "# get rapid connect info", "rapid_connect_table", "=", "np", ".", "loadtxt", "(", "self", ".", "rapid_connect_file", ",", "ndmin", "=", "2", ",", "delimiter", "=", "\",\"", ",", "dtype", "=", "int", ")", "self", ".", "IS_riv_tot", "=", "int", "(", "rapid_connect_table", ".", "shape", "[", "0", "]", ")", "self", ".", "IS_max_up", "=", "int", "(", "rapid_connect_table", "[", ":", ",", "2", "]", ".", "max", "(", ")", ")", "# get riv_bas_id info", "riv_bas_id_table", "=", "np", ".", "loadtxt", "(", "self", ".", "riv_bas_id_file", ",", "ndmin", "=", "1", ",", "delimiter", "=", "\",\"", ",", "usecols", "=", "(", "0", ",", ")", ",", "dtype", "=", "int", ")", "self", ".", "IS_riv_bas", "=", "int", "(", "riv_bas_id_table", ".", "size", ")", "# add the forcing files", "if", "not", "self", ".", "for_tot_id_file", ":", "self", ".", "IS_for_tot", "=", "0", "log", "(", "\"Missing for_tot_id_file. Skipping ...\"", ",", "\"WARNING\"", ")", "else", ":", "# get riv_bas_id info", "for_tot_id_table", "=", "np", ".", "loadtxt", "(", "self", ".", "for_tot_id_file", ",", "ndmin", "=", "1", ",", "delimiter", "=", "\",\"", ",", "usecols", "=", "(", "0", ",", ")", ",", "dtype", "=", "int", ")", "self", ".", "IS_for_tot", "=", "int", "(", "for_tot_id_table", ".", "size", ")", "if", "not", "self", ".", "for_use_id_file", ":", "self", ".", "IS_for_use", "=", "0", "log", "(", "\"Missing for_use_id_file. Skipping ...\"", ",", "\"WARNING\"", ")", "else", ":", "# get riv_bas_id info", "for_use_id_table", "=", "np", ".", "loadtxt", "(", "self", ".", "for_use_id_file", ",", "ndmin", "=", "1", ",", "delimiter", "=", "\",\"", ",", "usecols", "=", "(", "0", ",", ")", ",", "dtype", "=", "int", ")", "self", ".", "IS_for_use", "=", "int", "(", "for_use_id_table", ".", "size", ")" ]
Update the reach number data for the namelist based on input files. .. warning:: You need to make sure you set *rapid_connect_file* and *riv_bas_id_file* before running this function. Example: .. code:: python from RAPIDpy import RAPID rapid_manager = RAPID( rapid_connect_file='../rapid-io/input/rapid_connect.csv', riv_bas_id_file='../rapid-io/input/riv_bas_id.csv', ) rapid_manager.update_reach_number_data() Example with forcing data: .. code:: python from RAPIDpy import RAPID rapid_manager = RAPID( rapid_connect_file='../rapid-io/input/rapid_connect.csv', riv_bas_id_file='../rapid-io/input/riv_bas_id.csv', Qfor_file='../rapid-io/input/qfor_file.csv', for_tot_id_file='../rapid-io/input/for_tot_id_file.csv', for_use_id_file='../rapid-io/input/for_use_id_file.csv', ZS_dtF=3*60*60, BS_opt_for=True ) rapid_manager.update_reach_number_data()
[ "Update", "the", "reach", "number", "data", "for", "the", "namelist", "based", "on", "input", "files", "." ]
python
train
34.918605
ellisonleao/pyshorteners
pyshorteners/shorteners/adfly.py
https://github.com/ellisonleao/pyshorteners/blob/116155751c943f8d875c819d5a41db10515db18d/pyshorteners/shorteners/adfly.py#L79-L118
def expand(self, url): """Expand implementation for Adf.ly Args: url: the URL you want to expand Returns: A string containing the expanded URL Raises: BadAPIResponseException: If the data is malformed or we got a bad status code on API response ShorteningErrorException: If the API Returns an error as response """ url = self.clean_url(url) expand_url = f'{self.api_url}v1/expand' payload = { 'domain': getattr(self, 'domain', 'adf.ly'), 'advert_type': getattr(self, 'type', 'int'), 'group_id': getattr(self, 'group_id', None), 'key': self.api_key, 'user_id': self.user_id, 'url': url, } response = self._post(expand_url, data=payload) if not response.ok: raise BadAPIResponseException(response.content) try: data = response.json() except json.decoder.JSONDecodeError: raise BadAPIResponseException('API response could not be decoded') if data.get('errors'): errors = ','.join(i['msg'] for i in data['errors']) raise ShorteningErrorException(errors) if not data.get('data'): raise BadAPIResponseException(response.content) return data['data'][0]['url']
[ "def", "expand", "(", "self", ",", "url", ")", ":", "url", "=", "self", ".", "clean_url", "(", "url", ")", "expand_url", "=", "f'{self.api_url}v1/expand'", "payload", "=", "{", "'domain'", ":", "getattr", "(", "self", ",", "'domain'", ",", "'adf.ly'", ")", ",", "'advert_type'", ":", "getattr", "(", "self", ",", "'type'", ",", "'int'", ")", ",", "'group_id'", ":", "getattr", "(", "self", ",", "'group_id'", ",", "None", ")", ",", "'key'", ":", "self", ".", "api_key", ",", "'user_id'", ":", "self", ".", "user_id", ",", "'url'", ":", "url", ",", "}", "response", "=", "self", ".", "_post", "(", "expand_url", ",", "data", "=", "payload", ")", "if", "not", "response", ".", "ok", ":", "raise", "BadAPIResponseException", "(", "response", ".", "content", ")", "try", ":", "data", "=", "response", ".", "json", "(", ")", "except", "json", ".", "decoder", ".", "JSONDecodeError", ":", "raise", "BadAPIResponseException", "(", "'API response could not be decoded'", ")", "if", "data", ".", "get", "(", "'errors'", ")", ":", "errors", "=", "','", ".", "join", "(", "i", "[", "'msg'", "]", "for", "i", "in", "data", "[", "'errors'", "]", ")", "raise", "ShorteningErrorException", "(", "errors", ")", "if", "not", "data", ".", "get", "(", "'data'", ")", ":", "raise", "BadAPIResponseException", "(", "response", ".", "content", ")", "return", "data", "[", "'data'", "]", "[", "0", "]", "[", "'url'", "]" ]
Expand implementation for Adf.ly Args: url: the URL you want to expand Returns: A string containing the expanded URL Raises: BadAPIResponseException: If the data is malformed or we got a bad status code on API response ShorteningErrorException: If the API Returns an error as response
[ "Expand", "implementation", "for", "Adf", ".", "ly", "Args", ":", "url", ":", "the", "URL", "you", "want", "to", "expand" ]
python
train
33.775
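Through the package's public interface this sits behind the Shortener facade (the key, user id and short URL below are placeholders):

import pyshorteners

s = pyshorteners.Shortener(api_key='YOUR_ADFLY_KEY', user_id='YOUR_USER_ID',
                           domain='adf.ly', type='int')
long_url = s.adfly.expand('http://adf.ly/1npeXn')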
JelteF/PyLaTeX
pylatex/tikz.py
https://github.com/JelteF/PyLaTeX/blob/62d9d9912ce8445e6629cdbcb80ad86143a1ed23/pylatex/tikz.py#L249-L263
def get_anchor_point(self, anchor_name):
    """Return an anchor point of the node, if it exists."""
    if anchor_name in self._possible_anchors:
        return TikZNodeAnchor(self.handle, anchor_name)
    else:
        try:
            anchor = int(anchor_name.split('_')[1])
        except (ValueError, IndexError):
            anchor = None

        if anchor is not None:
            return TikZNodeAnchor(self.handle, str(anchor))

    raise ValueError('Invalid anchor name: "{}"'.format(anchor_name))
[ "def", "get_anchor_point", "(", "self", ",", "anchor_name", ")", ":", "if", "anchor_name", "in", "self", ".", "_possible_anchors", ":", "return", "TikZNodeAnchor", "(", "self", ".", "handle", ",", "anchor_name", ")", "else", ":", "try", ":", "anchor", "=", "int", "(", "anchor_name", ".", "split", "(", "'_'", ")", "[", "1", "]", ")", "except", ":", "anchor", "=", "None", "if", "anchor", "is", "not", "None", ":", "return", "TikZNodeAnchor", "(", "self", ".", "handle", ",", "str", "(", "anchor", ")", ")", "raise", "ValueError", "(", "'Invalid anchor name: \"{}\"'", ".", "format", "(", "anchor_name", ")", ")" ]
Return an anchor point of the node, if it exists.
[ "Return", "an", "anchor", "point", "of", "the", "node", "if", "it", "exists", "." ]
python
train
34.2
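A short sketch of the two lookup paths (assumes 'north' is among the node's named anchors; the classes here are pylatex's TikZ helpers):

from pylatex import TikZCoordinate, TikZNode, TikZOptions

node = TikZNode(text='demo', handle='box',
                at=TikZCoordinate(0, 0),
                options=TikZOptions('draw'))

node.get_anchor_point('north')  # named anchor -> renders as (box.north)
node.get_anchor_point('_180')   # numeric form  -> renders as (box.180)
# node.get_anchor_point('bogus') would raise ValueError('Invalid anchor name: "bogus"')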
markovmodel/msmtools
msmtools/analysis/api.py
https://github.com/markovmodel/msmtools/blob/54dc76dd2113a0e8f3d15d5316abab41402941be/msmtools/analysis/api.py#L1259-L1350
def correlation(T, obs1, obs2=None, times=(1), maxtime=None, k=None, ncv=None, return_times=False): r"""Time-correlation for equilibrium experiment. Parameters ---------- T : (M, M) ndarray or scipy.sparse matrix Transition matrix obs1 : (M,) ndarray Observable, represented as vector on state space obs2 : (M,) ndarray (optional) Second observable, for cross-correlations times : array-like of int (optional), default=(1) List of times (in tau) at which to compute correlation maxtime : int, optional, default=None Maximum time step to use. Equivalent to . Alternative to times. k : int (optional) Number of eigenvalues and eigenvectors to use for computation ncv : int (optional) The number of Lanczos vectors generated, `ncv` must be greater than k; it is recommended that ncv > 2*k Returns ------- correlations : ndarray Correlation values at given times times : ndarray, optional time points at which the correlation was computed (if return_times=True) References ---------- .. [1] Noe, F, S Doose, I Daidone, M Loellmann, M Sauer, J D Chodera and J Smith. 2010. Dynamical fingerprints for probing individual relaxation processes in biomolecular dynamics with simulations and kinetic experiments. PNAS 108 (12): 4822-4827. Notes ----- **Auto-correlation** The auto-correlation of an observable :math:`a(x)` for a system in equilibrium is .. math:: \mathbb{E}_{\mu}[a(x,0)a(x,t)]=\sum_x \mu(x) a(x, 0) a(x, t) :math:`a(x,0)=a(x)` is the observable at time :math:`t=0`. It can be propagated forward in time using the t-step transition matrix :math:`p^{t}(x, y)`. The propagated observable at time :math:`t` is :math:`a(x, t)=\sum_y p^t(x, y)a(y, 0)`. Using the eigenvlaues and eigenvectors of the transition matrix the autocorrelation can be written as .. math:: \mathbb{E}_{\mu}[a(x,0)a(x,t)]=\sum_i \lambda_i^t \langle a, r_i\rangle_{\mu} \langle l_i, a \rangle. **Cross-correlation** The cross-correlation of two observables :math:`a(x)`, :math:`b(x)` is similarly given .. math:: \mathbb{E}_{\mu}[a(x,0)b(x,t)]=\sum_x \mu(x) a(x, 0) b(x, t) Examples -------- >>> import numpy as np >>> from msmtools.analysis import correlation >>> T = np.array([[0.9, 0.1, 0.0], [0.5, 0.0, 0.5], [0.0, 0.1, 0.9]]) >>> a = np.array([1.0, 0.0, 0.0]) >>> times = np.array([1, 5, 10, 20]) >>> corr = correlation(T, a, times=times) >>> corr array([ 0.40909091, 0.34081364, 0.28585667, 0.23424263]) """ # check if square matrix and remember size T = _types.ensure_ndarray_or_sparse(T, ndim=2, uniform=True, kind='numeric') n = T.shape[0] obs1 = _types.ensure_ndarray(obs1, ndim=1, size=n, kind='numeric') obs2 = _types.ensure_ndarray_or_None(obs2, ndim=1, size=n, kind='numeric') times = _types.ensure_int_vector(times, require_order=True) # check input # go if _issparse(T): return sparse.fingerprints.correlation(T, obs1, obs2=obs2, times=times, k=k, ncv=ncv) else: return dense.fingerprints.correlation(T, obs1, obs2=obs2, times=times, k=k)
[ "def", "correlation", "(", "T", ",", "obs1", ",", "obs2", "=", "None", ",", "times", "=", "(", "1", ")", ",", "maxtime", "=", "None", ",", "k", "=", "None", ",", "ncv", "=", "None", ",", "return_times", "=", "False", ")", ":", "# check if square matrix and remember size", "T", "=", "_types", ".", "ensure_ndarray_or_sparse", "(", "T", ",", "ndim", "=", "2", ",", "uniform", "=", "True", ",", "kind", "=", "'numeric'", ")", "n", "=", "T", ".", "shape", "[", "0", "]", "obs1", "=", "_types", ".", "ensure_ndarray", "(", "obs1", ",", "ndim", "=", "1", ",", "size", "=", "n", ",", "kind", "=", "'numeric'", ")", "obs2", "=", "_types", ".", "ensure_ndarray_or_None", "(", "obs2", ",", "ndim", "=", "1", ",", "size", "=", "n", ",", "kind", "=", "'numeric'", ")", "times", "=", "_types", ".", "ensure_int_vector", "(", "times", ",", "require_order", "=", "True", ")", "# check input", "# go", "if", "_issparse", "(", "T", ")", ":", "return", "sparse", ".", "fingerprints", ".", "correlation", "(", "T", ",", "obs1", ",", "obs2", "=", "obs2", ",", "times", "=", "times", ",", "k", "=", "k", ",", "ncv", "=", "ncv", ")", "else", ":", "return", "dense", ".", "fingerprints", ".", "correlation", "(", "T", ",", "obs1", ",", "obs2", "=", "obs2", ",", "times", "=", "times", ",", "k", "=", "k", ")" ]
r"""Time-correlation for equilibrium experiment.

Parameters
----------
T : (M, M) ndarray or scipy.sparse matrix
    Transition matrix
obs1 : (M,) ndarray
    Observable, represented as vector on state space
obs2 : (M,) ndarray (optional)
    Second observable, for cross-correlations
times : array-like of int (optional), default=(1)
    List of times (in tau) at which to compute correlation
maxtime : int, optional, default=None
    Maximum time step to use. Equivalent to . Alternative to times.
k : int (optional)
    Number of eigenvalues and eigenvectors to use for computation
ncv : int (optional)
    The number of Lanczos vectors generated, `ncv` must be greater than k;
    it is recommended that ncv > 2*k

Returns
-------
correlations : ndarray
    Correlation values at given times
times : ndarray, optional
    time points at which the correlation was computed (if return_times=True)

References
----------
.. [1] Noe, F, S Doose, I Daidone, M Loellmann, M Sauer, J D
    Chodera and J Smith. 2010. Dynamical fingerprints for probing
    individual relaxation processes in biomolecular dynamics with
    simulations and kinetic experiments. PNAS 108 (12): 4822-4827.

Notes
-----

**Auto-correlation**

The auto-correlation of an observable :math:`a(x)` for a system in
equilibrium is

.. math:: \mathbb{E}_{\mu}[a(x,0)a(x,t)]=\sum_x \mu(x) a(x, 0) a(x, t)

:math:`a(x,0)=a(x)` is the observable at time :math:`t=0`. It can be
propagated forward in time using the t-step transition matrix
:math:`p^{t}(x, y)`.

The propagated observable at time :math:`t` is
:math:`a(x, t)=\sum_y p^t(x, y)a(y, 0)`.

Using the eigenvalues and eigenvectors of the transition matrix the
autocorrelation can be written as

.. math:: \mathbb{E}_{\mu}[a(x,0)a(x,t)]=\sum_i \lambda_i^t \langle a, r_i\rangle_{\mu} \langle l_i, a \rangle.

**Cross-correlation**

The cross-correlation of two observables :math:`a(x)`, :math:`b(x)` is
similarly given by

.. math:: \mathbb{E}_{\mu}[a(x,0)b(x,t)]=\sum_x \mu(x) a(x, 0) b(x, t)

Examples
--------

>>> import numpy as np
>>> from msmtools.analysis import correlation

>>> T = np.array([[0.9, 0.1, 0.0], [0.5, 0.0, 0.5], [0.0, 0.1, 0.9]])
>>> a = np.array([1.0, 0.0, 0.0])
>>> times = np.array([1, 5, 10, 20])

>>> corr = correlation(T, a, times=times)
>>> corr
array([ 0.40909091,  0.34081364,  0.28585667,  0.23424263])
[ "r", "Time", "-", "correlation", "for", "equilibrium", "experiment", "." ]
python
train
34.836957
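The Examples block above only demonstrates auto-correlation; a cross-correlation call is the same, with the second observable passed as obs2 (toy values reuse the docstring's transition matrix):

    >>> import numpy as np
    >>> from msmtools.analysis import correlation
    >>> T = np.array([[0.9, 0.1, 0.0], [0.5, 0.0, 0.5], [0.0, 0.1, 0.9]])
    >>> a = np.array([1.0, 0.0, 0.0])
    >>> b = np.array([0.0, 0.0, 1.0])
    >>> cross = correlation(T, a, obs2=b, times=[1, 5, 10])  # E_mu[a(x,0) b(x,t)]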
PMEAL/OpenPNM
openpnm/algorithms/InvasionPercolation.py
https://github.com/PMEAL/OpenPNM/blob/0547b5724ffedc0a593aae48639d36fe10e0baed/openpnm/algorithms/InvasionPercolation.py#L103-L143
def setup(self, phase, entry_pressure='', pore_volume='', throat_volume=''): r""" Set up the required parameters for the algorithm Parameters ---------- phase : OpenPNM Phase object The phase to be injected into the Network. The Phase must have the capillary entry pressure values for the system. entry_pressure : string The dictionary key to the capillary entry pressure. If none is supplied then the current value is retained. The default is 'throat.capillary_pressure'. pore_volume : string The dictionary key to the pore volume. If none is supplied then the current value is retained. The default is 'pore.volume'. throat_volume : string The dictionary key to the throat volume. If none is supplied then the current value is retained. The default is 'throat.volume'. """ self.settings['phase'] = phase.name if pore_volume: self.settings['pore_volume'] = pore_volume if throat_volume: self.settings['throat_volume'] = throat_volume if entry_pressure: self.settings['entry_pressure'] = entry_pressure # Setup arrays and info self['throat.entry_pressure'] = phase[self.settings['entry_pressure']] # Indices into t_entry giving a sorted list self['throat.sorted'] = sp.argsort(self['throat.entry_pressure'], axis=0) self['throat.order'] = 0 self['throat.order'][self['throat.sorted']] = sp.arange(0, self.Nt) self['throat.invasion_sequence'] = -1 self['pore.invasion_sequence'] = -1 self._tcount = 0
[ "def", "setup", "(", "self", ",", "phase", ",", "entry_pressure", "=", "''", ",", "pore_volume", "=", "''", ",", "throat_volume", "=", "''", ")", ":", "self", ".", "settings", "[", "'phase'", "]", "=", "phase", ".", "name", "if", "pore_volume", ":", "self", ".", "settings", "[", "'pore_volume'", "]", "=", "pore_volume", "if", "throat_volume", ":", "self", ".", "settings", "[", "'throat_volume'", "]", "=", "throat_volume", "if", "entry_pressure", ":", "self", ".", "settings", "[", "'entry_pressure'", "]", "=", "entry_pressure", "# Setup arrays and info", "self", "[", "'throat.entry_pressure'", "]", "=", "phase", "[", "self", ".", "settings", "[", "'entry_pressure'", "]", "]", "# Indices into t_entry giving a sorted list", "self", "[", "'throat.sorted'", "]", "=", "sp", ".", "argsort", "(", "self", "[", "'throat.entry_pressure'", "]", ",", "axis", "=", "0", ")", "self", "[", "'throat.order'", "]", "=", "0", "self", "[", "'throat.order'", "]", "[", "self", "[", "'throat.sorted'", "]", "]", "=", "sp", ".", "arange", "(", "0", ",", "self", ".", "Nt", ")", "self", "[", "'throat.invasion_sequence'", "]", "=", "-", "1", "self", "[", "'pore.invasion_sequence'", "]", "=", "-", "1", "self", ".", "_tcount", "=", "0" ]
r""" Set up the required parameters for the algorithm Parameters ---------- phase : OpenPNM Phase object The phase to be injected into the Network. The Phase must have the capillary entry pressure values for the system. entry_pressure : string The dictionary key to the capillary entry pressure. If none is supplied then the current value is retained. The default is 'throat.capillary_pressure'. pore_volume : string The dictionary key to the pore volume. If none is supplied then the current value is retained. The default is 'pore.volume'. throat_volume : string The dictionary key to the throat volume. If none is supplied then the current value is retained. The default is 'throat.volume'.
[ "r", "Set", "up", "the", "required", "parameters", "for", "the", "algorithm" ]
python
train
41.195122
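A wiring sketch for the setup record above; module paths follow OpenPNM 2.x conventions, and the entry-pressure values are hypothetical placeholders rather than a real physics model:

    >>> import numpy as np
    >>> import openpnm as op
    >>> pn = op.network.Cubic(shape=[5, 5, 5])
    >>> water = op.phases.GenericPhase(network=pn)
    >>> water['throat.entry_pressure'] = np.random.rand(pn.Nt)  # placeholder values
    >>> ip = op.algorithms.InvasionPercolation(network=pn)
    >>> ip.setup(phase=water, entry_pressure='throat.entry_pressure')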
JasonKessler/scattertext
scattertext/TermDocMatrixWithoutCategories.py
https://github.com/JasonKessler/scattertext/blob/cacf1f687d218ee8cae3fc05cc901db824bb1b81/scattertext/TermDocMatrixWithoutCategories.py#L471-L488
def add_metadata(self, metadata_matrix, meta_index_store):
    '''
    Returns a new corpus with the metadata matrix and index store integrated.

    :param metadata_matrix: scipy.sparse matrix (# docs, # metadata)
    :param meta_index_store: IndexStore of metadata values
    :return: TermDocMatrixWithoutCategories
    '''
    assert isinstance(meta_index_store, IndexStore)
    assert len(metadata_matrix.shape) == 2
    assert metadata_matrix.shape[0] == self.get_num_docs()
    return self._make_new_term_doc_matrix(new_X=self._X,
                                          new_y=None,
                                          new_category_idx_store=None,
                                          new_y_mask=np.ones(self.get_num_docs()).astype(bool),
                                          new_mX=metadata_matrix,
                                          new_term_idx_store=self._term_idx_store,
                                          new_metadata_idx_store=meta_index_store)
[ "def", "add_metadata", "(", "self", ",", "metadata_matrix", ",", "meta_index_store", ")", ":", "assert", "isinstance", "(", "meta_index_store", ",", "IndexStore", ")", "assert", "len", "(", "metadata_matrix", ".", "shape", ")", "==", "2", "assert", "metadata_matrix", ".", "shape", "[", "0", "]", "==", "self", ".", "get_num_docs", "(", ")", "return", "self", ".", "_make_new_term_doc_matrix", "(", "new_X", "=", "self", ".", "_X", ",", "new_y", "=", "None", ",", "new_category_idx_store", "=", "None", ",", "new_y_mask", "=", "np", ".", "ones", "(", "self", ".", "get_num_docs", "(", ")", ")", ".", "astype", "(", "bool", ")", ",", "new_mX", "=", "metadata_matrix", ",", "new_term_idx_store", "=", "self", ".", "_term_idx_store", ",", "new_metadata_idx_store", "=", "meta_index_store", ")" ]
Returns a new corpus with the metadata matrix and index store integrated.

:param metadata_matrix: scipy.sparse matrix (# docs, # metadata)
:param meta_index_store: IndexStore of metadata values
:return: TermDocMatrixWithoutCategories
[ "Returns", "a", "new", "corpus", "with", "the", "metadata", "matrix", "and", "index", "store", "integrated", "." ]
python
train
57.666667
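A call sketch for add_metadata; `corpus` stands in for an existing TermDocMatrixWithoutCategories, and the IndexStore import path is assumed:

    >>> import numpy as np
    >>> from scipy.sparse import csr_matrix
    >>> from scattertext.indexstore import IndexStore  # import path assumed
    >>> meta_idx = IndexStore()
    >>> _ = meta_idx.getidx('doc_length')  # register one metadata column
    >>> mat = csr_matrix(np.ones((corpus.get_num_docs(), 1)))  # one value per doc
    >>> corpus_with_meta = corpus.add_metadata(mat, meta_idx)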
mikeboers/Flask-ACL
flask_acl/extension.py
https://github.com/mikeboers/Flask-ACL/blob/7339b89f96ad8686d1526e25c138244ad912e12d/flask_acl/extension.py#L101-L114
def permission_set(self, name, func=None):
    """Define a new permission set (directly, or as a decorator).

    E.g.::

        @authz.permission_set('HTTP')
        def is_http_perm(perm):
            return perm.startswith('http.')

    """
    if func is None:
        return functools.partial(self.permission_set, name)
    self.permission_sets[name] = func
    return func
[ "def", "permission_set", "(", "self", ",", "name", ",", "func", "=", "None", ")", ":", "if", "func", "is", "None", ":", "return", "functools", ".", "partial", "(", "self", ".", "permission_set", ",", "name", ")", "self", ".", "permission_sets", "[", "name", "]", "=", "func", "return", "func" ]
Define a new permission set (directly, or as a decorator). E.g.:: @authz.permission_set('HTTP') def is_http_perm(perm): return perm.startswith('http.')
[ "Define", "a", "new", "permission", "set", "(", "directly", "or", "as", "a", "decorator", ")", "." ]
python
train
28.642857
Othernet-Project/ndb-utils
ndb_utils/models.py
https://github.com/Othernet-Project/ndb-utils/blob/7804a5e305a4ed280742e22dad1dd10756cbe695/ndb_utils/models.py#L130-L144
def clean(self):
    """ Cleans the data and throws ValidationError on failure """
    errors = {}
    cleaned = {}
    for name, validator in self.validate_schema.items():
        val = getattr(self, name, None)
        try:
            cleaned[name] = validator.to_python(val)
        except formencode.api.Invalid as err:
            errors[name] = err
    if errors:
        raise ValidationError('Invalid data', errors)
    return cleaned
[ "def", "clean", "(", "self", ")", ":", "errors", "=", "{", "}", "cleaned", "=", "{", "}", "for", "name", ",", "validator", "in", "self", ".", "validate_schema", ".", "items", "(", ")", ":", "val", "=", "getattr", "(", "self", ",", "name", ",", "None", ")", "try", ":", "cleaned", "[", "name", "]", "=", "validator", ".", "to_python", "(", "val", ")", "except", "formencode", ".", "api", ".", "Invalid", "as", "err", ":", "errors", "[", "name", "]", "=", "err", "if", "errors", ":", "raise", "ValidationError", "(", "'Invalid data'", ",", "errors", ")", "return", "cleaned" ]
Cleans the data and throws ValidationError on failure
[ "Cleans", "the", "data", "and", "throws", "ValidationError", "on", "failure" ]
python
train
31.8
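A sketch of the validate_schema contract that clean() consumes; the model class, its base, and the field names are all hypothetical:

    >>> import formencode
    >>> class Contact(ValidatedModel):  # hypothetical ndb model base that mixes in clean()
    ...     validate_schema = {
    ...         'email': formencode.validators.Email(not_empty=True),
    ...         'age': formencode.validators.Int(min=0),
    ...     }
    >>> Contact(email='a@b.com', age=42).clean()  # returns the cleaned dict, or raises ValidationError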
senaite/senaite.core
bika/lims/browser/widgets/referenceresultswidget.py
https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/browser/widgets/referenceresultswidget.py#L133-L180
def folderitem(self, obj, item, index): """Service triggered each time an item is iterated in folderitems. The use of this service prevents the extra-loops in child objects. :obj: the instance of the class to be foldered :item: dict containing the properties of the object to be used by the template :index: current index of the item """ # ensure we have an object and not a brain obj = api.get_object(obj) uid = api.get_uid(obj) url = api.get_url(obj) title = api.get_title(obj) # get the category if self.show_categories_enabled(): category = obj.getCategoryTitle() if category not in self.categories: self.categories.append(category) item["category"] = category rr = self.referenceresults.get(uid, {}) item["Title"] = title item["replace"]["Title"] = get_link(url, value=title) item["allow_edit"] = self.get_editable_columns() item["required"] = self.get_required_columns() item["selected"] = rr and True or False item["result"] = rr.get("result", "") item["min"] = rr.get("min", "") item["max"] = rr.get("max", "") # Icons after_icons = "" if obj.getAccredited(): after_icons += get_image( "accredited.png", title=_("Accredited")) if obj.getAttachmentOption() == "r": after_icons += get_image( "attach_reqd.png", title=_("Attachment required")) if obj.getAttachmentOption() == "n": after_icons += get_image( "attach_no.png", title=_("Attachment not permitted")) if after_icons: item["after"]["Title"] = after_icons return item
[ "def", "folderitem", "(", "self", ",", "obj", ",", "item", ",", "index", ")", ":", "# ensure we have an object and not a brain", "obj", "=", "api", ".", "get_object", "(", "obj", ")", "uid", "=", "api", ".", "get_uid", "(", "obj", ")", "url", "=", "api", ".", "get_url", "(", "obj", ")", "title", "=", "api", ".", "get_title", "(", "obj", ")", "# get the category", "if", "self", ".", "show_categories_enabled", "(", ")", ":", "category", "=", "obj", ".", "getCategoryTitle", "(", ")", "if", "category", "not", "in", "self", ".", "categories", ":", "self", ".", "categories", ".", "append", "(", "category", ")", "item", "[", "\"category\"", "]", "=", "category", "rr", "=", "self", ".", "referenceresults", ".", "get", "(", "uid", ",", "{", "}", ")", "item", "[", "\"Title\"", "]", "=", "title", "item", "[", "\"replace\"", "]", "[", "\"Title\"", "]", "=", "get_link", "(", "url", ",", "value", "=", "title", ")", "item", "[", "\"allow_edit\"", "]", "=", "self", ".", "get_editable_columns", "(", ")", "item", "[", "\"required\"", "]", "=", "self", ".", "get_required_columns", "(", ")", "item", "[", "\"selected\"", "]", "=", "rr", "and", "True", "or", "False", "item", "[", "\"result\"", "]", "=", "rr", ".", "get", "(", "\"result\"", ",", "\"\"", ")", "item", "[", "\"min\"", "]", "=", "rr", ".", "get", "(", "\"min\"", ",", "\"\"", ")", "item", "[", "\"max\"", "]", "=", "rr", ".", "get", "(", "\"max\"", ",", "\"\"", ")", "# Icons", "after_icons", "=", "\"\"", "if", "obj", ".", "getAccredited", "(", ")", ":", "after_icons", "+=", "get_image", "(", "\"accredited.png\"", ",", "title", "=", "_", "(", "\"Accredited\"", ")", ")", "if", "obj", ".", "getAttachmentOption", "(", ")", "==", "\"r\"", ":", "after_icons", "+=", "get_image", "(", "\"attach_reqd.png\"", ",", "title", "=", "_", "(", "\"Attachment required\"", ")", ")", "if", "obj", ".", "getAttachmentOption", "(", ")", "==", "\"n\"", ":", "after_icons", "+=", "get_image", "(", "\"attach_no.png\"", ",", "title", "=", "_", "(", "\"Attachment not permitted\"", ")", ")", "if", "after_icons", ":", "item", "[", "\"after\"", "]", "[", "\"Title\"", "]", "=", "after_icons", "return", "item" ]
Service triggered each time an item is iterated in folderitems. The use of this service prevents the extra-loops in child objects. :obj: the instance of the class to be foldered :item: dict containing the properties of the object to be used by the template :index: current index of the item
[ "Service", "triggered", "each", "time", "an", "item", "is", "iterated", "in", "folderitems", "." ]
python
train
37.104167
gwastro/pycbc-glue
pycbc_glue/iterutils.py
https://github.com/gwastro/pycbc-glue/blob/a3e906bae59fbfd707c3ff82e5d008d939ec5e24/pycbc_glue/iterutils.py#L218-L240
def inplace_filter(func, sequence): """ Like Python's filter() builtin, but modifies the sequence in place. Example: >>> l = range(10) >>> inplace_filter(lambda x: x > 5, l) >>> l [6, 7, 8, 9] Performance considerations: the function iterates over the sequence, shuffling surviving members down and deleting whatever top part of the sequence is left empty at the end, so sequences whose surviving members are predominantly at the bottom will be processed faster. """ target = 0 for source in xrange(len(sequence)): if func(sequence[source]): sequence[target] = sequence[source] target += 1 del sequence[target:]
[ "def", "inplace_filter", "(", "func", ",", "sequence", ")", ":", "target", "=", "0", "for", "source", "in", "xrange", "(", "len", "(", "sequence", ")", ")", ":", "if", "func", "(", "sequence", "[", "source", "]", ")", ":", "sequence", "[", "target", "]", "=", "sequence", "[", "source", "]", "target", "+=", "1", "del", "sequence", "[", "target", ":", "]" ]
Like Python's filter() builtin, but modifies the sequence in place. Example: >>> l = range(10) >>> inplace_filter(lambda x: x > 5, l) >>> l [6, 7, 8, 9] Performance considerations: the function iterates over the sequence, shuffling surviving members down and deleting whatever top part of the sequence is left empty at the end, so sequences whose surviving members are predominantly at the bottom will be processed faster.
[ "Like", "Python", "s", "filter", "()", "builtin", "but", "modifies", "the", "sequence", "in", "place", "." ]
python
train
26.826087
bunq/sdk_python
bunq/sdk/model/generated/object_.py
https://github.com/bunq/sdk_python/blob/da6c9b83e6d83ee8062617f53c6eb7293c0d863d/bunq/sdk/model/generated/object_.py#L4337-L4369
def is_all_field_none(self): """ :rtype: bool """ if self._BillingInvoice is not None: return False if self._DraftPayment is not None: return False if self._MasterCardAction is not None: return False if self._Payment is not None: return False if self._PaymentBatch is not None: return False if self._RequestResponse is not None: return False if self._ScheduleInstance is not None: return False if self._TabResultResponse is not None: return False if self._WhitelistResult is not None: return False return True
[ "def", "is_all_field_none", "(", "self", ")", ":", "if", "self", ".", "_BillingInvoice", "is", "not", "None", ":", "return", "False", "if", "self", ".", "_DraftPayment", "is", "not", "None", ":", "return", "False", "if", "self", ".", "_MasterCardAction", "is", "not", "None", ":", "return", "False", "if", "self", ".", "_Payment", "is", "not", "None", ":", "return", "False", "if", "self", ".", "_PaymentBatch", "is", "not", "None", ":", "return", "False", "if", "self", ".", "_RequestResponse", "is", "not", "None", ":", "return", "False", "if", "self", ".", "_ScheduleInstance", "is", "not", "None", ":", "return", "False", "if", "self", ".", "_TabResultResponse", "is", "not", "None", ":", "return", "False", "if", "self", ".", "_WhitelistResult", "is", "not", "None", ":", "return", "False", "return", "True" ]
:rtype: bool
[ ":", "rtype", ":", "bool" ]
python
train
21.181818
dwavesystems/dwave-system
dwave/embedding/polynomialembedder.py
https://github.com/dwavesystems/dwave-system/blob/86a1698f15ccd8b0ece0ed868ee49292d3f67f5b/dwave/embedding/polynomialembedder.py#L291-L319
def biclique(self, xmin, xmax, ymin, ymax):
    """Compute a maximum-sized complete bipartite graph contained in the
    rectangle defined by ``xmin, xmax, ymin, ymax`` where each chain of
    qubits is either a vertical line or a horizontal line.

    INPUTS:
        xmin,xmax,ymin,ymax: integers defining the bounds of a rectangle
            where we look for unbroken chains.  These ranges include both
            endpoints.

    OUTPUT:
        (A_side, B_side): a tuple of two lists containing lists of qubits.
            The lists found in ``A_side`` and ``B_side`` are chains of
            qubits.  These lists of qubits are arranged so that

            >>> [zip(chain,chain[1:]) for chain in A_side]

            and

            >>> [zip(chain,chain[1:]) for chain in B_side]

            are lists of valid couplers.
    """
    Aside = sum((self.maximum_hline_bundle(y, xmin, xmax)
                 for y in range(ymin, ymax + 1)), [])
    Bside = sum((self.maximum_vline_bundle(x, ymin, ymax)
                 for x in range(xmin, xmax + 1)), [])
    return Aside, Bside
[ "def", "biclique", "(", "self", ",", "xmin", ",", "xmax", ",", "ymin", ",", "ymax", ")", ":", "Aside", "=", "sum", "(", "(", "self", ".", "maximum_hline_bundle", "(", "y", ",", "xmin", ",", "xmax", ")", "for", "y", "in", "range", "(", "ymin", ",", "ymax", "+", "1", ")", ")", ",", "[", "]", ")", "Bside", "=", "sum", "(", "(", "self", ".", "maximum_vline_bundle", "(", "x", ",", "ymin", ",", "ymax", ")", "for", "x", "in", "range", "(", "xmin", ",", "xmax", "+", "1", ")", ")", ",", "[", "]", ")", "return", "Aside", ",", "Bside" ]
Compute a maximum-sized complete bipartite graph contained in the
rectangle defined by ``xmin, xmax, ymin, ymax`` where each chain of
qubits is either a vertical line or a horizontal line.

INPUTS:
    xmin,xmax,ymin,ymax: integers defining the bounds of a rectangle
        where we look for unbroken chains.  These ranges include both
        endpoints.

OUTPUT:
    (A_side, B_side): a tuple of two lists containing lists of qubits.
        The lists found in ``A_side`` and ``B_side`` are chains of
        qubits.  These lists of qubits are arranged so that

        >>> [zip(chain,chain[1:]) for chain in A_side]

        and

        >>> [zip(chain,chain[1:]) for chain in B_side]

        are lists of valid couplers.
[ "Compute", "a", "maximum", "-", "sized", "complete", "bipartite", "graph", "contained", "in", "the", "rectangle", "defined", "by", "xmin", "xmax", "ymin", "ymax", "where", "each", "chain", "of", "qubits", "is", "either", "a", "vertical", "line", "or", "a", "horizontal", "line", "." ]
python
train
39.172414
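The chain-to-coupler expansion the OUTPUT section describes, shown standalone on hypothetical chains:

    >>> A_side = [[0, 4, 8], [1, 5, 9]]  # two hypothetical qubit chains
    >>> [edge for chain in A_side for edge in zip(chain, chain[1:])]
    [(0, 4), (4, 8), (1, 5), (5, 9)]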
pytroll/trollimage
trollimage/utilities.py
https://github.com/pytroll/trollimage/blob/d35a7665ad475ff230e457085523e21f2cd3f454/trollimage/utilities.py#L52-L73
def _make_cmap(colors, position=None, bit=False):
    ''' _make_cmap takes a list of tuples which contain RGB values. The RGB
    values may either be in 8-bit [0 to 255] (in which bit must be set to
    True when called) or arithmetic [0 to 1] (default). _make_cmap returns
    a cmap with equally spaced colors.
    Arrange your tuples so that the first color is the lowest value for the
    colorbar and the last is the highest.
    position contains values from 0 to 1 to dictate the location of each color.
    '''
    bit_rgb = np.linspace(0,1,256)
    if bit:
        # map 8-bit channel values [0, 255] onto arithmetic values [0, 1]
        colors = [tuple(bit_rgb[int(chan)] for chan in color) for color in colors]
    if position is None:
        position = np.linspace(0,1,len(colors))
    else:
        if len(position) != len(colors):
            sys.exit("position length must be the same as colors")
        elif position[0] != 0 or position[-1] != 1:
            sys.exit("position must start with 0 and end with 1")
    # anchor each color at its requested position instead of its list index
    palette = [(pos, (float(r), float(g), float(b), float(a)))
               for pos, (r, g, b, a) in zip(position, colors)]
    cmap = Colormap(*palette)
    return cmap
[ "def", "_make_cmap", "(", "colors", ",", "position", "=", "None", ",", "bit", "=", "False", ")", ":", "bit_rgb", "=", "np", ".", "linspace", "(", "0", ",", "1", ",", "256", ")", "if", "bit", ":", "# map 8-bit channel values [0, 255] onto arithmetic values [0, 1]", "colors", "=", "[", "tuple", "(", "bit_rgb", "[", "int", "(", "chan", ")", "]", "for", "chan", "in", "color", ")", "for", "color", "in", "colors", "]", "if", "position", "is", "None", ":", "position", "=", "np", ".", "linspace", "(", "0", ",", "1", ",", "len", "(", "colors", ")", ")", "else", ":", "if", "len", "(", "position", ")", "!=", "len", "(", "colors", ")", ":", "sys", ".", "exit", "(", "\"position length must be the same as colors\"", ")", "elif", "position", "[", "0", "]", "!=", "0", "or", "position", "[", "-", "1", "]", "!=", "1", ":", "sys", ".", "exit", "(", "\"position must start with 0 and end with 1\"", ")", "# anchor each color at its requested position instead of its list index", "palette", "=", "[", "(", "pos", ",", "(", "float", "(", "r", ")", ",", "float", "(", "g", ")", ",", "float", "(", "b", ")", ",", "float", "(", "a", ")", ")", ")", "for", "pos", ",", "(", "r", ",", "g", ",", "b", ",", "a", ")", "in", "zip", "(", "position", ",", "colors", ")", "]", "cmap", "=", "Colormap", "(", "*", "palette", ")", "return", "cmap" ]
_make_cmap takes a list of tuples which contain RGB values. The RGB values may either be in 8-bit [0 to 255] (in which bit must be set to True when called) or arithmetic [0 to 1] (default). _make_cmap returns a cmap with equally spaced colors. Arrange your tuples so that the first color is the lowest value for the colorbar and the last is the highest. position contains values from 0 to 1 to dictate the location of each color.
[ "_make_cmap", "takes", "a", "list", "of", "tuples", "which", "contain", "RGB", "values", ".", "The", "RGB", "values", "may", "either", "be", "in", "8", "-", "bit", "[", "0", "to", "255", "]", "(", "in", "which", "bit", "must", "be", "set", "to", "True", "when", "called", ")", "or", "arithmetic", "[", "0", "to", "1", "]", "(", "default", ")", ".", "_make_cmap", "returns", "a", "cmap", "with", "equally", "spaced", "colors", ".", "Arrange", "your", "tuples", "so", "that", "the", "first", "color", "is", "the", "lowest", "value", "for", "the", "colorbar", "and", "the", "last", "is", "the", "highest", ".", "position", "contains", "values", "from", "0", "to", "1", "to", "dictate", "the", "location", "of", "each", "color", "." ]
python
train
45.409091
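A call sketch for _make_cmap; note the palette comprehension unpacks 4-tuples, so each color is RGBA even though the docstring says RGB (positions here are hypothetical):

    >>> blue_white_red = [(0.0, 0.0, 1.0, 1.0), (1.0, 1.0, 1.0, 1.0), (1.0, 0.0, 0.0, 1.0)]
    >>> cmap = _make_cmap(blue_white_red, position=[0.0, 0.3, 1.0])  # white anchored at 0.3
    >>> cmap8 = _make_cmap([(0, 0, 255, 255), (255, 0, 0, 255)], bit=True)  # 8-bit input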
openid/python-openid
openid/consumer/discover.py
https://github.com/openid/python-openid/blob/f7e13536f0d1828d3cef5ae7a7b55cabadff37fc/openid/consumer/discover.py#L221-L231
def fromOPEndpointURL(cls, op_endpoint_url): """Construct an OP-Identifier OpenIDServiceEndpoint object for a given OP Endpoint URL @param op_endpoint_url: The URL of the endpoint @rtype: OpenIDServiceEndpoint """ service = cls() service.server_url = op_endpoint_url service.type_uris = [OPENID_IDP_2_0_TYPE] return service
[ "def", "fromOPEndpointURL", "(", "cls", ",", "op_endpoint_url", ")", ":", "service", "=", "cls", "(", ")", "service", ".", "server_url", "=", "op_endpoint_url", "service", ".", "type_uris", "=", "[", "OPENID_IDP_2_0_TYPE", "]", "return", "service" ]
Construct an OP-Identifier OpenIDServiceEndpoint object for a given OP Endpoint URL @param op_endpoint_url: The URL of the endpoint @rtype: OpenIDServiceEndpoint
[ "Construct", "an", "OP", "-", "Identifier", "OpenIDServiceEndpoint", "object", "for", "a", "given", "OP", "Endpoint", "URL" ]
python
train
35.090909
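A one-line usage sketch for the classmethod above (the endpoint URL is hypothetical):

    >>> service = OpenIDServiceEndpoint.fromOPEndpointURL('https://op.example.com/server')
    >>> service.type_uris
    ['http://specs.openid.net/auth/2.0/server']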
rfarley3/Kibana
kibana/mapping.py
https://github.com/rfarley3/Kibana/blob/3df1e13be18edfb39ec173d8d2bbe9e90be61022/kibana/mapping.py#L256-L278
def get_field_mappings(self, field): """Converts ES field mappings to .kibana field mappings""" retdict = {} retdict['indexed'] = False retdict['analyzed'] = False for (key, val) in iteritems(field): if key in self.mappings: if (key == 'type' and (val == "long" or val == "integer" or val == "double" or val == "float")): val = "number" # self.pr_dbg("\t\t\tkey: %s" % key) # self.pr_dbg("\t\t\t\tval: %s" % val) retdict[key] = val if key == 'index' and val != "no": retdict['indexed'] = True # self.pr_dbg("\t\t\tkey: %s" % key) # self.pr_dbg("\t\t\t\tval: %s" % val) if val == "analyzed": retdict['analyzed'] = True return retdict
[ "def", "get_field_mappings", "(", "self", ",", "field", ")", ":", "retdict", "=", "{", "}", "retdict", "[", "'indexed'", "]", "=", "False", "retdict", "[", "'analyzed'", "]", "=", "False", "for", "(", "key", ",", "val", ")", "in", "iteritems", "(", "field", ")", ":", "if", "key", "in", "self", ".", "mappings", ":", "if", "(", "key", "==", "'type'", "and", "(", "val", "==", "\"long\"", "or", "val", "==", "\"integer\"", "or", "val", "==", "\"double\"", "or", "val", "==", "\"float\"", ")", ")", ":", "val", "=", "\"number\"", "# self.pr_dbg(\"\\t\\t\\tkey: %s\" % key)", "# self.pr_dbg(\"\\t\\t\\t\\tval: %s\" % val)", "retdict", "[", "key", "]", "=", "val", "if", "key", "==", "'index'", "and", "val", "!=", "\"no\"", ":", "retdict", "[", "'indexed'", "]", "=", "True", "# self.pr_dbg(\"\\t\\t\\tkey: %s\" % key)", "# self.pr_dbg(\"\\t\\t\\t\\tval: %s\" % val)", "if", "val", "==", "\"analyzed\"", ":", "retdict", "[", "'analyzed'", "]", "=", "True", "return", "retdict" ]
Converts ES field mappings to .kibana field mappings
[ "Converts", "ES", "field", "mappings", "to", ".", "kibana", "field", "mappings" ]
python
train
40.565217
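A sketch of the conversion on one hypothetical Elasticsearch-style field mapping, assuming 'type' and 'index' are among the keys in self.mappings:

    >>> field = {'type': 'long', 'index': 'analyzed'}
    >>> mapping.get_field_mappings(field)  # mapping: an existing instance of this class
    {'indexed': True, 'analyzed': True, 'type': 'number', 'index': 'analyzed'}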
JoelBender/bacpypes
py25/bacpypes/netservice.py
https://github.com/JoelBender/bacpypes/blob/4111b8604a16fa2b7f80d8104a43b9f3e28dfc78/py25/bacpypes/netservice.py#L234-L253
def bind(self, server, net=None, address=None): """Create a network adapter object and bind.""" if _debug: NetworkServiceAccessPoint._debug("bind %r net=%r address=%r", server, net, address) # make sure this hasn't already been called with this network if net in self.adapters: raise RuntimeError("already bound") # create an adapter object, add it to our map adapter = NetworkAdapter(self, net) self.adapters[net] = adapter if _debug: NetworkServiceAccessPoint._debug(" - adapters[%r]: %r", net, adapter) # if the address was given, make it the "local" one if address and not self.local_address: self.local_adapter = adapter self.local_address = address # bind to the server bind(adapter, server)
[ "def", "bind", "(", "self", ",", "server", ",", "net", "=", "None", ",", "address", "=", "None", ")", ":", "if", "_debug", ":", "NetworkServiceAccessPoint", ".", "_debug", "(", "\"bind %r net=%r address=%r\"", ",", "server", ",", "net", ",", "address", ")", "# make sure this hasn't already been called with this network", "if", "net", "in", "self", ".", "adapters", ":", "raise", "RuntimeError", "(", "\"already bound\"", ")", "# create an adapter object, add it to our map", "adapter", "=", "NetworkAdapter", "(", "self", ",", "net", ")", "self", ".", "adapters", "[", "net", "]", "=", "adapter", "if", "_debug", ":", "NetworkServiceAccessPoint", ".", "_debug", "(", "\" - adapters[%r]: %r\"", ",", "net", ",", "adapter", ")", "# if the address was given, make it the \"local\" one", "if", "address", "and", "not", "self", ".", "local_address", ":", "self", ".", "local_adapter", "=", "adapter", "self", ".", "local_address", "=", "address", "# bind to the server", "bind", "(", "adapter", ",", "server", ")" ]
Create a network adapter object and bind.
[ "Create", "a", "network", "adapter", "object", "and", "bind", "." ]
python
train
40.85
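A wiring sketch for bind(); the link-layer element is hypothetical, and Address is assumed from bacpypes.pdu:

    >>> from bacpypes.pdu import Address
    >>> nsap = NetworkServiceAccessPoint()
    >>> nsap.bind(link_layer, net=1, address=Address('0x01'))  # link_layer: some server element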
belbio/bel
bel/edge/edges.py
https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/edge/edges.py#L199-L217
def extract_ast_species(ast): """Extract species from ast.species set of tuples (id, label)""" species_id = "None" species_label = "None" species = [ (species_id, species_label) for (species_id, species_label) in ast.species if species_id ] if len(species) == 1: (species_id, species_label) = species[0] if not species_id: species_id = "None" species_label = "None" log.debug(f"AST Species: {ast.species} Species: {species} SpeciesID: {species_id}") return (species_id, species_label)
[ "def", "extract_ast_species", "(", "ast", ")", ":", "species_id", "=", "\"None\"", "species_label", "=", "\"None\"", "species", "=", "[", "(", "species_id", ",", "species_label", ")", "for", "(", "species_id", ",", "species_label", ")", "in", "ast", ".", "species", "if", "species_id", "]", "if", "len", "(", "species", ")", "==", "1", ":", "(", "species_id", ",", "species_label", ")", "=", "species", "[", "0", "]", "if", "not", "species_id", ":", "species_id", "=", "\"None\"", "species_label", "=", "\"None\"", "log", ".", "debug", "(", "f\"AST Species: {ast.species} Species: {species} SpeciesID: {species_id}\"", ")", "return", "(", "species_id", ",", "species_label", ")" ]
Extract species from ast.species set of tuples (id, label)
[ "Extract", "species", "from", "ast", ".", "species", "set", "of", "tuples", "(", "id", "label", ")" ]
python
train
28.473684
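A standalone sketch of the species extraction; the stand-in AST only needs a .species set of (id, label) tuples:

    >>> class FakeAST(object):  # stand-in; only .species is consulted
    ...     species = {('TAX:9606', 'human')}
    >>> extract_ast_species(FakeAST())
    ('TAX:9606', 'human')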
mwshinn/paranoidscientist
paranoid/decorators.py
https://github.com/mwshinn/paranoidscientist/blob/a5e9198bc40b0a985174ad643cc5d6d0c46efdcd/paranoid/decorators.py#L176-L217
def accepts(*argtypes, **kwargtypes): """A function decorator to specify argument types of the function. Types may be specified either in the order that they appear in the function or via keyword arguments (just as if you were calling the function). Example usage: | @accepts(Positive0) | def square_root(x): | ... """ theseargtypes = [T.TypeFactory(a) for a in argtypes] thesekwargtypes = {k : T.TypeFactory(a) for k,a in kwargtypes.items()} def _decorator(func): # @accepts decorator f = func.__wrapped__ if hasattr(func, "__wrapped__") else func try: argtypes = inspect.getcallargs(f, *theseargtypes, **thesekwargtypes) argtypes = {k: v if issubclass(type(v), T.Type) else T.Constant(v) for k,v in argtypes.items()} except TypeError: raise E.ArgumentTypeError("Invalid argument specification to @accepts in %s" % func.__qualname__) # Support keyword arguments. Find the name of the **kwargs # parameter (not necessarily "kwargs") and set it to be a # dictionary of unspecified types. kwargname = U.get_func_kwargs_name(func) if kwargname in argtypes.keys(): argtypes[kwargname] = T.KeywordArguments() # Support positional arguments. Find the name of the *args # parameter (not necessarily "args") and set it to be an # unspecified type. posargname = U.get_func_posargs_name(func) if posargname in argtypes.keys(): argtypes[posargname] = T.PositionalArguments() # TODO merge with actual argument names if U.has_fun_prop(func, "argtypes"): raise ValueError("Cannot set argument types twice") U.set_fun_prop(func, "argtypes", argtypes) return _wrap(func) return _decorator
[ "def", "accepts", "(", "*", "argtypes", ",", "*", "*", "kwargtypes", ")", ":", "theseargtypes", "=", "[", "T", ".", "TypeFactory", "(", "a", ")", "for", "a", "in", "argtypes", "]", "thesekwargtypes", "=", "{", "k", ":", "T", ".", "TypeFactory", "(", "a", ")", "for", "k", ",", "a", "in", "kwargtypes", ".", "items", "(", ")", "}", "def", "_decorator", "(", "func", ")", ":", "# @accepts decorator", "f", "=", "func", ".", "__wrapped__", "if", "hasattr", "(", "func", ",", "\"__wrapped__\"", ")", "else", "func", "try", ":", "argtypes", "=", "inspect", ".", "getcallargs", "(", "f", ",", "*", "theseargtypes", ",", "*", "*", "thesekwargtypes", ")", "argtypes", "=", "{", "k", ":", "v", "if", "issubclass", "(", "type", "(", "v", ")", ",", "T", ".", "Type", ")", "else", "T", ".", "Constant", "(", "v", ")", "for", "k", ",", "v", "in", "argtypes", ".", "items", "(", ")", "}", "except", "TypeError", ":", "raise", "E", ".", "ArgumentTypeError", "(", "\"Invalid argument specification to @accepts in %s\"", "%", "func", ".", "__qualname__", ")", "# Support keyword arguments. Find the name of the **kwargs", "# parameter (not necessarily \"kwargs\") and set it to be a", "# dictionary of unspecified types.", "kwargname", "=", "U", ".", "get_func_kwargs_name", "(", "func", ")", "if", "kwargname", "in", "argtypes", ".", "keys", "(", ")", ":", "argtypes", "[", "kwargname", "]", "=", "T", ".", "KeywordArguments", "(", ")", "# Support positional arguments. Find the name of the *args", "# parameter (not necessarily \"args\") and set it to be an", "# unspecified type.", "posargname", "=", "U", ".", "get_func_posargs_name", "(", "func", ")", "if", "posargname", "in", "argtypes", ".", "keys", "(", ")", ":", "argtypes", "[", "posargname", "]", "=", "T", ".", "PositionalArguments", "(", ")", "# TODO merge with actual argument names", "if", "U", ".", "has_fun_prop", "(", "func", ",", "\"argtypes\"", ")", ":", "raise", "ValueError", "(", "\"Cannot set argument types twice\"", ")", "U", ".", "set_fun_prop", "(", "func", ",", "\"argtypes\"", ",", "argtypes", ")", "return", "_wrap", "(", "func", ")", "return", "_decorator" ]
A function decorator to specify argument types of the function. Types may be specified either in the order that they appear in the function or via keyword arguments (just as if you were calling the function). Example usage: | @accepts(Positive0) | def square_root(x): | ...
[ "A", "function", "decorator", "to", "specify", "argument", "types", "of", "the", "function", "." ]
python
train
43.595238
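The docstring example covers the positional form; the keyword form it also mentions looks like this (import paths assumed):

    >>> import paranoid.types as pt
    >>> from paranoid.decorators import accepts
    >>> @accepts(x=pt.Number, y=pt.Number)  # types bound by parameter name
    ... def add(x, y):
    ...     return x + y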
ElementAI/greensim
greensim/__init__.py
https://github.com/ElementAI/greensim/blob/f160e8b57d69f6ef469f2e991cc07b7721e08a91/greensim/__init__.py#L301-L313
def add_at(self, moment: float, fn_process: Callable, *args: Any, **kwargs: Any) -> 'Process': """ Adds a process to the simulation, which is made to start at the given exact time on the simulated clock. Note that times in the past when compared to the current moment on the simulated clock are forbidden. See method add() for more details. """ delay = moment - self.now() if delay < 0.0: raise ValueError( f"The given moment to start the process ({moment:f}) is in the past (now is {self.now():f})." ) return self.add_in(delay, fn_process, *args, **kwargs)
[ "def", "add_at", "(", "self", ",", "moment", ":", "float", ",", "fn_process", ":", "Callable", ",", "*", "args", ":", "Any", ",", "*", "*", "kwargs", ":", "Any", ")", "->", "'Process'", ":", "delay", "=", "moment", "-", "self", ".", "now", "(", ")", "if", "delay", "<", "0.0", ":", "raise", "ValueError", "(", "f\"The given moment to start the process ({moment:f}) is in the past (now is {self.now():f}).\"", ")", "return", "self", ".", "add_in", "(", "delay", ",", "fn_process", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
Adds a process to the simulation, which is made to start at the given exact time on the simulated clock. Note that times in the past when compared to the current moment on the simulated clock are forbidden. See method add() for more details.
[ "Adds", "a", "process", "to", "the", "simulation", "which", "is", "made", "to", "start", "at", "the", "given", "exact", "time", "on", "the", "simulated", "clock", ".", "Note", "that", "times", "in", "the", "past", "when", "compared", "to", "the", "current", "moment", "on", "the", "simulated", "clock", "are", "forbidden", "." ]
python
train
50
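A scheduling sketch for add_at; greensim's top-level Simulator and now are assumed importable:

    >>> from greensim import Simulator, now
    >>> def hello():
    ...     print('hello at', now())
    >>> sim = Simulator()
    >>> _ = sim.add_at(10.0, hello)  # raises ValueError if 10.0 is already in the past
    >>> sim.run()  # drive the event loop (assumed API)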
secdev/scapy
scapy/asn1fields.py
https://github.com/secdev/scapy/blob/3ffe757c184017dd46464593a8f80f85abc1e79a/scapy/asn1fields.py#L74-L101
def m2i(self, pkt, s): """ The good thing about safedec is that it may still decode ASN1 even if there is a mismatch between the expected tag (self.ASN1_tag) and the actual tag; the decoded ASN1 object will simply be put into an ASN1_BADTAG object. However, safedec prevents the raising of exceptions needed for ASN1F_optional processing. Thus we use 'flexible_tag', which should be False with ASN1F_optional. Regarding other fields, we might need to know whether encoding went as expected or not. Noticeably, input methods from cert.py expect certain exceptions to be raised. Hence default flexible_tag is False. """ diff_tag, s = BER_tagging_dec(s, hidden_tag=self.ASN1_tag, implicit_tag=self.implicit_tag, explicit_tag=self.explicit_tag, safe=self.flexible_tag) if diff_tag is not None: # this implies that flexible_tag was True if self.implicit_tag is not None: self.implicit_tag = diff_tag elif self.explicit_tag is not None: self.explicit_tag = diff_tag codec = self.ASN1_tag.get_codec(pkt.ASN1_codec) if self.flexible_tag: return codec.safedec(s, context=self.context) else: return codec.dec(s, context=self.context)
[ "def", "m2i", "(", "self", ",", "pkt", ",", "s", ")", ":", "diff_tag", ",", "s", "=", "BER_tagging_dec", "(", "s", ",", "hidden_tag", "=", "self", ".", "ASN1_tag", ",", "implicit_tag", "=", "self", ".", "implicit_tag", ",", "explicit_tag", "=", "self", ".", "explicit_tag", ",", "safe", "=", "self", ".", "flexible_tag", ")", "if", "diff_tag", "is", "not", "None", ":", "# this implies that flexible_tag was True", "if", "self", ".", "implicit_tag", "is", "not", "None", ":", "self", ".", "implicit_tag", "=", "diff_tag", "elif", "self", ".", "explicit_tag", "is", "not", "None", ":", "self", ".", "explicit_tag", "=", "diff_tag", "codec", "=", "self", ".", "ASN1_tag", ".", "get_codec", "(", "pkt", ".", "ASN1_codec", ")", "if", "self", ".", "flexible_tag", ":", "return", "codec", ".", "safedec", "(", "s", ",", "context", "=", "self", ".", "context", ")", "else", ":", "return", "codec", ".", "dec", "(", "s", ",", "context", "=", "self", ".", "context", ")" ]
The good thing about safedec is that it may still decode ASN1 even if there is a mismatch between the expected tag (self.ASN1_tag) and the actual tag; the decoded ASN1 object will simply be put into an ASN1_BADTAG object. However, safedec prevents the raising of exceptions needed for ASN1F_optional processing. Thus we use 'flexible_tag', which should be False with ASN1F_optional. Regarding other fields, we might need to know whether encoding went as expected or not. Noticeably, input methods from cert.py expect certain exceptions to be raised. Hence default flexible_tag is False.
[ "The", "good", "thing", "about", "safedec", "is", "that", "it", "may", "still", "decode", "ASN1", "even", "if", "there", "is", "a", "mismatch", "between", "the", "expected", "tag", "(", "self", ".", "ASN1_tag", ")", "and", "the", "actual", "tag", ";", "the", "decoded", "ASN1", "object", "will", "simply", "be", "put", "into", "an", "ASN1_BADTAG", "object", ".", "However", "safedec", "prevents", "the", "raising", "of", "exceptions", "needed", "for", "ASN1F_optional", "processing", ".", "Thus", "we", "use", "flexible_tag", "which", "should", "be", "False", "with", "ASN1F_optional", "." ]
python
train
51.107143
pyrogram/pyrogram
pyrogram/client/filters/filters.py
https://github.com/pyrogram/pyrogram/blob/e7258a341ba905cfa86264c22040654db732ec1c/pyrogram/client/filters/filters.py#L275-L292
def regex(pattern, flags: int = 0): """Filter messages that match a given RegEx pattern. Args: pattern (``str``): The RegEx pattern as string, it will be applied to the text of a message. When a pattern matches, all the `Match Objects <https://docs.python.org/3/library/re.html#match-objects>`_ are stored in the *matches* field of the :class:`Message <pyrogram.Message>` itself. flags (``int``, *optional*): RegEx flags. """ def f(_, m): m.matches = [i for i in _.p.finditer(m.text or m.caption or "")] return bool(m.matches) return create("Regex", f, p=re.compile(pattern, flags))
[ "def", "regex", "(", "pattern", ",", "flags", ":", "int", "=", "0", ")", ":", "def", "f", "(", "_", ",", "m", ")", ":", "m", ".", "matches", "=", "[", "i", "for", "i", "in", "_", ".", "p", ".", "finditer", "(", "m", ".", "text", "or", "m", ".", "caption", "or", "\"\"", ")", "]", "return", "bool", "(", "m", ".", "matches", ")", "return", "create", "(", "\"Regex\"", ",", "f", ",", "p", "=", "re", ".", "compile", "(", "pattern", ",", "flags", ")", ")" ]
Filter messages that match a given RegEx pattern. Args: pattern (``str``): The RegEx pattern as string, it will be applied to the text of a message. When a pattern matches, all the `Match Objects <https://docs.python.org/3/library/re.html#match-objects>`_ are stored in the *matches* field of the :class:`Message <pyrogram.Message>` itself. flags (``int``, *optional*): RegEx flags.
[ "Filter", "messages", "that", "match", "a", "given", "RegEx", "pattern", "." ]
python
train
40.055556
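A handler sketch for Filters.regex under Pyrogram's 0.x API (this repo's vintage); `app` stands in for an existing pyrogram Client:

    >>> from pyrogram import Filters
    >>> @app.on_message(Filters.regex(r'^\d+'))
    ... def on_number(client, message):
    ...     print(message.matches[0].group(0))  # Match objects are stored on the message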
sethmlarson/virtualbox-python
virtualbox/library.py
https://github.com/sethmlarson/virtualbox-python/blob/706c8e3f6e3aee17eb06458e73cbb4bc2d37878b/virtualbox/library.py#L30553-L30574
def create_aggregator(self, subordinates): """Creates an aggregator event source, collecting events from multiple sources. This way a single listener can listen for events coming from multiple sources, using a single blocking :py:func:`get_event` on the returned aggregator. in subordinates of type :class:`IEventSource` Subordinate event source this one aggregates. return result of type :class:`IEventSource` Event source aggregating passed sources. """ if not isinstance(subordinates, list): raise TypeError("subordinates can only be an instance of type list") for a in subordinates[:10]: if not isinstance(a, IEventSource): raise TypeError( "array can only contain objects of type IEventSource") result = self._call("createAggregator", in_p=[subordinates]) result = IEventSource(result) return result
[ "def", "create_aggregator", "(", "self", ",", "subordinates", ")", ":", "if", "not", "isinstance", "(", "subordinates", ",", "list", ")", ":", "raise", "TypeError", "(", "\"subordinates can only be an instance of type list\"", ")", "for", "a", "in", "subordinates", "[", ":", "10", "]", ":", "if", "not", "isinstance", "(", "a", ",", "IEventSource", ")", ":", "raise", "TypeError", "(", "\"array can only contain objects of type IEventSource\"", ")", "result", "=", "self", ".", "_call", "(", "\"createAggregator\"", ",", "in_p", "=", "[", "subordinates", "]", ")", "result", "=", "IEventSource", "(", "result", ")", "return", "result" ]
Creates an aggregator event source, collecting events from multiple sources. This way a single listener can listen for events coming from multiple sources, using a single blocking :py:func:`get_event` on the returned aggregator. in subordinates of type :class:`IEventSource` Subordinate event source this one aggregates. return result of type :class:`IEventSource` Event source aggregating passed sources.
[ "Creates", "an", "aggregator", "event", "source", "collecting", "events", "from", "multiple", "sources", ".", "This", "way", "a", "single", "listener", "can", "listen", "for", "events", "coming", "from", "multiple", "sources", "using", "a", "single", "blocking", ":", "py", ":", "func", ":", "get_event", "on", "the", "returned", "aggregator", "." ]
python
train
44.727273
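A call sketch for create_aggregator; the subordinate sources and listener stand in for existing IEventSource and IEventListener objects:

    >>> agg = event_source.create_aggregator([src1, src2])
    >>> ev = agg.get_event(listener, 1000)  # single blocking wait over both sources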
singularityhub/sregistry-cli
sregistry/utils/fileio.py
https://github.com/singularityhub/sregistry-cli/blob/abc96140a1d15b5e96d83432e1e0e1f4f8f36331/sregistry/utils/fileio.py#L283-L297
def write_json(json_obj, filename, mode="w", print_pretty=True):
    '''write_json will (optionally, pretty print) a json object to file

    Parameters
    ==========
    json_obj: the dict to print to json
    filename: the output file to write to
    print_pretty: if True, will use nicer formatting
    '''
    with open(filename, mode) as filey:
        if print_pretty:
            filey.writelines(print_json(json_obj))
        else:
            filey.writelines(json.dumps(json_obj))
    return filename
[ "def", "write_json", "(", "json_obj", ",", "filename", ",", "mode", "=", "\"w\"", ",", "print_pretty", "=", "True", ")", ":", "with", "open", "(", "filename", ",", "mode", ")", "as", "filey", ":", "if", "print_pretty", ":", "filey", ".", "writelines", "(", "print_json", "(", "json_obj", ")", ")", "else", ":", "filey", ".", "writelines", "(", "json", ".", "dumps", "(", "json_obj", ")", ")", "return", "filename" ]
write_json will (optionally, pretty print) a json object to file

Parameters
==========
json_obj: the dict to print to json
filename: the output file to write to
print_pretty: if True, will use nicer formatting
[ "write_json", "will", "(", "optionally", "pretty", "print", ")", "a", "json", "object", "to", "file" ]
python
test
34.066667
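Two call sketches for write_json (the path is hypothetical); the function returns the filename it wrote:

    >>> write_json({'name': 'busybox', 'tag': 'latest'}, '/tmp/meta.json')
    '/tmp/meta.json'
    >>> write_json({'name': 'busybox'}, '/tmp/meta.json', mode='a', print_pretty=False)
    '/tmp/meta.json'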
openai/baselines
baselines/common/cmd_util.py
https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/common/cmd_util.py#L88-L102
def make_mujoco_env(env_id, seed, reward_scale=1.0): """ Create a wrapped, monitored gym.Env for MuJoCo. """ rank = MPI.COMM_WORLD.Get_rank() myseed = seed + 1000 * rank if seed is not None else None set_global_seeds(myseed) env = gym.make(env_id) logger_path = None if logger.get_dir() is None else os.path.join(logger.get_dir(), str(rank)) env = Monitor(env, logger_path, allow_early_resets=True) env.seed(seed) if reward_scale != 1.0: from baselines.common.retro_wrappers import RewardScaler env = RewardScaler(env, reward_scale) return env
[ "def", "make_mujoco_env", "(", "env_id", ",", "seed", ",", "reward_scale", "=", "1.0", ")", ":", "rank", "=", "MPI", ".", "COMM_WORLD", ".", "Get_rank", "(", ")", "myseed", "=", "seed", "+", "1000", "*", "rank", "if", "seed", "is", "not", "None", "else", "None", "set_global_seeds", "(", "myseed", ")", "env", "=", "gym", ".", "make", "(", "env_id", ")", "logger_path", "=", "None", "if", "logger", ".", "get_dir", "(", ")", "is", "None", "else", "os", ".", "path", ".", "join", "(", "logger", ".", "get_dir", "(", ")", ",", "str", "(", "rank", ")", ")", "env", "=", "Monitor", "(", "env", ",", "logger_path", ",", "allow_early_resets", "=", "True", ")", "env", ".", "seed", "(", "seed", ")", "if", "reward_scale", "!=", "1.0", ":", "from", "baselines", ".", "common", ".", "retro_wrappers", "import", "RewardScaler", "env", "=", "RewardScaler", "(", "env", ",", "reward_scale", ")", "return", "env" ]
Create a wrapped, monitored gym.Env for MuJoCo.
[ "Create", "a", "wrapped", "monitored", "gym", ".", "Env", "for", "MuJoCo", "." ]
python
valid
39.6
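A call sketch for make_mujoco_env; it assumes mpi4py and a working MuJoCo install, with one of Gym's stock MuJoCo task ids:

    >>> from baselines.common.cmd_util import make_mujoco_env
    >>> env = make_mujoco_env('HalfCheetah-v2', seed=0, reward_scale=0.1)
    >>> obs = env.reset()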
ray-project/ray
python/ray/tune/trial_runner.py
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/tune/trial_runner.py#L247-L256
def is_finished(self): """Returns whether all trials have finished running.""" if self._total_time > self._global_time_limit: logger.warning("Exceeded global time limit {} / {}".format( self._total_time, self._global_time_limit)) return True trials_done = all(trial.is_finished() for trial in self._trials) return trials_done and self._search_alg.is_finished()
[ "def", "is_finished", "(", "self", ")", ":", "if", "self", ".", "_total_time", ">", "self", ".", "_global_time_limit", ":", "logger", ".", "warning", "(", "\"Exceeded global time limit {} / {}\"", ".", "format", "(", "self", ".", "_total_time", ",", "self", ".", "_global_time_limit", ")", ")", "return", "True", "trials_done", "=", "all", "(", "trial", ".", "is_finished", "(", ")", "for", "trial", "in", "self", ".", "_trials", ")", "return", "trials_done", "and", "self", ".", "_search_alg", ".", "is_finished", "(", ")" ]
Returns whether all trials have finished running.
[ "Returns", "whether", "all", "trials", "have", "finished", "running", "." ]
python
train
42.5