Dataset schema (column, type, min–max length):

  nwo                 stringlengths    5–106
  sha                 stringlengths    40–40
  path                stringlengths    4–174
  language            stringclasses    1 value
  identifier          stringlengths    1–140
  parameters          stringlengths    0–87.7k
  argument_list       stringclasses    1 value
  return_statement    stringlengths    0–426k
  docstring           stringlengths    0–64.3k
  docstring_summary   stringlengths    0–26.3k
  docstring_tokens    list
  function            stringlengths    18–4.83M
  function_tokens     list
  url                 stringlengths    83–304
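If you want to poke at rows with this schema programmatically, here is a minimal sketch using pandas; the file name functions.jsonl is a hypothetical local export, not something this page provides:

import pandas as pd

# Minimal sketch, assuming the rows below were exported as JSON lines to
# "functions.jsonl" (hypothetical file name) with the columns listed above.
df = pd.read_json("functions.jsonl", lines=True)
# "function" holds full source text; "function_tokens" holds its tokenization.
print(df[["nwo", "identifier", "url"]].head())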
Jajcus/pyxmpp2
59e5fd7c8837991ac265dc6aad23a6bd256768a7
pyxmpp2/streambase.py
python
StreamBase.write_element
(self, element)
Write XML `element` to the stream.

:Parameters:
    - `element`: Element node to send.
:Types:
    - `element`: :etree:`ElementTree.Element`
Write XML `element` to the stream.
[ "Write", "XML", "element", "to", "the", "stream", "." ]
def write_element(self, element):
    """Write XML `element` to the stream.

    :Parameters:
        - `element`: Element node to send.
    :Types:
        - `element`: :etree:`ElementTree.Element`
    """
    with self.lock:
        self._write_element(element)
[ "def", "write_element", "(", "self", ",", "element", ")", ":", "with", "self", ".", "lock", ":", "self", ".", "_write_element", "(", "element", ")" ]
https://github.com/Jajcus/pyxmpp2/blob/59e5fd7c8837991ac265dc6aad23a6bd256768a7/pyxmpp2/streambase.py#L451-L460
caiiiac/Machine-Learning-with-Python
1a26c4467da41ca4ebc3d5bd789ea942ef79422f
MachineLearning/venv/lib/python3.5/site-packages/sklearn/multiclass.py
python
OneVsOneClassifier.decision_function
(self, X)
return Y
Decision function for the OneVsOneClassifier.

The decision values for the samples are computed by adding the
normalized sum of pair-wise classification confidence levels to the
votes in order to disambiguate between the decision values when the
votes for all the classes are equal leading to a tie.

Parameters
----------
X : array-like, shape = [n_samples, n_features]

Returns
-------
Y : array-like, shape = [n_samples, n_classes]
Decision function for the OneVsOneClassifier.
[ "Decision", "function", "for", "the", "OneVsOneClassifier", "." ]
def decision_function(self, X):
    """Decision function for the OneVsOneClassifier.

    The decision values for the samples are computed by adding the
    normalized sum of pair-wise classification confidence levels to the
    votes in order to disambiguate between the decision values when the
    votes for all the classes are equal leading to a tie.

    Parameters
    ----------
    X : array-like, shape = [n_samples, n_features]

    Returns
    -------
    Y : array-like, shape = [n_samples, n_classes]
    """
    check_is_fitted(self, 'estimators_')

    indices = self.pairwise_indices_
    if indices is None:
        Xs = [X] * len(self.estimators_)
    else:
        Xs = [X[:, idx] for idx in indices]

    predictions = np.vstack([est.predict(Xi)
                             for est, Xi in zip(self.estimators_, Xs)]).T
    confidences = np.vstack([_predict_binary(est, Xi)
                             for est, Xi in zip(self.estimators_, Xs)]).T
    Y = _ovr_decision_function(predictions, confidences, len(self.classes_))
    return Y
[ "def", "decision_function", "(", "self", ",", "X", ")", ":", "check_is_fitted", "(", "self", ",", "'estimators_'", ")", "indices", "=", "self", ".", "pairwise_indices_", "if", "indices", "is", "None", ":", "Xs", "=", "[", "X", "]", "*", "len", "(", "self", ".", "estimators_", ")", "else", ":", "Xs", "=", "[", "X", "[", ":", ",", "idx", "]", "for", "idx", "in", "indices", "]", "predictions", "=", "np", ".", "vstack", "(", "[", "est", ".", "predict", "(", "Xi", ")", "for", "est", ",", "Xi", "in", "zip", "(", "self", ".", "estimators_", ",", "Xs", ")", "]", ")", ".", "T", "confidences", "=", "np", ".", "vstack", "(", "[", "_predict_binary", "(", "est", ",", "Xi", ")", "for", "est", ",", "Xi", "in", "zip", "(", "self", ".", "estimators_", ",", "Xs", ")", "]", ")", ".", "T", "Y", "=", "_ovr_decision_function", "(", "predictions", ",", "confidences", ",", "len", "(", "self", ".", "classes_", ")", ")", "return", "Y" ]
https://github.com/caiiiac/Machine-Learning-with-Python/blob/1a26c4467da41ca4ebc3d5bd789ea942ef79422f/MachineLearning/venv/lib/python3.5/site-packages/sklearn/multiclass.py#L571-L602
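A short, hedged usage sketch for the decision_function shown above; the iris data and LinearSVC base estimator are illustrative choices, not part of this record:

from sklearn.datasets import load_iris
from sklearn.multiclass import OneVsOneClassifier
from sklearn.svm import LinearSVC

X, y = load_iris(return_X_y=True)
clf = OneVsOneClassifier(LinearSVC()).fit(X, y)

# One column per class: pairwise votes plus normalized confidences break ties.
print(clf.decision_function(X[:5]).shape)  # (5, 3)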
theotherp/nzbhydra
4b03d7f769384b97dfc60dade4806c0fc987514e
libs/inspect.py
python
cleandoc
(doc)
Clean up indentation from docstrings. Any whitespace that can be uniformly removed from the second line onwards is removed.
Clean up indentation from docstrings.
[ "Clean", "up", "indentation", "from", "docstrings", "." ]
def cleandoc(doc):
    """Clean up indentation from docstrings.

    Any whitespace that can be uniformly removed from the second line
    onwards is removed."""
    try:
        lines = string.split(string.expandtabs(doc), '\n')
    except UnicodeError:
        return None
    else:
        # Find minimum indentation of any non-blank lines after first line.
        margin = sys.maxint
        for line in lines[1:]:
            content = len(string.lstrip(line))
            if content:
                indent = len(line) - content
                margin = min(margin, indent)
        # Remove indentation.
        if lines:
            lines[0] = lines[0].lstrip()
        if margin < sys.maxint:
            for i in range(1, len(lines)):
                lines[i] = lines[i][margin:]
        # Remove any trailing or leading blank lines.
        while lines and not lines[-1]:
            lines.pop()
        while lines and not lines[0]:
            lines.pop(0)
        return string.join(lines, '\n')
[ "def", "cleandoc", "(", "doc", ")", ":", "try", ":", "lines", "=", "string", ".", "split", "(", "string", ".", "expandtabs", "(", "doc", ")", ",", "'\\n'", ")", "except", "UnicodeError", ":", "return", "None", "else", ":", "# Find minimum indentation of any non-blank lines after first line.", "margin", "=", "sys", ".", "maxint", "for", "line", "in", "lines", "[", "1", ":", "]", ":", "content", "=", "len", "(", "string", ".", "lstrip", "(", "line", ")", ")", "if", "content", ":", "indent", "=", "len", "(", "line", ")", "-", "content", "margin", "=", "min", "(", "margin", ",", "indent", ")", "# Remove indentation.", "if", "lines", ":", "lines", "[", "0", "]", "=", "lines", "[", "0", "]", ".", "lstrip", "(", ")", "if", "margin", "<", "sys", ".", "maxint", ":", "for", "i", "in", "range", "(", "1", ",", "len", "(", "lines", ")", ")", ":", "lines", "[", "i", "]", "=", "lines", "[", "i", "]", "[", "margin", ":", "]", "# Remove any trailing or leading blank lines.", "while", "lines", "and", "not", "lines", "[", "-", "1", "]", ":", "lines", ".", "pop", "(", ")", "while", "lines", "and", "not", "lines", "[", "0", "]", ":", "lines", ".", "pop", "(", "0", ")", "return", "string", ".", "join", "(", "lines", ",", "'\\n'", ")" ]
https://github.com/theotherp/nzbhydra/blob/4b03d7f769384b97dfc60dade4806c0fc987514e/libs/inspect.py#L369-L396
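This vendored copy is the Python 2 implementation (string module, sys.maxint); the same behavior ships in modern Python as inspect.cleandoc. A quick illustration:

import inspect

doc = """Summary line.
        Indented detail.
        More detail."""
# Whitespace that can be uniformly removed from the second line onwards is stripped.
print(inspect.cleandoc(doc))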
PrefectHQ/prefect
67bdc94e2211726d99561f6f52614bec8970e981
src/prefect/schedules/clocks.py
python
CronClock.events
(self, after: datetime = None)
Generator that emits clock events

Args:
    - after (datetime, optional): the first result will be after this date

Returns:
    - Iterable[ClockEvent]: the next scheduled events
Generator that emits clock events
[ "Generator", "that", "emits", "clock", "events" ]
def events(self, after: datetime = None) -> Iterable[ClockEvent]:
    """
    Generator that emits clock events

    Args:
        - after (datetime, optional): the first result will be after this date

    Returns:
        - Iterable[ClockEvent]: the next scheduled events
    """
    tz = getattr(self.start_date, "tz", "UTC")
    if after is None:
        after = pendulum.now(tz)
    else:
        after = pendulum.instance(after).in_tz(tz)

    # if there is a start date, advance to at least one second before the start (so that
    # the start date itself will be registered as a valid clock date)
    if self.start_date is not None:
        after = max(after, self.start_date - timedelta(seconds=1))  # type: ignore

    assert isinstance(after, datetime)  # mypy assertion
    after = pendulum.instance(after)
    assert isinstance(after, pendulum.DateTime)  # mypy assertion
    assert isinstance(after.tz, pendulum.tz._Timezone)  # mypy assertion

    # croniter's DST logic interferes with all other datetime libraries except pytz
    after_localized = pytz.timezone(after.tz.name).localize(
        datetime(
            year=after.year,
            month=after.month,
            day=after.day,
            hour=after.hour,
            minute=after.minute,
            second=after.second,
            microsecond=after.microsecond,
        )
    )

    # Respect microseconds by rounding up
    if after_localized.microsecond:
        after_localized = after_localized + timedelta(seconds=1)

    cron = croniter(self.cron, after_localized, day_or=self.day_or)  # type: ignore
    dates = set()  # type: Set[datetime]

    while True:
        next_date = pendulum.instance(cron.get_next(datetime))
        # because of croniter's rounding behavior, we want to avoid
        # issuing the after date; we also want to avoid duplicates caused by
        # DST boundary issues
        if next_date.in_tz("UTC") == after.in_tz("UTC") or next_date in dates:
            next_date = pendulum.instance(cron.get_next(datetime))

        if self.end_date and next_date > self.end_date:
            break

        dates.add(next_date)

        yield ClockEvent(
            start_time=next_date,
            parameter_defaults=self.parameter_defaults,
            labels=self.labels,
        )
[ "def", "events", "(", "self", ",", "after", ":", "datetime", "=", "None", ")", "->", "Iterable", "[", "ClockEvent", "]", ":", "tz", "=", "getattr", "(", "self", ".", "start_date", ",", "\"tz\"", ",", "\"UTC\"", ")", "if", "after", "is", "None", ":", "after", "=", "pendulum", ".", "now", "(", "tz", ")", "else", ":", "after", "=", "pendulum", ".", "instance", "(", "after", ")", ".", "in_tz", "(", "tz", ")", "# if there is a start date, advance to at least one second before the start (so that", "# the start date itself will be registered as a valid clock date)", "if", "self", ".", "start_date", "is", "not", "None", ":", "after", "=", "max", "(", "after", ",", "self", ".", "start_date", "-", "timedelta", "(", "seconds", "=", "1", ")", ")", "# type: ignore", "assert", "isinstance", "(", "after", ",", "datetime", ")", "# mypy assertion", "after", "=", "pendulum", ".", "instance", "(", "after", ")", "assert", "isinstance", "(", "after", ",", "pendulum", ".", "DateTime", ")", "# mypy assertion", "assert", "isinstance", "(", "after", ".", "tz", ",", "pendulum", ".", "tz", ".", "_Timezone", ")", "# mypy assertion", "# croniter's DST logic interferes with all other datetime libraries except pytz", "after_localized", "=", "pytz", ".", "timezone", "(", "after", ".", "tz", ".", "name", ")", ".", "localize", "(", "datetime", "(", "year", "=", "after", ".", "year", ",", "month", "=", "after", ".", "month", ",", "day", "=", "after", ".", "day", ",", "hour", "=", "after", ".", "hour", ",", "minute", "=", "after", ".", "minute", ",", "second", "=", "after", ".", "second", ",", "microsecond", "=", "after", ".", "microsecond", ",", ")", ")", "# Respect microseconds by rounding up", "if", "after_localized", ".", "microsecond", ":", "after_localized", "=", "after_localized", "+", "timedelta", "(", "seconds", "=", "1", ")", "cron", "=", "croniter", "(", "self", ".", "cron", ",", "after_localized", ",", "day_or", "=", "self", ".", "day_or", ")", "# type: ignore", "dates", "=", "set", "(", ")", "# type: Set[datetime]", "while", "True", ":", "next_date", "=", "pendulum", ".", "instance", "(", "cron", ".", "get_next", "(", "datetime", ")", ")", "# because of croniter's rounding behavior, we want to avoid", "# issuing the after date; we also want to avoid duplicates caused by", "# DST boundary issues", "if", "next_date", ".", "in_tz", "(", "\"UTC\"", ")", "==", "after", ".", "in_tz", "(", "\"UTC\"", ")", "or", "next_date", "in", "dates", ":", "next_date", "=", "pendulum", ".", "instance", "(", "cron", ".", "get_next", "(", "datetime", ")", ")", "if", "self", ".", "end_date", "and", "next_date", ">", "self", ".", "end_date", ":", "break", "dates", ".", "add", "(", "next_date", ")", "yield", "ClockEvent", "(", "start_time", "=", "next_date", ",", "parameter_defaults", "=", "self", ".", "parameter_defaults", ",", "labels", "=", "self", ".", "labels", ",", ")" ]
https://github.com/PrefectHQ/prefect/blob/67bdc94e2211726d99561f6f52614bec8970e981/src/prefect/schedules/clocks.py#L274-L335
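The heavy lifting above is croniter's get_next; a small standalone sketch (the schedule string and start time are illustrative):

from datetime import datetime

from croniter import croniter

base = datetime(2023, 1, 1, 8, 30)
cron = croniter("0 9 * * *", base)  # fire daily at 09:00
print(cron.get_next(datetime))      # 2023-01-01 09:00:00
print(cron.get_next(datetime))      # 2023-01-02 09:00:00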
andresriancho/w3af
cd22e5252243a87aaa6d0ddea47cf58dacfe00a9
w3af/core/data/dc/generic/form.py
python
Form.get_autocomplete
(self)
return self.form_params.get_autocomplete()
[]
def get_autocomplete(self): return self.form_params.get_autocomplete()
[ "def", "get_autocomplete", "(", "self", ")", ":", "return", "self", ".", "form_params", ".", "get_autocomplete", "(", ")" ]
https://github.com/andresriancho/w3af/blob/cd22e5252243a87aaa6d0ddea47cf58dacfe00a9/w3af/core/data/dc/generic/form.py#L68-L69
pysmt/pysmt
ade4dc2a825727615033a96d31c71e9f53ce4764
pysmt/solvers/z3.py
python
Z3Converter.__del__
(self)
[]
def __del__(self):
    # Cleaning-up Z3Converter requires dec-ref'ing the terms in the cache
    if self.ctx.ref():
        # Check that there is still a context object
        # This might not be the case if we are using the global context
        # and the interpreter is shutting down
        for t in self.memoization.values():
            z3.Z3_dec_ref(self.ctx.ref(), t)
[ "def", "__del__", "(", "self", ")", ":", "# Cleaning-up Z3Converter requires dec-ref'ing the terms in the cache", "if", "self", ".", "ctx", ".", "ref", "(", ")", ":", "# Check that there is still a context object", "# This might not be the case if we are using the global context", "# and the interpreter is shutting down", "for", "t", "in", "self", ".", "memoization", ".", "values", "(", ")", ":", "z3", ".", "Z3_dec_ref", "(", "self", ".", "ctx", ".", "ref", "(", ")", ",", "t", ")" ]
https://github.com/pysmt/pysmt/blob/ade4dc2a825727615033a96d31c71e9f53ce4764/pysmt/solvers/z3.py#L920-L927
jgyates/genmon
2cb2ed2945f55cd8c259b09ccfa9a51e23f1341e
genmonlib/mymodem.py
python
MyModem.Close
(self)
[]
def Close(self):
    try:
        try:
            self.KillThread("SendMessageThread")
        except:
            pass
        try:
            self.SerialDevice.Close()
        except:
            pass
    except Exception as e1:
        self.LogErrorLine("Error Closing Modem: " + str(e1))
[ "def", "Close", "(", "self", ")", ":", "try", ":", "try", ":", "self", ".", "KillThread", "(", "\"SendMessageThread\"", ")", "except", ":", "pass", "try", ":", "self", ".", "SerialDevice", ".", "Close", "(", ")", "except", ":", "pass", "except", "Exception", "as", "e1", ":", "self", ".", "LogErrorLine", "(", "\"Error Closing Modem: \"", "+", "str", "(", "e1", ")", ")" ]
https://github.com/jgyates/genmon/blob/2cb2ed2945f55cd8c259b09ccfa9a51e23f1341e/genmonlib/mymodem.py#L557-L568
MozillaSecurity/grizzly
1c41478e32f323189a2c322ec041c3e0902a158a
grizzly/common/reporter.py
python
FuzzManagerReporter._post_submit
(self)
[]
def _post_submit(self): self._extra_metadata.clear()
[ "def", "_post_submit", "(", "self", ")", ":", "self", ".", "_extra_metadata", ".", "clear", "(", ")" ]
https://github.com/MozillaSecurity/grizzly/blob/1c41478e32f323189a2c322ec041c3e0902a158a/grizzly/common/reporter.py#L185-L186
mlcommons/training
4a4d5a0b7efe99c680306b1940749211d4238a84
language_model/tensorflow/bert/cleanup_scripts/create_pretraining_data.py
python
create_masked_lm_predictions
(tokens, masked_lm_prob, max_predictions_per_seq, vocab_words, rng)
return (output_tokens, masked_lm_positions, masked_lm_labels)
Creates the predictions for the masked LM objective.
Creates the predictions for the masked LM objective.
[ "Creates", "the", "predictions", "for", "the", "masked", "LM", "objective", "." ]
def create_masked_lm_predictions(tokens, masked_lm_prob,
                                 max_predictions_per_seq, vocab_words, rng):
    """Creates the predictions for the masked LM objective."""
    cand_indexes = []
    for (i, token) in enumerate(tokens):
        if token == "[CLS]" or token == "[SEP]":
            continue
        cand_indexes.append(i)

    rng.shuffle(cand_indexes)

    output_tokens = list(tokens)

    num_to_predict = min(max_predictions_per_seq,
                         max(1, int(round(len(tokens) * masked_lm_prob))))

    masked_lms = []
    covered_indexes = set()
    for index in cand_indexes:
        if len(masked_lms) >= num_to_predict:
            break
        if index in covered_indexes:
            continue
        covered_indexes.add(index)

        masked_token = None
        # 80% of the time, replace with [MASK]
        if rng.random() < 0.8:
            masked_token = "[MASK]"
        else:
            # 10% of the time, keep original
            if rng.random() < 0.5:
                masked_token = tokens[index]
            # 10% of the time, replace with random word
            else:
                masked_token = vocab_words[rng.randint(0, len(vocab_words) - 1)]

        output_tokens[index] = masked_token

        masked_lms.append(MaskedLmInstance(index=index, label=tokens[index]))

    masked_lms = sorted(masked_lms, key=lambda x: x.index)

    masked_lm_positions = []
    masked_lm_labels = []
    for p in masked_lms:
        masked_lm_positions.append(p.index)
        masked_lm_labels.append(p.label)

    return (output_tokens, masked_lm_positions, masked_lm_labels)
[ "def", "create_masked_lm_predictions", "(", "tokens", ",", "masked_lm_prob", ",", "max_predictions_per_seq", ",", "vocab_words", ",", "rng", ")", ":", "cand_indexes", "=", "[", "]", "for", "(", "i", ",", "token", ")", "in", "enumerate", "(", "tokens", ")", ":", "if", "token", "==", "\"[CLS]\"", "or", "token", "==", "\"[SEP]\"", ":", "continue", "cand_indexes", ".", "append", "(", "i", ")", "rng", ".", "shuffle", "(", "cand_indexes", ")", "output_tokens", "=", "list", "(", "tokens", ")", "num_to_predict", "=", "min", "(", "max_predictions_per_seq", ",", "max", "(", "1", ",", "int", "(", "round", "(", "len", "(", "tokens", ")", "*", "masked_lm_prob", ")", ")", ")", ")", "masked_lms", "=", "[", "]", "covered_indexes", "=", "set", "(", ")", "for", "index", "in", "cand_indexes", ":", "if", "len", "(", "masked_lms", ")", ">=", "num_to_predict", ":", "break", "if", "index", "in", "covered_indexes", ":", "continue", "covered_indexes", ".", "add", "(", "index", ")", "masked_token", "=", "None", "# 80% of the time, replace with [MASK]", "if", "rng", ".", "random", "(", ")", "<", "0.8", ":", "masked_token", "=", "\"[MASK]\"", "else", ":", "# 10% of the time, keep original", "if", "rng", ".", "random", "(", ")", "<", "0.5", ":", "masked_token", "=", "tokens", "[", "index", "]", "# 10% of the time, replace with random word", "else", ":", "masked_token", "=", "vocab_words", "[", "rng", ".", "randint", "(", "0", ",", "len", "(", "vocab_words", ")", "-", "1", ")", "]", "output_tokens", "[", "index", "]", "=", "masked_token", "masked_lms", ".", "append", "(", "MaskedLmInstance", "(", "index", "=", "index", ",", "label", "=", "tokens", "[", "index", "]", ")", ")", "masked_lms", "=", "sorted", "(", "masked_lms", ",", "key", "=", "lambda", "x", ":", "x", ".", "index", ")", "masked_lm_positions", "=", "[", "]", "masked_lm_labels", "=", "[", "]", "for", "p", "in", "masked_lms", ":", "masked_lm_positions", ".", "append", "(", "p", ".", "index", ")", "masked_lm_labels", ".", "append", "(", "p", ".", "label", ")", "return", "(", "output_tokens", ",", "masked_lm_positions", ",", "masked_lm_labels", ")" ]
https://github.com/mlcommons/training/blob/4a4d5a0b7efe99c680306b1940749211d4238a84/language_model/tensorflow/bert/cleanup_scripts/create_pretraining_data.py#L324-L374
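The 80/10/10 masking rule in that function can be exercised on its own; a toy sketch with made-up tokens and vocabulary:

import random

rng = random.Random(12345)
tokens = ["[CLS]", "the", "cat", "sat", "[SEP]"]
vocab_words = ["the", "cat", "sat", "dog", "ran"]

for index in (1, 2, 3):  # candidate positions; [CLS]/[SEP] are skipped
    if rng.random() < 0.8:
        masked = "[MASK]"                # 80%: replace with [MASK]
    elif rng.random() < 0.5:
        masked = tokens[index]           # 10%: keep the original token
    else:                                # 10%: replace with a random word
        masked = vocab_words[rng.randint(0, len(vocab_words) - 1)]
    print(index, tokens[index], "->", masked)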
espnet/espnet
ea411f3f627b8f101c211e107d0ff7053344ac80
espnet/nets/pytorch_backend/nets_utils.py
python
pad_list
(xs, pad_value)
return pad
Perform padding for the list of tensors.

Args:
    xs (List): List of Tensors [(T_1, `*`), (T_2, `*`), ..., (T_B, `*`)].
    pad_value (float): Value for padding.

Returns:
    Tensor: Padded tensor (B, Tmax, `*`).

Examples:
    >>> x = [torch.ones(4), torch.ones(2), torch.ones(1)]
    >>> x
    [tensor([1., 1., 1., 1.]), tensor([1., 1.]), tensor([1.])]
    >>> pad_list(x, 0)
    tensor([[1., 1., 1., 1.],
            [1., 1., 0., 0.],
            [1., 0., 0., 0.]])
Perform padding for the list of tensors.
[ "Perform", "padding", "for", "the", "list", "of", "tensors", "." ]
def pad_list(xs, pad_value):
    """Perform padding for the list of tensors.

    Args:
        xs (List): List of Tensors [(T_1, `*`), (T_2, `*`), ..., (T_B, `*`)].
        pad_value (float): Value for padding.

    Returns:
        Tensor: Padded tensor (B, Tmax, `*`).

    Examples:
        >>> x = [torch.ones(4), torch.ones(2), torch.ones(1)]
        >>> x
        [tensor([1., 1., 1., 1.]), tensor([1., 1.]), tensor([1.])]
        >>> pad_list(x, 0)
        tensor([[1., 1., 1., 1.],
                [1., 1., 0., 0.],
                [1., 0., 0., 0.]])

    """
    n_batch = len(xs)
    max_len = max(x.size(0) for x in xs)
    pad = xs[0].new(n_batch, max_len, *xs[0].size()[1:]).fill_(pad_value)

    for i in range(n_batch):
        pad[i, : xs[i].size(0)] = xs[i]

    return pad
[ "def", "pad_list", "(", "xs", ",", "pad_value", ")", ":", "n_batch", "=", "len", "(", "xs", ")", "max_len", "=", "max", "(", "x", ".", "size", "(", "0", ")", "for", "x", "in", "xs", ")", "pad", "=", "xs", "[", "0", "]", ".", "new", "(", "n_batch", ",", "max_len", ",", "*", "xs", "[", "0", "]", ".", "size", "(", ")", "[", "1", ":", "]", ")", ".", "fill_", "(", "pad_value", ")", "for", "i", "in", "range", "(", "n_batch", ")", ":", "pad", "[", "i", ",", ":", "xs", "[", "i", "]", ".", "size", "(", "0", ")", "]", "=", "xs", "[", "i", "]", "return", "pad" ]
https://github.com/espnet/espnet/blob/ea411f3f627b8f101c211e107d0ff7053344ac80/espnet/nets/pytorch_backend/nets_utils.py#L34-L61
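For 1-D inputs this matches PyTorch's built-in torch.nn.utils.rnn.pad_sequence with batch_first=True; a quick comparison:

import torch
from torch.nn.utils.rnn import pad_sequence

xs = [torch.ones(4), torch.ones(2), torch.ones(1)]
print(pad_sequence(xs, batch_first=True, padding_value=0))
# tensor([[1., 1., 1., 1.],
#         [1., 1., 0., 0.],
#         [1., 0., 0., 0.]])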
sysdream/pysqli
37418c9b3fbb760b97f28f6583f0d84f66bbff45
pysqli/core/forge.py
python
SQLForge.take
(self,records, index)
return "(%s LIMIT %d,1)" % (records, index)
Forge a piece of SQL returning the n-th record of a set.
Forge a piece of SQL returning the n-th record of a set.
[ "Forge", "a", "piece", "of", "SQL", "returning", "the", "n", "-", "th", "record", "of", "a", "set", "." ]
def take(self, records, index):
    """
    Forge a piece of SQL returning the n-th
    record of a set.
    """
    return "(%s LIMIT %d,1)" % (records, index)
[ "def", "take", "(", "self", ",", "records", ",", "index", ")", ":", "return", "\"(%s LIMIT %d,1)\"", "%", "(", "records", ",", "index", ")" ]
https://github.com/sysdream/pysqli/blob/37418c9b3fbb760b97f28f6583f0d84f66bbff45/pysqli/core/forge.py#L135-L139
civicsoft/ieddit
2d85fe6655d0a6c9e41a098cf8dad894566e2b87
app/functions/db_functions.py
python
user_id_from_username
(username)
return db.session.query(Iuser.id).filter_by(username=username).first()[0]
returns just the id of a user
returns just the id of a user
[ "returns", "just", "the", "id", "of", "a", "user" ]
def user_id_from_username(username):
    """
    returns just the id of a user
    """
    return db.session.query(Iuser.id).filter_by(username=username).first()[0]
[ "def", "user_id_from_username", "(", "username", ")", ":", "return", "db", ".", "session", ".", "query", "(", "Iuser", ".", "id", ")", ".", "filter_by", "(", "username", "=", "username", ")", ".", "first", "(", ")", "[", "0", "]" ]
https://github.com/civicsoft/ieddit/blob/2d85fe6655d0a6c9e41a098cf8dad894566e2b87/app/functions/db_functions.py#L376-L380
DataDog/integrations-core
934674b29d94b70ccc008f76ea172d0cdae05e1e
gitlab_runner/datadog_checks/gitlab_runner/config_models/defaults.py
python
instance_kerberos_keytab
(field, value)
return get_default_field_value(field, value)
[]
def instance_kerberos_keytab(field, value): return get_default_field_value(field, value)
[ "def", "instance_kerberos_keytab", "(", "field", ",", "value", ")", ":", "return", "get_default_field_value", "(", "field", ",", "value", ")" ]
https://github.com/DataDog/integrations-core/blob/934674b29d94b70ccc008f76ea172d0cdae05e1e/gitlab_runner/datadog_checks/gitlab_runner/config_models/defaults.py#L125-L126
knownsec/ZoomEye-python
30b4a69e5724fce91c1dbd7afa1b04dafb048a58
zoomeye/data.py
python
CliZoomEye.request_data
(self)
[]
def request_data(self):
    if os.path.exists(self.dork):
        self.load()
    else:
        page_count = self.handle_page()
        for page in range(page_count):
            cache_file = Cache(self.dork, self.resource, page)
            if cache_file.check() and self.force is False:
                dork_data_list, self.facet_data, self.total = cache_file.load()
                self.dork_data.extend(dork_data_list)
            else:
                if self.resource == 'host':
                    self.facet = ['app', 'device', 'service', 'os', 'port', 'country', 'city']
                if self.resource == 'web':
                    self.facet = ['webapp', 'component', 'framework', 'frontend', 'server', 'waf', 'os', 'country', 'city']
                try:
                    dork_data_list = self.zoomeye.dork_search(
                        dork=self.dork,
                        page=page + 1,
                        resource=self.resource,
                        facets=self.facet
                    )
                except ValueError:
                    print("the access token expires, please re-run [zoomeye init] command."
                          "it is recommended to use API KEY for initialization!")
                    exit(0)
                self.facet_data = self.zoomeye.facet_data
                self.total = self.zoomeye.total
                self.dork_data.extend(dork_data_list)
                self.cache_dork(page, self.zoomeye.raw_data)
[ "def", "request_data", "(", "self", ")", ":", "if", "os", ".", "path", ".", "exists", "(", "self", ".", "dork", ")", ":", "self", ".", "load", "(", ")", "else", ":", "page_count", "=", "self", ".", "handle_page", "(", ")", "for", "page", "in", "range", "(", "page_count", ")", ":", "cache_file", "=", "Cache", "(", "self", ".", "dork", ",", "self", ".", "resource", ",", "page", ")", "if", "cache_file", ".", "check", "(", ")", "and", "self", ".", "force", "is", "False", ":", "dork_data_list", ",", "self", ".", "facet_data", ",", "self", ".", "total", "=", "cache_file", ".", "load", "(", ")", "self", ".", "dork_data", ".", "extend", "(", "dork_data_list", ")", "else", ":", "if", "self", ".", "resource", "==", "'host'", ":", "self", ".", "facet", "=", "[", "'app'", ",", "'device'", ",", "'service'", ",", "'os'", ",", "'port'", ",", "'country'", ",", "'city'", "]", "if", "self", ".", "resource", "==", "'web'", ":", "self", ".", "facet", "=", "[", "'webapp'", ",", "'component'", ",", "'framework'", ",", "'frontend'", ",", "'server'", ",", "'waf'", ",", "'os'", ",", "'country'", ",", "'city'", "]", "try", ":", "dork_data_list", "=", "self", ".", "zoomeye", ".", "dork_search", "(", "dork", "=", "self", ".", "dork", ",", "page", "=", "page", "+", "1", ",", "resource", "=", "self", ".", "resource", ",", "facets", "=", "self", ".", "facet", ")", "except", "ValueError", ":", "print", "(", "\"the access token expires, please re-run [zoomeye init] command.\"", "\"it is recommended to use API KEY for initialization!\"", ")", "exit", "(", "0", ")", "self", ".", "facet_data", "=", "self", ".", "zoomeye", ".", "facet_data", "self", ".", "total", "=", "self", ".", "zoomeye", ".", "total", "self", ".", "dork_data", ".", "extend", "(", "dork_data_list", ")", "self", ".", "cache_dork", "(", "page", ",", "self", ".", "zoomeye", ".", "raw_data", ")" ]
https://github.com/knownsec/ZoomEye-python/blob/30b4a69e5724fce91c1dbd7afa1b04dafb048a58/zoomeye/data.py#L391-L421
microsoft/botbuilder-python
3d410365461dc434df59bdfeaa2f16d28d9df868
libraries/botbuilder-dialogs/botbuilder/dialogs/object_path.py
python
ObjectPath.get_path_value
( obj, path: str, default: Union[Callable, object] = None )
return default() if callable(default) else copy.deepcopy(default)
Get the value for a path relative to an object.
Get the value for a path relative to an object.
[ "Get", "the", "value", "for", "a", "path", "relative", "to", "an", "object", "." ]
def get_path_value(
    obj, path: str, default: Union[Callable, object] = None
) -> object:
    """
    Get the value for a path relative to an object.
    """
    value = ObjectPath.try_get_path_value(obj, path)
    if value:
        return value

    if default is None:
        raise KeyError(f"Key {path} not found")

    return default() if callable(default) else copy.deepcopy(default)
[ "def", "get_path_value", "(", "obj", ",", "path", ":", "str", ",", "default", ":", "Union", "[", "Callable", ",", "object", "]", "=", "None", ")", "->", "object", ":", "value", "=", "ObjectPath", ".", "try_get_path_value", "(", "obj", ",", "path", ")", "if", "value", ":", "return", "value", "if", "default", "is", "None", ":", "raise", "KeyError", "(", "f\"Key {path} not found\"", ")", "return", "default", "(", ")", "if", "callable", "(", "default", ")", "else", "copy", ".", "deepcopy", "(", "default", ")" ]
https://github.com/microsoft/botbuilder-python/blob/3d410365461dc434df59bdfeaa2f16d28d9df868/libraries/botbuilder-dialogs/botbuilder/dialogs/object_path.py#L109-L122
sxjscience/HKO-7
adeb05a366d4b57f94a5ddb814af57cc62ffe3c5
nowcasting/operators/base_rnn.py
python
BaseStackRNN.split_to_concat
(self, split_states)
return concat_states
[]
def split_to_concat(self, split_states):
    # Concat the states together
    concat_states = []
    for i in range(len(self.state_info)):
        channel_axis = self.state_info[i]['__layout__'].lower().find('c')
        concat_states.append(
            mx.sym.concat(*[ele[i] for ele in split_states], dim=channel_axis))
    return concat_states
[ "def", "split_to_concat", "(", "self", ",", "split_states", ")", ":", "# Concat the states together", "concat_states", "=", "[", "]", "for", "i", "in", "range", "(", "len", "(", "self", ".", "state_info", ")", ")", ":", "channel_axis", "=", "self", ".", "state_info", "[", "i", "]", "[", "'__layout__'", "]", ".", "lower", "(", ")", ".", "find", "(", "'c'", ")", "concat_states", ".", "append", "(", "mx", ".", "sym", ".", "concat", "(", "*", "[", "ele", "[", "i", "]", "for", "ele", "in", "split_states", "]", ",", "dim", "=", "channel_axis", ")", ")", "return", "concat_states" ]
https://github.com/sxjscience/HKO-7/blob/adeb05a366d4b57f94a5ddb814af57cc62ffe3c5/nowcasting/operators/base_rnn.py#L142-L149
python/cpython
e13cdca0f5224ec4e23bdd04bb3120506964bc8b
Mac/BuildScript/build-installer.py
python
shellQuote
(value)
return "'%s'"%(value.replace("'", "'\"'\"'"))
Return the string value in a form that can safely be inserted into a shell command.
Return the string value in a form that can safely be inserted into a shell command.
[ "Return", "the", "string", "value", "in", "a", "form", "that", "can", "safely", "be", "inserted", "into", "a", "shell", "command", "." ]
def shellQuote(value):
    """
    Return the string value in a form that can safely be inserted into
    a shell command.
    """
    return "'%s'" % (value.replace("'", "'\"'\"'"))
[ "def", "shellQuote", "(", "value", ")", ":", "return", "\"'%s'\"", "%", "(", "value", ".", "replace", "(", "\"'\"", ",", "\"'\\\"'\\\"'\"", ")", ")" ]
https://github.com/python/cpython/blob/e13cdca0f5224ec4e23bdd04bb3120506964bc8b/Mac/BuildScript/build-installer.py#L66-L71
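The quoting trick closes the single-quoted string, emits a double-quoted quote, and reopens it; for example:

def shellQuote(value):
    return "'%s'" % (value.replace("'", "'\"'\"'"))

print(shellQuote("it's here"))  # 'it'"'"'s here'

# The standard library offers the same guarantee via shlex.quote:
import shlex
print(shlex.quote("it's here"))  # 'it'"'"'s here'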
quip/quip-api
19f3b32a05ed092a70dc2c616e214aaff8a06de2
samples/wordpress/quip.py
python
QuipClient.get_first_list_item_id
(self, list_tree)
return None
Like `get_last_list_item_id`, but the first item in the list.
Like `get_last_list_item_id`, but the first item in the list.
[ "Like", "get_last_list_item_id", "but", "the", "first", "item", "in", "the", "list", "." ]
def get_first_list_item_id(self, list_tree):
    """Like `get_last_list_item_id`, but the first item in the list."""
    for item in list_tree.iter("li"):
        return item.attrib["id"]
    return None
[ "def", "get_first_list_item_id", "(", "self", ",", "list_tree", ")", ":", "for", "item", "in", "list_tree", ".", "iter", "(", "\"li\"", ")", ":", "return", "item", ".", "attrib", "[", "\"id\"", "]", "return", "None" ]
https://github.com/quip/quip-api/blob/19f3b32a05ed092a70dc2c616e214aaff8a06de2/samples/wordpress/quip.py#L612-L616
cea-hpc/clustershell
c421133ed4baa69e35ff76c476d4097201485344
lib/ClusterShell/NodeUtils.py
python
GroupResolver.all_nodes
(self, namespace=None)
return self._list_nodes(source, 'all')
Find all nodes. You may specify an optional namespace.
Find all nodes. You may specify an optional namespace.
[ "Find", "all", "nodes", ".", "You", "may", "specify", "an", "optional", "namespace", "." ]
def all_nodes(self, namespace=None):
    """
    Find all nodes. You may specify an optional namespace.
    """
    source = self._source(namespace)
    return self._list_nodes(source, 'all')
[ "def", "all_nodes", "(", "self", ",", "namespace", "=", "None", ")", ":", "source", "=", "self", ".", "_source", "(", "namespace", ")", "return", "self", ".", "_list_nodes", "(", "source", ",", "'all'", ")" ]
https://github.com/cea-hpc/clustershell/blob/c421133ed4baa69e35ff76c476d4097201485344/lib/ClusterShell/NodeUtils.py#L492-L497
mjpost/sacrebleu
65a8a9eeccd8c0c7875e875e12edf10db33ab0ba
sacrebleu/utils.py
python
get_reference_files
(test_set: str, langpair: str)
return get_files(test_set, langpair)[1:]
Returns a list of one or more reference file paths for the given
testset/langpair. Downloads the references first if they are not already local.

:param test_set: The test set (e.g., "wmt19")
:param langpair: The language pair (e.g., "de-en")
:return: a list of one or more reference file paths
Returns a list of one or more reference file paths for the given testset/langpair. Downloads the references first if they are not already local.
[ "Returns", "a", "list", "of", "one", "or", "more", "reference", "file", "paths", "for", "the", "given", "testset", "/", "langpair", ".", "Downloads", "the", "references", "first", "if", "they", "are", "not", "already", "local", "." ]
def get_reference_files(test_set: str, langpair: str) -> List[str]:
    """
    Returns a list of one or more reference file paths for the given
    testset/langpair. Downloads the references first if they are not
    already local.

    :param test_set: The test set (e.g., "wmt19")
    :param langpair: The language pair (e.g., "de-en")
    :return: a list of one or more reference file paths
    """
    return get_files(test_set, langpair)[1:]
[ "def", "get_reference_files", "(", "test_set", ":", "str", ",", "langpair", ":", "str", ")", "->", "List", "[", "str", "]", ":", "return", "get_files", "(", "test_set", ",", "langpair", ")", "[", "1", ":", "]" ]
https://github.com/mjpost/sacrebleu/blob/65a8a9eeccd8c0c7875e875e12edf10db33ab0ba/sacrebleu/utils.py#L353-L362
zake7749/Chatbot
209d8b9fc68958e21bf9cae262727c2629527efd
Chatbot/task_modules/medicine/combineData.py
python
writeDDPair2file
(filename,dic)
Output the list of disease/department pairs
Output the list of disease/department pairs
[ "Output", "the", "list", "of", "disease", "/", "department", "pairs" ]
def writeDDPair2file(filename, dic):
    '''Output the list of disease/department pairs
    '''
    with open(filename, 'w', encoding='utf-8') as output:
        for department, diseaseSet in dic.items():
            output.write(department + ":")
            for disease in diseaseSet:
                output.write(disease + ",")
            output.write('\n')
[ "def", "writeDDPair2file", "(", "filename", ",", "dic", ")", ":", "with", "open", "(", "filename", ",", "'w'", ",", "encoding", "=", "'utf-8'", ")", "as", "output", ":", "for", "department", ",", "diseaseSet", "in", "dic", ".", "items", "(", ")", ":", "output", ".", "write", "(", "department", "+", "\":\"", ")", "for", "disease", "in", "diseaseSet", ":", "output", ".", "write", "(", "disease", "+", "\",\"", ")", "output", ".", "write", "(", "'\\n'", ")" ]
https://github.com/zake7749/Chatbot/blob/209d8b9fc68958e21bf9cae262727c2629527efd/Chatbot/task_modules/medicine/combineData.py#L87-L96
xtiankisutsa/MARA_Framework
ac4ac88bfd38f33ae8780a606ed09ab97177c562
tools/AndroBugs/tools/modified/androguard/patch/zipfile.py
python
ZipFile._GetContents
(self)
Read the directory, making sure we close the file if the format is bad.
Read the directory, making sure we close the file if the format is bad.
[ "Read", "the", "directory", "making", "sure", "we", "close", "the", "file", "if", "the", "format", "is", "bad", "." ]
def _GetContents(self):
    """Read the directory, making sure we close the file if the
    format is bad."""
    try:
        self._RealGetContents()
    except BadZipfile:
        if not self._filePassed:
            self.fp.close()
            self.fp = None
        raise
[ "def", "_GetContents", "(", "self", ")", ":", "try", ":", "self", ".", "_RealGetContents", "(", ")", "except", "BadZipfile", ":", "if", "not", "self", ".", "_filePassed", ":", "self", ".", "fp", ".", "close", "(", ")", "self", ".", "fp", "=", "None", "raise" ]
https://github.com/xtiankisutsa/MARA_Framework/blob/ac4ac88bfd38f33ae8780a606ed09ab97177c562/tools/AndroBugs/tools/modified/androguard/patch/zipfile.py#L740-L749
cosmin/stashy
bc627e6e2889d6df7b35f710a1944699abcf9d5f
stashy/repos.py
python
Repository.update
(self, name)
return self._client.put(self.url(), data=dict(name=name))
Update the name of a repository. The repository's slug is derived from its name. If the name changes the slug may also change.
Update the name of a repository.
[ "Update", "the", "name", "of", "a", "repository", "." ]
def update(self, name):
    """
    Update the name of a repository.

    The repository's slug is derived from its name. If the name changes
    the slug may also change.
    """
    return self._client.put(self.url(), data=dict(name=name))
[ "def", "update", "(", "self", ",", "name", ")", ":", "return", "self", ".", "_client", ".", "put", "(", "self", ".", "url", "(", ")", ",", "data", "=", "dict", "(", "name", "=", "name", ")", ")" ]
https://github.com/cosmin/stashy/blob/bc627e6e2889d6df7b35f710a1944699abcf9d5f/stashy/repos.py#L103-L109
pytorch/contrib
c545fedf4f73c8e95f91fd81f2d5bf7fa9c62a61
torchcontrib/optim/swa.py
python
SWA.bn_update
(loader, model, device=None)
r"""Updates BatchNorm running_mean, running_var buffers in the model. It performs one pass over data in `loader` to estimate the activation statistics for BatchNorm layers in the model. Args: loader (torch.utils.data.DataLoader): dataset loader to compute the activation statistics on. Each data batch should be either a tensor, or a list/tuple whose first element is a tensor containing data. model (torch.nn.Module): model for which we seek to update BatchNorm statistics. device (torch.device, optional): If set, data will be trasferred to :attr:`device` before being passed into :attr:`model`.
r"""Updates BatchNorm running_mean, running_var buffers in the model.
[ "r", "Updates", "BatchNorm", "running_mean", "running_var", "buffers", "in", "the", "model", "." ]
def bn_update(loader, model, device=None):
    r"""Updates BatchNorm running_mean, running_var buffers in the model.

    It performs one pass over data in `loader` to estimate the activation
    statistics for BatchNorm layers in the model.

    Args:
        loader (torch.utils.data.DataLoader): dataset loader to compute the
            activation statistics on. Each data batch should be either a
            tensor, or a list/tuple whose first element is a tensor
            containing data.

        model (torch.nn.Module): model for which we seek to update BatchNorm
            statistics.

        device (torch.device, optional): If set, data will be transferred to
            :attr:`device` before being passed into :attr:`model`.
    """
    if not _check_bn(model):
        return
    was_training = model.training
    model.train()
    momenta = {}
    model.apply(_reset_bn)
    model.apply(lambda module: _get_momenta(module, momenta))
    n = 0
    for input in loader:
        if isinstance(input, (list, tuple)):
            input = input[0]
        b = input.size(0)

        momentum = b / float(n + b)
        for module in momenta.keys():
            module.momentum = momentum

        if device is not None:
            input = input.to(device)

        model(input)
        n += b

    model.apply(lambda module: _set_momenta(module, momenta))
    model.train(was_training)
[ "def", "bn_update", "(", "loader", ",", "model", ",", "device", "=", "None", ")", ":", "if", "not", "_check_bn", "(", "model", ")", ":", "return", "was_training", "=", "model", ".", "training", "model", ".", "train", "(", ")", "momenta", "=", "{", "}", "model", ".", "apply", "(", "_reset_bn", ")", "model", ".", "apply", "(", "lambda", "module", ":", "_get_momenta", "(", "module", ",", "momenta", ")", ")", "n", "=", "0", "for", "input", "in", "loader", ":", "if", "isinstance", "(", "input", ",", "(", "list", ",", "tuple", ")", ")", ":", "input", "=", "input", "[", "0", "]", "b", "=", "input", ".", "size", "(", "0", ")", "momentum", "=", "b", "/", "float", "(", "n", "+", "b", ")", "for", "module", "in", "momenta", ".", "keys", "(", ")", ":", "module", ".", "momentum", "=", "momentum", "if", "device", "is", "not", "None", ":", "input", "=", "input", ".", "to", "(", "device", ")", "model", "(", "input", ")", "n", "+=", "b", "model", ".", "apply", "(", "lambda", "module", ":", "_set_momenta", "(", "module", ",", "momenta", ")", ")", "model", ".", "train", "(", "was_training", ")" ]
https://github.com/pytorch/contrib/blob/c545fedf4f73c8e95f91fd81f2d5bf7fa9c62a61/torchcontrib/optim/swa.py#L274-L316
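Current PyTorch ships this pass as torch.optim.swa_utils.update_bn; a minimal sketch with a throwaway model and loader:

import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset

model = nn.Sequential(nn.Linear(8, 8), nn.BatchNorm1d(8))
loader = DataLoader(TensorDataset(torch.randn(64, 8)), batch_size=16)

# One pass over the loader re-estimates BatchNorm running statistics,
# just as bn_update above does.
torch.optim.swa_utils.update_bn(loader, model)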
rizkiarm/LipNet
bec592aa1e66378bc7ab33f6110f1c8a431b3b9a
lipnet/utils/spell.py
python
Spell.correction
(self, word)
return max(self.candidates(word), key=self.P)
Most probable spelling correction for word.
Most probable spelling correction for word.
[ "Most", "probable", "spelling", "correction", "for", "word", "." ]
def correction(self, word):
    "Most probable spelling correction for word."
    return max(self.candidates(word), key=self.P)
[ "def", "correction", "(", "self", ",", "word", ")", ":", "return", "max", "(", "self", ".", "candidates", "(", "word", ")", ",", "key", "=", "self", ".", "P", ")" ]
https://github.com/rizkiarm/LipNet/blob/bec592aa1e66378bc7ab33f6110f1c8a431b3b9a/lipnet/utils/spell.py#L41-L43
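This is the classic Norvig-style corrector; in that design `P` is a unigram language model and `candidates` generates known edit-distance variants. A hedged sketch of `P` only (this repo's exact definitions may differ):

from collections import Counter

WORDS = Counter(["spelling", "spelling", "correction"])  # toy corpus counts

def P(word, N=sum(WORDS.values())):
    # Relative frequency of `word` in the training corpus.
    return WORDS[word] / N

print(P("spelling"))  # 0.666...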
ikergarcia1996/Self-Driving-Car-in-Video-Games
ee59b721b9590df774e553aea0bc694406894407
keyboard/inputsHandler.py
python
W
()
Release all keys and push W
Release all keys and push W
[ "Release", "all", "keys", "and", "push", "W" ]
def W() -> None:
    """
    Release all keys and push W
    """
    PressKey(0x11)
    ReleaseKey(0x1E)
    ReleaseKey(0x1F)
    ReleaseKey(0x20)
[ "def", "W", "(", ")", "->", "None", ":", "PressKey", "(", "0x11", ")", "ReleaseKey", "(", "0x1E", ")", "ReleaseKey", "(", "0x1F", ")", "ReleaseKey", "(", "0x20", ")" ]
https://github.com/ikergarcia1996/Self-Driving-Car-in-Video-Games/blob/ee59b721b9590df774e553aea0bc694406894407/keyboard/inputsHandler.py#L18-L25
mininet/mininet
8a50d3867c49781c60b6171acc6e4b46954b4281
examples/mobility.py
python
MobilitySwitch.addIntf
( self, intf, rename=False, **kwargs )
Add (and reparent) an interface
Add (and reparent) an interface
[ "Add", "(", "and", "reparent", ")", "an", "interface" ]
def addIntf(self, intf, rename=False, **kwargs):
    "Add (and reparent) an interface"
    OVSSwitch.addIntf(self, intf, **kwargs)
    intf.node = self
    if rename:
        self.renameIntf(intf)
[ "def", "addIntf", "(", "self", ",", "intf", ",", "rename", "=", "False", ",", "*", "*", "kwargs", ")", ":", "OVSSwitch", ".", "addIntf", "(", "self", ",", "intf", ",", "*", "*", "kwargs", ")", "intf", ".", "node", "=", "self", "if", "rename", ":", "self", ".", "renameIntf", "(", "intf", ")" ]
https://github.com/mininet/mininet/blob/8a50d3867c49781c60b6171acc6e4b46954b4281/examples/mobility.py#L41-L46
NTMC-Community/MatchZoo
8a487ee5a574356fc91e4f48e219253dc11bcff2
matchzoo/contrib/layers/multi_perspective_layer.py
python
_mask_relevancy_matrix
(relevancy_matrix, mask_lt, mask_rt)
return relevancy_matrix
Mask relevancy matrix.

:param relevancy_matrix: [b, len_rt, len_lt]
:param mask_lt: [b, len_lt]
:param mask_rt: [b, len_rt]
:return: masked_matrix: [b, len_rt, len_lt]
Mask relevancy matrix.
[ "Mask", "relevancy", "matrix", "." ]
def _mask_relevancy_matrix(relevancy_matrix, mask_lt, mask_rt):
    """
    Mask relevancy matrix.

    :param relevancy_matrix: [b, len_rt, len_lt]
    :param mask_lt: [b, len_lt]
    :param mask_rt: [b, len_rt]
    :return: masked_matrix: [b, len_rt, len_lt]
    """
    if mask_lt is not None:
        relevancy_matrix = relevancy_matrix * tf.expand_dims(mask_lt, 1)
    relevancy_matrix = relevancy_matrix * tf.expand_dims(mask_rt, 2)
    return relevancy_matrix
[ "def", "_mask_relevancy_matrix", "(", "relevancy_matrix", ",", "mask_lt", ",", "mask_rt", ")", ":", "if", "mask_lt", "is", "not", "None", ":", "relevancy_matrix", "=", "relevancy_matrix", "*", "tf", ".", "expand_dims", "(", "mask_lt", ",", "1", ")", "relevancy_matrix", "=", "relevancy_matrix", "*", "tf", ".", "expand_dims", "(", "mask_rt", ",", "2", ")", "return", "relevancy_matrix" ]
https://github.com/NTMC-Community/MatchZoo/blob/8a487ee5a574356fc91e4f48e219253dc11bcff2/matchzoo/contrib/layers/multi_perspective_layer.py#L438-L450
rainofmine/Face_Attention_Network
68393da155da02d365e50e4118ca428eb9d24eb7
dataloader.py
python
AspectRatioBasedSampler.__iter__
(self)
[]
def __iter__(self):
    random.shuffle(self.groups)
    for group in self.groups:
        yield group
[ "def", "__iter__", "(", "self", ")", ":", "random", ".", "shuffle", "(", "self", ".", "groups", ")", "for", "group", "in", "self", ".", "groups", ":", "yield", "group" ]
https://github.com/rainofmine/Face_Attention_Network/blob/68393da155da02d365e50e4118ca428eb9d24eb7/dataloader.py#L452-L455
lohriialo/photoshop-scripting-python
6b97da967a5d0a45e54f7c99631b29773b923f09
api_reference/photoshop_2021.py
python
ArtLayer.ApplyBlur
(self)
return self._oleobj_.InvokeTypes(1177563185, LCID, 1, (24, 0), (),)
apply the blur filter
apply the blur filter
[ "apply", "the", "blur", "filter" ]
def ApplyBlur(self):
    'apply the blur filter'
    return self._oleobj_.InvokeTypes(1177563185, LCID, 1, (24, 0), (),)
[ "def", "ApplyBlur", "(", "self", ")", ":", "return", "self", ".", "_oleobj_", ".", "InvokeTypes", "(", "1177563185", ",", "LCID", ",", "1", ",", "(", "24", ",", "0", ")", ",", "(", ")", ",", ")" ]
https://github.com/lohriialo/photoshop-scripting-python/blob/6b97da967a5d0a45e54f7c99631b29773b923f09/api_reference/photoshop_2021.py#L797-L799
twilio/twilio-python
6e1e811ea57a1edfadd5161ace87397c563f6915
twilio/rest/wireless/v1/rate_plan.py
python
RatePlanInstance.date_updated
(self)
return self._properties['date_updated']
:returns: The date when the resource was last updated, given as GMT in ISO 8601 format :rtype: datetime
:returns: The date when the resource was last updated, given as GMT in ISO 8601 format :rtype: datetime
[ ":", "returns", ":", "The", "date", "when", "the", "resource", "was", "last", "updated", "given", "as", "GMT", "in", "ISO", "8601", "format", ":", "rtype", ":", "datetime" ]
def date_updated(self):
    """
    :returns: The date when the resource was last updated, given as GMT in ISO 8601 format
    :rtype: datetime
    """
    return self._properties['date_updated']
[ "def", "date_updated", "(", "self", ")", ":", "return", "self", ".", "_properties", "[", "'date_updated'", "]" ]
https://github.com/twilio/twilio-python/blob/6e1e811ea57a1edfadd5161ace87397c563f6915/twilio/rest/wireless/v1/rate_plan.py#L455-L460
jython/frozen-mirror
b8d7aa4cee50c0c0fe2f4b235dd62922dd0f3f99
lib-python/2.7/modulefinder.py
python
ModuleFinder.any_missing
(self)
return missing + maybe
Return a list of modules that appear to be missing. Use any_missing_maybe() if you want to know which modules are certain to be missing, and which *may* be missing.
Return a list of modules that appear to be missing. Use any_missing_maybe() if you want to know which modules are certain to be missing, and which *may* be missing.
[ "Return", "a", "list", "of", "modules", "that", "appear", "to", "be", "missing", ".", "Use", "any_missing_maybe", "()", "if", "you", "want", "to", "know", "which", "modules", "are", "certain", "to", "be", "missing", "and", "which", "*", "may", "*", "be", "missing", "." ]
def any_missing(self):
    """Return a list of modules that appear to be missing. Use
    any_missing_maybe() if you want to know which modules are
    certain to be missing, and which *may* be missing.
    """
    missing, maybe = self.any_missing_maybe()
    return missing + maybe
[ "def", "any_missing", "(", "self", ")", ":", "missing", ",", "maybe", "=", "self", ".", "any_missing_maybe", "(", ")", "return", "missing", "+", "maybe" ]
https://github.com/jython/frozen-mirror/blob/b8d7aa4cee50c0c0fe2f4b235dd62922dd0f3f99/lib-python/2.7/modulefinder.py#L526-L532
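For context, any_missing is usually reached like this (the script path is hypothetical):

from modulefinder import ModuleFinder

finder = ModuleFinder()
finder.run_script("some_script.py")  # hypothetical path
print(finder.any_missing())          # modules that appear to be missing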
ganeti/ganeti
d340a9ddd12f501bef57da421b5f9b969a4ba905
lib/hypervisor/hv_base.py
python
BaseHypervisor.VersionsSafeForMigration
(src, target)
return False
Decide if migration between those versions is likely to succeed. Given two versions of a hypervisor, give a guess whether live migration from the one version to the other version is likely to succeed. The current
Decide if migration between those versions is likely to succeed.
[ "Decide", "if", "migration", "between", "those", "versions", "is", "likely", "to", "succeed", "." ]
def VersionsSafeForMigration(src, target):
    """Decide if migration between those versions is likely to succeed.

    Given two versions of a hypervisor, give a guess whether live migration
    from the one version to the other version is likely to succeed. The current

    """
    if src == target:
        return True
    return False
[ "def", "VersionsSafeForMigration", "(", "src", ",", "target", ")", ":", "if", "src", "==", "target", ":", "return", "True", "return", "False" ]
https://github.com/ganeti/ganeti/blob/d340a9ddd12f501bef57da421b5f9b969a4ba905/lib/hypervisor/hv_base.py#L458-L468
statsmodels/statsmodels
debbe7ea6ba28fe5bdb78f09f8cac694bef98722
statsmodels/base/model.py
python
LikelihoodModel._fit_collinear
(self, atol=1e-14, rtol=1e-13, **kwds)
return self._fit_zeros(keep_index=idx_keep, **kwds)
experimental, fit of the model without collinear variables

This currently uses QR to drop variables based on the given sequence.
Options will be added in future, when the supporting functions
to identify collinear variables become available.
experimental, fit of the model without collinear variables
[ "experimental", "fit", "of", "the", "model", "without", "collinear", "variables" ]
def _fit_collinear(self, atol=1e-14, rtol=1e-13, **kwds):
    """experimental, fit of the model without collinear variables

    This currently uses QR to drop variables based on the given sequence.
    Options will be added in future, when the supporting functions
    to identify collinear variables become available.
    """
    # ------ copied from PR #2380 remove when merged
    x = self.exog
    tol = atol + rtol * x.var(0)
    r = np.linalg.qr(x, mode='r')
    mask = np.abs(r.diagonal()) < np.sqrt(tol)
    # TODO add to results instance
    # idx_collinear = np.where(mask)[0]
    idx_keep = np.where(~mask)[0]
    return self._fit_zeros(keep_index=idx_keep, **kwds)
[ "def", "_fit_collinear", "(", "self", ",", "atol", "=", "1e-14", ",", "rtol", "=", "1e-13", ",", "*", "*", "kwds", ")", ":", "# ------ copied from PR #2380 remove when merged", "x", "=", "self", ".", "exog", "tol", "=", "atol", "+", "rtol", "*", "x", ".", "var", "(", "0", ")", "r", "=", "np", ".", "linalg", ".", "qr", "(", "x", ",", "mode", "=", "'r'", ")", "mask", "=", "np", ".", "abs", "(", "r", ".", "diagonal", "(", ")", ")", "<", "np", ".", "sqrt", "(", "tol", ")", "# TODO add to results instance", "# idx_collinear = np.where(mask)[0]", "idx_keep", "=", "np", ".", "where", "(", "~", "mask", ")", "[", "0", "]", "return", "self", ".", "_fit_zeros", "(", "keep_index", "=", "idx_keep", ",", "*", "*", "kwds", ")" ]
https://github.com/statsmodels/statsmodels/blob/debbe7ea6ba28fe5bdb78f09f8cac694bef98722/statsmodels/base/model.py#L753-L770
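The QR diagnostic is easy to demonstrate standalone: a column that is a linear combination of others yields a near-zero diagonal entry in R. A sketch with synthetic data:

import numpy as np

rng = np.random.default_rng(0)
x = rng.normal(size=(100, 3))
x = np.column_stack([x, x[:, 0] + x[:, 1]])  # 4th column is collinear

r = np.linalg.qr(x, mode='r')
tol = 1e-14 + 1e-13 * x.var(0)
mask = np.abs(r.diagonal()) < np.sqrt(tol)
print(mask)  # [False False False  True]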
Source-Python-Dev-Team/Source.Python
d0ffd8ccbd1e9923c9bc44936f20613c1c76b7fb
addons/source-python/Python3/distutils/command/register.py
python
register._set_config
(self)
Reads the configuration file and sets attributes.
Reads the configuration file and sets attributes.
[ "Reads", "the", "configuration", "file", "and", "sets", "attributes", "." ]
def _set_config(self):
    ''' Reads the configuration file and sets attributes.
    '''
    config = self._read_pypirc()
    if config != {}:
        self.username = config['username']
        self.password = config['password']
        self.repository = config['repository']
        self.realm = config['realm']
        self.has_config = True
    else:
        if self.repository not in ('pypi', self.DEFAULT_REPOSITORY):
            raise ValueError('%s not found in .pypirc' % self.repository)
        if self.repository == 'pypi':
            self.repository = self.DEFAULT_REPOSITORY
        self.has_config = False
[ "def", "_set_config", "(", "self", ")", ":", "config", "=", "self", ".", "_read_pypirc", "(", ")", "if", "config", "!=", "{", "}", ":", "self", ".", "username", "=", "config", "[", "'username'", "]", "self", ".", "password", "=", "config", "[", "'password'", "]", "self", ".", "repository", "=", "config", "[", "'repository'", "]", "self", ".", "realm", "=", "config", "[", "'realm'", "]", "self", ".", "has_config", "=", "True", "else", ":", "if", "self", ".", "repository", "not", "in", "(", "'pypi'", ",", "self", ".", "DEFAULT_REPOSITORY", ")", ":", "raise", "ValueError", "(", "'%s not found in .pypirc'", "%", "self", ".", "repository", ")", "if", "self", ".", "repository", "==", "'pypi'", ":", "self", ".", "repository", "=", "self", ".", "DEFAULT_REPOSITORY", "self", ".", "has_config", "=", "False" ]
https://github.com/Source-Python-Dev-Team/Source.Python/blob/d0ffd8ccbd1e9923c9bc44936f20613c1c76b7fb/addons/source-python/Python3/distutils/command/register.py#L68-L83
TeamErlich/dna-fountain
4b2338b64db48ef648926748ca079ab9fbfd5dfd
other_screens.py
python
_toDigits
(n, b, width)
return digits.rjust(width, "0")
Convert a positive number n to its digit representation in base b. width is the number of overall digits. base MUST BE SMALLER THAN 10.
Convert a positive number n to its digit representation in base b. width is the number of overall digits. base MUST BE SMALLER THAN 10.
[ "Convert", "a", "positive", "number", "n", "to", "its", "digit", "representation", "in", "base", "b", ".", "width", "is", "the", "number", "of", "overall", "digits", ".", "base", "MUST", "BE", "SMALLER", "THAN", "10", "." ]
def _toDigits(n, b, width):
    """Convert a positive number n to its digit representation in base b.
    width is the number of overall digits. base MUST BE SMALLER THAN 10.
    """
    digits = ''
    while n > 0:
        digits += str(n % b)
        n = n // b
    digits = digits[::-1]  # reversing to little endian
    return digits.rjust(width, "0")
[ "def", "_toDigits", "(", "n", ",", "b", ",", "width", ")", ":", "digits", "=", "''", "while", "n", ">", "0", ":", "digits", "+=", "str", "(", "n", "%", "b", ")", "n", "=", "n", "//", "b", "digits", "=", "digits", "[", ":", ":", "-", "1", "]", "#revsersing to little endian", "return", "digits", ".", "rjust", "(", "width", ",", "\"0\"", ")" ]
https://github.com/TeamErlich/dna-fountain/blob/4b2338b64db48ef648926748ca079ab9fbfd5dfd/other_screens.py#L141-L153
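A quick check of the conversion, assuming _toDigits from above is in scope:

print(_toDigits(10, 2, 8))                       # '00001010'
print(_toDigits(10, 2, 8) == format(10, "08b"))  # True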
python-telegram-bot/python-telegram-bot
ade1529986f5b6d394a65372d6a27045a70725b2
telegram/ext/contexttypes.py
python
ContextTypes.__init__
( self: 'ContextTypes[CallbackContext[Dict, CD, BD], Dict, CD, BD]', chat_data: Type[CD], bot_data: Type[BD], )
[]
def __init__( self: 'ContextTypes[CallbackContext[Dict, CD, BD], Dict, CD, BD]', chat_data: Type[CD], bot_data: Type[BD], ): ...
[ "def", "__init__", "(", "self", ":", "'ContextTypes[CallbackContext[Dict, CD, BD], Dict, CD, BD]'", ",", "chat_data", ":", "Type", "[", "CD", "]", ",", "bot_data", ":", "Type", "[", "BD", "]", ",", ")", ":", "..." ]
https://github.com/python-telegram-bot/python-telegram-bot/blob/ade1529986f5b6d394a65372d6a27045a70725b2/telegram/ext/contexttypes.py#L118-L123
oracle/graalpython
577e02da9755d916056184ec441c26e00b70145c
graalpython/lib-python/3/importlib/_bootstrap_external.py
python
_validate_hash_pyc
(data, source_hash, name, exc_details)
Validate a hash-based pyc by checking the real source hash against the one in
the pyc header.

*data* is the contents of the pyc file. (Only the first 16 bytes are
required.)

*source_hash* is the importlib.util.source_hash() of the source file.

*name* is the name of the module being imported. It is used for logging.

*exc_details* is a dictionary passed to ImportError if it raised for
improved debugging.

An ImportError is raised if the bytecode is stale.
Validate a hash-based pyc by checking the real source hash against the one in the pyc header.
[ "Validate", "a", "hash", "-", "based", "pyc", "by", "checking", "the", "real", "source", "hash", "against", "the", "one", "in", "the", "pyc", "header", "." ]
def _validate_hash_pyc(data, source_hash, name, exc_details):
    """Validate a hash-based pyc by checking the real source hash against the one in
    the pyc header.

    *data* is the contents of the pyc file. (Only the first 16 bytes are
    required.)

    *source_hash* is the importlib.util.source_hash() of the source file.

    *name* is the name of the module being imported. It is used for logging.

    *exc_details* is a dictionary passed to ImportError if it raised for
    improved debugging.

    An ImportError is raised if the bytecode is stale.

    """
    if data[8:16] != source_hash:
        raise ImportError(
            f'hash in bytecode doesn\'t match hash of source {name!r}',
            **exc_details,
        )
[ "def", "_validate_hash_pyc", "(", "data", ",", "source_hash", ",", "name", ",", "exc_details", ")", ":", "if", "data", "[", "8", ":", "16", "]", "!=", "source_hash", ":", "raise", "ImportError", "(", "f'hash in bytecode doesn\\'t match hash of source {name!r}'", ",", "*", "*", "exc_details", ",", ")" ]
https://github.com/oracle/graalpython/blob/577e02da9755d916056184ec441c26e00b70145c/graalpython/lib-python/3/importlib/_bootstrap_external.py#L559-L580
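The 8-byte hash being compared comes from importlib.util.source_hash; a tiny sketch:

import importlib.util

source = b"x = 1\n"
# Returns the 8-byte digest that a hash-based pyc header must carry.
print(importlib.util.source_hash(source))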
windelbouwman/ppci
915c069e0667042c085ec42c78e9e3c9a5295324
ppci/build/tasks.py
python
TaskRunner.run
(self, project, targets=[])
Try to run a project
Try to run a project
[ "Try", "to", "run", "a", "project" ]
def run(self, project, targets=[]):
    """ Try to run a project """
    # Determine what targets to run:
    if targets:
        target_list = targets
    else:
        if project.default:
            target_list = [project.default]
        else:
            target_list = []

    if not target_list:
        self.logger.info('No targets to run!')
        return

    # Check for loops:
    for target in target_list:
        project.check_target(target)

    # Calculate all dependencies:
    # TODO: make this understandable:
    target_list = set.union(
        *[project.dependencies(t) for t in target_list])\
        .union(set(target_list))

    # Lookup actual targets:
    target_list = [project.get_target(target_name)
                   for target_name in target_list]
    target_list.sort()

    self.logger.info('Target sequence: {}'.format(target_list))

    # Run tasks:
    for target in target_list:
        self.logger.info('Target {} Started'.format(target.name))
        for tname, props in target.tasks:
            for arg in props:
                props[arg] = project.expand_macros(props[arg])
            task = self.get_task(tname)(target, props)
            self.logger.info('Running {}'.format(task))
            task.run()
        self.logger.info('Target {} Ready'.format(target.name))
    self.logger.info('All targets done!')
[ "def", "run", "(", "self", ",", "project", ",", "targets", "=", "[", "]", ")", ":", "# Determine what targets to run:", "if", "targets", ":", "target_list", "=", "targets", "else", ":", "if", "project", ".", "default", ":", "target_list", "=", "[", "project", ".", "default", "]", "else", ":", "target_list", "=", "[", "]", "if", "not", "target_list", ":", "self", ".", "logger", ".", "info", "(", "'No targets to run!'", ")", "return", "# Check for loops:", "for", "target", "in", "target_list", ":", "project", ".", "check_target", "(", "target", ")", "# Calculate all dependencies:", "# TODO: make this understandable:", "target_list", "=", "set", ".", "union", "(", "*", "[", "project", ".", "dependencies", "(", "t", ")", "for", "t", "in", "target_list", "]", ")", ".", "union", "(", "set", "(", "target_list", ")", ")", "# Lookup actual targets:", "target_list", "=", "[", "project", ".", "get_target", "(", "target_name", ")", "for", "target_name", "in", "target_list", "]", "target_list", ".", "sort", "(", ")", "self", ".", "logger", ".", "info", "(", "'Target sequence: {}'", ".", "format", "(", "target_list", ")", ")", "# Run tasks:", "for", "target", "in", "target_list", ":", "self", ".", "logger", ".", "info", "(", "'Target {} Started'", ".", "format", "(", "target", ".", "name", ")", ")", "for", "tname", ",", "props", "in", "target", ".", "tasks", ":", "for", "arg", "in", "props", ":", "props", "[", "arg", "]", "=", "project", ".", "expand_macros", "(", "props", "[", "arg", "]", ")", "task", "=", "self", ".", "get_task", "(", "tname", ")", "(", "target", ",", "props", ")", "self", ".", "logger", ".", "info", "(", "'Running {}'", ".", "format", "(", "task", ")", ")", "task", ".", "run", "(", ")", "self", ".", "logger", ".", "info", "(", "'Target {} Ready'", ".", "format", "(", "target", ".", "name", ")", ")", "self", ".", "logger", ".", "info", "(", "'All targets done!'", ")" ]
https://github.com/windelbouwman/ppci/blob/915c069e0667042c085ec42c78e9e3c9a5295324/ppci/build/tasks.py#L182-L224
whtlkeep/BAT-algorithms
1a339effd3719be5e94e742490e582bf4a03b7c0
Array & String/S_划分字母区间.py
python
partition_labels_return_len
(string)
return result
[]
def partition_labels_return_len(string): order = [] start_end = dict() # track the first and last position of each character for i, c in enumerate(string): if c not in order: order.append(c) start_end[c] = [i, i] else: start_end[c][-1] = i result = list() temp = start_end[order[0]] for k in order[1:]: aft_se = start_end[k] if aft_se[0] > temp[1]: result.append(temp[1] - temp[0] + 1) temp = aft_se else: temp[1] = max(temp[1], aft_se[1]) result.append(temp[1] - temp[0] + 1) return result
[ "def", "partition_labels_return_len", "(", "string", ")", ":", "order", "=", "[", "]", "start_end", "=", "dict", "(", ")", "# track the first and last position of each character", "for", "i", ",", "c", "in", "enumerate", "(", "string", ")", ":", "if", "c", "not", "in", "order", ":", "order", ".", "append", "(", "c", ")", "start_end", "[", "c", "]", "=", "[", "i", ",", "i", "]", "else", ":", "start_end", "[", "c", "]", "[", "-", "1", "]", "=", "i", "result", "=", "list", "(", ")", "temp", "=", "start_end", "[", "order", "[", "0", "]", "]", "for", "k", "in", "order", "[", "1", ":", "]", ":", "aft_se", "=", "start_end", "[", "k", "]", "if", "aft_se", "[", "0", "]", ">", "temp", "[", "1", "]", ":", "result", ".", "append", "(", "temp", "[", "1", "]", "-", "temp", "[", "0", "]", "+", "1", ")", "temp", "=", "aft_se", "else", ":", "temp", "[", "1", "]", "=", "max", "(", "temp", "[", "1", "]", ",", "aft_se", "[", "1", "]", ")", "result", ".", "append", "(", "temp", "[", "1", "]", "-", "temp", "[", "0", "]", "+", "1", ")", "return", "result" ]
https://github.com/whtlkeep/BAT-algorithms/blob/1a339effd3719be5e94e742490e582bf4a03b7c0/Array & String/S_划分字母区间.py#L71-L90
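A minimal usage sketch for the record above, assuming partition_labels_return_len is in scope; the input is the classic LeetCode 763 example string:
print(partition_labels_return_len("ababcbacadefegdehijhklij"))  # [9, 7, 8]: "ababcbaca", "defegde", "hijhklij"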
dmnfarrell/tkintertable
f3fc8950aaa0f087de100d671ce13c24006d9639
tkintertable/Plot.py
python
pylabPlotter.plotXY
(self, x, y, title='', xlabel=None, ylabel=None, shape=None, clr=None, lw=1)
return line
Do x-y plot of 2 lists
Do x-y plot of 2 lists
[ "Do", "x", "-", "y", "plot", "of", "2", "lists" ]
def plotXY(self, x, y, title='', xlabel=None, ylabel=None, shape=None, clr=None, lw=1): """Do x-y plot of 2 lists""" if shape == None: shape = self.shape if clr == None: clr = 'b' if self.xscale == 1: if self.yscale == 1: line, = pylab.loglog(x, y, shape, color=clr, linewidth=lw) else: line, = pylab.semilogx(x, y, shape, color=clr, linewidth=lw) elif self.yscale == 1: line, = pylab.semilogy(x, y, shape, color=clr, linewidth=lw) else: line, = pylab.plot(x, y, shape, color=clr, linewidth=lw) return line
[ "def", "plotXY", "(", "self", ",", "x", ",", "y", ",", "title", "=", "''", ",", "xlabel", "=", "None", ",", "ylabel", "=", "None", ",", "shape", "=", "None", ",", "clr", "=", "None", ",", "lw", "=", "1", ")", ":", "if", "shape", "==", "None", ":", "shape", "=", "self", ".", "shape", "if", "clr", "==", "None", ":", "clr", "=", "'b'", "if", "self", ".", "xscale", "==", "1", ":", "if", "self", ".", "yscale", "==", "1", ":", "line", ",", "=", "pylab", ".", "loglog", "(", "x", ",", "y", ",", "shape", ",", "color", "=", "clr", ",", "linewidth", "=", "lw", ")", "else", ":", "line", ",", "=", "pylab", ".", "semilogx", "(", "x", ",", "y", ",", "shape", ",", "color", "=", "clr", ",", "linewidth", "=", "lw", ")", "elif", "self", ".", "yscale", "==", "1", ":", "line", ",", "=", "pylab", ".", "semilogy", "(", "x", ",", "y", ",", "shape", ",", "color", "=", "clr", ",", "linewidth", "=", "lw", ")", "else", ":", "line", ",", "=", "pylab", ".", "plot", "(", "x", ",", "y", ",", "shape", ",", "color", "=", "clr", ",", "linewidth", "=", "lw", ")", "return", "line" ]
https://github.com/dmnfarrell/tkintertable/blob/f3fc8950aaa0f087de100d671ce13c24006d9639/tkintertable/Plot.py#L92-L108
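The method above is only a dispatcher between pylab's log/linear plotting calls; a minimal standalone sketch of the linear-axes case it reduces to (assumes matplotlib/pylab is installed):
import pylab
line, = pylab.plot([0, 1, 2, 3], [0, 1, 4, 9], 'o-', color='b', linewidth=1)  # same call shape as the final branch
pylab.show()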
graalvm/mx
29c0debab406352df3af246be2f8973be5db69ae
mx_ide_eclipse.py
python
_source_locator_memento
(deps, jdk=None)
return slm, sources
[]
def _source_locator_memento(deps, jdk=None): slm = mx.XMLDoc() slm.open('sourceLookupDirector') slm.open('sourceContainers', {'duplicates' : 'false'}) javaCompliance = None sources = [] for dep in deps: if dep.isLibrary(): if hasattr(dep, 'eclipse.container'): memento = mx.XMLDoc().element('classpathContainer', {'path' : getattr(dep, 'eclipse.container')}).xml(standalone='no') slm.element('classpathContainer', {'memento' : memento, 'typeId':'org.eclipse.jdt.launching.sourceContainer.classpathContainer'}) sources.append(getattr(dep, 'eclipse.container') +' [classpathContainer]') elif dep.get_source_path(resolve=True): memento = mx.XMLDoc().element('archive', {'detectRoot' : 'true', 'path' : dep.get_source_path(resolve=True)}).xml(standalone='no') slm.element('container', {'memento' : memento, 'typeId':'org.eclipse.debug.core.containerType.externalArchive'}) sources.append(dep.get_source_path(resolve=True) + ' [externalArchive]') elif dep.isJdkLibrary(): if jdk is None: jdk = mx.get_jdk(tag='default') path = dep.get_source_path(jdk) if path: if os.path.isdir(path): memento = mx.XMLDoc().element('directory', {'nest' : 'false', 'path' : path}).xml(standalone='no') slm.element('container', {'memento' : memento, 'typeId':'org.eclipse.debug.core.containerType.directory'}) sources.append(path + ' [directory]') else: memento = mx.XMLDoc().element('archive', {'detectRoot' : 'true', 'path' : path}).xml(standalone='no') slm.element('container', {'memento' : memento, 'typeId':'org.eclipse.debug.core.containerType.externalArchive'}) sources.append(path + ' [externalArchive]') elif dep.isProject(): if not dep.isJavaProject(): continue memento = mx.XMLDoc().element('javaProject', {'name' : dep.name}).xml(standalone='no') slm.element('container', {'memento' : memento, 'typeId':'org.eclipse.jdt.launching.sourceContainer.javaProject'}) sources.append(dep.name + ' [javaProject]') if javaCompliance is None or dep.javaCompliance > javaCompliance: javaCompliance = dep.javaCompliance if javaCompliance: jdkContainer = 'org.eclipse.jdt.launching.JRE_CONTAINER/org.eclipse.jdt.internal.debug.ui.launcher.StandardVMType/' + _to_EclipseJRESystemLibrary(javaCompliance) memento = mx.XMLDoc().element('classpathContainer', {'path' : jdkContainer}).xml(standalone='no') slm.element('classpathContainer', {'memento' : memento, 'typeId':'org.eclipse.jdt.launching.sourceContainer.classpathContainer'}) sources.append(jdkContainer + ' [classpathContainer]') else: memento = mx.XMLDoc().element('classpathContainer', {'path' : 'org.eclipse.jdt.launching.JRE_CONTAINER'}).xml(standalone='no') slm.element('classpathContainer', {'memento' : memento, 'typeId':'org.eclipse.jdt.launching.sourceContainer.classpathContainer'}) sources.append('org.eclipse.jdt.launching.JRE_CONTAINER [classpathContainer]') slm.close('sourceContainers') slm.close('sourceLookupDirector') return slm, sources
[ "def", "_source_locator_memento", "(", "deps", ",", "jdk", "=", "None", ")", ":", "slm", "=", "mx", ".", "XMLDoc", "(", ")", "slm", ".", "open", "(", "'sourceLookupDirector'", ")", "slm", ".", "open", "(", "'sourceContainers'", ",", "{", "'duplicates'", ":", "'false'", "}", ")", "javaCompliance", "=", "None", "sources", "=", "[", "]", "for", "dep", "in", "deps", ":", "if", "dep", ".", "isLibrary", "(", ")", ":", "if", "hasattr", "(", "dep", ",", "'eclipse.container'", ")", ":", "memento", "=", "mx", ".", "XMLDoc", "(", ")", ".", "element", "(", "'classpathContainer'", ",", "{", "'path'", ":", "getattr", "(", "dep", ",", "'eclipse.container'", ")", "}", ")", ".", "xml", "(", "standalone", "=", "'no'", ")", "slm", ".", "element", "(", "'classpathContainer'", ",", "{", "'memento'", ":", "memento", ",", "'typeId'", ":", "'org.eclipse.jdt.launching.sourceContainer.classpathContainer'", "}", ")", "sources", ".", "append", "(", "getattr", "(", "dep", ",", "'eclipse.container'", ")", "+", "' [classpathContainer]'", ")", "elif", "dep", ".", "get_source_path", "(", "resolve", "=", "True", ")", ":", "memento", "=", "mx", ".", "XMLDoc", "(", ")", ".", "element", "(", "'archive'", ",", "{", "'detectRoot'", ":", "'true'", ",", "'path'", ":", "dep", ".", "get_source_path", "(", "resolve", "=", "True", ")", "}", ")", ".", "xml", "(", "standalone", "=", "'no'", ")", "slm", ".", "element", "(", "'container'", ",", "{", "'memento'", ":", "memento", ",", "'typeId'", ":", "'org.eclipse.debug.core.containerType.externalArchive'", "}", ")", "sources", ".", "append", "(", "dep", ".", "get_source_path", "(", "resolve", "=", "True", ")", "+", "' [externalArchive]'", ")", "elif", "dep", ".", "isJdkLibrary", "(", ")", ":", "if", "jdk", "is", "None", ":", "jdk", "=", "mx", ".", "get_jdk", "(", "tag", "=", "'default'", ")", "path", "=", "dep", ".", "get_source_path", "(", "jdk", ")", "if", "path", ":", "if", "os", ".", "path", ".", "isdir", "(", "path", ")", ":", "memento", "=", "mx", ".", "XMLDoc", "(", ")", ".", "element", "(", "'directory'", ",", "{", "'nest'", ":", "'false'", ",", "'path'", ":", "path", "}", ")", ".", "xml", "(", "standalone", "=", "'no'", ")", "slm", ".", "element", "(", "'container'", ",", "{", "'memento'", ":", "memento", ",", "'typeId'", ":", "'org.eclipse.debug.core.containerType.directory'", "}", ")", "sources", ".", "append", "(", "path", "+", "' [directory]'", ")", "else", ":", "memento", "=", "mx", ".", "XMLDoc", "(", ")", ".", "element", "(", "'archive'", ",", "{", "'detectRoot'", ":", "'true'", ",", "'path'", ":", "path", "}", ")", ".", "xml", "(", "standalone", "=", "'no'", ")", "slm", ".", "element", "(", "'container'", ",", "{", "'memento'", ":", "memento", ",", "'typeId'", ":", "'org.eclipse.debug.core.containerType.externalArchive'", "}", ")", "sources", ".", "append", "(", "path", "+", "' [externalArchive]'", ")", "elif", "dep", ".", "isProject", "(", ")", ":", "if", "not", "dep", ".", "isJavaProject", "(", ")", ":", "continue", "memento", "=", "mx", ".", "XMLDoc", "(", ")", ".", "element", "(", "'javaProject'", ",", "{", "'name'", ":", "dep", ".", "name", "}", ")", ".", "xml", "(", "standalone", "=", "'no'", ")", "slm", ".", "element", "(", "'container'", ",", "{", "'memento'", ":", "memento", ",", "'typeId'", ":", "'org.eclipse.jdt.launching.sourceContainer.javaProject'", "}", ")", "sources", ".", "append", "(", "dep", ".", "name", "+", "' [javaProject]'", ")", "if", "javaCompliance", "is", "None", "or", "dep", ".", "javaCompliance", ">", "javaCompliance", ":", 
"javaCompliance", "=", "dep", ".", "javaCompliance", "if", "javaCompliance", ":", "jdkContainer", "=", "'org.eclipse.jdt.launching.JRE_CONTAINER/org.eclipse.jdt.internal.debug.ui.launcher.StandardVMType/'", "+", "_to_EclipseJRESystemLibrary", "(", "javaCompliance", ")", "memento", "=", "mx", ".", "XMLDoc", "(", ")", ".", "element", "(", "'classpathContainer'", ",", "{", "'path'", ":", "jdkContainer", "}", ")", ".", "xml", "(", "standalone", "=", "'no'", ")", "slm", ".", "element", "(", "'classpathContainer'", ",", "{", "'memento'", ":", "memento", ",", "'typeId'", ":", "'org.eclipse.jdt.launching.sourceContainer.classpathContainer'", "}", ")", "sources", ".", "append", "(", "jdkContainer", "+", "' [classpathContainer]'", ")", "else", ":", "memento", "=", "mx", ".", "XMLDoc", "(", ")", ".", "element", "(", "'classpathContainer'", ",", "{", "'path'", ":", "'org.eclipse.jdt.launching.JRE_CONTAINER'", "}", ")", ".", "xml", "(", "standalone", "=", "'no'", ")", "slm", ".", "element", "(", "'classpathContainer'", ",", "{", "'memento'", ":", "memento", ",", "'typeId'", ":", "'org.eclipse.jdt.launching.sourceContainer.classpathContainer'", "}", ")", "sources", ".", "append", "(", "'org.eclipse.jdt.launching.JRE_CONTAINER [classpathContainer]'", ")", "slm", ".", "close", "(", "'sourceContainers'", ")", "slm", ".", "close", "(", "'sourceLookupDirector'", ")", "return", "slm", ",", "sources" ]
https://github.com/graalvm/mx/blob/29c0debab406352df3af246be2f8973be5db69ae/mx_ide_eclipse.py#L261-L313
PowerScript/KatanaFramework
0f6ad90a88de865d58ec26941cb4460501e75496
lib/IPy/IPy.py
python
IPint.__str__
(self)
return self.strCompressed()
Dispatch to the preferred String Representation. Used to implement str(IP).
Dispatch to the preferred String Representation.
[ "Dispatch", "to", "the", "preferred", "String", "Representation", "." ]
def __str__(self): """Dispatch to the preferred String Representation. Used to implement str(IP).""" return self.strCompressed()
[ "def", "__str__", "(", "self", ")", ":", "return", "self", ".", "strCompressed", "(", ")" ]
https://github.com/PowerScript/KatanaFramework/blob/0f6ad90a88de865d58ec26941cb4460501e75496/lib/IPy/IPy.py#L684-L689
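A quick sketch of the dispatch documented above (assumes the third-party IPy package is installed):
from IPy import IP
print(str(IP('192.168.1.0/24')))  # '192.168.1.0/24' via strCompressed()
print(str(IP('2001:db8::1')))     # IPv6 prints in compressed form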
NoGameNoLife00/mybolg
afe17ea5bfe405e33766e5682c43a4262232ee12
libs/werkzeug/urls.py
python
url_encode_stream
(obj, stream=None, charset='utf-8', encode_keys=False, sort=False, key=None, separator=b'&')
Like :meth:`url_encode` but writes the results to a stream object. If the stream is `None` a generator over all encoded pairs is returned. .. versionadded:: 0.8 :param obj: the object to encode into a query string. :param stream: a stream to write the encoded object into or `None` if an iterator over the encoded pairs should be returned. In that case the separator argument is ignored. :param charset: the charset of the query string. :param encode_keys: set to `True` if you have unicode keys. (Ignored on Python 3.x) :param sort: set to `True` if you want parameters to be sorted by `key`. :param separator: the separator to be used for the pairs. :param key: an optional function to be used for sorting. For more details check out the :func:`sorted` documentation.
Like :meth:`url_encode` but writes the results to a stream object. If the stream is `None` a generator over all encoded pairs is returned.
[ "Like", ":", "meth", ":", "url_encode", "but", "writes", "the", "results", "to", "a", "stream", "object", ".", "If", "the", "stream", "is", "None", "a", "generator", "over", "all", "encoded", "pairs", "is", "returned", "." ]
def url_encode_stream(obj, stream=None, charset='utf-8', encode_keys=False, sort=False, key=None, separator=b'&'): """Like :meth:`url_encode` but writes the results to a stream object. If the stream is `None` a generator over all encoded pairs is returned. .. versionadded:: 0.8 :param obj: the object to encode into a query string. :param stream: a stream to write the encoded object into or `None` if an iterator over the encoded pairs should be returned. In that case the separator argument is ignored. :param charset: the charset of the query string. :param encode_keys: set to `True` if you have unicode keys. (Ignored on Python 3.x) :param sort: set to `True` if you want parameters to be sorted by `key`. :param separator: the separator to be used for the pairs. :param key: an optional function to be used for sorting. For more details check out the :func:`sorted` documentation. """ separator = to_native(separator, 'ascii') gen = _url_encode_impl(obj, charset, encode_keys, sort, key) if stream is None: return gen for idx, chunk in enumerate(gen): if idx: stream.write(separator) stream.write(chunk)
[ "def", "url_encode_stream", "(", "obj", ",", "stream", "=", "None", ",", "charset", "=", "'utf-8'", ",", "encode_keys", "=", "False", ",", "sort", "=", "False", ",", "key", "=", "None", ",", "separator", "=", "b'&'", ")", ":", "separator", "=", "to_native", "(", "separator", ",", "'ascii'", ")", "gen", "=", "_url_encode_impl", "(", "obj", ",", "charset", ",", "encode_keys", ",", "sort", ",", "key", ")", "if", "stream", "is", "None", ":", "return", "gen", "for", "idx", ",", "chunk", "in", "enumerate", "(", "gen", ")", ":", "if", "idx", ":", "stream", ".", "write", "(", "separator", ")", "stream", ".", "write", "(", "chunk", ")" ]
https://github.com/NoGameNoLife00/mybolg/blob/afe17ea5bfe405e33766e5682c43a4262232ee12/libs/werkzeug/urls.py#L811-L838
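A minimal round trip through the streaming variant (assumes a Werkzeug release that still ships url_encode_stream; it was removed in Werkzeug 2.1):
from io import StringIO
from werkzeug.urls import url_encode_stream
buf = StringIO()
url_encode_stream({'a': 1, 'b': 'two'}, buf)
print(buf.getvalue())  # 'a=1&b=two' (pair order may vary)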
realpython/book2-exercises
cde325eac8e6d8cff2316601c2e5b36bb46af7d0
web2py/venv/lib/python2.7/site-packages/pip/_vendor/distlib/metadata.py
python
LegacyMetadata.read
(self, filepath)
Read the metadata values from a file path.
Read the metadata values from a file path.
[ "Read", "the", "metadata", "values", "from", "a", "file", "path", "." ]
def read(self, filepath): """Read the metadata values from a file path.""" fp = codecs.open(filepath, 'r', encoding='utf-8') try: self.read_file(fp) finally: fp.close()
[ "def", "read", "(", "self", ",", "filepath", ")", ":", "fp", "=", "codecs", ".", "open", "(", "filepath", ",", "'r'", ",", "encoding", "=", "'utf-8'", ")", "try", ":", "self", ".", "read_file", "(", "fp", ")", "finally", ":", "fp", ".", "close", "(", ")" ]
https://github.com/realpython/book2-exercises/blob/cde325eac8e6d8cff2316601c2e5b36bb46af7d0/web2py/venv/lib/python2.7/site-packages/pip/_vendor/distlib/metadata.py#L330-L336
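A hypothetical round trip with the class above; the PKG-INFO path is made up, and the import assumes a standalone distlib install rather than pip's vendored copy:
from distlib.metadata import LegacyMetadata
md = LegacyMetadata()
md.read('PKG-INFO')               # hypothetical path to a UTF-8 metadata file
print(md['Name'], md['Version'])  # LegacyMetadata supports mapping-style access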
UCL-INGI/INGInious
60f10cb4c375ce207471043e76bd813220b95399
inginious/frontend/pages/api/_api_page.py
python
APIPage.POST
(self, *args, **kwargs)
return self._handle_api(self.API_POST, args, kwargs)
POST request
POST request
[ "POST", "request" ]
def POST(self, *args, **kwargs): """ POST request """ return self._handle_api(self.API_POST, args, kwargs)
[ "def", "POST", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "_handle_api", "(", "self", ".", "API_POST", ",", "args", ",", "kwargs", ")" ]
https://github.com/UCL-INGI/INGInious/blob/60f10cb4c375ce207471043e76bd813220b95399/inginious/frontend/pages/api/_api_page.py#L27-L29
francisck/DanderSpritz_docs
86bb7caca5a957147f120b18bb5c31f299914904
Python/Core/Lib/decimal.py
python
Decimal.logical_and
(self, other, context=None)
Applies an 'and' operation between self and other's digits.
Applies an 'and' operation between self and other's digits.
[ "Applies", "an", "and", "operation", "between", "self", "and", "other", "s", "digits", "." ]
def logical_and(self, other, context=None): """Applies an 'and' operation between self and other's digits.""" if context is None: context = getcontext() other = _convert_other(other, raiseit=True) if not self._islogical() or not other._islogical(): return context._raise_error(InvalidOperation) else: opa, opb = self._fill_logical(context, self._int, other._int) result = ''.join([ str(int(a) & int(b)) for a, b in zip(opa, opb) ]) return _dec_from_triple(0, result.lstrip('0') or '0', 0)
[ "def", "logical_and", "(", "self", ",", "other", ",", "context", "=", "None", ")", ":", "if", "context", "is", "None", ":", "context", "=", "getcontext", "(", ")", "other", "=", "_convert_other", "(", "other", ",", "raiseit", "=", "True", ")", "if", "not", "self", ".", "_islogical", "(", ")", "or", "not", "other", ".", "_islogical", "(", ")", ":", "return", "context", ".", "_raise_error", "(", "InvalidOperation", ")", "else", ":", "opa", ",", "opb", "=", "self", ".", "_fill_logical", "(", "context", ",", "self", ".", "_int", ",", "other", ".", "_int", ")", "result", "=", "''", ".", "join", "(", "[", "str", "(", "int", "(", "a", ")", "&", "int", "(", "b", ")", ")", "for", "a", ",", "b", "in", "zip", "(", "opa", ",", "opb", ")", "]", ")", "return", "_dec_from_triple", "(", "0", ",", "result", ".", "lstrip", "(", "'0'", ")", "or", "'0'", ",", "0", ")" ]
https://github.com/francisck/DanderSpritz_docs/blob/86bb7caca5a957147f120b18bb5c31f299914904/Python/Core/Lib/decimal.py#L2588-L2598
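Both operands must be "logical" Decimals (sign 0, exponent 0, digits 0/1 only); a standard-library check of the digit-wise AND:
from decimal import Decimal
print(Decimal('1100').logical_and(Decimal('1010')))  # Decimal('1000')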
agronholm/anyio
ac3e7c619913bd0ddf9c36b6e633b278d07405b7
src/anyio/_core/_synchronization.py
python
Condition.statistics
(self)
return ConditionStatistics(len(self._waiters), self._lock.statistics())
Return statistics about the current state of this condition. .. versionadded:: 3.0
Return statistics about the current state of this condition.
[ "Return", "statistics", "about", "the", "current", "state", "of", "this", "condition", "." ]
def statistics(self) -> ConditionStatistics: """ Return statistics about the current state of this condition. .. versionadded:: 3.0 """ return ConditionStatistics(len(self._waiters), self._lock.statistics())
[ "def", "statistics", "(", "self", ")", "->", "ConditionStatistics", ":", "return", "ConditionStatistics", "(", "len", "(", "self", ".", "_waiters", ")", ",", "self", ".", "_lock", ".", "statistics", "(", ")", ")" ]
https://github.com/agronholm/anyio/blob/ac3e7c619913bd0ddf9c36b6e633b278d07405b7/src/anyio/_core/_synchronization.py#L264-L270
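A minimal sketch (assumes anyio >= 3.0, where statistics() was added):
import anyio

async def main() -> None:
    cond = anyio.Condition()
    print(cond.statistics())  # tasks_waiting is 0: nobody is waiting yet

anyio.run(main)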
cltk/cltk
1a8c2f5ef72389e2579dfce1fa5af8e59ebc9ec1
src/cltk/lemmatize/lat.py
python
LatinBackoffLemmatizer.__repr__
(self: object)
return f"<BackoffLatinLemmatizer v0.2>"
[]
def __repr__(self: object): return f"<BackoffLatinLemmatizer v0.2>"
[ "def", "__repr__", "(", "self", ":", "object", ")", ":", "return", "f\"<BackoffLatinLemmatizer v0.2>\"" ]
https://github.com/cltk/cltk/blob/1a8c2f5ef72389e2579dfce1fa5af8e59ebc9ec1/src/cltk/lemmatize/lat.py#L601-L602
pyqt/examples
843bb982917cecb2350b5f6d7f42c9b7fb142ec1
src/pyqt-official/qml/referenceexamples/default.py
python
BirthdayParty.guests
(self)
return QQmlListProperty(Person, self, self._guests)
[]
def guests(self): return QQmlListProperty(Person, self, self._guests)
[ "def", "guests", "(", "self", ")", ":", "return", "QQmlListProperty", "(", "Person", ",", "self", ",", "self", ".", "_guests", ")" ]
https://github.com/pyqt/examples/blob/843bb982917cecb2350b5f6d7f42c9b7fb142ec1/src/pyqt-official/qml/referenceexamples/default.py#L119-L120
LudovicRousseau/pyscard
c0a5e2f626be69a0fc7b530631471cf014e4b20e
smartcard/System.py
python
readers
(groups=[])
return smartcard.reader.ReaderFactory.ReaderFactory.readers(groups)
Returns the list of smartcard readers in groups. If group is not specified, returns the list of all smartcard readers. import smartcard r=smartcard.readers() r=smartcard.readers(['SCard$DefaultReaders', 'MyReaderGroup'])
Returns the list of smartcard readers in groups.
[ "Returns", "the", "list", "of", "smartcard", "readers", "in", "groups", "." ]
def readers(groups=[]): """Returns the list of smartcard readers in groups. If group is not specified, returns the list of all smartcard readers. import smartcard r=smartcard.readers() r=smartcard.readers(['SCard$DefaultReaders', 'MyReaderGroup']) """ return smartcard.reader.ReaderFactory.ReaderFactory.readers(groups)
[ "def", "readers", "(", "groups", "=", "[", "]", ")", ":", "return", "smartcard", ".", "reader", ".", "ReaderFactory", ".", "ReaderFactory", ".", "readers", "(", "groups", ")" ]
https://github.com/LudovicRousseau/pyscard/blob/c0a5e2f626be69a0fc7b530631471cf014e4b20e/smartcard/System.py#L31-L41
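Typical usage (requires the pyscard package and a working PC/SC stack with at least one reader attached):
from smartcard.System import readers
for reader in readers():  # empty group list means: all readers
    print(reader)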
quantumblacklabs/causalnex
127d9324a3d68c1795299c7522f22cdea880f344
causalnex/network/network.py
python
BayesianNetwork.cpds
(self)
return cpds
Conditional Probability Distributions of each node within the Bayesian Network. The row-index of each dataframe is all possible states for the node. The col-index of each dataframe is a MultiIndex that describes all possible permutations of parent states. For example, for a node :math:`P(A | B, D)`, where .. math:: - A \\in \\text{{"a", "b", "c", "d"}} - B \\in \\text{{"x", "y", "z"}} - C \\in \\text{{False, True}} >>> b x y z >>> d False True False True False True >>> a >>> a 0.265306 0.214286 0.066667 0.25 0.444444 0.000000 >>> b 0.183673 0.214286 0.200000 0.25 0.222222 0.666667 >>> c 0.285714 0.285714 0.400000 0.25 0.333333 0.333333 >>> d 0.265306 0.285714 0.333333 0.25 0.000000 0.000000 Returns: Conditional Probability Distributions of each node within the Bayesian Network.
Conditional Probability Distributions of each node within the Bayesian Network.
[ "Conditional", "Probability", "Distributions", "of", "each", "node", "within", "the", "Bayesian", "Network", "." ]
def cpds(self) -> Dict[str, pd.DataFrame]: """ Conditional Probability Distributions of each node within the Bayesian Network. The row-index of each dataframe is all possible states for the node. The col-index of each dataframe is a MultiIndex that describes all possible permutations of parent states. For example, for a node :math:`P(A | B, D)`, where .. math:: - A \\in \\text{{"a", "b", "c", "d"}} - B \\in \\text{{"x", "y", "z"}} - C \\in \\text{{False, True}} >>> b x y z >>> d False True False True False True >>> a >>> a 0.265306 0.214286 0.066667 0.25 0.444444 0.000000 >>> b 0.183673 0.214286 0.200000 0.25 0.222222 0.666667 >>> c 0.285714 0.285714 0.400000 0.25 0.333333 0.333333 >>> d 0.265306 0.285714 0.333333 0.25 0.000000 0.000000 Returns: Conditional Probability Distributions of each node within the Bayesian Network. """ cpds = {} for cpd in self._model.cpds: names = cpd.variables[1:] cols = [""] if names: cols = pd.MultiIndex.from_product( [sorted(self._node_states[var].keys()) for var in names], names=names, ) cpds[cpd.variable] = pd.DataFrame( cpd.values.reshape( len(self._node_states[cpd.variable]), max(1, len(cols)) ) ) cpds[cpd.variable][cpd.variable] = sorted( self._node_states[cpd.variable].keys() ) cpds[cpd.variable].set_index([cpd.variable], inplace=True) cpds[cpd.variable].columns = cols return cpds
[ "def", "cpds", "(", "self", ")", "->", "Dict", "[", "str", ",", "pd", ".", "DataFrame", "]", ":", "cpds", "=", "{", "}", "for", "cpd", "in", "self", ".", "_model", ".", "cpds", ":", "names", "=", "cpd", ".", "variables", "[", "1", ":", "]", "cols", "=", "[", "\"\"", "]", "if", "names", ":", "cols", "=", "pd", ".", "MultiIndex", ".", "from_product", "(", "[", "sorted", "(", "self", ".", "_node_states", "[", "var", "]", ".", "keys", "(", ")", ")", "for", "var", "in", "names", "]", ",", "names", "=", "names", ",", ")", "cpds", "[", "cpd", ".", "variable", "]", "=", "pd", ".", "DataFrame", "(", "cpd", ".", "values", ".", "reshape", "(", "len", "(", "self", ".", "_node_states", "[", "cpd", ".", "variable", "]", ")", ",", "max", "(", "1", ",", "len", "(", "cols", ")", ")", ")", ")", "cpds", "[", "cpd", ".", "variable", "]", "[", "cpd", ".", "variable", "]", "=", "sorted", "(", "self", ".", "_node_states", "[", "cpd", ".", "variable", "]", ".", "keys", "(", ")", ")", "cpds", "[", "cpd", ".", "variable", "]", ".", "set_index", "(", "[", "cpd", ".", "variable", "]", ",", "inplace", "=", "True", ")", "cpds", "[", "cpd", ".", "variable", "]", ".", "columns", "=", "cols", "return", "cpds" ]
https://github.com/quantumblacklabs/causalnex/blob/127d9324a3d68c1795299c7522f22cdea880f344/causalnex/network/network.py#L225-L272
HSLCY/ABSA-BERT-pair
7d238eb8c772946b9e572373c144b11151e4187f
processor.py
python
Semeval_NLI_B_Processor.get_train_examples
(self, data_dir)
return self._create_examples(train_data, "train")
See base class.
See base class.
[ "See", "base", "class", "." ]
def get_train_examples(self, data_dir): """See base class.""" train_data = pd.read_csv(os.path.join(data_dir, "train_NLI_B.csv"),header=None,sep="\t").values return self._create_examples(train_data, "train")
[ "def", "get_train_examples", "(", "self", ",", "data_dir", ")", ":", "train_data", "=", "pd", ".", "read_csv", "(", "os", ".", "path", ".", "join", "(", "data_dir", ",", "\"train_NLI_B.csv\"", ")", ",", "header", "=", "None", ",", "sep", "=", "\"\\t\"", ")", ".", "values", "return", "self", ".", "_create_examples", "(", "train_data", ",", "\"train\"", ")" ]
https://github.com/HSLCY/ABSA-BERT-pair/blob/7d238eb8c772946b9e572373c144b11151e4187f/processor.py#L397-L400
ni/nidaqmx-python
62fc6b48cbbb330fe1bcc9aedadc86610a1269b6
nidaqmx/_task_modules/channels/ai_channel.py
python
AIChannel.ai_accel_4_wire_dc_voltage_sensitivity_units
(self)
[]
def ai_accel_4_wire_dc_voltage_sensitivity_units(self): cfunc = (lib_importer.windll. DAQmxResetAIAccel4WireDCVoltageSensitivityUnits) if cfunc.argtypes is None: with cfunc.arglock: if cfunc.argtypes is None: cfunc.argtypes = [ lib_importer.task_handle, ctypes_byte_str] error_code = cfunc( self._handle, self._name) check_for_error(error_code)
[ "def", "ai_accel_4_wire_dc_voltage_sensitivity_units", "(", "self", ")", ":", "cfunc", "=", "(", "lib_importer", ".", "windll", ".", "DAQmxResetAIAccel4WireDCVoltageSensitivityUnits", ")", "if", "cfunc", ".", "argtypes", "is", "None", ":", "with", "cfunc", ".", "arglock", ":", "if", "cfunc", ".", "argtypes", "is", "None", ":", "cfunc", ".", "argtypes", "=", "[", "lib_importer", ".", "task_handle", ",", "ctypes_byte_str", "]", "error_code", "=", "cfunc", "(", "self", ".", "_handle", ",", "self", ".", "_name", ")", "check_for_error", "(", "error_code", ")" ]
https://github.com/ni/nidaqmx-python/blob/62fc6b48cbbb330fe1bcc9aedadc86610a1269b6/nidaqmx/_task_modules/channels/ai_channel.py#L289-L300
naftaliharris/tauthon
5587ceec329b75f7caf6d65a036db61ac1bae214
Lib/site.py
python
aliasmbcs
()
On Windows, some default encodings are not provided by Python, while they are always available as "mbcs" in each locale. Make them usable by aliasing to "mbcs" in such a case.
On Windows, some default encodings are not provided by Python, while they are always available as "mbcs" in each locale. Make them usable by aliasing to "mbcs" in such a case.
[ "On", "Windows", "some", "default", "encodings", "are", "not", "provided", "by", "Python", "while", "they", "are", "always", "available", "as", "mbcs", "in", "each", "locale", ".", "Make", "them", "usable", "by", "aliasing", "to", "mbcs", "in", "such", "a", "case", "." ]
def aliasmbcs(): """On Windows, some default encodings are not provided by Python, while they are always available as "mbcs" in each locale. Make them usable by aliasing to "mbcs" in such a case.""" if sys.platform == 'win32': import locale, codecs enc = locale.getdefaultlocale()[1] if enc.startswith('cp'): # "cp***" ? try: codecs.lookup(enc) except LookupError: import encodings encodings._cache[enc] = encodings._unknown encodings.aliases.aliases[enc] = 'mbcs'
[ "def", "aliasmbcs", "(", ")", ":", "if", "sys", ".", "platform", "==", "'win32'", ":", "import", "locale", ",", "codecs", "enc", "=", "locale", ".", "getdefaultlocale", "(", ")", "[", "1", "]", "if", "enc", ".", "startswith", "(", "'cp'", ")", ":", "# \"cp***\" ?", "try", ":", "codecs", ".", "lookup", "(", "enc", ")", "except", "LookupError", ":", "import", "encodings", "encodings", ".", "_cache", "[", "enc", "]", "=", "encodings", ".", "_unknown", "encodings", ".", "aliases", ".", "aliases", "[", "enc", "]", "=", "'mbcs'" ]
https://github.com/naftaliharris/tauthon/blob/5587ceec329b75f7caf6d65a036db61ac1bae214/Lib/site.py#L517-L530
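The core of the function is the codecs lookup probe; a standalone version of that check, runnable on any platform:
import codecs
for enc in ('cp1252', 'cp99999'):
    try:
        codecs.lookup(enc)
        print(enc, 'is a known codec')
    except LookupError:
        print(enc, 'is unknown; on Windows it would be aliased to mbcs')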
martin68/apt-smart
7085f398e08a703759d7e81a898f1e237796f232
apt_smart/backends/ubuntu.py
python
discover_mirrors_old
()
return mirrors
Discover available Ubuntu mirrors. (fallback) :returns: A set of :class:`.CandidateMirror` objects that have their :attr:`~.CandidateMirror.mirror_url` property set and may have the :attr:`~.CandidateMirror.last_updated` property set. :raises: If no mirrors are discovered an exception is raised. This queries :data:`MIRRORS_URL` to discover available Ubuntu mirrors. Here's an example run: >>> from apt_smart.backends.ubuntu import discover_mirrors_old >>> from pprint import pprint >>> pprint(discover_mirrors_old()) set([CandidateMirror(mirror_url='http://archive.ubuntu.com/ubuntu/'), CandidateMirror(mirror_url='http://ftp.nluug.nl/os/Linux/distr/ubuntu/'), CandidateMirror(mirror_url='http://ftp.snt.utwente.nl/pub/os/linux/ubuntu/'), CandidateMirror(mirror_url='http://ftp.tudelft.nl/archive.ubuntu.com/'), CandidateMirror(mirror_url='http://mirror.1000mbps.com/ubuntu/'), CandidateMirror(mirror_url='http://mirror.amsiohosting.net/archive.ubuntu.com/'), CandidateMirror(mirror_url='http://mirror.i3d.net/pub/ubuntu/'), CandidateMirror(mirror_url='http://mirror.nforce.com/pub/linux/ubuntu/'), CandidateMirror(mirror_url='http://mirror.nl.leaseweb.net/ubuntu/'), CandidateMirror(mirror_url='http://mirror.transip.net/ubuntu/ubuntu/'), ...]) Accessing launchpad.net (see below) can be very slow in some parts of the world (even with 100 Mbps fibre), so we no longer rely on MIRRORS_URL by default. time curl -o/dev/null 'https://launchpad.net/ubuntu/+archivemirrors' % Total % Received % Xferd Average Speed Time Time Time Current Dload Upload Total Spent Left Speed 100 263k 100 263k 0 0 5316 0 0:00:50 0:00:50 --:--:-- 6398 real 0m50.869s user 0m0.045s sys 0m0.039s But it can be a fallback when MIRROR_SELECTION_URL is down.
Discover available Ubuntu mirrors. (fallback)
[ "Discover", "available", "Ubuntu", "mirrors", ".", "(", "fallback", ")" ]
def discover_mirrors_old(): """ Discover available Ubuntu mirrors. (fallback) :returns: A set of :class:`.CandidateMirror` objects that have their :attr:`~.CandidateMirror.mirror_url` property set and may have the :attr:`~.CandidateMirror.last_updated` property set. :raises: If no mirrors are discovered an exception is raised. This queries :data:`MIRRORS_URL`to discover available Ubuntu mirrors. Here's an example run: >>> from apt_smart.backends.ubuntu import discover_mirrors_old >>> from pprint import pprint >>> pprint(discover_mirrors_old()) set([CandidateMirror(mirror_url='http://archive.ubuntu.com/ubuntu/'), CandidateMirror(mirror_url='http://ftp.nluug.nl/os/Linux/distr/ubuntu/'), CandidateMirror(mirror_url='http://ftp.snt.utwente.nl/pub/os/linux/ubuntu/'), CandidateMirror(mirror_url='http://ftp.tudelft.nl/archive.ubuntu.com/'), CandidateMirror(mirror_url='http://mirror.1000mbps.com/ubuntu/'), CandidateMirror(mirror_url='http://mirror.amsiohosting.net/archive.ubuntu.com/'), CandidateMirror(mirror_url='http://mirror.i3d.net/pub/ubuntu/'), CandidateMirror(mirror_url='http://mirror.nforce.com/pub/linux/ubuntu/'), CandidateMirror(mirror_url='http://mirror.nl.leaseweb.net/ubuntu/'), CandidateMirror(mirror_url='http://mirror.transip.net/ubuntu/ubuntu/'), ...]) It may be super-slow somewhere ( with 100Mbps fibre though ) in the world to access launchpad.net (see below), so we have to no longer rely on MIRRORS_URL . time curl -o/dev/null 'https://launchpad.net/ubuntu/+archivemirrors' % Total % Received % Xferd Average Speed Time Time Time Current Dload Upload Total Spent Left Speed 100 263k 100 263k 0 0 5316 0 0:00:50 0:00:50 --:--:-- 6398 real 0m50.869s user 0m0.045s sys 0m0.039s But it can be a fallback when MIRROR_SELECTION_URL is down. """ mirrors = set() logger.info("Discovering Ubuntu mirrors at %s ..", MIRRORS_URL) # Find which country the user is in to get mirrors in that country try: url = 'https://ipapi.co/json' response = fetch_url(url, timeout=2) # On py3 response is bytes and json.loads throws TypeError in py3.4 and 3.5, # so decode it to str if isinstance(response, six.binary_type): response = response.decode('utf-8') data = json.loads(response) country = data['country_name'] logger.info("Found your location: %s by %s", country, url) except Exception: url = 'http://ip-api.com/json' response = fetch_url(url, timeout=5) if isinstance(response, six.binary_type): response = response.decode('utf-8') data = json.loads(response) country = data['country'] logger.info("Found your location: %s by %s", country, url) data = fetch_url(MIRRORS_URL, timeout=70, retry=True) soup = BeautifulSoup(data, 'html.parser') tables = soup.findAll('table') flag = False # flag is True when find the row's text is that country if not tables: raise Exception("Failed to locate <table> element in Ubuntu mirror page! (%s)" % MIRRORS_URL) else: for row in tables[0].findAll("tr"): if flag: if not row.a: # End of mirrors located in that country break else: for a in row.findAll('a', href=True): # Check if the link looks like a mirror URL. if a['href'].startswith(('http://', 'https://')): mirrors.add(CandidateMirror(mirror_url=a['href'])) if row.th and row.th.get_text() == country: flag = True if not mirrors: raise Exception("Failed to discover any Ubuntu mirrors! (using %s)" % MIRRORS_URL) return mirrors
[ "def", "discover_mirrors_old", "(", ")", ":", "mirrors", "=", "set", "(", ")", "logger", ".", "info", "(", "\"Discovering Ubuntu mirrors at %s ..\"", ",", "MIRRORS_URL", ")", "# Find which country the user is in to get mirrors in that country", "try", ":", "url", "=", "'https://ipapi.co/json'", "response", "=", "fetch_url", "(", "url", ",", "timeout", "=", "2", ")", "# On py3 response is bytes and json.loads throws TypeError in py3.4 and 3.5,", "# so decode it to str", "if", "isinstance", "(", "response", ",", "six", ".", "binary_type", ")", ":", "response", "=", "response", ".", "decode", "(", "'utf-8'", ")", "data", "=", "json", ".", "loads", "(", "response", ")", "country", "=", "data", "[", "'country_name'", "]", "logger", ".", "info", "(", "\"Found your location: %s by %s\"", ",", "country", ",", "url", ")", "except", "Exception", ":", "url", "=", "'http://ip-api.com/json'", "response", "=", "fetch_url", "(", "url", ",", "timeout", "=", "5", ")", "if", "isinstance", "(", "response", ",", "six", ".", "binary_type", ")", ":", "response", "=", "response", ".", "decode", "(", "'utf-8'", ")", "data", "=", "json", ".", "loads", "(", "response", ")", "country", "=", "data", "[", "'country'", "]", "logger", ".", "info", "(", "\"Found your location: %s by %s\"", ",", "country", ",", "url", ")", "data", "=", "fetch_url", "(", "MIRRORS_URL", ",", "timeout", "=", "70", ",", "retry", "=", "True", ")", "soup", "=", "BeautifulSoup", "(", "data", ",", "'html.parser'", ")", "tables", "=", "soup", ".", "findAll", "(", "'table'", ")", "flag", "=", "False", "# flag is True when find the row's text is that country", "if", "not", "tables", ":", "raise", "Exception", "(", "\"Failed to locate <table> element in Ubuntu mirror page! (%s)\"", "%", "MIRRORS_URL", ")", "else", ":", "for", "row", "in", "tables", "[", "0", "]", ".", "findAll", "(", "\"tr\"", ")", ":", "if", "flag", ":", "if", "not", "row", ".", "a", ":", "# End of mirrors located in that country", "break", "else", ":", "for", "a", "in", "row", ".", "findAll", "(", "'a'", ",", "href", "=", "True", ")", ":", "# Check if the link looks like a mirror URL.", "if", "a", "[", "'href'", "]", ".", "startswith", "(", "(", "'http://'", ",", "'https://'", ")", ")", ":", "mirrors", ".", "add", "(", "CandidateMirror", "(", "mirror_url", "=", "a", "[", "'href'", "]", ")", ")", "if", "row", ".", "th", "and", "row", ".", "th", ".", "get_text", "(", ")", "==", "country", ":", "flag", "=", "True", "if", "not", "mirrors", ":", "raise", "Exception", "(", "\"Failed to discover any Ubuntu mirrors! (using %s)\"", "%", "MIRRORS_URL", ")", "return", "mirrors" ]
https://github.com/martin68/apt-smart/blob/7085f398e08a703759d7e81a898f1e237796f232/apt_smart/backends/ubuntu.py#L63-L147
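A standalone sketch of the table-scraping pattern used above, run against inline HTML instead of MIRRORS_URL (assumes beautifulsoup4 is installed):
from bs4 import BeautifulSoup
html = ("<table><tr><th>Netherlands</th></tr>"
        "<tr><td><a href='http://mirror.example/ubuntu/'>mirror</a></td></tr></table>")
soup = BeautifulSoup(html, 'html.parser')
print([a['href'] for a in soup.find_all('a', href=True)
       if a['href'].startswith(('http://', 'https://'))])  # ['http://mirror.example/ubuntu/']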
Emptyset110/dHydra
8ec44994ff4dda8bf1ec40e38dd068b757945933
dHydra/core/util.py
python
symbol_list_to_code
(symbolList)
return codeList
[]
def symbol_list_to_code(symbolList): codeList = [] for symbol in symbolList: codeList.append(symbol[2:8]) return codeList
[ "def", "symbol_list_to_code", "(", "symbolList", ")", ":", "codeList", "=", "[", "]", "for", "symbol", "in", "symbolList", ":", "codeList", ".", "append", "(", "symbol", "[", "2", ":", "8", "]", ")", "return", "codeList" ]
https://github.com/Emptyset110/dHydra/blob/8ec44994ff4dda8bf1ec40e38dd068b757945933/dHydra/core/util.py#L164-L168
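A quick check of the slicing, assuming the function above is in scope ('sh'/'sz' are the usual Shanghai/Shenzhen exchange prefixes):
print(symbol_list_to_code(['sh600000', 'sz000001']))  # ['600000', '000001']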
demisto/content
5c664a65b992ac8ca90ac3f11b1b2cdf11ee9b07
Packs/Anomali_ThreatStream/Integrations/Anomali_ThreatStream_v2/Anomali_ThreatStream_v2.py
python
get_passive_dns
(client: Client, value, type="ip", limit=50)
Receives value and type of indicator and returns enrichment data for domain or ip.
Receives value and type of indicator and returns enrichment data for domain or ip.
[ "Receives", "value", "and", "type", "of", "indicator", "and", "returns", "enrichment", "data", "for", "domain", "or", "ip", "." ]
def get_passive_dns(client: Client, value, type="ip", limit=50): """ Receives value and type of indicator and returns enrichment data for domain or ip. """ dns_results = client.http_request("GET", F"v1/pdns/{type}/{value}/", params=CREDENTIALS).get('results', None) if not dns_results: demisto.results(F"No Passive DNS enrichment data found for {value}") sys.exit() dns_results = dns_results[:int(limit)] output = camelize(dns_results, delim='_') ec = ({ 'ThreatStream.PassiveDNS': output }) human_readable = tableToMarkdown(F"Passive DNS enrichment data for: {value}", output) return_outputs(human_readable, ec, dns_results)
[ "def", "get_passive_dns", "(", "client", ":", "Client", ",", "value", ",", "type", "=", "\"ip\"", ",", "limit", "=", "50", ")", ":", "dns_results", "=", "client", ".", "http_request", "(", "\"GET\"", ",", "F\"v1/pdns/{type}/{value}/\"", ",", "params", "=", "CREDENTIALS", ")", ".", "get", "(", "'results'", ",", "None", ")", "if", "not", "dns_results", ":", "demisto", ".", "results", "(", "F\"No Passive DNS enrichment data found for {value}\"", ")", "sys", ".", "exit", "(", ")", "dns_results", "=", "dns_results", "[", ":", "int", "(", "limit", ")", "]", "output", "=", "camelize", "(", "dns_results", ",", "delim", "=", "'_'", ")", "ec", "=", "(", "{", "'ThreatStream.PassiveDNS'", ":", "output", "}", ")", "human_readable", "=", "tableToMarkdown", "(", "F\"Passive DNS enrichment data for: {value}\"", ",", "output", ")", "return_outputs", "(", "human_readable", ",", "ec", ",", "dns_results", ")" ]
https://github.com/demisto/content/blob/5c664a65b992ac8ca90ac3f11b1b2cdf11ee9b07/Packs/Anomali_ThreatStream/Integrations/Anomali_ThreatStream_v2/Anomali_ThreatStream_v2.py#L639-L658
apple/ccs-calendarserver
13c706b985fb728b9aab42dc0fef85aae21921c3
twistedcaldav/resource.py
python
CalendarPrincipalResource.liveProperties
(self)
return super(CalendarPrincipalResource, self).liveProperties() + baseProperties
[]
def liveProperties(self): baseProperties = () if self.calendarsEnabled(): baseProperties += ( (caldav_namespace, "calendar-home-set"), (caldav_namespace, "calendar-user-address-set"), (caldav_namespace, "schedule-inbox-URL"), (caldav_namespace, "schedule-outbox-URL"), (caldav_namespace, "calendar-user-type"), (calendarserver_namespace, "calendar-proxy-read-for"), (calendarserver_namespace, "calendar-proxy-write-for"), (calendarserver_namespace, "auto-schedule-mode"), ) if self.addressBooksEnabled(): baseProperties += (carddavxml.AddressBookHomeSet.qname(),) if self.directoryAddressBookEnabled(): baseProperties += (carddavxml.DirectoryGateway.qname(),) if config.EnableDropBox or config.EnableManagedAttachments: baseProperties += (customxml.DropBoxHomeURL.qname(),) if config.Sharing.Enabled: baseProperties += (customxml.NotificationURL.qname(),) return super(CalendarPrincipalResource, self).liveProperties() + baseProperties
[ "def", "liveProperties", "(", "self", ")", ":", "baseProperties", "=", "(", ")", "if", "self", ".", "calendarsEnabled", "(", ")", ":", "baseProperties", "+=", "(", "(", "caldav_namespace", ",", "\"calendar-home-set\"", ")", ",", "(", "caldav_namespace", ",", "\"calendar-user-address-set\"", ")", ",", "(", "caldav_namespace", ",", "\"schedule-inbox-URL\"", ")", ",", "(", "caldav_namespace", ",", "\"schedule-outbox-URL\"", ")", ",", "(", "caldav_namespace", ",", "\"calendar-user-type\"", ")", ",", "(", "calendarserver_namespace", ",", "\"calendar-proxy-read-for\"", ")", ",", "(", "calendarserver_namespace", ",", "\"calendar-proxy-write-for\"", ")", ",", "(", "calendarserver_namespace", ",", "\"auto-schedule-mode\"", ")", ",", ")", "if", "self", ".", "addressBooksEnabled", "(", ")", ":", "baseProperties", "+=", "(", "carddavxml", ".", "AddressBookHomeSet", ".", "qname", "(", ")", ",", ")", "if", "self", ".", "directoryAddressBookEnabled", "(", ")", ":", "baseProperties", "+=", "(", "carddavxml", ".", "DirectoryGateway", ".", "qname", "(", ")", ",", ")", "if", "config", ".", "EnableDropBox", "or", "config", ".", "EnableManagedAttachments", ":", "baseProperties", "+=", "(", "customxml", ".", "DropBoxHomeURL", ".", "qname", "(", ")", ",", ")", "if", "config", ".", "Sharing", ".", "Enabled", ":", "baseProperties", "+=", "(", "customxml", ".", "NotificationURL", ".", "qname", "(", ")", ",", ")", "return", "super", "(", "CalendarPrincipalResource", ",", "self", ")", ".", "liveProperties", "(", ")", "+", "baseProperties" ]
https://github.com/apple/ccs-calendarserver/blob/13c706b985fb728b9aab42dc0fef85aae21921c3/twistedcaldav/resource.py#L1681-L1708
Spacelog/Spacelog
92df308be5923765607a89b022acb57c041c86b3
ext/redis-py/redis/client.py
python
Redis.ltrim
(self, name, start, end)
return self.execute_command('LTRIM', name, start, end)
Trim the list ``name``, removing all values not within the slice between ``start`` and ``end`` ``start`` and ``end`` can be negative numbers just like Python slicing notation
Trim the list ``name``, removing all values not within the slice between ``start`` and ``end``
[ "Trim", "the", "list", "name", "removing", "all", "values", "not", "within", "the", "slice", "between", "start", "and", "end" ]
def ltrim(self, name, start, end): """ Trim the list ``name``, removing all values not within the slice between ``start`` and ``end`` ``start`` and ``end`` can be negative numbers just like Python slicing notation """ return self.execute_command('LTRIM', name, start, end)
[ "def", "ltrim", "(", "self", ",", "name", ",", "start", ",", "end", ")", ":", "return", "self", ".", "execute_command", "(", "'LTRIM'", ",", "name", ",", "start", ",", "end", ")" ]
https://github.com/Spacelog/Spacelog/blob/92df308be5923765607a89b022acb57c041c86b3/ext/redis-py/redis/client.py#L784-L792
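Typical usage (requires redis-py and a reachable Redis server on localhost):
import redis
r = redis.Redis()
r.delete('mylist')                # start from a clean key for a deterministic result
r.rpush('mylist', *range(10))
r.ltrim('mylist', 0, 4)           # keep only the first five elements
print(r.lrange('mylist', 0, -1))  # [b'0', b'1', b'2', b'3', b'4']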
jython/frozen-mirror
b8d7aa4cee50c0c0fe2f4b235dd62922dd0f3f99
lib-python/2.7/lib-tk/Tkinter.py
python
Misc.grab_set_global
(self)
Set global grab for this widget. A global grab directs all events to this and descendant widgets on the display. Use with caution - other applications do not get events anymore.
Set global grab for this widget.
[ "Set", "global", "grab", "for", "this", "widget", "." ]
def grab_set_global(self): """Set global grab for this widget. A global grab directs all events to this and descendant widgets on the display. Use with caution - other applications do not get events anymore.""" self.tk.call('grab', 'set', '-global', self._w)
[ "def", "grab_set_global", "(", "self", ")", ":", "self", ".", "tk", ".", "call", "(", "'grab'", ",", "'set'", ",", "'-global'", ",", "self", ".", "_w", ")" ]
https://github.com/jython/frozen-mirror/blob/b8d7aa4cee50c0c0fe2f4b235dd62922dd0f3f99/lib-python/2.7/lib-tk/Tkinter.py#L620-L626
aws/aws-parallelcluster
f1fe5679a01c524e7ea904c329bd6d17318c6cd9
cli/src/pcluster/schemas/imagebuilder_schema.py
python
BuildSchema.validate_security_group_ids
(self, value)
Validate security group ids.
Validate security group ids.
[ "Validate", "security", "group", "ids", "." ]
def validate_security_group_ids(self, value): """Validate security group ids.""" if value and not all( re.match(ALLOWED_VALUES["security_group_id"], security_group_id) for security_group_id in value ): raise ValidationError(message="The SecurityGroupIds contains invalid security group id.")
[ "def", "validate_security_group_ids", "(", "self", ",", "value", ")", ":", "if", "value", "and", "not", "all", "(", "re", ".", "match", "(", "ALLOWED_VALUES", "[", "\"security_group_id\"", "]", ",", "security_group_id", ")", "for", "security_group_id", "in", "value", ")", ":", "raise", "ValidationError", "(", "message", "=", "\"The SecurityGroupIds contains invalid security group id.\"", ")" ]
https://github.com/aws/aws-parallelcluster/blob/f1fe5679a01c524e7ea904c329bd6d17318c6cd9/cli/src/pcluster/schemas/imagebuilder_schema.py#L185-L190
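The validator is a single regex gate over every id; a standalone sketch with an assumed pattern (the real one lives in ALLOWED_VALUES["security_group_id"]):
import re
pattern = r'^sg-[0-9a-f]{8,17}$'  # assumed shape of an EC2 security group id
ids = ['sg-0123abcd', 'sg-not-valid']
print(all(re.match(pattern, sg) for sg in ids))  # False: the second id fails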
golbin/TensorFlow-Tutorials
909a8b77d5bb1db4732febee9ed68ab218478b97
11 - Inception/retrain.py
python
read_list_of_floats_from_file
(file_path)
Reads list of floats from a given file. Args: file_path: Path to a file where list of floats was stored. Returns: Array of bottleneck values (list of floats).
Reads list of floats from a given file.
[ "Reads", "list", "of", "floats", "from", "a", "given", "file", "." ]
def read_list_of_floats_from_file(file_path): """Reads list of floats from a given file. Args: file_path: Path to a file where list of floats was stored. Returns: Array of bottleneck values (list of floats). """ with open(file_path, 'rb') as f: s = struct.unpack('d' * BOTTLENECK_TENSOR_SIZE, f.read()) return list(s)
[ "def", "read_list_of_floats_from_file", "(", "file_path", ")", ":", "with", "open", "(", "file_path", ",", "'rb'", ")", "as", "f", ":", "s", "=", "struct", ".", "unpack", "(", "'d'", "*", "BOTTLENECK_TENSOR_SIZE", ",", "f", ".", "read", "(", ")", ")", "return", "list", "(", "s", ")" ]
https://github.com/golbin/TensorFlow-Tutorials/blob/909a8b77d5bb1db4732febee9ed68ab218478b97/11 - Inception/retrain.py#L332-L344
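A self-contained round trip of the same struct format, with BOTTLENECK_TENSOR_SIZE replaced by the list length:
import struct
values = [0.5, 1.25, -3.0]
packed = struct.pack('d' * len(values), *values)  # 8 bytes per double
print(struct.unpack('d' * len(values), packed))   # (0.5, 1.25, -3.0)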
triaquae/triaquae
bbabf736b3ba56a0c6498e7f04e16c13b8b8f2b9
TriAquae/models/django/contrib/gis/db/models/query.py
python
GeoQuerySet.kml
(self, **kwargs)
return self._spatial_attribute('kml', s, **kwargs)
Returns KML representation of the geometry field in a `kml` attribute on each element of this GeoQuerySet.
Returns KML representation of the geometry field in a `kml` attribute on each element of this GeoQuerySet.
[ "Returns", "KML", "representation", "of", "the", "geometry", "field", "in", "a", "kml", "attribute", "on", "each", "element", "of", "this", "GeoQuerySet", "." ]
def kml(self, **kwargs): """ Returns KML representation of the geometry field in a `kml` attribute on each element of this GeoQuerySet. """ s = {'desc' : 'KML', 'procedure_fmt' : '%(geo_col)s,%(precision)s', 'procedure_args' : {'precision' : kwargs.pop('precision', 8)}, } return self._spatial_attribute('kml', s, **kwargs)
[ "def", "kml", "(", "self", ",", "*", "*", "kwargs", ")", ":", "s", "=", "{", "'desc'", ":", "'KML'", ",", "'procedure_fmt'", ":", "'%(geo_col)s,%(precision)s'", ",", "'procedure_args'", ":", "{", "'precision'", ":", "kwargs", ".", "pop", "(", "'precision'", ",", "8", ")", "}", ",", "}", "return", "self", ".", "_spatial_attribute", "(", "'kml'", ",", "s", ",", "*", "*", "kwargs", ")" ]
https://github.com/triaquae/triaquae/blob/bbabf736b3ba56a0c6498e7f04e16c13b8b8f2b9/TriAquae/models/django/contrib/gis/db/models/query.py#L213-L222
AstroPrint/AstroBox
e7e3b8a7d33ea85fcb6b2696869c0d719ceb8b75
src/octoprint/server/util.py
python
PrinterStateConnection._onEvent
(self, event, payload)
[]
def _onEvent(self, event, payload): self.sendEvent(event, payload)
[ "def", "_onEvent", "(", "self", ",", "event", ",", "payload", ")", ":", "self", ".", "sendEvent", "(", "event", ",", "payload", ")" ]
https://github.com/AstroPrint/AstroBox/blob/e7e3b8a7d33ea85fcb6b2696869c0d719ceb8b75/src/octoprint/server/util.py#L244-L245
wucng/TensorExpand
4ea58f64f5c5082b278229b799c9f679536510b7
TensorExpand/图片项目/5、迁移学习/TF-slim/slim/deployment/model_deploy.py
python
DeploymentConfig.clone_device
(self, clone_index)
return device
Device used to create the clone and all the ops inside the clone. Args: clone_index: Int, representing the clone_index. Returns: A value suitable for `tf.device()`. Raises: ValueError: if `clone_index` is greater or equal to the number of clones".
Device used to create the clone and all the ops inside the clone. Args: clone_index: Int, representing the clone_index. Returns: A value suitable for `tf.device()`. Raises: ValueError: if `clone_index` is greater or equal to the number of clones".
[ "Device", "used", "to", "create", "the", "clone", "and", "all", "the", "ops", "inside", "the", "clone", ".", "Args", ":", "clone_index", ":", "Int", "representing", "the", "clone_index", ".", "Returns", ":", "A", "value", "suitable", "for", "tf", ".", "device", "()", ".", "Raises", ":", "ValueError", ":", "if", "clone_index", "is", "greater", "or", "equal", "to", "the", "number", "of", "clones", "." ]
def clone_device(self, clone_index): """Device used to create the clone and all the ops inside the clone. Args: clone_index: Int, representing the clone_index. Returns: A value suitable for `tf.device()`. Raises: ValueError: if `clone_index` is greater or equal to the number of clones". """ if clone_index >= self._num_clones: raise ValueError('clone_index must be less than num_clones') device = '' if self._num_ps_tasks > 0: device += self._worker_device if self._clone_on_cpu: device += '/device:CPU:0' else: device += '/device:GPU:%d' % clone_index return device
[ "def", "clone_device", "(", "self", ",", "clone_index", ")", ":", "if", "clone_index", ">=", "self", ".", "_num_clones", ":", "raise", "ValueError", "(", "'clone_index must be less than num_clones'", ")", "device", "=", "''", "if", "self", ".", "_num_ps_tasks", ">", "0", ":", "device", "+=", "self", ".", "_worker_device", "if", "self", ".", "_clone_on_cpu", ":", "device", "+=", "'/device:CPU:0'", "else", ":", "device", "+=", "'/device:GPU:%d'", "%", "clone_index", "return", "device" ]
https://github.com/wucng/TensorExpand/blob/4ea58f64f5c5082b278229b799c9f679536510b7/TensorExpand/图片项目/5、迁移学习/TF-slim/slim/deployment/model_deploy.py#L522-L540
beetbox/confuse
c328e810f7a31412e0650235f71728e983edd18e
confuse/core.py
python
Configuration.user_config_path
(self)
return os.path.join(self.config_dir(), CONFIG_FILENAME)
Points to the location of the user configuration. The file may not exist.
Points to the location of the user configuration.
[ "Points", "to", "the", "location", "of", "the", "user", "configuration", "." ]
def user_config_path(self): """Points to the location of the user configuration. The file may not exist. """ return os.path.join(self.config_dir(), CONFIG_FILENAME)
[ "def", "user_config_path", "(", "self", ")", ":", "return", "os", ".", "path", ".", "join", "(", "self", ".", "config_dir", "(", ")", ",", "CONFIG_FILENAME", ")" ]
https://github.com/beetbox/confuse/blob/c328e810f7a31412e0650235f71728e983edd18e/confuse/core.py#L517-L522
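Typical usage (assumes the confuse package; 'MyApp' is a hypothetical application name):
import confuse
config = confuse.Configuration('MyApp')
print(config.user_config_path())  # e.g. ~/.config/MyApp/config.yaml on Linux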
Pyomo/pyomo
dbd4faee151084f343b893cc2b0c04cf2b76fd92
pyomo/network/decomposition.py
python
SequentialDecomposition.pass_tear_direct
(self, G, tears)
Pass values across all tears in the given tear set
Pass values across all tears in the given tear set
[ "Pass", "values", "across", "all", "tears", "in", "the", "given", "tear", "set" ]
def pass_tear_direct(self, G, tears): """Pass values across all tears in the given tear set""" fixed_outputs = ComponentSet() edge_list = self.idx_to_edge(G) for tear in tears: # fix everything then call pass values arc = G.edges[edge_list[tear]]["arc"] for var in arc.src.iter_vars(expr_vars=True, fixed=False): fixed_outputs.add(var) var.fix() self.pass_values(arc, fixed_inputs=self.fixed_inputs()) for var in fixed_outputs: var.free() fixed_outputs.clear()
[ "def", "pass_tear_direct", "(", "self", ",", "G", ",", "tears", ")", ":", "fixed_outputs", "=", "ComponentSet", "(", ")", "edge_list", "=", "self", ".", "idx_to_edge", "(", "G", ")", "for", "tear", "in", "tears", ":", "# fix everything then call pass values", "arc", "=", "G", ".", "edges", "[", "edge_list", "[", "tear", "]", "]", "[", "\"arc\"", "]", "for", "var", "in", "arc", ".", "src", ".", "iter_vars", "(", "expr_vars", "=", "True", ",", "fixed", "=", "False", ")", ":", "fixed_outputs", ".", "add", "(", "var", ")", "var", ".", "fix", "(", ")", "self", ".", "pass_values", "(", "arc", ",", "fixed_inputs", "=", "self", ".", "fixed_inputs", "(", ")", ")", "for", "var", "in", "fixed_outputs", ":", "var", ".", "free", "(", ")", "fixed_outputs", ".", "clear", "(", ")" ]
https://github.com/Pyomo/pyomo/blob/dbd4faee151084f343b893cc2b0c04cf2b76fd92/pyomo/network/decomposition.py#L862-L876
idanr1986/cuckoo-droid
1350274639473d3d2b0ac740cae133ca53ab7444
analyzer/android_on_linux/lib/api/androguard/apk.py
python
APK.get_files_information
(self)
Return the files inside the APK with their associated types and crc32 :rtype: string, string, int
Return the files inside the APK with their associated types and crc32
[ "Return", "the", "files", "inside", "the", "APK", "with", "their", "associated", "types", "and", "crc32" ]
def get_files_information(self): """ Return the files inside the APK with their associated types and crc32 :rtype: string, string, int """ if self.files == {}: self.get_files_types() for i in self.get_files(): try: yield i, self.files[i], self.files_crc32[i] except KeyError: yield i, "", ""
[ "def", "get_files_information", "(", "self", ")", ":", "if", "self", ".", "files", "==", "{", "}", ":", "self", ".", "get_files_types", "(", ")", "for", "i", "in", "self", ".", "get_files", "(", ")", ":", "try", ":", "yield", "i", ",", "self", ".", "files", "[", "i", "]", ",", "self", ".", "files_crc32", "[", "i", "]", "except", "KeyError", ":", "yield", "i", ",", "\"\"", ",", "\"\"" ]
https://github.com/idanr1986/cuckoo-droid/blob/1350274639473d3d2b0ac740cae133ca53ab7444/analyzer/android_on_linux/lib/api/androguard/apk.py#L352-L365
pythonzm/Ops
e6fdddad2cd6bc697805a2bdba521a26bacada50
assets/utils/ali_api.py
python
AliAPI.get_response
(self)
return str(response, encoding='utf-8')
Get the response value. :return:
Get the response value. :return:
[ "Get", "the", "response", "value", ".", ":", "return", ":" ]
def get_response(self): """ Get the response value. :return: """ request = self.set_request() response = self.client.do_action_with_exception(request) return str(response, encoding='utf-8')
[ "def", "get_response", "(", "self", ")", ":", "request", "=", "self", ".", "set_request", "(", ")", "response", "=", "self", ".", "client", ".", "do_action_with_exception", "(", "request", ")", "return", "str", "(", "response", ",", "encoding", "=", "'utf-8'", ")" ]
https://github.com/pythonzm/Ops/blob/e6fdddad2cd6bc697805a2bdba521a26bacada50/assets/utils/ali_api.py#L43-L50
pwnieexpress/pwn_plug_sources
1a23324f5dc2c3de20f9c810269b6a29b2758cad
src/voiper/sulley/impacket/dcerpc/dcerpc.py
python
MSRPCBindAck.get_results_num
(self)
return self.get_byte(self._get_results_offset()-2)
[]
def get_results_num(self): return self.get_byte(self._get_results_offset()-2)
[ "def", "get_results_num", "(", "self", ")", ":", "return", "self", ".", "get_byte", "(", "self", ".", "_get_results_offset", "(", ")", "-", "2", ")" ]
https://github.com/pwnieexpress/pwn_plug_sources/blob/1a23324f5dc2c3de20f9c810269b6a29b2758cad/src/voiper/sulley/impacket/dcerpc/dcerpc.py#L523-L524
angr/claripy
4c961b4dc664706be8142fe4868f27655bc8da77
claripy/vsa/valueset.py
python
RegionAnnotation.relocatable
(self)
return False
A Region annotation is not relocatable in simplifications. :return: False :rtype: bool
A Region annotation is not relocatable in simplifications.
[ "A", "Region", "annotation", "is", "not", "relocatable", "in", "simplifications", "." ]
def relocatable(self): """ A Region annotation is not relocatable in simplifications. :return: False :rtype: bool """ return False
[ "def", "relocatable", "(", "self", ")", ":", "return", "False" ]
https://github.com/angr/claripy/blob/4c961b4dc664706be8142fe4868f27655bc8da77/claripy/vsa/valueset.py#L72-L80
leo-editor/leo-editor
383d6776d135ef17d73d935a2f0ecb3ac0e99494
leo/core/leoAst.py
python
Tokenizer.add_token
(self, kind, five_tuple, line, s_row, value)
Add a token to the results list. Subclasses could override this method to filter out specific tokens.
Add a token to the results list.
[ "Add", "a", "token", "to", "the", "results", "list", "." ]
def add_token(self, kind, five_tuple, line, s_row, value): """ Add a token to the results list. Subclasses could override this method to filter out specific tokens. """ tok = Token(kind, value) tok.five_tuple = five_tuple tok.index = self.token_index # Bump the token index. self.token_index += 1 tok.line = line tok.line_number = s_row self.results.append(tok)
[ "def", "add_token", "(", "self", ",", "kind", ",", "five_tuple", ",", "line", ",", "s_row", ",", "value", ")", ":", "tok", "=", "Token", "(", "kind", ",", "value", ")", "tok", ".", "five_tuple", "=", "five_tuple", "tok", ".", "index", "=", "self", ".", "token_index", "# Bump the token index.", "self", ".", "token_index", "+=", "1", "tok", ".", "line", "=", "line", "tok", ".", "line_number", "=", "s_row", "self", ".", "results", ".", "append", "(", "tok", ")" ]
https://github.com/leo-editor/leo-editor/blob/383d6776d135ef17d73d935a2f0ecb3ac0e99494/leo/core/leoAst.py#L4051-L4064
scikit-learn/scikit-learn
1d1aadd0711b87d2a11c80aad15df6f8cf156712
sklearn/pipeline.py
python
make_pipeline
(*steps, memory=None, verbose=False)
return Pipeline(_name_estimators(steps), memory=memory, verbose=verbose)
Construct a :class:`Pipeline` from the given estimators. This is a shorthand for the :class:`Pipeline` constructor; it does not require, and does not permit, naming the estimators. Instead, their names will be set to the lowercase of their types automatically. Parameters ---------- *steps : list of Estimator objects List of the scikit-learn estimators that are chained together. memory : str or object with the joblib.Memory interface, default=None Used to cache the fitted transformers of the pipeline. By default, no caching is performed. If a string is given, it is the path to the caching directory. Enabling caching triggers a clone of the transformers before fitting. Therefore, the transformer instance given to the pipeline cannot be inspected directly. Use the attribute ``named_steps`` or ``steps`` to inspect estimators within the pipeline. Caching the transformers is advantageous when fitting is time consuming. verbose : bool, default=False If True, the time elapsed while fitting each step will be printed as it is completed. Returns ------- p : Pipeline Returns a scikit-learn :class:`Pipeline` object. See Also -------- Pipeline : Class for creating a pipeline of transforms with a final estimator. Examples -------- >>> from sklearn.naive_bayes import GaussianNB >>> from sklearn.preprocessing import StandardScaler >>> from sklearn.pipeline import make_pipeline >>> make_pipeline(StandardScaler(), GaussianNB(priors=None)) Pipeline(steps=[('standardscaler', StandardScaler()), ('gaussiannb', GaussianNB())])
Construct a :class:`Pipeline` from the given estimators.
[ "Construct", "a", ":", "class", ":", "Pipeline", "from", "the", "given", "estimators", "." ]
def make_pipeline(*steps, memory=None, verbose=False): """Construct a :class:`Pipeline` from the given estimators. This is a shorthand for the :class:`Pipeline` constructor; it does not require, and does not permit, naming the estimators. Instead, their names will be set to the lowercase of their types automatically. Parameters ---------- *steps : list of Estimator objects List of the scikit-learn estimators that are chained together. memory : str or object with the joblib.Memory interface, default=None Used to cache the fitted transformers of the pipeline. By default, no caching is performed. If a string is given, it is the path to the caching directory. Enabling caching triggers a clone of the transformers before fitting. Therefore, the transformer instance given to the pipeline cannot be inspected directly. Use the attribute ``named_steps`` or ``steps`` to inspect estimators within the pipeline. Caching the transformers is advantageous when fitting is time consuming. verbose : bool, default=False If True, the time elapsed while fitting each step will be printed as it is completed. Returns ------- p : Pipeline Returns a scikit-learn :class:`Pipeline` object. See Also -------- Pipeline : Class for creating a pipeline of transforms with a final estimator. Examples -------- >>> from sklearn.naive_bayes import GaussianNB >>> from sklearn.preprocessing import StandardScaler >>> from sklearn.pipeline import make_pipeline >>> make_pipeline(StandardScaler(), GaussianNB(priors=None)) Pipeline(steps=[('standardscaler', StandardScaler()), ('gaussiannb', GaussianNB())]) """ return Pipeline(_name_estimators(steps), memory=memory, verbose=verbose)
[ "def", "make_pipeline", "(", "*", "steps", ",", "memory", "=", "None", ",", "verbose", "=", "False", ")", ":", "return", "Pipeline", "(", "_name_estimators", "(", "steps", ")", ",", "memory", "=", "memory", ",", "verbose", "=", "verbose", ")" ]
https://github.com/scikit-learn/scikit-learn/blob/1d1aadd0711b87d2a11c80aad15df6f8cf156712/sklearn/pipeline.py#L827-L872
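A short sketch of the `memory` option described in the docstring above; the cache directory name is an assumption:

from sklearn.naive_bayes import GaussianNB
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler

# Fitted transformers are cached under ./pipeline_cache (hypothetical path).
pipe = make_pipeline(StandardScaler(), GaussianNB(), memory="pipeline_cache")
print(pipe.steps[0][0])  # 'standardscaler' -- step names derive from the class names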
holzschu/Carnets
44effb10ddfc6aa5c8b0687582a724ba82c6b547
Library/lib/python3.7/site-packages/sympy/discrete/transforms.py
python
fft
(seq, dps=None)
return _fourier_transform(seq, dps=dps)
r""" Performs the Discrete Fourier Transform (**DFT**) in the complex domain. The sequence is automatically padded to the right with zeros, as the *radix-2 FFT* requires the number of sample points to be a power of 2. This method should be used with default arguments only for short sequences as the complexity of expressions increases with the size of the sequence. Parameters ========== seq : iterable The sequence on which **DFT** is to be applied. dps : Integer Specifies the number of decimal digits for precision. Examples ======== >>> from sympy import fft, ifft >>> fft([1, 2, 3, 4]) [10, -2 - 2*I, -2, -2 + 2*I] >>> ifft(_) [1, 2, 3, 4] >>> ifft([1, 2, 3, 4]) [5/2, -1/2 + I/2, -1/2, -1/2 - I/2] >>> fft(_) [1, 2, 3, 4] >>> ifft([1, 7, 3, 4], dps=15) [3.75, -0.5 - 0.75*I, -1.75, -0.5 + 0.75*I] >>> fft(_) [1.0, 7.0, 3.0, 4.0] References ========== .. [1] https://en.wikipedia.org/wiki/Cooley%E2%80%93Tukey_FFT_algorithm .. [2] http://mathworld.wolfram.com/FastFourierTransform.html
r""" Performs the Discrete Fourier Transform (**DFT**) in the complex domain.
[ "r", "Performs", "the", "Discrete", "Fourier", "Transform", "(", "**", "DFT", "**", ")", "in", "the", "complex", "domain", "." ]
def fft(seq, dps=None): r""" Performs the Discrete Fourier Transform (**DFT**) in the complex domain. The sequence is automatically padded to the right with zeros, as the *radix-2 FFT* requires the number of sample points to be a power of 2. This method should be used with default arguments only for short sequences as the complexity of expressions increases with the size of the sequence. Parameters ========== seq : iterable The sequence on which **DFT** is to be applied. dps : Integer Specifies the number of decimal digits for precision. Examples ======== >>> from sympy import fft, ifft >>> fft([1, 2, 3, 4]) [10, -2 - 2*I, -2, -2 + 2*I] >>> ifft(_) [1, 2, 3, 4] >>> ifft([1, 2, 3, 4]) [5/2, -1/2 + I/2, -1/2, -1/2 - I/2] >>> fft(_) [1, 2, 3, 4] >>> ifft([1, 7, 3, 4], dps=15) [3.75, -0.5 - 0.75*I, -1.75, -0.5 + 0.75*I] >>> fft(_) [1.0, 7.0, 3.0, 4.0] References ========== .. [1] https://en.wikipedia.org/wiki/Cooley%E2%80%93Tukey_FFT_algorithm .. [2] http://mathworld.wolfram.com/FastFourierTransform.html """ return _fourier_transform(seq, dps=dps)
[ "def", "fft", "(", "seq", ",", "dps", "=", "None", ")", ":", "return", "_fourier_transform", "(", "seq", ",", "dps", "=", "dps", ")" ]
https://github.com/holzschu/Carnets/blob/44effb10ddfc6aa5c8b0687582a724ba82c6b547/Library/lib/python3.7/site-packages/sympy/discrete/transforms.py#L71-L117
p4-team/ctf
05ab90cd04ea26f0fca860579939617f57961a1a
2015-09-26-trendmicro/calculator/calculator.py
python
fromRoman
(s)
return result
convert Roman numeral to integer
convert Roman numeral to integer
[ "convert", "Roman", "numeral", "to", "integer" ]
def fromRoman(s): """convert Roman numeral to integer""" if not s: raise InvalidRomanNumeralError, 'Input can not be blank' if not romanNumeralPattern.search(s): raise InvalidRomanNumeralError, 'Invalid Roman numeral: %s' % s result = 0 index = 0 for numeral, integer in romanNumeralMap: while s[index:index + len(numeral)] == numeral: result += integer index += len(numeral) return result
[ "def", "fromRoman", "(", "s", ")", ":", "if", "not", "s", ":", "raise", "InvalidRomanNumeralError", ",", "'Input can not be blank'", "if", "not", "romanNumeralPattern", ".", "search", "(", "s", ")", ":", "raise", "InvalidRomanNumeralError", ",", "'Invalid Roman numeral: %s'", "%", "s", "result", "=", "0", "index", "=", "0", "for", "numeral", ",", "integer", "in", "romanNumeralMap", ":", "while", "s", "[", "index", ":", "index", "+", "len", "(", "numeral", ")", "]", "==", "numeral", ":", "result", "+=", "integer", "index", "+=", "len", "(", "numeral", ")", "return", "result" ]
https://github.com/p4-team/ctf/blob/05ab90cd04ea26f0fca860579939617f57961a1a/2015-09-26-trendmicro/calculator/calculator.py#L94-L107
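The function above uses Python 2 `raise` syntax. A minimal Python 3 sketch of the same greedy, subtractive-pair algorithm, with the numeral map inlined (a standard table, assumed to match the original `romanNumeralMap`) and input validation omitted:

ROMAN_NUMERAL_MAP = (('M', 1000), ('CM', 900), ('D', 500), ('CD', 400),
                     ('C', 100), ('XC', 90), ('L', 50), ('XL', 40),
                     ('X', 10), ('IX', 9), ('V', 5), ('IV', 4), ('I', 1))

def from_roman(s):
    """Convert a Roman numeral to an integer (Python 3 port, validation omitted)."""
    result, index = 0, 0
    for numeral, integer in ROMAN_NUMERAL_MAP:
        # Greedily consume the largest matching numeral from the left.
        while s[index:index + len(numeral)] == numeral:
            result += integer
            index += len(numeral)
    return result

assert from_roman('MCMXCIV') == 1994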
quantumlib/OpenFermion
6187085f2a7707012b68370b625acaeed547e62b
src/openfermion/ops/operators/symbolic_operator.py
python
SymbolicOperator.__repr__
(self)
return str(self)
[]
def __repr__(self): return str(self)
[ "def", "__repr__", "(", "self", ")", ":", "return", "str", "(", "self", ")" ]
https://github.com/quantumlib/OpenFermion/blob/6187085f2a7707012b68370b625acaeed547e62b/src/openfermion/ops/operators/symbolic_operator.py#L347-L348
facebookresearch/ReAgent
52f666670a7fa03206812ef48949f6b934d400f7
reagent/ope/datasets/logged_dataset.py
python
BanditsDataset.num_features
(self)
Returns: number of features
Returns: number of features
[ "Returns", ":", "number", "of", "features" ]
def num_features(self) -> int: """ Returns: number of features """ pass
[ "def", "num_features", "(", "self", ")", "->", "int", ":", "pass" ]
https://github.com/facebookresearch/ReAgent/blob/52f666670a7fa03206812ef48949f6b934d400f7/reagent/ope/datasets/logged_dataset.py#L36-L41
demisto/content
5c664a65b992ac8ca90ac3f11b1b2cdf11ee9b07
Packs/CofenseTriage/Integrations/CofenseTriagev3/CofenseTriagev3.py
python
Client.exception_handler
(response: requests.models.Response)
Handle error in the response and display error message based on status code. :type response: ``requests.models.Response`` :param response: response from API. :raises: raise DemistoException based on status code and response.
Handle error in the response and display error message based on status code.
[ "Handle", "error", "in", "the", "response", "and", "display", "error", "message", "based", "on", "status", "code", "." ]
def exception_handler(response: requests.models.Response): """ Handle error in the response and display error message based on status code. :type response: ``requests.models.Response`` :param response: response from API. :raises: raise DemistoException based on status code and response. """ err_msg = "" if response.status_code in HTTP_ERRORS: err_msg = HTTP_ERRORS[response.status_code] if response.status_code not in HTTP_ERRORS or response.status_code in [400, 404]: if response.status_code not in [400, 404]: err_msg = response.reason try: # Try to parse json error response error_entry = response.json().get("errors") if error_entry: err_details = ','.join([entry.get('detail') for entry in error_entry if entry.get('detail')]) if err_details: err_msg = f"{err_msg}\nDetails: {err_details}" except (ValueError, AttributeError): if response.text: err_msg = f"{err_msg}\nDetails: {response.text}" raise DemistoException(err_msg)
[ "def", "exception_handler", "(", "response", ":", "requests", ".", "models", ".", "Response", ")", ":", "err_msg", "=", "\"\"", "if", "response", ".", "status_code", "in", "HTTP_ERRORS", ":", "err_msg", "=", "HTTP_ERRORS", "[", "response", ".", "status_code", "]", "if", "response", ".", "status_code", "not", "in", "HTTP_ERRORS", "or", "response", ".", "status_code", "in", "[", "400", ",", "404", "]", ":", "if", "response", ".", "status_code", "not", "in", "[", "400", ",", "404", "]", ":", "err_msg", "=", "response", ".", "reason", "try", ":", "# Try to parse json error response", "error_entry", "=", "response", ".", "json", "(", ")", ".", "get", "(", "\"errors\"", ")", "if", "error_entry", ":", "err_details", "=", "','", ".", "join", "(", "[", "entry", ".", "get", "(", "'detail'", ")", "for", "entry", "in", "error_entry", "if", "entry", ".", "get", "(", "'detail'", ")", "]", ")", "if", "err_details", ":", "err_msg", "=", "f\"{err_msg}\\nDetails: {err_details}\"", "except", "(", "ValueError", ",", "AttributeError", ")", ":", "if", "response", ".", "text", ":", "err_msg", "=", "f\"{err_msg}\\nDetails: {response.text}\"", "raise", "DemistoException", "(", "err_msg", ")" ]
https://github.com/demisto/content/blob/5c664a65b992ac8ca90ac3f11b1b2cdf11ee9b07/Packs/CofenseTriage/Integrations/CofenseTriagev3/CofenseTriagev3.py#L150-L180
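A hedged usage sketch: the parameters show no `self`, so the handler reads as a static helper meant to be called on a failed HTTP response. The endpoint URL below is hypothetical:

import requests

response = requests.get("https://triage.example.com/api/public/v2/reports")  # hypothetical endpoint
if not response.ok:
    Client.exception_handler(response)  # raises DemistoException with parsed error details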
python-provy/provy
ca3d5e96a2210daf3c1fd4b96e047efff152db14
provy/more/debian/package/pip.py
python
PipRole.provision
(self)
Installs pip dependencies. This method should be called upon if overridden in base classes, or PIP won't work properly in the remote server. Example: :: class MySampleRole(Role): def provision(self): self.provision_role(PipRole) # does not need to be called if using with block.
Installs pip dependencies. This method should be called upon if overridden in base classes, or PIP won't work properly in the remote server.
[ "Installs", "pip", "dependencies", ".", "This", "method", "should", "be", "called", "upon", "if", "overridden", "in", "base", "classes", "or", "PIP", "won", "t", "work", "properly", "in", "the", "remote", "server", "." ]
def provision(self): ''' Installs pip dependencies. This method should be called upon if overridden in base classes, or PIP won't work properly in the remote server. Example: :: class MySampleRole(Role): def provision(self): self.provision_role(PipRole) # does not need to be called if using with block. ''' with self.using(AptitudeRole) as role: role.ensure_up_to_date() role.ensure_package_installed('python-setuptools') role.ensure_package_installed('python-dev') self.execute("easy_install pip", sudo=True, stdout=False, user=None)
[ "def", "provision", "(", "self", ")", ":", "with", "self", ".", "using", "(", "AptitudeRole", ")", "as", "role", ":", "role", ".", "ensure_up_to_date", "(", ")", "role", ".", "ensure_package_installed", "(", "'python-setuptools'", ")", "role", ".", "ensure_package_installed", "(", "'python-dev'", ")", "self", ".", "execute", "(", "\"easy_install pip\"", ",", "sudo", "=", "True", ",", "stdout", "=", "False", ",", "user", "=", "None", ")" ]
https://github.com/python-provy/provy/blob/ca3d5e96a2210daf3c1fd4b96e047efff152db14/provy/more/debian/package/pip.py#L44-L59
konomae/lastpass-python
41af73adecda1fbf48b83e53ed198e128e505405
lastpass/parser.py
python
skip_item
(stream, times=1)
Skips an item in a stream.
Skips an item in a stream.
[ "Skips", "an", "item", "in", "a", "stream", "." ]
def skip_item(stream, times=1): """Skips an item in a stream.""" for i in range(times): read_item(stream)
[ "def", "skip_item", "(", "stream", ",", "times", "=", "1", ")", ":", "for", "i", "in", "range", "(", "times", ")", ":", "read_item", "(", "stream", ")" ]
https://github.com/konomae/lastpass-python/blob/41af73adecda1fbf48b83e53ed198e128e505405/lastpass/parser.py#L164-L167
openstack/barbican
a9d2b133c8dc3307974f119f9a2b23a4ba82e8ce
barbican/tasks/certificate_resources.py
python
is_last_project_ca
(project_id)
return total == 1
Returns True iff project has exactly one project CA :param project_id: internal project ID :return: Boolean
Returns True iff project has exactly one project CA
[ "Returns", "True", "iff", "project", "has", "exactly", "one", "project", "CA" ]
def is_last_project_ca(project_id): """Returns True iff project has exactly one project CA :param project_id: internal project ID :return: Boolean """ project_ca_repo = repos.get_project_ca_repository() _, _, _, total = project_ca_repo.get_by_create_date( project_id=project_id, suppress_exception=True ) return total == 1
[ "def", "is_last_project_ca", "(", "project_id", ")", ":", "project_ca_repo", "=", "repos", ".", "get_project_ca_repository", "(", ")", "_", ",", "_", ",", "_", ",", "total", "=", "project_ca_repo", ".", "get_by_create_date", "(", "project_id", "=", "project_id", ",", "suppress_exception", "=", "True", ")", "return", "total", "==", "1" ]
https://github.com/openstack/barbican/blob/a9d2b133c8dc3307974f119f9a2b23a4ba82e8ce/barbican/tasks/certificate_resources.py#L290-L301
gramps-project/gramps
04d4651a43eb210192f40a9f8c2bad8ee8fa3753
gramps/gui/widgets/fanchart.py
python
FanChartGrampsGUI.set_fan
(self, fan)
Set the fanchartwidget to work on
Set the fanchartwidget to work on
[ "Set", "the", "fanchartwidget", "to", "work", "on" ]
def set_fan(self, fan): """ Set the fanchartwidget to work on """ self.fan = fan self.fan.format_helper = self.format_helper self.fan.goto = self.on_childmenu_changed
[ "def", "set_fan", "(", "self", ",", "fan", ")", ":", "self", ".", "fan", "=", "fan", "self", ".", "fan", ".", "format_helper", "=", "self", ".", "format_helper", "self", ".", "fan", ".", "goto", "=", "self", ".", "on_childmenu_changed" ]
https://github.com/gramps-project/gramps/blob/04d4651a43eb210192f40a9f8c2bad8ee8fa3753/gramps/gui/widgets/fanchart.py#L1774-L1780
kubernetes-client/python
47b9da9de2d02b2b7a34fbe05afb44afd130d73a
kubernetes/client/models/v1beta1_pod_disruption_budget.py
python
V1beta1PodDisruptionBudget.spec
(self)
return self._spec
Gets the spec of this V1beta1PodDisruptionBudget. # noqa: E501 :return: The spec of this V1beta1PodDisruptionBudget. # noqa: E501 :rtype: V1beta1PodDisruptionBudgetSpec
Gets the spec of this V1beta1PodDisruptionBudget. # noqa: E501
[ "Gets", "the", "spec", "of", "this", "V1beta1PodDisruptionBudget", ".", "#", "noqa", ":", "E501" ]
def spec(self): """Gets the spec of this V1beta1PodDisruptionBudget. # noqa: E501 :return: The spec of this V1beta1PodDisruptionBudget. # noqa: E501 :rtype: V1beta1PodDisruptionBudgetSpec """ return self._spec
[ "def", "spec", "(", "self", ")", ":", "return", "self", ".", "_spec" ]
https://github.com/kubernetes-client/python/blob/47b9da9de2d02b2b7a34fbe05afb44afd130d73a/kubernetes/client/models/v1beta1_pod_disruption_budget.py#L143-L150
TuSimple/simpledet
97413463f0bc3116f684eaf7031fd3dd6ded3149
operator_py/cython/setup.py
python
locate_cuda
()
return cudaconfig
Locate the CUDA environment on the system Returns a dict with keys 'home', 'nvcc', 'include', and 'lib64' and values giving the absolute path to each directory. Starts by looking for the CUDAHOME env variable. If not found, everything is based on finding 'nvcc' in the PATH.
Locate the CUDA environment on the system
[ "Locate", "the", "CUDA", "environment", "on", "the", "system" ]
def locate_cuda(): """Locate the CUDA environment on the system Returns a dict with keys 'home', 'nvcc', 'include', and 'lib64' and values giving the absolute path to each directory. Starts by looking for the CUDAHOME env variable. If not found, everything is based on finding 'nvcc' in the PATH. """ # first check if the CUDAHOME env variable is in use if 'CUDAHOME' in os.environ: home = os.environ['CUDAHOME'] nvcc = pjoin(home, 'bin', 'nvcc') else: # otherwise, search the PATH for NVCC default_path = pjoin(os.sep, 'usr', 'local', 'cuda', 'bin') nvcc = find_in_path('nvcc', os.environ['PATH'] + os.pathsep + default_path) if nvcc is None: raise EnvironmentError('The nvcc binary could not be ' 'located in your $PATH. Either add it to your path, or set $CUDAHOME') home = os.path.dirname(os.path.dirname(nvcc)) cudaconfig = {'home':home, 'nvcc':nvcc, 'include': pjoin(home, 'include'), 'lib64': pjoin(home, 'lib64')} for k, v in cudaconfig.items(): if not os.path.exists(v): raise EnvironmentError('The CUDA %s path could not be located in %s' % (k, v)) return cudaconfig
[ "def", "locate_cuda", "(", ")", ":", "# first check if the CUDAHOME env variable is in use", "if", "'CUDAHOME'", "in", "os", ".", "environ", ":", "home", "=", "os", ".", "environ", "[", "'CUDAHOME'", "]", "nvcc", "=", "pjoin", "(", "home", ",", "'bin'", ",", "'nvcc'", ")", "else", ":", "# otherwise, search the PATH for NVCC", "default_path", "=", "pjoin", "(", "os", ".", "sep", ",", "'usr'", ",", "'local'", ",", "'cuda'", ",", "'bin'", ")", "nvcc", "=", "find_in_path", "(", "'nvcc'", ",", "os", ".", "environ", "[", "'PATH'", "]", "+", "os", ".", "pathsep", "+", "default_path", ")", "if", "nvcc", "is", "None", ":", "raise", "EnvironmentError", "(", "'The nvcc binary could not be '", "'located in your $PATH. Either add it to your path, or set $CUDAHOME'", ")", "home", "=", "os", ".", "path", ".", "dirname", "(", "os", ".", "path", ".", "dirname", "(", "nvcc", ")", ")", "cudaconfig", "=", "{", "'home'", ":", "home", ",", "'nvcc'", ":", "nvcc", ",", "'include'", ":", "pjoin", "(", "home", ",", "'include'", ")", ",", "'lib64'", ":", "pjoin", "(", "home", ",", "'lib64'", ")", "}", "for", "k", ",", "v", "in", "cudaconfig", ".", "items", "(", ")", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "v", ")", ":", "raise", "EnvironmentError", "(", "'The CUDA %s path could not be located in %s'", "%", "(", "k", ",", "v", ")", ")", "return", "cudaconfig" ]
https://github.com/TuSimple/simpledet/blob/97413463f0bc3116f684eaf7031fd3dd6ded3149/operator_py/cython/setup.py#L27-L57
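A small sketch of how the returned dict is typically consumed in a setup script; the printed paths only illustrate the keys documented above, and the concrete values depend on the local install:

CUDA = locate_cuda()
print(CUDA['home'])     # e.g. /usr/local/cuda
print(CUDA['nvcc'])     # absolute path to the nvcc binary
print(CUDA['include'])  # header directory, usually passed to include_dirs
print(CUDA['lib64'])    # library directory, usually passed to library_dirs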
almarklein/visvis
766ed97767b44a55a6ff72c742d7385e074d3d55
utils/pypoints.py
python
Point.yi
(self)
return int(self._data[1]+0.5)
Get p[1] rounded to the nearest integer, for indexing.
Get p[1] rounded to the nearest integer, for indexing.
[ "Get", "p", "[", "1", "]", "rounded", "to", "the", "nearest", "integer", "for", "indexing", "." ]
def yi(self): """ Get p[1] rounded to the nearest integer, for indexing. """ return int(self._data[1]+0.5)
[ "def", "yi", "(", "self", ")", ":", "return", "int", "(", "self", ".", "_data", "[", "1", "]", "+", "0.5", ")" ]
https://github.com/almarklein/visvis/blob/766ed97767b44a55a6ff72c742d7385e074d3d55/utils/pypoints.py#L795-L797
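A note on the rounding trick above: `int(x + 0.5)` truncates toward zero, so it only rounds to the nearest integer for non-negative coordinates:

print(int(2.6 + 0.5))   # 3, as expected
print(int(-2.6 + 0.5))  # -2, although the nearest integer is -3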
partho-maple/coding-interview-gym
f9b28916da31935a27900794cfb8b91be3b38b9b
leetcode.com/python/378_Kth_Smallest_Element_in_a_Sorted_Matrix.py
python
Solution.kthSmallest
(self, matrix, k)
return currentNumber
:type matrix: List[List[int]] :type k: int :rtype: int
:type matrix: List[List[int]] :type k: int :rtype: int
[ ":", "type", "matrix", ":", "List", "[", "List", "[", "int", "]]", ":", "type", "k", ":", "int", ":", "rtype", ":", "int" ]
def kthSmallest(self, matrix, k): """ :type matrix: List[List[int]] :type k: int :rtype: int """ minHeap = [] # put the 1st element of each row in the min heap # we don't need to push more than 'k' elements in the heap for rowIdx in range(min(k, len(matrix))): heapq.heappush(minHeap, (matrix[rowIdx][0], 0, rowIdx)) currentNumber, currentNumerCount = 0, 0 while minHeap: currentNumber, columnIdx, rowIdx = heapq.heappop(minHeap) currentNumerCount += 1 if currentNumerCount == k: break else: if len(matrix[rowIdx]) > columnIdx + 1: heapq.heappush(minHeap, (matrix[rowIdx][columnIdx + 1], columnIdx + 1, rowIdx)) return currentNumber
[ "def", "kthSmallest", "(", "self", ",", "matrix", ",", "k", ")", ":", "minHeap", "=", "[", "]", "# put the 1st element of each row in the min heap", "# we don't need to push more than 'k' elements in the heap", "for", "rowIdx", "in", "range", "(", "min", "(", "k", ",", "len", "(", "matrix", ")", ")", ")", ":", "heapq", ".", "heappush", "(", "minHeap", ",", "(", "matrix", "[", "rowIdx", "]", "[", "0", "]", ",", "0", ",", "rowIdx", ")", ")", "currentNumber", ",", "currentNumerCount", "=", "0", ",", "0", "while", "minHeap", ":", "currentNumber", ",", "columnIdx", ",", "rowIdx", "=", "heapq", ".", "heappop", "(", "minHeap", ")", "currentNumerCount", "+=", "1", "if", "currentNumerCount", "==", "k", ":", "break", "else", ":", "if", "len", "(", "matrix", "[", "rowIdx", "]", ")", ">", "columnIdx", "+", "1", ":", "heapq", ".", "heappush", "(", "minHeap", ",", "(", "matrix", "[", "rowIdx", "]", "[", "columnIdx", "+", "1", "]", ",", "columnIdx", "+", "1", ",", "rowIdx", ")", ")", "return", "currentNumber" ]
https://github.com/partho-maple/coding-interview-gym/blob/f9b28916da31935a27900794cfb8b91be3b38b9b/leetcode.com/python/378_Kth_Smallest_Element_in_a_Sorted_Matrix.py#L4-L26
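A usage sketch for the min-heap approach above, using the classic LeetCode 378 example matrix; `heapq` is the import the solution relies on:

import heapq  # required by Solution.kthSmallest above

matrix = [[1, 5, 9],
          [10, 11, 13],
          [12, 13, 15]]
print(Solution().kthSmallest(matrix, 8))  # -> 13 (sorted order: 1,5,9,10,11,12,13,13,15)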
jkszw2014/bert-kbqa-NLPCC2017
c09511829377b959a8ad5c81f5581e742ba13dc9
AttributeMap-BERT-Classification/run_classifier.py
python
MrpcProcessor.get_test_examples
(self, data_dir)
return self._create_examples( self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")
See base class.
See base class.
[ "See", "base", "class", "." ]
def get_test_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")
[ "def", "get_test_examples", "(", "self", ",", "data_dir", ")", ":", "return", "self", ".", "_create_examples", "(", "self", ".", "_read_tsv", "(", "os", ".", "path", ".", "join", "(", "data_dir", ",", "\"test.tsv\"", ")", ")", ",", "\"test\"", ")" ]
https://github.com/jkszw2014/bert-kbqa-NLPCC2017/blob/c09511829377b959a8ad5c81f5581e742ba13dc9/AttributeMap-BERT-Classification/run_classifier.py#L309-L312
raffaele-forte/climber
5530a780446e35b1ce977bae140557050fe0b47c
Exscript/protocols/Protocol.py
python
Protocol.app_authorize
(self, account = None, flush = True, bailout = False)
Like app_authenticate(), but uses the authorization password of the account. For the difference between authentication and authorization please google for AAA. @type account: Account @param account: An account object, like login(). @type flush: bool @param flush: Whether to flush the last prompt from the buffer. @type bailout: bool @param bailout: Whether to wait for a prompt after sending the password.
Like app_authenticate(), but uses the authorization password of the account.
[ "Like", "app_authenticate", "()", "but", "uses", "the", "authorization", "password", "of", "the", "account", "." ]
def app_authorize(self, account = None, flush = True, bailout = False): """ Like app_authenticate(), but uses the authorization password of the account. For the difference between authentication and authorization please google for AAA. @type account: Account @param account: An account object, like login(). @type flush: bool @param flush: Whether to flush the last prompt from the buffer. @type bailout: bool @param bailout: Whether to wait for a prompt after sending the password. """ with self._get_account(account) as account: user = account.get_name() password = account.get_authorization_password() if password is None: password = account.get_password() self._dbg(1, "Attempting to app-authorize %s." % user) self._app_authenticate(account, password, flush, bailout) self.app_authorized = True
[ "def", "app_authorize", "(", "self", ",", "account", "=", "None", ",", "flush", "=", "True", ",", "bailout", "=", "False", ")", ":", "with", "self", ".", "_get_account", "(", "account", ")", "as", "account", ":", "user", "=", "account", ".", "get_name", "(", ")", "password", "=", "account", ".", "get_authorization_password", "(", ")", "if", "password", "is", "None", ":", "password", "=", "account", ".", "get_password", "(", ")", "self", ".", "_dbg", "(", "1", ",", "\"Attempting to app-authorize %s.\"", "%", "user", ")", "self", ".", "_app_authenticate", "(", "account", ",", "password", ",", "flush", ",", "bailout", ")", "self", ".", "app_authorized", "=", "True" ]
https://github.com/raffaele-forte/climber/blob/5530a780446e35b1ce977bae140557050fe0b47c/Exscript/protocols/Protocol.py#L832-L854
avrae/avrae
6ebe46a1ec3d4dfaa2f9b18fac948325f39f87de
cogsmisc/customization.py
python
Customization.gvar_create
(self, ctx, *, value)
Creates a global variable. A name will be randomly assigned upon creation.
Creates a global variable. A name will be randomly assigned upon creation.
[ "Creates", "a", "global", "variable", ".", "A", "name", "will", "be", "randomly", "assigned", "upon", "creation", "." ]
async def gvar_create(self, ctx, *, value): """Creates a global variable. A name will be randomly assigned upon creation.""" name = await helpers.create_gvar(ctx, value) await ctx.send(f"Created global variable `{name}`.")
[ "async", "def", "gvar_create", "(", "self", ",", "ctx", ",", "*", ",", "value", ")", ":", "name", "=", "await", "helpers", ".", "create_gvar", "(", "ctx", ",", "value", ")", "await", "ctx", ".", "send", "(", "f\"Created global variable `{name}`.\"", ")" ]
https://github.com/avrae/avrae/blob/6ebe46a1ec3d4dfaa2f9b18fac948325f39f87de/cogsmisc/customization.py#L949-L953
CastagnaIT/plugin.video.netflix
5cf5fa436eb9956576c0f62aa31a4c7d6c5b8a4a
packages/httpcore/_async/http11.py
python
AsyncHTTP11Connection._server_disconnected
(self)
return self._state == ConnectionState.IDLE and self.socket.is_readable()
Return True if the connection is idle, and the underlying socket is readable. The only valid state in which the socket can be readable here is when the b"" EOF marker is about to be returned, indicating a server disconnect.
Return True if the connection is idle, and the underlying socket is readable. The only valid state in which the socket can be readable here is when the b"" EOF marker is about to be returned, indicating a server disconnect.
[ "Return", "True", "if", "the", "connection", "is", "idle", "and", "the", "underlying", "socket", "is", "readable", ".", "The", "only", "valid", "state", "in", "which", "the", "socket", "can", "be", "readable", "here", "is", "when", "the", "b", "EOF", "marker", "is", "about", "to", "be", "returned", "indicating", "a", "server", "disconnect", "." ]
def _server_disconnected(self) -> bool: """ Return True if the connection is idle, and the underlying socket is readable. The only valid state in which the socket can be readable here is when the b"" EOF marker is about to be returned, indicating a server disconnect. """ return self._state == ConnectionState.IDLE and self.socket.is_readable()
[ "def", "_server_disconnected", "(", "self", ")", "->", "bool", ":", "return", "self", ".", "_state", "==", "ConnectionState", ".", "IDLE", "and", "self", ".", "socket", ".", "is_readable", "(", ")" ]
https://github.com/CastagnaIT/plugin.video.netflix/blob/5cf5fa436eb9956576c0f62aa31a4c7d6c5b8a4a/packages/httpcore/_async/http11.py#L53-L59
IJDykeman/wangTiles
7c1ee2095ebdf7f72bce07d94c6484915d5cae8b
experimental_code/tiles_3d/venv_mac_py3/lib/python2.7/site-packages/pip/_vendor/urllib3/connectionpool.py
python
HTTPConnectionPool._new_conn
(self)
return conn
Return a fresh :class:`HTTPConnection`.
Return a fresh :class:`HTTPConnection`.
[ "Return", "a", "fresh", ":", "class", ":", "HTTPConnection", "." ]
def _new_conn(self): """ Return a fresh :class:`HTTPConnection`. """ self.num_connections += 1 log.debug("Starting new HTTP connection (%d): %s:%s", self.num_connections, self.host, self.port or "80") conn = self.ConnectionCls(host=self.host, port=self.port, timeout=self.timeout.connect_timeout, strict=self.strict, **self.conn_kw) return conn
[ "def", "_new_conn", "(", "self", ")", ":", "self", ".", "num_connections", "+=", "1", "log", ".", "debug", "(", "\"Starting new HTTP connection (%d): %s:%s\"", ",", "self", ".", "num_connections", ",", "self", ".", "host", ",", "self", ".", "port", "or", "\"80\"", ")", "conn", "=", "self", ".", "ConnectionCls", "(", "host", "=", "self", ".", "host", ",", "port", "=", "self", ".", "port", ",", "timeout", "=", "self", ".", "timeout", ".", "connect_timeout", ",", "strict", "=", "self", ".", "strict", ",", "*", "*", "self", ".", "conn_kw", ")", "return", "conn" ]
https://github.com/IJDykeman/wangTiles/blob/7c1ee2095ebdf7f72bce07d94c6484915d5cae8b/experimental_code/tiles_3d/venv_mac_py3/lib/python2.7/site-packages/pip/_vendor/urllib3/connectionpool.py#L199-L210
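`_new_conn` is internal; a caller would normally exercise it indirectly through the pool, roughly as below (host and path are placeholders, and the vendored urllib3 is assumed importable as `urllib3`):

from urllib3 import HTTPConnectionPool

pool = HTTPConnectionPool("example.com", port=80)
response = pool.urlopen("GET", "/")  # opens a connection via _new_conn on first use
print(response.status)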
buke/GreenOdoo
3d8c55d426fb41fdb3f2f5a1533cfe05983ba1df
runtime/python/lib/python2.7/site-packages/PyWebDAV-0.9.8-py2.7.egg/pywebdav/lib/propfind.py
python
PROPFIND.createResponse
(self)
return df
Create the multistatus response This will be delegated to the specific method depending on which request (allprop, propname, prop) was found. If we get a PROPNAME then we simply return the list with empty values which we get from the interface class If we get an ALLPROP we first get the list of properties and then we do the same as with a PROP method.
Create the multistatus response
[ "Create", "the", "multistatus", "response" ]
def createResponse(self): """ Create the multistatus response This will be delegated to the specific method depending on which request (allprop, propname, prop) was found. If we get a PROPNAME then we simply return the list with empty values which we get from the interface class If we get an ALLPROP we first get the list of properties and then we do the same as with a PROP method. """ # check if resource exists if not self._dataclass.exists(self._uri): raise DAV_NotFound df = None if self.request_type == RT_ALLPROP: df = self.create_allprop() if self.request_type == RT_PROPNAME: df = self.create_propname() if self.request_type == RT_PROP: df = self.create_prop() if df != None: return df # no body means ALLPROP! df = self.create_allprop() return df
[ "def", "createResponse", "(", "self", ")", ":", "# check if resource exists", "if", "not", "self", ".", "_dataclass", ".", "exists", "(", "self", ".", "_uri", ")", ":", "raise", "DAV_NotFound", "df", "=", "None", "if", "self", ".", "request_type", "==", "RT_ALLPROP", ":", "df", "=", "self", ".", "create_allprop", "(", ")", "if", "self", ".", "request_type", "==", "RT_PROPNAME", ":", "df", "=", "self", ".", "create_propname", "(", ")", "if", "self", ".", "request_type", "==", "RT_PROP", ":", "df", "=", "self", ".", "create_prop", "(", ")", "if", "df", "!=", "None", ":", "return", "df", "# no body means ALLPROP!", "df", "=", "self", ".", "create_allprop", "(", ")", "return", "df" ]
https://github.com/buke/GreenOdoo/blob/3d8c55d426fb41fdb3f2f5a1533cfe05983ba1df/runtime/python/lib/python2.7/site-packages/PyWebDAV-0.9.8-py2.7.egg/pywebdav/lib/propfind.py#L48-L82
home-assistant/core
265ebd17a3f17ed8dc1e9bdede03ac8e323f1ab1
homeassistant/components/decora_wifi/light.py
python
DecoraWifiLight.__init__
(self, switch)
Initialize the switch.
Initialize the switch.
[ "Initialize", "the", "switch", "." ]
def __init__(self, switch): """Initialize the switch.""" self._switch = switch
[ "def", "__init__", "(", "self", ",", "switch", ")", ":", "self", ".", "_switch", "=", "switch" ]
https://github.com/home-assistant/core/blob/265ebd17a3f17ed8dc1e9bdede03ac8e323f1ab1/homeassistant/components/decora_wifi/light.py#L96-L98
lovelylain/pyctp
fd304de4b50c4ddc31a4190b1caaeb5dec66bc5d
example/ctp/futures/__init__.py
python
TraderApi.OnRspQryCFMMCTradingAccountKey
(self, pCFMMCTradingAccountKey, pRspInfo, nRequestID, bIsLast)
Response to querying the margin monitoring system brokerage funds account key
Response to querying the margin monitoring system brokerage funds account key
[ "Response", "to", "querying", "the", "margin", "monitoring", "system", "brokerage", "funds", "account", "key" ]
def OnRspQryCFMMCTradingAccountKey(self, pCFMMCTradingAccountKey, pRspInfo, nRequestID, bIsLast): """Response to querying the margin monitoring system brokerage funds account key"""
[ "def", "OnRspQryCFMMCTradingAccountKey", "(", "self", ",", "pCFMMCTradingAccountKey", ",", "pRspInfo", ",", "nRequestID", ",", "bIsLast", ")", ":" ]
https://github.com/lovelylain/pyctp/blob/fd304de4b50c4ddc31a4190b1caaeb5dec66bc5d/example/ctp/futures/__init__.py#L629-L630
google-research/language
61fa7260ac7d690d11ef72ca863e45a37c0bdc80
language/labs/drkit/model_fns.py
python
create_hotpotqa_model
(bert_config, qa_config, mips_config, is_training, features, ent2ment_ind, ent2ment_val, ment2ent_map, entity_ids, entity_mask, use_one_hot_embeddings, summary_obj, num_hops=2)
return total_loss, predictions
Creates a classification model.
Creates a classification model.
[ "Creates", "a", "classification", "model", "." ]
def create_hotpotqa_model(bert_config, qa_config, mips_config, is_training, features, ent2ment_ind, ent2ment_val, ment2ent_map, entity_ids, entity_mask, use_one_hot_embeddings, summary_obj, num_hops=2): """Creates a classification model.""" qas_ids = features["qas_ids"] qry_input_ids = features["qry_input_ids"] qry_input_mask = features["qry_input_mask"] batch_size = tf.shape(qry_input_ids)[0] qry_entity_ids = features["qry_entity_id"] if not isinstance(qry_entity_ids, tf.SparseTensor): # This assumes batch_size == 1. num_ents = features["num_entities"][0] qry_entity_ids = tf.SparseTensor( indices=tf.concat([ tf.zeros((num_ents, 1), dtype=tf.int64), tf.expand_dims(tf.range(num_ents, dtype=tf.int64), 1) ], axis=1), values=qry_entity_ids[0, :num_ents], dense_shape=[1, qa_config.num_entities]) answer_entities = None if is_training: answer_entities = features["answer_entities"] answer_index = tf.SparseTensor( indices=tf.concat([ answer_entities.indices[:, 0:1], tf.cast(tf.expand_dims(answer_entities.values, 1), tf.int64) ], axis=1), values=tf.ones_like(answer_entities.values, dtype=tf.float32), dense_shape=[batch_size, qa_config.num_entities]) layer_entities, _, _, _, el, qry_seq_emb = multi_hop( qry_input_ids, qry_input_mask, qry_entity_ids, entity_ids, entity_mask, ent2ment_ind, ent2ment_val, ment2ent_map, is_training, use_one_hot_embeddings, bert_config, qa_config, mips_config, num_hops=num_hops, exclude_set=None) layer_entities = [el] + layer_entities # Compute weights for each layer. with tf.name_scope("classifier"): qry_emb, _ = layer_qry_encoder( qry_seq_emb, qry_input_ids, qry_input_mask, is_training, bert_config, qa_config, suffix="_cl") output_weights = tf.get_variable( "cl_weights", [qa_config.projection_dim, len(layer_entities)], initializer=tf.truncated_normal_initializer(stddev=0.02)) output_bias = tf.get_variable( "cl_bias", [len(layer_entities)], initializer=tf.zeros_initializer()) logits = tf.matmul(qry_emb, output_weights) logits = tf.nn.bias_add(logits, output_bias) probabilities = tf.nn.softmax(logits, axis=-1) if is_training: nrows = qa_config.train_batch_size else: nrows = qa_config.predict_batch_size def _to_ragged(sp_tensor): r_ind = tf.RaggedTensor.from_value_rowids( value_rowids=sp_tensor.indices[:, 0], values=sp_tensor.indices[:, 1], nrows=nrows) r_val = tf.RaggedTensor.from_value_rowids( value_rowids=sp_tensor.indices[:, 0], values=sp_tensor.values, nrows=nrows) return r_ind, r_val def _layer_softmax(entities): uniq_entity_ids, uniq_entity_scs = aggregate_sparse_indices( entities.indices, entities.values, entities.dense_shape, qa_config.entity_score_aggregation_fn) uniq_entity_scs /= qa_config.softmax_temperature logits = tf.SparseTensor(uniq_entity_ids, uniq_entity_scs, entities.dense_shape) return tf.sparse.softmax(tf.sparse.reorder(logits)) predictions = {"qas_ids": qas_ids} layer_entities_weighted = [] for i, layer_entity in enumerate(layer_entities): ent_ind, ent_val = _to_ragged(layer_entity) predictions.update({ "layer_%d_ent" % i: ent_ind.to_tensor(default_value=-1), "layer_%d_scs" % i: ent_val.to_tensor(default_value=-1), }) layer_entities_weighted.append( batch_multiply(_layer_softmax(layer_entity), probabilities[:, i])) probs = tf.sparse.add(layer_entities_weighted[0], layer_entities_weighted[1]) for i in range(2, len(layer_entities_weighted)): probs = tf.sparse.add(probs, layer_entities_weighted[i]) probs_dense = tf.sparse.to_dense( probs, default_value=DEFAULT_VALUE, validate_indices=False) answer_preds = tf.argmax(probs_dense, axis=1) top_vals, top_idx = tf.nn.top_k(probs_dense, k=100, sorted=True) total_loss = None if is_training: sp_loss = compute_loss_from_sptensors(probs, answer_index) total_loss = tf.reduce_sum(sp_loss.values) / tf.cast(batch_size, tf.float32) num_answers_ret = tf.shape(sp_loss.values)[0] if summary_obj is not None: for i in range(len(layer_entities)): num_ents = tf.cast(tf.shape(layer_entities[i].indices)[0], tf.float32) / tf.cast(batch_size, tf.float32) summary_obj.scalar("train/layer_weight_%d" % i, tf.reduce_mean(probabilities[:, i], keepdims=True)) summary_obj.scalar("train/num_entities_%d" % i, tf.expand_dims(num_ents, 0)) summary_obj.scalar("train/total_loss", tf.expand_dims(total_loss, 0)) summary_obj.scalar("train/ans_in_ret", tf.expand_dims(num_answers_ret, 0)) summary_obj.scalar("train/total_prob_mass", tf.reduce_sum(probs.values, keepdims=True)) predictions.update({ "layer_probs": probabilities, "top_vals": top_vals, "top_idx": top_idx, "predictions": answer_preds, }) return total_loss, predictions
[ "def", "create_hotpotqa_model", "(", "bert_config", ",", "qa_config", ",", "mips_config", ",", "is_training", ",", "features", ",", "ent2ment_ind", ",", "ent2ment_val", ",", "ment2ent_map", ",", "entity_ids", ",", "entity_mask", ",", "use_one_hot_embeddings", ",", "summary_obj", ",", "num_hops", "=", "2", ")", ":", "qas_ids", "=", "features", "[", "\"qas_ids\"", "]", "qry_input_ids", "=", "features", "[", "\"qry_input_ids\"", "]", "qry_input_mask", "=", "features", "[", "\"qry_input_mask\"", "]", "batch_size", "=", "tf", ".", "shape", "(", "qry_input_ids", ")", "[", "0", "]", "qry_entity_ids", "=", "features", "[", "\"qry_entity_id\"", "]", "if", "not", "isinstance", "(", "qry_entity_ids", ",", "tf", ".", "SparseTensor", ")", ":", "# This assumes batch_size == 1.", "num_ents", "=", "features", "[", "\"num_entities\"", "]", "[", "0", "]", "qry_entity_ids", "=", "tf", ".", "SparseTensor", "(", "indices", "=", "tf", ".", "concat", "(", "[", "tf", ".", "zeros", "(", "(", "num_ents", ",", "1", ")", ",", "dtype", "=", "tf", ".", "int64", ")", ",", "tf", ".", "expand_dims", "(", "tf", ".", "range", "(", "num_ents", ",", "dtype", "=", "tf", ".", "int64", ")", ",", "1", ")", "]", ",", "axis", "=", "1", ")", ",", "values", "=", "qry_entity_ids", "[", "0", ",", ":", "num_ents", "]", ",", "dense_shape", "=", "[", "1", ",", "qa_config", ".", "num_entities", "]", ")", "answer_entities", "=", "None", "if", "is_training", ":", "answer_entities", "=", "features", "[", "\"answer_entities\"", "]", "answer_index", "=", "tf", ".", "SparseTensor", "(", "indices", "=", "tf", ".", "concat", "(", "[", "answer_entities", ".", "indices", "[", ":", ",", "0", ":", "1", "]", ",", "tf", ".", "cast", "(", "tf", ".", "expand_dims", "(", "answer_entities", ".", "values", ",", "1", ")", ",", "tf", ".", "int64", ")", "]", ",", "axis", "=", "1", ")", ",", "values", "=", "tf", ".", "ones_like", "(", "answer_entities", ".", "values", ",", "dtype", "=", "tf", ".", "float32", ")", ",", "dense_shape", "=", "[", "batch_size", ",", "qa_config", ".", "num_entities", "]", ")", "layer_entities", ",", "_", ",", "_", ",", "_", ",", "el", ",", "qry_seq_emb", "=", "multi_hop", "(", "qry_input_ids", ",", "qry_input_mask", ",", "qry_entity_ids", ",", "entity_ids", ",", "entity_mask", ",", "ent2ment_ind", ",", "ent2ment_val", ",", "ment2ent_map", ",", "is_training", ",", "use_one_hot_embeddings", ",", "bert_config", ",", "qa_config", ",", "mips_config", ",", "num_hops", "=", "num_hops", ",", "exclude_set", "=", "None", ")", "layer_entities", "=", "[", "el", "]", "+", "layer_entities", "# Compute weights for each layer.", "with", "tf", ".", "name_scope", "(", "\"classifier\"", ")", ":", "qry_emb", ",", "_", "=", "layer_qry_encoder", "(", "qry_seq_emb", ",", "qry_input_ids", ",", "qry_input_mask", ",", "is_training", ",", "bert_config", ",", "qa_config", ",", "suffix", "=", "\"_cl\"", ")", "output_weights", "=", "tf", ".", "get_variable", "(", "\"cl_weights\"", ",", "[", "qa_config", ".", "projection_dim", ",", "len", "(", "layer_entities", ")", "]", ",", "initializer", "=", "tf", ".", "truncated_normal_initializer", "(", "stddev", "=", "0.02", ")", ")", "output_bias", "=", "tf", ".", "get_variable", "(", "\"cl_bias\"", ",", "[", "len", "(", "layer_entities", ")", "]", ",", "initializer", "=", "tf", ".", "zeros_initializer", "(", ")", ")", "logits", "=", "tf", ".", "matmul", "(", "qry_emb", ",", "output_weights", ")", "logits", "=", "tf", ".", "nn", ".", "bias_add", "(", "logits", ",", "output_bias", ")", "probabilities", "=", "tf", ".", "nn", ".", "softmax", "(", "logits", ",", "axis", "=", "-", "1", ")", "if", "is_training", ":", "nrows", "=", "qa_config", ".", "train_batch_size", "else", ":", "nrows", "=", "qa_config", ".", "predict_batch_size", "def", "_to_ragged", "(", "sp_tensor", ")", ":", "r_ind", "=", "tf", ".", "RaggedTensor", ".", "from_value_rowids", "(", "value_rowids", "=", "sp_tensor", ".", "indices", "[", ":", ",", "0", "]", ",", "values", "=", "sp_tensor", ".", "indices", "[", ":", ",", "1", "]", ",", "nrows", "=", "nrows", ")", "r_val", "=", "tf", ".", "RaggedTensor", ".", "from_value_rowids", "(", "value_rowids", "=", "sp_tensor", ".", "indices", "[", ":", ",", "0", "]", ",", "values", "=", "sp_tensor", ".", "values", ",", "nrows", "=", "nrows", ")", "return", "r_ind", ",", "r_val", "def", "_layer_softmax", "(", "entities", ")", ":", "uniq_entity_ids", ",", "uniq_entity_scs", "=", "aggregate_sparse_indices", "(", "entities", ".", "indices", ",", "entities", ".", "values", ",", "entities", ".", "dense_shape", ",", "qa_config", ".", "entity_score_aggregation_fn", ")", "uniq_entity_scs", "/=", "qa_config", ".", "softmax_temperature", "logits", "=", "tf", ".", "SparseTensor", "(", "uniq_entity_ids", ",", "uniq_entity_scs", ",", "entities", ".", "dense_shape", ")", "return", "tf", ".", "sparse", ".", "softmax", "(", "tf", ".", "sparse", ".", "reorder", "(", "logits", ")", ")", "predictions", "=", "{", "\"qas_ids\"", ":", "qas_ids", "}", "layer_entities_weighted", "=", "[", "]", "for", "i", ",", "layer_entity", "in", "enumerate", "(", "layer_entities", ")", ":", "ent_ind", ",", "ent_val", "=", "_to_ragged", "(", "layer_entity", ")", "predictions", ".", "update", "(", "{", "\"layer_%d_ent\"", "%", "i", ":", "ent_ind", ".", "to_tensor", "(", "default_value", "=", "-", "1", ")", ",", "\"layer_%d_scs\"", "%", "i", ":", "ent_val", ".", "to_tensor", "(", "default_value", "=", "-", "1", ")", ",", "}", ")", "layer_entities_weighted", ".", "append", "(", "batch_multiply", "(", "_layer_softmax", "(", "layer_entity", ")", ",", "probabilities", "[", ":", ",", "i", "]", ")", ")", "probs", "=", "tf", ".", "sparse", ".", "add", "(", "layer_entities_weighted", "[", "0", "]", ",", "layer_entities_weighted", "[", "1", "]", ")", "for", "i", "in", "range", "(", "2", ",", "len", "(", "layer_entities_weighted", ")", ")", ":", "probs", "=", "tf", ".", "sparse", ".", "add", "(", "probs", ",", "layer_entities_weighted", "[", "i", "]", ")", "probs_dense", "=", "tf", ".", "sparse", ".", "to_dense", "(", "probs", ",", "default_value", "=", "DEFAULT_VALUE", ",", "validate_indices", "=", "False", ")", "answer_preds", "=", "tf", ".", "argmax", "(", "probs_dense", ",", "axis", "=", "1", ")", "top_vals", ",", "top_idx", "=", "tf", ".", "nn", ".", "top_k", "(", "probs_dense", ",", "k", "=", "100", ",", "sorted", "=", "True", ")", "total_loss", "=", "None", "if", "is_training", ":", "sp_loss", "=", "compute_loss_from_sptensors", "(", "probs", ",", "answer_index", ")", "total_loss", "=", "tf", ".", "reduce_sum", "(", "sp_loss", ".", "values", ")", "/", "tf", ".", "cast", "(", "batch_size", ",", "tf", ".", "float32", ")", "num_answers_ret", "=", "tf", ".", "shape", "(", "sp_loss", ".", "values", ")", "[", "0", "]", "if", "summary_obj", "is", "not", "None", ":", "for", "i", "in", "range", "(", "len", "(", "layer_entities", ")", ")", ":", "num_ents", "=", "tf", ".", "cast", "(", "tf", ".", "shape", "(", "layer_entities", "[", "i", "]", ".", "indices", ")", "[", "0", "]", ",", "tf", ".", "float32", ")", "/", "tf", ".", "cast", "(", "batch_size", ",", "tf", ".", "float32", ")", "summary_obj", ".", "scalar", "(", "\"train/layer_weight_%d\"", "%", "i", ",", "tf", ".", "reduce_mean", "(", "probabilities", "[", ":", ",", "i", "]", ",", "keepdims", "=", "True", ")", ")", "summary_obj", ".", "scalar", "(", "\"train/num_entities_%d\"", "%", "i", ",", "tf", ".", "expand_dims", "(", "num_ents", ",", "0", ")", ")", "summary_obj", ".", "scalar", "(", "\"train/total_loss\"", ",", "tf", ".", "expand_dims", "(", "total_loss", ",", "0", ")", ")", "summary_obj", ".", "scalar", "(", "\"train/ans_in_ret\"", ",", "tf", ".", "expand_dims", "(", "num_answers_ret", ",", "0", ")", ")", "summary_obj", ".", "scalar", "(", "\"train/total_prob_mass\"", ",", "tf", ".", "reduce_sum", "(", "probs", ".", "values", ",", "keepdims", "=", "True", ")", ")", "predictions", ".", "update", "(", "{", "\"layer_probs\"", ":", "probabilities", ",", "\"top_vals\"", ":", "top_vals", ",", "\"top_idx\"", ":", "top_idx", ",", "\"predictions\"", ":", "answer_preds", ",", "}", ")", "return", "total_loss", ",", "predictions" ]
https://github.com/google-research/language/blob/61fa7260ac7d690d11ef72ca863e45a37c0bdc80/language/labs/drkit/model_fns.py#L1401-L1552
googleads/google-ads-python
2a1d6062221f6aad1992a6bcca0e7e4a93d2db86
google/ads/googleads/v8/services/services/campaign_draft_service/client.py
python
CampaignDraftServiceClient.parse_campaign_draft_path
(path: str)
return m.groupdict() if m else {}
Parse a campaign_draft path into its component segments.
Parse a campaign_draft path into its component segments.
[ "Parse", "a", "campaign_draft", "path", "into", "its", "component", "segments", "." ]
def parse_campaign_draft_path(path: str) -> Dict[str, str]: """Parse a campaign_draft path into its component segments.""" m = re.match( r"^customers/(?P<customer_id>.+?)/campaignDrafts/(?P<base_campaign_id>.+?)~(?P<draft_id>.+?)$", path, ) return m.groupdict() if m else {}
[ "def", "parse_campaign_draft_path", "(", "path", ":", "str", ")", "->", "Dict", "[", "str", ",", "str", "]", ":", "m", "=", "re", ".", "match", "(", "r\"^customers/(?P<customer_id>.+?)/campaignDrafts/(?P<base_campaign_id>.+?)~(?P<draft_id>.+?)$\"", ",", "path", ",", ")", "return", "m", ".", "groupdict", "(", ")", "if", "m", "else", "{", "}" ]
https://github.com/googleads/google-ads-python/blob/2a1d6062221f6aad1992a6bcca0e7e4a93d2db86/google/ads/googleads/v8/services/services/campaign_draft_service/client.py#L193-L199
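A quick check of the regex above with a made-up resource name; the numeric IDs are placeholders:

path = "customers/123/campaignDrafts/456~789"
print(CampaignDraftServiceClient.parse_campaign_draft_path(path))
# {'customer_id': '123', 'base_campaign_id': '456', 'draft_id': '789'}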
nathanlopez/Stitch
8e22e91c94237959c02d521aab58dc7e3d994cea
Application/stitch_winshell.py
python
st_winshell.do_avscan
(self,line)
[]
def do_avscan(self,line): self.stlib.avscan()
[ "def", "do_avscan", "(", "self", ",", "line", ")", ":", "self", ".", "stlib", ".", "avscan", "(", ")" ]
https://github.com/nathanlopez/Stitch/blob/8e22e91c94237959c02d521aab58dc7e3d994cea/Application/stitch_winshell.py#L73-L73
ralphbean/bugwarrior
aa660b258f95e29b07508f555ddc639e9cbdab82
bugwarrior/services/trello.py
python
TrelloService.annotations
(self, card_json)
return annotations
A wrapper around get_comments that builds the taskwarrior annotations.
A wrapper around get_comments that builds the taskwarrior annotations.
[ "A", "wrapper", "around", "get_comments", "that", "builds", "the", "taskwarrior", "annotations", "." ]
def annotations(self, card_json): """ A wrapper around get_comments that builds the taskwarrior annotations. """ comments = self.get_comments(card_json['id']) annotations = self.build_annotations( ((c['memberCreator']['username'], c['data']['text']) for c in comments), card_json["shortUrl"]) return annotations
[ "def", "annotations", "(", "self", ",", "card_json", ")", ":", "comments", "=", "self", ".", "get_comments", "(", "card_json", "[", "'id'", "]", ")", "annotations", "=", "self", ".", "build_annotations", "(", "(", "(", "c", "[", "'memberCreator'", "]", "[", "'username'", "]", ",", "c", "[", "'data'", "]", "[", "'text'", "]", ")", "for", "c", "in", "comments", ")", ",", "card_json", "[", "\"shortUrl\"", "]", ")", "return", "annotations" ]
https://github.com/ralphbean/bugwarrior/blob/aa660b258f95e29b07508f555ddc639e9cbdab82/bugwarrior/services/trello.py#L137-L144
openedx/edx-platform
68dd185a0ab45862a2a61e0f803d7e03d2be71b5
openedx/core/djangoapps/content_libraries/api.py
python
EdxApiImportClient.get_block_static_data
(self, asset_file)
return resp.content
See parent's docstring.
See parent's docstring.
[ "See", "parent", "s", "docstring", "." ]
def get_block_static_data(self, asset_file): """ See parent's docstring. """ if (asset_file['url'].startswith(self.studio_url) and 'export-file' in asset_file['url']): # We must call download this file with authentication. But # we only want to pass the auth headers if this is the same # studio instance, or else we could leak credentials to a # third party. path = asset_file['url'][len(self.studio_url):] resp = self._call('get', path) else: resp = requests.get(asset_file['url']) resp.raise_for_status() return resp.content
[ "def", "get_block_static_data", "(", "self", ",", "asset_file", ")", ":", "if", "(", "asset_file", "[", "'url'", "]", ".", "startswith", "(", "self", ".", "studio_url", ")", "and", "'export-file'", "in", "asset_file", "[", "'url'", "]", ")", ":", "# We must call download this file with authentication. But", "# we only want to pass the auth headers if this is the same", "# studio instance, or else we could leak credentials to a", "# third party.", "path", "=", "asset_file", "[", "'url'", "]", "[", "len", "(", "self", ".", "studio_url", ")", ":", "]", "resp", "=", "self", ".", "_call", "(", "'get'", ",", "path", ")", "else", ":", "resp", "=", "requests", ".", "get", "(", "asset_file", "[", "'url'", "]", ")", "resp", ".", "raise_for_status", "(", ")", "return", "resp", ".", "content" ]
https://github.com/openedx/edx-platform/blob/68dd185a0ab45862a2a61e0f803d7e03d2be71b5/openedx/core/djangoapps/content_libraries/api.py#L1335-L1350