repo (string, 7-54 chars) | path (string, 4-192 chars) | url (string, 87-284 chars) | code (string, 78-104k chars) | code_tokens (sequence) | docstring (string, 1-46.9k chars) | docstring_tokens (sequence) | language (1 class: python) | partition (3 classes: train/valid/test) |
---|---|---|---|---|---|---|---|---|
KnorrFG/pyparadigm | pyparadigm/extras.py | https://github.com/KnorrFG/pyparadigm/blob/69944cdf3ce2f6414ae1aa1d27a0d8c6e5fb3fd3/pyparadigm/extras.py#L26-L31 | def apply_color_map(name: str, mat: np.ndarray = None):
"""returns an RGB matrix scaled by a matplotlib color map"""
def apply_map(mat):
return (cm.get_cmap(name)(_normalize(mat))[:, :, :3] * 255).astype(np.uint8)
return apply_map if mat is None else apply_map(mat) | [code_tokens: tokenized form of the code] | returns an RGB matrix scaled by a matplotlib color map | [docstring_tokens: tokenized form of the docstring] | python | train |
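
A minimal usage sketch for the `apply_color_map` row above, assuming the package is installed and importable as `pyparadigm.extras` (taken from the row's path column); the colour map name and array are arbitrary.

```python
import numpy as np
from pyparadigm.extras import apply_color_map  # import path assumed from the row's path field

mat = np.random.rand(32, 32)            # any 2-D array of values
rgb = apply_color_map("viridis", mat)   # (32, 32, 3) uint8 RGB image
print(rgb.shape, rgb.dtype)

# Called without a matrix, it returns a reusable mapping function instead:
to_viridis = apply_color_map("viridis")
rgb2 = to_viridis(mat)
```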
infothrill/python-dyndnsc | dyndnsc/core.py | https://github.com/infothrill/python-dyndnsc/blob/2196d48aa6098da9835a7611fbdb0b5f0fbf51e4/dyndnsc/core.py#L85-L113 | def has_state_changed(self):
"""
Detect changes in offline detector and real DNS value.
Detect a change either in the offline detector or a
difference between the real DNS value and what the online
detector last got.
This is efficient, since it only generates minimal dns traffic
for online detectors and no traffic at all for offline detectors.
:rtype: boolean
"""
self.lastcheck = time.time()
# prefer offline state change detection:
if self.detector.can_detect_offline():
self.detector.detect()
elif not self.dns.detect() == self.detector.get_current_value():
# The following produces traffic, but probably less traffic
# overall than the detector
self.detector.detect()
if self.detector.has_changed():
LOG.debug("detector changed")
return True
elif self.dns.has_changed():
LOG.debug("dns changed")
return True
return False | [code_tokens: tokenized form of the code] | Detect changes in offline detector and real DNS value.
Detect a change either in the offline detector or a
difference between the real DNS value and what the online
detector last got.
This is efficient, since it only generates minimal dns traffic
for online detectors and no traffic at all for offline detectors.
:rtype: boolean | [docstring_tokens: tokenized form of the docstring] | python | train |
Dallinger/Dallinger | dallinger/recruiters.py | https://github.com/Dallinger/Dallinger/blob/76ca8217c709989c116d0ebd8fca37bd22f591af/dallinger/recruiters.py#L128-L141 | def open_recruitment(self, n=1):
"""Return initial experiment URL list, plus instructions
for finding subsequent recruitment events in experiemnt logs.
"""
logger.info("Opening CLI recruitment for {} participants".format(n))
recruitments = self.recruit(n)
message = (
'Search for "{}" in the logs for subsequent recruitment URLs.\n'
"Open the logs for this experiment with "
'"dallinger logs --app {}"'.format(
NEW_RECRUIT_LOG_PREFIX, self.config.get("id")
)
)
return {"items": recruitments, "message": message} | [
"def",
"open_recruitment",
"(",
"self",
",",
"n",
"=",
"1",
")",
":",
"logger",
".",
"info",
"(",
"\"Opening CLI recruitment for {} participants\"",
".",
"format",
"(",
"n",
")",
")",
"recruitments",
"=",
"self",
".",
"recruit",
"(",
"n",
")",
"message",
"=",
"(",
"'Search for \"{}\" in the logs for subsequent recruitment URLs.\\n'",
"\"Open the logs for this experiment with \"",
"'\"dallinger logs --app {}\"'",
".",
"format",
"(",
"NEW_RECRUIT_LOG_PREFIX",
",",
"self",
".",
"config",
".",
"get",
"(",
"\"id\"",
")",
")",
")",
"return",
"{",
"\"items\"",
":",
"recruitments",
",",
"\"message\"",
":",
"message",
"}"
] | Return initial experiment URL list, plus instructions
for finding subsequent recruitment events in experiemnt logs. | [
"Return",
"initial",
"experiment",
"URL",
"list",
"plus",
"instructions",
"for",
"finding",
"subsequent",
"recruitment",
"events",
"in",
"experiemnt",
"logs",
"."
] | python | train |
edx/ease | ease/grade.py | https://github.com/edx/ease/blob/a7890ed403da94d03726b0639cd8ebda45af6bbb/ease/grade.py#L167-L188 | def get_confidence_value(algorithm,model,grader_feats,score, scores):
"""
Determines a confidence in a certain score, given proper input parameters
algorithm- from util_functions.AlgorithmTypes
model - a trained model
grader_feats - a row of features used by the model for classification/regression
score - The score assigned to the submission by a prior model
"""
min_score=min(numpy.asarray(scores))
max_score=max(numpy.asarray(scores))
if algorithm == util_functions.AlgorithmTypes.classification and hasattr(model, "predict_proba"):
#If classification, predict with probability, which gives you a matrix of confidences per score point
raw_confidence=model.predict_proba(grader_feats)[0,(float(score)-float(min_score))]
#TODO: Normalize confidence somehow here
confidence=raw_confidence
elif hasattr(model, "predict"):
raw_confidence = model.predict(grader_feats)[0]
confidence = max(float(raw_confidence) - math.floor(float(raw_confidence)), math.ceil(float(raw_confidence)) - float(raw_confidence))
else:
confidence = 0
return confidence | [code_tokens: tokenized form of the code] | Determines a confidence in a certain score, given proper input parameters
algorithm- from util_functions.AlgorithmTypes
model - a trained model
grader_feats - a row of features used by the model for classification/regression
score - The score assigned to the submission by a prior model | [docstring_tokens: tokenized form of the docstring] | python | valid |
ibm-watson-iot/iot-python | src/wiotp/sdk/api/mgmt/extensions.py | https://github.com/ibm-watson-iot/iot-python/blob/195f05adce3fba4ec997017e41e02ebd85c0c4cc/src/wiotp/sdk/api/mgmt/extensions.py#L50-L62 | def delete(self, bundleId):
"""
Delete a device management extension package
It accepts bundleId (string) as parameters
In case of failure it throws APIException
"""
url = "api/v0002/mgmt/custom/bundle/%s" % (bundleId)
r = self._apiClient.delete(url)
if r.status_code == 204:
return True
else:
raise ApiException(r) | [code_tokens: tokenized form of the code] | Delete a device management extension package
It accepts bundleId (string) as parameters
In case of failure it throws APIException | [docstring_tokens: tokenized form of the docstring] | python | test |
seleniumbase/SeleniumBase | seleniumbase/core/tour_helper.py | https://github.com/seleniumbase/SeleniumBase/blob/62e5b43ee1f90a9ed923841bdd53b1b38358f43a/seleniumbase/core/tour_helper.py#L26-L54 | def activate_bootstrap(driver):
""" Allows you to use Bootstrap Tours with SeleniumBase
http://bootstraptour.com/
"""
bootstrap_tour_css = constants.BootstrapTour.MIN_CSS
bootstrap_tour_js = constants.BootstrapTour.MIN_JS
verify_script = ("""// Verify Bootstrap Tour activated
var tour2 = new Tour({
});""")
backdrop_style = style_sheet.bt_backdrop_style
js_utils.add_css_style(driver, backdrop_style)
js_utils.wait_for_ready_state_complete(driver)
js_utils.wait_for_angularjs(driver)
for x in range(4):
js_utils.activate_jquery(driver)
js_utils.add_css_link(driver, bootstrap_tour_css)
js_utils.add_js_link(driver, bootstrap_tour_js)
time.sleep(0.1)
for x in range(int(settings.MINI_TIMEOUT * 2.0)):
# Bootstrap needs a small amount of time to load & activate.
try:
driver.execute_script(verify_script)
time.sleep(0.05)
return
except Exception:
time.sleep(0.15)
raise_unable_to_load_jquery_exception(driver) | [code_tokens: tokenized form of the code] | Allows you to use Bootstrap Tours with SeleniumBase
http://bootstraptour.com/ | [docstring_tokens: tokenized form of the docstring] | python | train |
vals/umis | umis/umis.py | https://github.com/vals/umis/blob/e8adb8486d9e9134ab8a6cad9811a7e74dcc4a2c/umis/umis.py#L1360-L1392 | def subset_bamfile(sam, barcodes):
"""
Subset a SAM/BAM file, keeping only alignments from given
cellular barcodes
"""
from pysam import AlignmentFile
start_time = time.time()
sam_file = open_bamfile(sam)
out_file = AlignmentFile("-", "wh", template=sam_file)
track = sam_file.fetch(until_eof=True)
# peek at first alignment to determine the annotations
queryalignment = track.next()
annotations = detect_alignment_annotations(queryalignment)
track = itertools.chain([queryalignment], track)
re_string = construct_transformed_regex(annotations)
parser_re = re.compile(re_string)
barcodes = set(barcode.strip() for barcode in barcodes)
for count, aln in enumerate(track, start=1):
if count and not count % 1000000:
logger.info("Processed %d alignments." % count)
match = parser_re.match(aln.qname)
tags = aln.tags
if "cellular" in annotations:
cb = match.group('CB')
if cb in barcodes:
out_file.write(aln) | [code_tokens: tokenized form of the code] | Subset a SAM/BAM file, keeping only alignments from given
cellular barcodes | [docstring_tokens: tokenized form of the docstring] | python | train |
alexhayes/django-toolkit | django_toolkit/date_util.py | https://github.com/alexhayes/django-toolkit/blob/b64106392fad596defc915b8235fe6e1d0013b5b/django_toolkit/date_util.py#L33-L48 | def business_days(start, stop):
"""
Return business days between two inclusive dates - ignoring public holidays.
Note that start must be less than stop or else 0 is returned.
@param start: Start date
@param stop: Stop date
@return int
"""
dates=rrule.rruleset()
# Get dates between start/stop (which are inclusive)
dates.rrule(rrule.rrule(rrule.DAILY, dtstart=start, until=stop))
# Exclude Sat/Sun
dates.exrule(rrule.rrule(rrule.DAILY, byweekday=(rrule.SA, rrule.SU), dtstart=start))
return dates.count() | [code_tokens: tokenized form of the code] | Return business days between two inclusive dates - ignoring public holidays.
Note that start must be less than stop or else 0 is returned.
@param start: Start date
@param stop: Stop date
@return int | [docstring_tokens: tokenized form of the docstring] | python | train |
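
A small usage sketch for the `business_days` row above, assuming python-dateutil is installed and the function is importable from `django_toolkit.date_util` (assumed from the path column); the dates are arbitrary.

```python
from datetime import date
from django_toolkit.date_util import business_days  # import path assumed

# Monday 2019-01-07 through Friday 2019-01-11, inclusive: five weekdays.
print(business_days(date(2019, 1, 7), date(2019, 1, 11)))  # -> 5

# Weekend-only range: Saturday and Sunday are excluded, so the count is 0.
print(business_days(date(2019, 1, 12), date(2019, 1, 13)))  # -> 0
```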
UCBerkeleySETI/blimpy | blimpy/filterbank.py | https://github.com/UCBerkeleySETI/blimpy/blob/b8822d3e3e911944370d84371a91fa0c29e9772e/blimpy/filterbank.py#L747-L773 | def plot_kurtosis(self, f_start=None, f_stop=None, if_id=0, **kwargs):
""" Plot kurtosis
Args:
f_start (float): start frequency, in MHz
f_stop (float): stop frequency, in MHz
kwargs: keyword args to be passed to matplotlib imshow()
"""
ax = plt.gca()
plot_f, plot_data = self.grab_data(f_start, f_stop, if_id)
#Using accending frequency for all plots.
if self.header[b'foff'] < 0:
plot_data = plot_data[..., ::-1] # Reverse data
plot_f = plot_f[::-1]
try:
plot_kurtosis = scipy.stats.kurtosis(plot_data, axis=0, nan_policy='omit')
except:
plot_kurtosis = plot_data*0.0
plt.plot(plot_f, plot_kurtosis, **kwargs)
plt.ylabel("Kurtosis")
plt.xlabel("Frequency [MHz]")
plt.xlim(plot_f[0], plot_f[-1]) | [code_tokens: tokenized form of the code] | Plot kurtosis
Args:
f_start (float): start frequency, in MHz
f_stop (float): stop frequency, in MHz
kwargs: keyword args to be passed to matplotlib imshow() | [docstring_tokens: tokenized form of the docstring] | python | test |
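
A sketch of how the `plot_kurtosis` row above is typically reached, assuming blimpy and matplotlib are installed; the file name and frequency range are placeholders.

```python
import matplotlib.pyplot as plt
from blimpy import Filterbank  # the class whose method is shown in the row

fil = Filterbank("observation.fil")           # placeholder filterbank file
fil.plot_kurtosis(f_start=8000, f_stop=8400)  # kurtosis per frequency channel
plt.show()
```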
Fuyukai/ConfigMaster | configmaster/ConfigFile.py | https://github.com/Fuyukai/ConfigMaster/blob/8018aa415da55c84edaa8a49664f674758a14edd/configmaster/ConfigFile.py#L155-L164 | def reload(self):
"""
Automatically reloads the config file.
This is just an alias for self.load()."""
if not self.fd.closed: self.fd.close()
self.fd = open(self.fd.name, 'r')
self.load() | [code_tokens: tokenized form of the code] | Automatically reloads the config file.
This is just an alias for self.load(). | [docstring_tokens: tokenized form of the docstring] | python | train |
google/grr | grr/core/grr_response_core/lib/rdfvalues/structs.py | https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/core/grr_response_core/lib/rdfvalues/structs.py#L262-L275 | def _SerializeEntries(entries):
"""Serializes given triplets of python and wire values and a descriptor."""
output = []
for python_format, wire_format, type_descriptor in entries:
if wire_format is None or (python_format and
type_descriptor.IsDirty(python_format)):
wire_format = type_descriptor.ConvertToWireFormat(python_format)
precondition.AssertIterableType(wire_format, bytes)
output.extend(wire_format)
return b"".join(output) | [
"def",
"_SerializeEntries",
"(",
"entries",
")",
":",
"output",
"=",
"[",
"]",
"for",
"python_format",
",",
"wire_format",
",",
"type_descriptor",
"in",
"entries",
":",
"if",
"wire_format",
"is",
"None",
"or",
"(",
"python_format",
"and",
"type_descriptor",
".",
"IsDirty",
"(",
"python_format",
")",
")",
":",
"wire_format",
"=",
"type_descriptor",
".",
"ConvertToWireFormat",
"(",
"python_format",
")",
"precondition",
".",
"AssertIterableType",
"(",
"wire_format",
",",
"bytes",
")",
"output",
".",
"extend",
"(",
"wire_format",
")",
"return",
"b\"\"",
".",
"join",
"(",
"output",
")"
] | Serializes given triplets of python and wire values and a descriptor. | [
"Serializes",
"given",
"triplets",
"of",
"python",
"and",
"wire",
"values",
"and",
"a",
"descriptor",
"."
] | python | train |
pypa/pipenv | pipenv/vendor/distlib/_backport/shutil.py | https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/distlib/_backport/shutil.py#L130-L139 | def copy(src, dst):
"""Copy data and mode bits ("cp src dst").
The destination may be a directory.
"""
if os.path.isdir(dst):
dst = os.path.join(dst, os.path.basename(src))
copyfile(src, dst)
copymode(src, dst) | [code_tokens: tokenized form of the code] | Copy data and mode bits ("cp src dst").
The destination may be a directory. | [docstring_tokens: tokenized form of the docstring] | python | train |
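
The `copy` row above is a backport of the standard-library helper, so its behaviour can be demonstrated with `shutil.copy` itself; the paths below are temporary placeholders.

```python
import os
import tempfile
from shutil import copy  # same semantics as the backported function above

src = tempfile.NamedTemporaryFile(delete=False, suffix=".txt")
src.write(b"hello")
src.close()

dst_dir = tempfile.mkdtemp()
copy(src.name, dst_dir)        # destination may be a directory
print(os.listdir(dst_dir))     # the copied file appears in the directory
```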
streamlink/streamlink | src/streamlink/plugins/euronews.py | https://github.com/streamlink/streamlink/blob/c8ed1daff14ac03195870238b9b900c1109dd5c1/src/streamlink/plugins/euronews.py#L50-L60 | def _get_streams(self):
"""
Find the streams for euronews
:return:
"""
match = self._url_re.match(self.url).groupdict()
if match.get("path") == "live":
return self._get_live_streams(match)
else:
return self._get_vod_stream() | [code_tokens: tokenized form of the code] | Find the streams for euronews
:return: | [docstring_tokens: tokenized form of the docstring] | python | test |
bitesofcode/projexui | projexui/widgets/xchartwidget/xchartscene.py | https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xchartwidget/xchartscene.py#L488-L582 | def rebuildGrid( self ):
"""
Rebuilds the ruler data.
"""
vruler = self.verticalRuler()
hruler = self.horizontalRuler()
rect = self._buildData['grid_rect']
# process the vertical ruler
h_lines = []
h_alt = []
h_notches = []
vpstart = vruler.padStart()
vnotches = vruler.notches()
vpend = vruler.padEnd()
vcount = len(vnotches) + vpstart + vpend
deltay = rect.height() / max((vcount - 1), 1)
y = rect.bottom()
alt = False
for i in range(vcount):
h_lines.append(QLineF(rect.left(), y, rect.right(), y))
# store alternate color
if ( alt ):
alt_rect = QRectF(rect.left(), y, rect.width(), deltay)
h_alt.append(alt_rect)
# store notch information
nidx = i - vpstart
if ( 0 <= nidx and nidx < len(vnotches) ):
notch = vnotches[nidx]
notch_rect = QRectF(0, y - 3, rect.left() - 3, deltay)
h_notches.append((notch_rect, notch))
y -= deltay
alt = not alt
self._buildData['grid_h_lines'] = h_lines
self._buildData['grid_h_alt'] = h_alt
self._buildData['grid_h_notches'] = h_notches
# process the horizontal ruler
v_lines = []
v_alt = []
v_notches = []
hpstart = hruler.padStart()
hnotches = hruler.notches()
hpend = hruler.padEnd()
hcount = len(hnotches) + hpstart + hpend
deltax = rect.width() / max((hcount - 1), 1)
x = rect.left()
alt = False
for i in range(hcount):
v_lines.append(QLineF(x, rect.top(), x, rect.bottom()))
# store alternate info
if ( alt ):
alt_rect = QRectF(x - deltax, rect.top(), deltax, rect.height())
v_alt.append(alt_rect)
# store notch information
nidx = i - hpstart
if ( 0 <= nidx and nidx < len(hnotches) ):
notch = hnotches[nidx]
notch_rect = QRectF(x - (deltax / 2.0),
rect.bottom() + 3,
deltax,
13)
v_notches.append((notch_rect, notch))
x += deltax
alt = not alt
self._buildData['grid_v_lines'] = v_lines
self._buildData['grid_v_alt'] = v_alt
self._buildData['grid_v_notches'] = v_notches
# draw the axis lines
axis_lines = []
axis_lines.append(QLineF(rect.left(),
rect.top(),
rect.left(),
rect.bottom()))
axis_lines.append(QLineF(rect.left(),
rect.bottom(),
rect.right(),
rect.bottom()))
self._buildData['axis_lines'] = axis_lines | [code_tokens: tokenized form of the code] | Rebuilds the ruler data. | [docstring_tokens: tokenized form of the docstring] | python | train |
syrusakbary/promise | promise/dataloader.py | https://github.com/syrusakbary/promise/blob/d80d791fcc86c89713dac57b55e56c0a9024f153/promise/dataloader.py#L80-L109 | def load(self, key=None):
# type: (Hashable) -> Promise
"""
Loads a key, returning a `Promise` for the value represented by that key.
"""
if key is None:
raise TypeError(
(
"The loader.load() function must be called with a value,"
+ "but got: {}."
).format(key)
)
cache_key = self.get_cache_key(key)
# If caching and there is a cache-hit, return cached Promise.
if self.cache:
cached_promise = self._promise_cache.get(cache_key)
if cached_promise:
return cached_promise
# Otherwise, produce a new Promise for this value.
promise = Promise(partial(self.do_resolve_reject, key)) # type: ignore
# If caching, cache this promise.
if self.cache:
self._promise_cache[cache_key] = promise
return promise | [code_tokens: tokenized form of the code] | Loads a key, returning a `Promise` for the value represented by that key. | [docstring_tokens: tokenized form of the docstring] | python | train |
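
A minimal sketch of the dataloader pattern behind the `load` row above, assuming the promise package is installed; the loader class, batch function, and keys are stand-ins.

```python
from promise import Promise
from promise.dataloader import DataLoader

class FakeLoader(DataLoader):
    def batch_load_fn(self, keys):
        # Stand-in batch function: resolve every key to a fake record.
        return Promise.resolve([{"id": key} for key in keys])

loader = FakeLoader()
record = loader.load(1).get()   # .get() blocks until the promise resolves
cached = loader.load(1).get()   # a second load of the same key hits the cache
print(record, cached)
```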
openstax/cnx-archive | cnxarchive/database.py | https://github.com/openstax/cnx-archive/blob/d31d34aa8bbc8a9fde6cd4227a0df92726e8daf4/cnxarchive/database.py#L449-L480 | def republish_module_trigger(plpy, td):
"""Trigger called from postgres database when republishing a module.
When a module is republished, the versions of the collections that it is
part of will need to be updated (a minor update).
e.g. there is a collection c1 v2.1, which contains module m1 v3
m1 is updated, we have a new row in the modules table with m1 v4
this trigger will create increment the minor version of c1, so we'll have
c1 v2.2
we need to create a collection tree for c1 v2.2 which is exactly the same
as c1 v2.1, but with m1 v4 instead of m1 v3, and c1 v2.2 instead of c1 v2.2
"""
# Is this an insert from legacy? Legacy always supplies the version.
is_legacy_publication = td['new']['version'] is not None
if not is_legacy_publication:
# Bail out, because this trigger only applies to legacy publications.
return "OK"
plpy.log('Trigger fired on %s' % (td['new']['moduleid'],))
modified = republish_module(td, plpy)
plpy.log('modified: {}'.format(modified))
plpy.log('insert values:\n{}\n'.format('\n'.join([
'{}: {}'.format(key, value)
for key, value in td['new'].items()])))
return modified | [code_tokens: tokenized form of the code] | Trigger called from postgres database when republishing a module.
When a module is republished, the versions of the collections that it is
part of will need to be updated (a minor update).
e.g. there is a collection c1 v2.1, which contains module m1 v3
m1 is updated, we have a new row in the modules table with m1 v4
this trigger will create increment the minor version of c1, so we'll have
c1 v2.2
we need to create a collection tree for c1 v2.2 which is exactly the same
as c1 v2.1, but with m1 v4 instead of m1 v3, and c1 v2.2 instead of c1 v2.2 | [docstring_tokens: tokenized form of the docstring] | python | train |
Hackerfleet/hfos | hfos/component.py | https://github.com/Hackerfleet/hfos/blob/b6df14eacaffb6be5c844108873ff8763ec7f0c9/hfos/component.py#L250-L253 | def unregister(self):
"""Removes the unique name from the systems unique name list"""
self.names.remove(self.uniquename)
super(ConfigurableMeta, self).unregister() | [code_tokens: tokenized form of the code] | Removes the unique name from the systems unique name list | [docstring_tokens: tokenized form of the docstring] | python | train |
bykof/billomapy | billomapy/billomapy.py | https://github.com/bykof/billomapy/blob/a28ba69fd37654fa145d0411d52c200e7f8984ab/billomapy/billomapy.py#L139-L152 | def _handle_failed_response(self, response):
"""
Handle the failed response and check for rate limit exceeded
If rate limit exceeded it runs the rate_limit_exceeded function which you should overwrite
:param response: requests.Response
:type response: requests.Reponse
:return: None
:rtype: None
"""
if response.status_code == requests.codes.too_many_requests:
return self.rate_limit_exceeded(response)
else:
response.raise_for_status() | [code_tokens: tokenized form of the code] | Handle the failed response and check for rate limit exceeded
If rate limit exceeded it runs the rate_limit_exceeded function which you should overwrite
:param response: requests.Response
:type response: requests.Reponse
:return: None
:rtype: None | [docstring_tokens: tokenized form of the docstring] | python | train |
codelv/enaml-native | src/enamlnative/android/android_radio_group.py | https://github.com/codelv/enaml-native/blob/c33986e9eda468c508806e0a3e73c771401e5718/src/enamlnative/android/android_radio_group.py#L69-L84 | def on_checked_changed(self, group, checked_id):
""" Set the checked property based on the checked state
of all the children
"""
d = self.declaration
if checked_id < 0:
with self.widget.clearCheck.suppressed():
d.checked = None
return
else:
for c in self.children():
if c.widget.getId() == checked_id:
with self.widget.check.suppressed():
d.checked = c.declaration
return | [code_tokens: tokenized form of the code] | Set the checked property based on the checked state
of all the children | [docstring_tokens: tokenized form of the docstring] | python | train |
NoviceLive/intellicoder | intellicoder/transformers.py | https://github.com/NoviceLive/intellicoder/blob/6cac5ebfce65c370dbebe47756a1789b120ef982/intellicoder/transformers.py#L80-L91 | def transform_sources(self, sources, with_string=False):
"""Get the defintions of needed strings and functions
after replacement.
"""
modules = {}
updater = partial(
self.replace_source, modules=modules, prefix='string_')
for filename in sources:
updated = update_func_body(sources[filename], updater)
sources[filename] = EXTERN_AND_SEG + updated
logging.debug('modules: %s', modules)
return sources, self.build_funcs(modules) | [code_tokens: tokenized form of the code] | Get the defintions of needed strings and functions
after replacement. | [docstring_tokens: tokenized form of the docstring] | python | train |
praekelt/django-profile | profile/utils.py | https://github.com/praekelt/django-profile/blob/52a3d3f7e776742c5333f8fab67b5af3cdbc878b/profile/utils.py#L4-L16 | def get_profile_model():
"""
Returns configured user profile model or None if not found
"""
auth_profile_module = getattr(settings, 'AUTH_PROFILE_MODULE', None)
profile_model = None
if auth_profile_module:
# get the profile model. TODO: super flacky, refactor
app_label, model = auth_profile_module.split('.')
profile_model = getattr(__import__("%s.models" % app_label, \
globals(), locals(), [model, ], -1), model, None)
return profile_model | [code_tokens: tokenized form of the code] | Returns configured user profile model or None if not found | [docstring_tokens: tokenized form of the docstring] | python | train |
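
A sketch of the setting the `get_profile_model` row above reads, assuming a Django project; the app and model names are invented for the example.

```python
# settings.py of a hypothetical project
AUTH_PROFILE_MODULE = 'accounts.UserProfile'

# anywhere after Django settings are configured
from profile.utils import get_profile_model

ProfileModel = get_profile_model()   # accounts.models.UserProfile, or None if unset
```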
mbj4668/pyang | pyang/translators/dsdl.py | https://github.com/mbj4668/pyang/blob/f2a5cc3142162e5b9ee4e18d154568d939ff63dd/pyang/translators/dsdl.py#L548-L555 | def qname(self, stmt):
"""Return (prefixed) node name of `stmt`.
The result is prefixed with the local prefix unless we are
inside a global grouping.
"""
if self.gg_level: return stmt.arg
return self.prefix_stack[-1] + ":" + stmt.arg | [
"def",
"qname",
"(",
"self",
",",
"stmt",
")",
":",
"if",
"self",
".",
"gg_level",
":",
"return",
"stmt",
".",
"arg",
"return",
"self",
".",
"prefix_stack",
"[",
"-",
"1",
"]",
"+",
"\":\"",
"+",
"stmt",
".",
"arg"
] | Return (prefixed) node name of `stmt`.
The result is prefixed with the local prefix unless we are
inside a global grouping. | [
"Return",
"(",
"prefixed",
")",
"node",
"name",
"of",
"stmt",
"."
] | python | train |
openstack/stacktach-stackdistiller | stackdistiller/distiller.py | https://github.com/openstack/stacktach-stackdistiller/blob/38cc32994cc5411c3f7c76f31ef3ea8b3245e871/stackdistiller/distiller.py#L259-L271 | def _extract_when(body):
"""Extract the generated datetime from the notification."""
# NOTE: I am keeping the logic the same as it was in openstack
# code, However, *ALL* notifications should have a 'timestamp'
# field, it's part of the notification envelope spec. If this was
# put here because some openstack project is generating notifications
# without a timestamp, then that needs to be filed as a bug with the
# offending project (mdragon)
when = body.get('timestamp', body.get('_context_timestamp'))
if when:
return Datatype.datetime.convert(when)
return utcnow() | [code_tokens: tokenized form of the code] | Extract the generated datetime from the notification. | [docstring_tokens: tokenized form of the docstring] | python | train |
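
An illustration of the notification body the `_extract_when` row above expects; the timestamp value is arbitrary and the import path is assumed from the row's path column.

```python
from stackdistiller.distiller import _extract_when  # assumed import path

body = {"timestamp": "2019-06-01T12:00:00.000000"}
when = _extract_when(body)    # parsed into a datetime via Datatype.datetime

fallback = _extract_when({})  # no timestamp fields: falls back to utcnow()
```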
has2k1/plotnine | plotnine/facets/facet.py | https://github.com/has2k1/plotnine/blob/566e579af705367e584fb27a74e6c5199624ca89/plotnine/facets/facet.py#L352-L364 | def make_axes(self, figure, layout, coordinates):
"""
Create and return Matplotlib axes
"""
axs = self._create_subplots(figure, layout)
# Used for labelling the x and y axes, the first and
# last axes according to how MPL creates them.
self.first_ax = figure.axes[0]
self.last_ax = figure.axes[-1]
self.figure = figure
self.axs = axs
return axs | [code_tokens: tokenized form of the code] | Create and return Matplotlib axes | [docstring_tokens: tokenized form of the docstring] | python | train |
quandyfactory/dicttoxml | dicttoxml.py | https://github.com/quandyfactory/dicttoxml/blob/2016fe9817ad03b26aa5f1a475f5b79ad6757b96/dicttoxml.py#L257-L321 | def convert_list(items, ids, parent, attr_type, item_func, cdata):
"""Converts a list into an XML string."""
LOG.info('Inside convert_list()')
output = []
addline = output.append
item_name = item_func(parent)
if ids:
this_id = get_unique_id(parent)
for i, item in enumerate(items):
LOG.info('Looping inside convert_list(): item="%s", item_name="%s", type="%s"' % (
unicode_me(item), item_name, type(item).__name__)
)
attr = {} if not ids else { 'id': '%s_%s' % (this_id, i+1) }
if isinstance(item, numbers.Number) or type(item) in (str, unicode):
addline(convert_kv(item_name, item, attr_type, attr, cdata))
elif hasattr(item, 'isoformat'): # datetime
addline(convert_kv(item_name, item.isoformat(), attr_type, attr, cdata))
elif type(item) == bool:
addline(convert_bool(item_name, item, attr_type, attr, cdata))
elif isinstance(item, dict):
if not attr_type:
addline('<%s>%s</%s>' % (
item_name,
convert_dict(item, ids, parent, attr_type, item_func, cdata),
item_name,
)
)
else:
addline('<%s type="dict">%s</%s>' % (
item_name,
convert_dict(item, ids, parent, attr_type, item_func, cdata),
item_name,
)
)
elif isinstance(item, collections.Iterable):
if not attr_type:
addline('<%s %s>%s</%s>' % (
item_name, make_attrstring(attr),
convert_list(item, ids, item_name, attr_type, item_func, cdata),
item_name,
)
)
else:
addline('<%s type="list"%s>%s</%s>' % (
item_name, make_attrstring(attr),
convert_list(item, ids, item_name, attr_type, item_func, cdata),
item_name,
)
)
elif item is None:
addline(convert_none(item_name, None, attr_type, attr, cdata))
else:
raise TypeError('Unsupported data type: %s (%s)' % (
item, type(item).__name__)
)
return ''.join(output) | [code_tokens: tokenized form of the code] | Converts a list into an XML string. | [docstring_tokens: tokenized form of the docstring] | python | train |
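
The `convert_list` row above is an internal helper of dicttoxml; a short sketch of the public entry point that exercises it, where list values become repeated item elements:

```python
import dicttoxml

data = {"name": "example", "scores": [1, 2, 3]}
xml = dicttoxml.dicttoxml(data, attr_type=False)
print(xml.decode())
# <?xml version="1.0" encoding="UTF-8" ?><root><name>example</name>
# <scores><item>1</item><item>2</item><item>3</item></scores></root>
```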
saltstack/salt | salt/engines/libvirt_events.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/engines/libvirt_events.py#L156-L175 | def _get_libvirt_enum_string(prefix, value):
'''
Convert the libvirt enum integer value into a human readable string.
:param prefix: start of the libvirt attribute to look for.
:param value: integer to convert to string
'''
attributes = [attr[len(prefix):] for attr in libvirt.__dict__ if attr.startswith(prefix)]
# Filter out the values starting with a common base as they match another enum
prefixes = [_compute_subprefix(p) for p in attributes]
counts = {p: prefixes.count(p) for p in prefixes}
sub_prefixes = [p for p, count in counts.items() if count > 1 or (p.endswith('_') and p[:-1] in prefixes)]
filtered = [attr for attr in attributes if _compute_subprefix(attr) not in sub_prefixes]
for candidate in filtered:
if value == getattr(libvirt, ''.join((prefix, candidate))):
name = candidate.lower().replace('_', ' ')
return name
return 'unknown' | [code_tokens: tokenized form of the code] | Convert the libvirt enum integer value into a human readable string.
:param prefix: start of the libvirt attribute to look for.
:param value: integer to convert to string | [docstring_tokens: tokenized form of the docstring] | python | train |
Yipit/elasticfun | elasticfun/queryset.py | https://github.com/Yipit/elasticfun/blob/dc85b93d49818d09c26fb3a5015fdb25535bd2d7/elasticfun/queryset.py#L32-L50 | def search(self, query, index='default', **kwargs):
"""
kwargs supported are the parameters listed at:
http://www.elasticsearch.org/guide/reference/api/search/request-body/
Namely: timeout, from, size and search_type.
IMPORTANT: prepend ALL keys with "es_" as pyelasticsearch requires this
"""
# Looking up the index
if index not in self.conf.indexes:
self.raise_improperly_configured(index=index)
# Calling the backend search method
esurl = self.conf.connections[index]['URL']
esinst = pyelasticsearch.ElasticSearch(esurl)
query = isinstance(query, Query) and str(query) or query
self.raw_results = esinst.search(query, index=index, **kwargs)
return self | [code_tokens: tokenized form of the code] | kwargs supported are the parameters listed at:
http://www.elasticsearch.org/guide/reference/api/search/request-body/
Namely: timeout, from, size and search_type.
IMPORTANT: prepend ALL keys with "es_" as pyelasticsearch requires this | [docstring_tokens: tokenized form of the docstring] | python | train |
jciskey/pygraph | pygraph/functions/spanning_tree.py | https://github.com/jciskey/pygraph/blob/037bb2f32503fecb60d62921f9766d54109f15e2/pygraph/functions/spanning_tree.py#L65-L101 | def kruskal_mst(graph):
"""Implements Kruskal's Algorithm for finding minimum spanning trees.
Assumes a non-empty, connected graph.
"""
edges_accepted = 0
ds = DisjointSet()
pq = PriorityQueue()
accepted_edges = []
label_lookup = {}
nodes = graph.get_all_node_ids()
num_vertices = len(nodes)
for n in nodes:
label = ds.add_set()
label_lookup[n] = label
edges = graph.get_all_edge_objects()
for e in edges:
pq.put(e['id'], e['cost'])
while edges_accepted < (num_vertices - 1):
edge_id = pq.get()
edge = graph.get_edge(edge_id)
node_a, node_b = edge['vertices']
label_a = label_lookup[node_a]
label_b = label_lookup[node_b]
a_set = ds.find(label_a)
b_set = ds.find(label_b)
if a_set != b_set:
edges_accepted += 1
accepted_edges.append(edge_id)
ds.union(a_set, b_set)
return accepted_edges | [code_tokens: tokenized form of the code] | Implements Kruskal's Algorithm for finding minimum spanning trees.
Assumes a non-empty, connected graph. | [docstring_tokens: tokenized form of the docstring] | python | train |
veeti/decent | decent/validators.py | https://github.com/veeti/decent/blob/07b11536953b9cf4402c65f241706ab717b90bff/decent/validators.py#L50-L61 | def Msg(validator, message):
"""
Wraps the given validator callable, replacing any error messages raised.
"""
@wraps(Msg)
def built(value):
try:
return validator(value)
except Error as e:
e.message = message
raise e
return built | [code_tokens: tokenized form of the code] | Wraps the given validator callable, replacing any error messages raised. | [docstring_tokens: tokenized form of the docstring] | python | train |
housecanary/hc-api-python | housecanary/excel/__init__.py | https://github.com/housecanary/hc-api-python/blob/2bb9e2208b34e8617575de45934357ee33b8531c/housecanary/excel/__init__.py#L18-L28 | def export_analytics_data_to_excel(data, output_file_name, result_info_key, identifier_keys):
"""Creates an Excel file containing data returned by the Analytics API
Args:
data: Analytics API data as a list of dicts
output_file_name: File name for output Excel file (use .xlsx extension).
"""
workbook = create_excel_workbook(data, result_info_key, identifier_keys)
workbook.save(output_file_name)
print('Saved Excel file to {}'.format(output_file_name)) | [
"def",
"export_analytics_data_to_excel",
"(",
"data",
",",
"output_file_name",
",",
"result_info_key",
",",
"identifier_keys",
")",
":",
"workbook",
"=",
"create_excel_workbook",
"(",
"data",
",",
"result_info_key",
",",
"identifier_keys",
")",
"workbook",
".",
"save",
"(",
"output_file_name",
")",
"print",
"(",
"'Saved Excel file to {}'",
".",
"format",
"(",
"output_file_name",
")",
")"
] | Creates an Excel file containing data returned by the Analytics API
Args:
data: Analytics API data as a list of dicts
output_file_name: File name for output Excel file (use .xlsx extension). | [
"Creates",
"an",
"Excel",
"file",
"containing",
"data",
"returned",
"by",
"the",
"Analytics",
"API"
] | python | train |
frictionlessdata/goodtables-py | goodtables/inspector.py | https://github.com/frictionlessdata/goodtables-py/blob/3e7d6891d2f4e342dfafbe0e951e204ccc252a44/goodtables/inspector.py#L330-L340 | def _clean_empty(d):
"""Remove None values from a dict."""
if not isinstance(d, (dict, list)):
return d
if isinstance(d, list):
return [v for v in (_clean_empty(v) for v in d) if v is not None]
return {
k: v for k, v in
((k, _clean_empty(v)) for k, v in d.items())
if v is not None
} | [
"def",
"_clean_empty",
"(",
"d",
")",
":",
"if",
"not",
"isinstance",
"(",
"d",
",",
"(",
"dict",
",",
"list",
")",
")",
":",
"return",
"d",
"if",
"isinstance",
"(",
"d",
",",
"list",
")",
":",
"return",
"[",
"v",
"for",
"v",
"in",
"(",
"_clean_empty",
"(",
"v",
")",
"for",
"v",
"in",
"d",
")",
"if",
"v",
"is",
"not",
"None",
"]",
"return",
"{",
"k",
":",
"v",
"for",
"k",
",",
"v",
"in",
"(",
"(",
"k",
",",
"_clean_empty",
"(",
"v",
")",
")",
"for",
"k",
",",
"v",
"in",
"d",
".",
"items",
"(",
")",
")",
"if",
"v",
"is",
"not",
"None",
"}"
] | Remove None values from a dict. | [
"Remove",
"None",
"values",
"from",
"a",
"dict",
"."
] | python | train |
chaoss/grimoirelab-manuscripts | manuscripts/report.py | https://github.com/chaoss/grimoirelab-manuscripts/blob/94a3ad4f11bfbcd6c5190e01cb5d3e47a5187cd9/manuscripts/report.py#L753-L767 | def create_data_figs(self):
"""
Generate the data and figs files for the report
:return:
"""
logger.info("Generating the report data and figs from %s to %s",
self.start, self.end)
for section in self.sections():
logger.info("Generating %s", section)
self.sections()[section]()
logger.info("Data and figs done") | [
"def",
"create_data_figs",
"(",
"self",
")",
":",
"logger",
".",
"info",
"(",
"\"Generating the report data and figs from %s to %s\"",
",",
"self",
".",
"start",
",",
"self",
".",
"end",
")",
"for",
"section",
"in",
"self",
".",
"sections",
"(",
")",
":",
"logger",
".",
"info",
"(",
"\"Generating %s\"",
",",
"section",
")",
"self",
".",
"sections",
"(",
")",
"[",
"section",
"]",
"(",
")",
"logger",
".",
"info",
"(",
"\"Data and figs done\"",
")"
] | Generate the data and figs files for the report
:return: | [
"Generate",
"the",
"data",
"and",
"figs",
"files",
"for",
"the",
"report"
] | python | train |
google/grr | grr/server/grr_response_server/gui/api_auth_manager.py | https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/gui/api_auth_manager.py#L71-L83 | def _CreateRouter(self, router_cls, params=None):
"""Creates a router with a given name and params."""
if not router_cls.params_type and params:
raise ApiCallRouterDoesNotExpectParameters(
"%s is not configurable" % router_cls)
rdf_params = None
if router_cls.params_type:
rdf_params = router_cls.params_type()
if params:
rdf_params.FromDict(params)
return router_cls(params=rdf_params) | [
"def",
"_CreateRouter",
"(",
"self",
",",
"router_cls",
",",
"params",
"=",
"None",
")",
":",
"if",
"not",
"router_cls",
".",
"params_type",
"and",
"params",
":",
"raise",
"ApiCallRouterDoesNotExpectParameters",
"(",
"\"%s is not configurable\"",
"%",
"router_cls",
")",
"rdf_params",
"=",
"None",
"if",
"router_cls",
".",
"params_type",
":",
"rdf_params",
"=",
"router_cls",
".",
"params_type",
"(",
")",
"if",
"params",
":",
"rdf_params",
".",
"FromDict",
"(",
"params",
")",
"return",
"router_cls",
"(",
"params",
"=",
"rdf_params",
")"
] | Creates a router with a given name and params. | [
"Creates",
"a",
"router",
"with",
"a",
"given",
"name",
"and",
"params",
"."
] | python | train |
shoebot/shoebot | lib/cornu/__init__.py | https://github.com/shoebot/shoebot/blob/d554c1765c1899fa25727c9fc6805d221585562b/lib/cornu/__init__.py#L272-L288 | def draw_cornu_flat(x0, y0, t0, t1, s0, c0, flip, cs, ss, cmd):
""" Raph Levien's code draws fast LINETO segments.
"""
for j in range(0, 100):
t = j * .01
s, c = eval_cornu(t0 + t * (t1 - t0))
s *= flip
s -= s0
c -= c0
#print '%', c, s
x = c * cs - s * ss
y = s * cs + c * ss
print_pt(x0 + x, y0 + y, cmd)
cmd = 'lineto'
return cmd | [
"def",
"draw_cornu_flat",
"(",
"x0",
",",
"y0",
",",
"t0",
",",
"t1",
",",
"s0",
",",
"c0",
",",
"flip",
",",
"cs",
",",
"ss",
",",
"cmd",
")",
":",
"for",
"j",
"in",
"range",
"(",
"0",
",",
"100",
")",
":",
"t",
"=",
"j",
"*",
".01",
"s",
",",
"c",
"=",
"eval_cornu",
"(",
"t0",
"+",
"t",
"*",
"(",
"t1",
"-",
"t0",
")",
")",
"s",
"*=",
"flip",
"s",
"-=",
"s0",
"c",
"-=",
"c0",
"#print '%', c, s",
"x",
"=",
"c",
"*",
"cs",
"-",
"s",
"*",
"ss",
"y",
"=",
"s",
"*",
"cs",
"+",
"c",
"*",
"ss",
"print_pt",
"(",
"x0",
"+",
"x",
",",
"y0",
"+",
"y",
",",
"cmd",
")",
"cmd",
"=",
"'lineto'",
"return",
"cmd"
] | Raph Levien's code draws fast LINETO segments. | [
"Raph",
"Levien",
"s",
"code",
"draws",
"fast",
"LINETO",
"segments",
"."
] | python | valid |
mikedh/trimesh | trimesh/graph.py | https://github.com/mikedh/trimesh/blob/25e059bf6d4caa74f62ffd58ce4f61a90ee4e518/trimesh/graph.py#L136-L186 | def face_adjacency_radius(mesh):
"""
Compute an approximate radius between adjacent faces.
Parameters
--------------
mesh : trimesh.Trimesh
Returns
-------------
radii : (len(self.face_adjacency),) float
Approximate radius between faces
Parallel faces will have a value of np.inf
span : (len(self.face_adjacency),) float
Perpendicular projection distance of two
unshared vertices onto the shared edge
"""
# solve for the radius of the adjacent faces
# distance
# R = ------------------
# 2 * sin(theta / 2)
nonzero = mesh.face_adjacency_angles > np.radians(.01)
denominator = np.abs(
2.0 * np.sin(mesh.face_adjacency_angles[nonzero] / 1.0))
# consider the distance between the non- shared vertices of the
# face adjacency pair as the key distance
point_pairs = mesh.vertices[mesh.face_adjacency_unshared]
vectors = np.diff(point_pairs,
axis=1).reshape((-1, 3))
# the vertex indices of the shared edge for the adjacency pair
edges = mesh.face_adjacency_edges
# unit vector along the shared edge
edges_vec = util.unitize(np.diff(mesh.vertices[edges],
axis=1).reshape((-1, 3)))
# the vector of the perpendicular projection to the shared edge
perp = np.subtract(
vectors, (util.diagonal_dot(
vectors, edges_vec).reshape(
(-1, 1)) * edges_vec))
# the length of the perpendicular projection
span = np.linalg.norm(perp, axis=1)
# complete the values for non- infinite radii
radii = np.ones(len(mesh.face_adjacency)) * np.inf
radii[nonzero] = span[nonzero] / denominator
return radii, span | [
"def",
"face_adjacency_radius",
"(",
"mesh",
")",
":",
"# solve for the radius of the adjacent faces",
"# distance",
"# R = ------------------",
"# 2 * sin(theta / 2)",
"nonzero",
"=",
"mesh",
".",
"face_adjacency_angles",
">",
"np",
".",
"radians",
"(",
".01",
")",
"denominator",
"=",
"np",
".",
"abs",
"(",
"2.0",
"*",
"np",
".",
"sin",
"(",
"mesh",
".",
"face_adjacency_angles",
"[",
"nonzero",
"]",
"/",
"1.0",
")",
")",
"# consider the distance between the non- shared vertices of the",
"# face adjacency pair as the key distance",
"point_pairs",
"=",
"mesh",
".",
"vertices",
"[",
"mesh",
".",
"face_adjacency_unshared",
"]",
"vectors",
"=",
"np",
".",
"diff",
"(",
"point_pairs",
",",
"axis",
"=",
"1",
")",
".",
"reshape",
"(",
"(",
"-",
"1",
",",
"3",
")",
")",
"# the vertex indices of the shared edge for the adjacency pairx",
"edges",
"=",
"mesh",
".",
"face_adjacency_edges",
"# unit vector along shared the edge",
"edges_vec",
"=",
"util",
".",
"unitize",
"(",
"np",
".",
"diff",
"(",
"mesh",
".",
"vertices",
"[",
"edges",
"]",
",",
"axis",
"=",
"1",
")",
".",
"reshape",
"(",
"(",
"-",
"1",
",",
"3",
")",
")",
")",
"# the vector of the perpendicular projection to the shared edge",
"perp",
"=",
"np",
".",
"subtract",
"(",
"vectors",
",",
"(",
"util",
".",
"diagonal_dot",
"(",
"vectors",
",",
"edges_vec",
")",
".",
"reshape",
"(",
"(",
"-",
"1",
",",
"1",
")",
")",
"*",
"edges_vec",
")",
")",
"# the length of the perpendicular projection",
"span",
"=",
"np",
".",
"linalg",
".",
"norm",
"(",
"perp",
",",
"axis",
"=",
"1",
")",
"# complete the values for non- infinite radii",
"radii",
"=",
"np",
".",
"ones",
"(",
"len",
"(",
"mesh",
".",
"face_adjacency",
")",
")",
"*",
"np",
".",
"inf",
"radii",
"[",
"nonzero",
"]",
"=",
"span",
"[",
"nonzero",
"]",
"/",
"denominator",
"return",
"radii",
",",
"span"
] | Compute an approximate radius between adjacent faces.
Parameters
--------------
mesh : trimesh.Trimesh
Returns
-------------
radii : (len(self.face_adjacency),) float
Approximate radius between faces
Parallel faces will have a value of np.inf
span : (len(self.face_adjacency),) float
Perpendicular projection distance of two
unshared vertices onto the shared edge | [
"Compute",
"an",
"approximate",
"radius",
"between",
"adjacent",
"faces",
"."
] | python | train |
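As a quick numeric check of the chord relation quoted in the comments above (distance = 2 * R * sin(theta / 2)), with made-up values for the angle and the vertex distance:
import math

theta = math.radians(60.0)  # assumed angle between the adjacent faces
distance = 1.0              # assumed distance between the unshared vertices
radius = distance / (2.0 * math.sin(theta / 2.0))
print(radius)               # 1.0: a unit chord spanning 60 degrees lies on a unit circle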
abw333/dominoes | dominoes/search.py | https://github.com/abw333/dominoes/blob/ea9f532c9b834117a5c07d214711515872f7537e/dominoes/search.py#L5-L38 | def make_moves(game, player=dominoes.players.identity):
'''
For each of a Game object's valid moves, yields
a tuple containing the move and the Game object
obtained by playing the move on the original Game
object. The original Game object will be modified.
:param Game game: the game to make moves on
:param callable player: a player to call on the
game before making any
moves, to determine the
order in which they get
made. The identity
player is the default.
'''
# game is over - do not yield anything
if game.result is not None:
return
# determine the order in which to make moves
player(game)
# copy the original game before making all
# but the last move
for move in game.valid_moves[:-1]:
new_game = copy.deepcopy(game)
new_game.make_move(*move)
yield move, new_game
# don't copy the original game before making
# the last move
move = game.valid_moves[-1]
game.make_move(*move)
yield move, game | [
"def",
"make_moves",
"(",
"game",
",",
"player",
"=",
"dominoes",
".",
"players",
".",
"identity",
")",
":",
"# game is over - do not yield anything",
"if",
"game",
".",
"result",
"is",
"not",
"None",
":",
"return",
"# determine the order in which to make moves",
"player",
"(",
"game",
")",
"# copy the original game before making all",
"# but the last move",
"for",
"move",
"in",
"game",
".",
"valid_moves",
"[",
":",
"-",
"1",
"]",
":",
"new_game",
"=",
"copy",
".",
"deepcopy",
"(",
"game",
")",
"new_game",
".",
"make_move",
"(",
"*",
"move",
")",
"yield",
"move",
",",
"new_game",
"# don't copy the original game before making",
"# the last move",
"move",
"=",
"game",
".",
"valid_moves",
"[",
"-",
"1",
"]",
"game",
".",
"make_move",
"(",
"*",
"move",
")",
"yield",
"move",
",",
"game"
] | For each of a Game object's valid moves, yields
a tuple containing the move and the Game object
obtained by playing the move on the original Game
object. The original Game object will be modified.
:param Game game: the game to make moves on
:param callable player: a player to call on the
game before making any
moves, to determine the
order in which they get
made. The identity
player is the default. | [
"For",
"each",
"of",
"a",
"Game",
"object",
"s",
"valid",
"moves",
"yields",
"a",
"tuple",
"containing",
"the",
"move",
"and",
"the",
"Game",
"object",
"obtained",
"by",
"playing",
"the",
"move",
"on",
"the",
"original",
"Game",
"object",
".",
"The",
"original",
"Game",
"object",
"will",
"be",
"modified",
"."
] | python | train |
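The copy-all-but-the-last-move pattern above is independent of dominoes; a small standalone sketch with a toy mutable state (the helper name and the list-based state are illustrative assumptions):
import copy

def branch(state, moves, apply_move):
    # yield (move, resulting_state); only the final move mutates the original state
    if not moves:
        return
    for move in moves[:-1]:
        child = copy.deepcopy(state)
        apply_move(child, move)
        yield move, child
    apply_move(state, moves[-1])
    yield moves[-1], state

# toy usage: the state is just the list of moves played so far
for move, state in branch([], ["a", "b", "c"], lambda s, m: s.append(m)):
    print(move, state)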
thiagopbueno/pyrddl | pyrddl/parser.py | https://github.com/thiagopbueno/pyrddl/blob/3bcfa850b1a7532c7744358f3c6b9e0f8ab978c9/pyrddl/parser.py#L273-L276 | def p_domain_block(self, p):
'''domain_block : DOMAIN IDENT LCURLY req_section domain_list RCURLY'''
d = Domain(p[2], p[4], p[5])
p[0] = ('domain', d) | [
"def",
"p_domain_block",
"(",
"self",
",",
"p",
")",
":",
"d",
"=",
"Domain",
"(",
"p",
"[",
"2",
"]",
",",
"p",
"[",
"4",
"]",
",",
"p",
"[",
"5",
"]",
")",
"p",
"[",
"0",
"]",
"=",
"(",
"'domain'",
",",
"d",
")"
] | domain_block : DOMAIN IDENT LCURLY req_section domain_list RCURLY | [
"domain_block",
":",
"DOMAIN",
"IDENT",
"LCURLY",
"req_section",
"domain_list",
"RCURLY"
] | python | train |
sp4ke/howto | howto/howto.py | https://github.com/sp4ke/howto/blob/2588144a587be5138d45ca9db0ce6ab125fa7d0c/howto/howto.py#L78-L84 | def cli_run():
"""docstring for argparse"""
parser = argparse.ArgumentParser(description='Stupidly simple code answers from StackOverflow')
parser.add_argument('query', help="What's the problem ?", type=str, nargs='+')
parser.add_argument('-t','--tags', help='semicolon separated tags -> python;lambda')
args = parser.parse_args()
main(args) | [
"def",
"cli_run",
"(",
")",
":",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
"description",
"=",
"'Stupidly simple code answers from StackOverflow'",
")",
"parser",
".",
"add_argument",
"(",
"'query'",
",",
"help",
"=",
"\"What's the problem ?\"",
",",
"type",
"=",
"str",
",",
"nargs",
"=",
"'+'",
")",
"parser",
".",
"add_argument",
"(",
"'-t'",
",",
"'--tags'",
",",
"help",
"=",
"'semicolon separated tags -> python;lambda'",
")",
"args",
"=",
"parser",
".",
"parse_args",
"(",
")",
"main",
"(",
"args",
")"
] | Parse command-line arguments and run the query. | [
"docstring",
"for",
"argparse"
] | python | test |
wummel/linkchecker | linkcheck/containers.py | https://github.com/wummel/linkchecker/blob/c2ce810c3fb00b895a841a7be6b2e78c64e7b042/linkcheck/containers.py#L60-L64 | def pop (self, key):
"""Remove key from dict and return value."""
if key in self._keys:
self._keys.remove(key)
super(ListDict, self).pop(key) | [
"def",
"pop",
"(",
"self",
",",
"key",
")",
":",
"if",
"key",
"in",
"self",
".",
"_keys",
":",
"self",
".",
"_keys",
".",
"remove",
"(",
"key",
")",
"super",
"(",
"ListDict",
",",
"self",
")",
".",
"pop",
"(",
"key",
")"
] | Remove key from dict and return value. | [
"Remove",
"key",
"from",
"dict",
"and",
"return",
"value",
"."
] | python | train |
sighingnow/parsec.py | src/parsec/__init__.py | https://github.com/sighingnow/parsec.py/blob/ed50e1e259142757470b925f8d20dfe5ad223af0/src/parsec/__init__.py#L165-L174 | def choice(self, other):
'''(|) This combinator implements choice. The parser p | q first applies p.
If it succeeds, the value of p is returned.
If p fails **without consuming any input**, parser q is tried.
NOTICE: without backtrack.'''
@Parser
def choice_parser(text, index):
res = self(text, index)
return res if res.status or res.index != index else other(text, index)
return choice_parser | [
"def",
"choice",
"(",
"self",
",",
"other",
")",
":",
"@",
"Parser",
"def",
"choice_parser",
"(",
"text",
",",
"index",
")",
":",
"res",
"=",
"self",
"(",
"text",
",",
"index",
")",
"return",
"res",
"if",
"res",
".",
"status",
"or",
"res",
".",
"index",
"!=",
"index",
"else",
"other",
"(",
"text",
",",
"index",
")",
"return",
"choice_parser"
] | (|) This combinator implements choice. The parser p | q first applies p.
If it succeeds, the value of p is returned.
If p fails **without consuming any input**, parser q is tried.
NOTICE: without backtrack. | [
"(",
"|",
")",
"This",
"combinator",
"implements",
"choice",
".",
"The",
"parser",
"p",
"|",
"q",
"first",
"applies",
"p",
".",
"If",
"it",
"succeeds",
"the",
"value",
"of",
"p",
"is",
"returned",
".",
"If",
"p",
"fails",
"**",
"without",
"consuming",
"any",
"input",
"**",
"parser",
"q",
"is",
"tried",
".",
"NOTICE",
":",
"without",
"backtrack",
"."
] | python | train |
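To make the "fails without consuming any input" rule above concrete, a tiny standalone illustration that models parsers as plain tuples instead of the library's Parser/Value types (all names here are illustrative):
# a parser is modeled as: (text, index) -> (ok, value, new_index)
def literal(s):
    def parse(text, index):
        if text.startswith(s, index):
            return True, s, index + len(s)
        return False, None, index          # failure without consuming input
    return parse

def choice(p, q):
    def parse(text, index):
        ok, value, new_index = p(text, index)
        if ok or new_index != index:       # p consumed input, so q is never tried
            return ok, value, new_index
        return q(text, index)
    return parse

parser = choice(literal("ab"), literal("ac"))
print(parser("ac", 0))   # (True, 'ac', 2): 'ab' failed at index 0 without consuming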
apache/incubator-mxnet | example/ssd/config/utils.py | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/ssd/config/utils.py#L78-L90 | def zip_namedtuple(nt_list):
""" accept list of namedtuple, return a dict of zipped fields """
if not nt_list:
return dict()
if not isinstance(nt_list, list):
nt_list = [nt_list]
for nt in nt_list:
assert type(nt) == type(nt_list[0])
ret = {k : [v] for k, v in nt_list[0]._asdict().items()}
for nt in nt_list[1:]:
for k, v in nt._asdict().items():
ret[k].append(v)
return ret | [
"def",
"zip_namedtuple",
"(",
"nt_list",
")",
":",
"if",
"not",
"nt_list",
":",
"return",
"dict",
"(",
")",
"if",
"not",
"isinstance",
"(",
"nt_list",
",",
"list",
")",
":",
"nt_list",
"=",
"[",
"nt_list",
"]",
"for",
"nt",
"in",
"nt_list",
":",
"assert",
"type",
"(",
"nt",
")",
"==",
"type",
"(",
"nt_list",
"[",
"0",
"]",
")",
"ret",
"=",
"{",
"k",
":",
"[",
"v",
"]",
"for",
"k",
",",
"v",
"in",
"nt_list",
"[",
"0",
"]",
".",
"_asdict",
"(",
")",
".",
"items",
"(",
")",
"}",
"for",
"nt",
"in",
"nt_list",
"[",
"1",
":",
"]",
":",
"for",
"k",
",",
"v",
"in",
"nt",
".",
"_asdict",
"(",
")",
".",
"items",
"(",
")",
":",
"ret",
"[",
"k",
"]",
".",
"append",
"(",
"v",
")",
"return",
"ret"
] | accept list of namedtuple, return a dict of zipped fields | [
"accept",
"list",
"of",
"namedtuple",
"return",
"a",
"dict",
"of",
"zipped",
"fields"
] | python | train |
terrycain/aioboto3 | aioboto3/s3/inject.py | https://github.com/terrycain/aioboto3/blob/0fd192175461f7bb192f3ed9a872591caf8474ac/aioboto3/s3/inject.py#L206-L219 | async def upload_file(self, Filename, Bucket, Key, ExtraArgs=None, Callback=None, Config=None):
"""Upload a file to an S3 object.
Usage::
import boto3
s3 = boto3.resource('s3')
s3.meta.client.upload_file('/tmp/hello.txt', 'mybucket', 'hello.txt')
Similar behavior as S3Transfer's upload_file() method,
except that parameters are capitalized.
"""
with open(Filename, 'rb') as open_file:
await upload_fileobj(self, open_file, Bucket, Key, ExtraArgs=ExtraArgs, Callback=Callback, Config=Config) | [
"async",
"def",
"upload_file",
"(",
"self",
",",
"Filename",
",",
"Bucket",
",",
"Key",
",",
"ExtraArgs",
"=",
"None",
",",
"Callback",
"=",
"None",
",",
"Config",
"=",
"None",
")",
":",
"with",
"open",
"(",
"Filename",
",",
"'rb'",
")",
"as",
"open_file",
":",
"await",
"upload_fileobj",
"(",
"self",
",",
"open_file",
",",
"Bucket",
",",
"Key",
",",
"ExtraArgs",
"=",
"ExtraArgs",
",",
"Callback",
"=",
"Callback",
",",
"Config",
"=",
"Config",
")"
] | Upload a file to an S3 object.
Usage::
import boto3
s3 = boto3.resource('s3')
s3.meta.client.upload_file('/tmp/hello.txt', 'mybucket', 'hello.txt')
Similar behavior as S3Transfer's upload_file() method,
except that parameters are capitalized. | [
"Upload",
"a",
"file",
"to",
"an",
"S3",
"object",
"."
] | python | train |
serge-sans-paille/pythran | setup.py | https://github.com/serge-sans-paille/pythran/blob/7e1b5af2dddfabc50bd2a977f0178be269b349b5/setup.py#L75-L95 | def copy_pkg(self, pkg, src_only=False):
"Install boost deps from the third_party directory"
if getattr(self, 'no_' + pkg) is None:
print('Copying boost dependencies')
to_copy = pkg,
else:
return
src = os.path.join('third_party', *to_copy)
# copy to the build tree
if not src_only:
target = os.path.join(self.build_lib, 'pythran', *to_copy)
shutil.rmtree(target, True)
shutil.copytree(src, target)
# copy them to the source tree too, needed for sdist
target = os.path.join('pythran', *to_copy)
shutil.rmtree(target, True)
shutil.copytree(src, target) | [
"def",
"copy_pkg",
"(",
"self",
",",
"pkg",
",",
"src_only",
"=",
"False",
")",
":",
"if",
"getattr",
"(",
"self",
",",
"'no_'",
"+",
"pkg",
")",
"is",
"None",
":",
"print",
"(",
"'Copying boost dependencies'",
")",
"to_copy",
"=",
"pkg",
",",
"else",
":",
"return",
"src",
"=",
"os",
".",
"path",
".",
"join",
"(",
"'third_party'",
",",
"*",
"to_copy",
")",
"# copy to the build tree",
"if",
"not",
"src_only",
":",
"target",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"build_lib",
",",
"'pythran'",
",",
"*",
"to_copy",
")",
"shutil",
".",
"rmtree",
"(",
"target",
",",
"True",
")",
"shutil",
".",
"copytree",
"(",
"src",
",",
"target",
")",
"# copy them to the source tree too, needed for sdist",
"target",
"=",
"os",
".",
"path",
".",
"join",
"(",
"'pythran'",
",",
"*",
"to_copy",
")",
"shutil",
".",
"rmtree",
"(",
"target",
",",
"True",
")",
"shutil",
".",
"copytree",
"(",
"src",
",",
"target",
")"
] | Install boost deps from the third_party directory | [
"Install",
"boost",
"deps",
"from",
"the",
"third_party",
"directory"
] | python | train |
brocade/pynos | pynos/versions/ver_7/ver_7_1_0/yang/brocade_tunnels.py | https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_7/ver_7_1_0/yang/brocade_tunnels.py#L476-L489 | def overlay_gateway_enable_statistics_vlan_action(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
overlay_gateway = ET.SubElement(config, "overlay-gateway", xmlns="urn:brocade.com:mgmt:brocade-tunnels")
name_key = ET.SubElement(overlay_gateway, "name")
name_key.text = kwargs.pop('name')
enable = ET.SubElement(overlay_gateway, "enable")
statistics = ET.SubElement(enable, "statistics")
vlan_action = ET.SubElement(statistics, "vlan-action")
vlan_action.text = kwargs.pop('vlan_action')
callback = kwargs.pop('callback', self._callback)
return callback(config) | [
"def",
"overlay_gateway_enable_statistics_vlan_action",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"config",
"=",
"ET",
".",
"Element",
"(",
"\"config\"",
")",
"overlay_gateway",
"=",
"ET",
".",
"SubElement",
"(",
"config",
",",
"\"overlay-gateway\"",
",",
"xmlns",
"=",
"\"urn:brocade.com:mgmt:brocade-tunnels\"",
")",
"name_key",
"=",
"ET",
".",
"SubElement",
"(",
"overlay_gateway",
",",
"\"name\"",
")",
"name_key",
".",
"text",
"=",
"kwargs",
".",
"pop",
"(",
"'name'",
")",
"enable",
"=",
"ET",
".",
"SubElement",
"(",
"overlay_gateway",
",",
"\"enable\"",
")",
"statistics",
"=",
"ET",
".",
"SubElement",
"(",
"enable",
",",
"\"statistics\"",
")",
"vlan_action",
"=",
"ET",
".",
"SubElement",
"(",
"statistics",
",",
"\"vlan-action\"",
")",
"vlan_action",
".",
"text",
"=",
"kwargs",
".",
"pop",
"(",
"'vlan_action'",
")",
"callback",
"=",
"kwargs",
".",
"pop",
"(",
"'callback'",
",",
"self",
".",
"_callback",
")",
"return",
"callback",
"(",
"config",
")"
] | Auto Generated Code | [
"Auto",
"Generated",
"Code"
] | python | train |
nschloe/orthopy | orthopy/disk/orth.py | https://github.com/nschloe/orthopy/blob/64713d0533b0af042810a7535fff411b8e0aea9e/orthopy/disk/orth.py#L9-L89 | def tree(X, n, symbolic=False):
"""Evaluates the entire tree of orthogonal polynomials on the unit disk.
The return value is a list of arrays, where `out[k]` hosts the `2*k+1`
values of the `k`th level of the tree
(0, 0)
(0, 1) (1, 1)
(0, 2) (1, 2) (2, 2)
... ... ...
"""
frac = sympy.Rational if symbolic else lambda x, y: x / y
sqrt = sympy.sqrt if symbolic else numpy.sqrt
pi = sympy.pi if symbolic else numpy.pi
mu = frac(1, 2)
p0 = 1 / sqrt(pi)
def alpha(n):
return numpy.array(
[
2
* sqrt(
frac(
(n + mu + frac(1, 2)) * (n + mu - frac(1, 2)),
(n - k) * (n + k + 2 * mu),
)
)
for k in range(n)
]
)
def beta(n):
return 2 * sqrt(
frac((n + mu - 1) * (n + mu + frac(1, 2)), (n + 2 * mu - 1) * n)
)
def gamma(n):
return numpy.array(
[
sqrt(
frac(
(n - 1 - k) * (n + mu + frac(1, 2)) * (n + k + 2 * mu - 1),
(n - k) * (n + mu - frac(3, 2)) * (n + k + 2 * mu),
)
)
for k in range(n - 1)
]
)
def delta(n):
return sqrt(
frac(
(n - 1)
* (n + 2 * mu - 2)
* (n + mu - frac(1, 2))
* (n + mu + frac(1, 2)),
n * (n + 2 * mu - 1) * (n + mu - 1) * (n + mu - 2),
)
)
out = [numpy.array([0 * X[0] + p0])]
one_min_x2 = 1 - X[0] ** 2
for L in range(1, n + 1):
out.append(
numpy.concatenate(
[
out[L - 1] * numpy.multiply.outer(alpha(L), X[0]),
[out[L - 1][L - 1] * beta(L) * X[1]],
]
)
)
if L > 1:
out[-1][: L - 1] -= (out[L - 2].T * gamma(L)).T
out[-1][-1] -= out[L - 2][L - 2] * delta(L) * one_min_x2
return out | [
"def",
"tree",
"(",
"X",
",",
"n",
",",
"symbolic",
"=",
"False",
")",
":",
"frac",
"=",
"sympy",
".",
"Rational",
"if",
"symbolic",
"else",
"lambda",
"x",
",",
"y",
":",
"x",
"/",
"y",
"sqrt",
"=",
"sympy",
".",
"sqrt",
"if",
"symbolic",
"else",
"numpy",
".",
"sqrt",
"pi",
"=",
"sympy",
".",
"pi",
"if",
"symbolic",
"else",
"numpy",
".",
"pi",
"mu",
"=",
"frac",
"(",
"1",
",",
"2",
")",
"p0",
"=",
"1",
"/",
"sqrt",
"(",
"pi",
")",
"def",
"alpha",
"(",
"n",
")",
":",
"return",
"numpy",
".",
"array",
"(",
"[",
"2",
"*",
"sqrt",
"(",
"frac",
"(",
"(",
"n",
"+",
"mu",
"+",
"frac",
"(",
"1",
",",
"2",
")",
")",
"*",
"(",
"n",
"+",
"mu",
"-",
"frac",
"(",
"1",
",",
"2",
")",
")",
",",
"(",
"n",
"-",
"k",
")",
"*",
"(",
"n",
"+",
"k",
"+",
"2",
"*",
"mu",
")",
",",
")",
")",
"for",
"k",
"in",
"range",
"(",
"n",
")",
"]",
")",
"def",
"beta",
"(",
"n",
")",
":",
"return",
"2",
"*",
"sqrt",
"(",
"frac",
"(",
"(",
"n",
"+",
"mu",
"-",
"1",
")",
"*",
"(",
"n",
"+",
"mu",
"+",
"frac",
"(",
"1",
",",
"2",
")",
")",
",",
"(",
"n",
"+",
"2",
"*",
"mu",
"-",
"1",
")",
"*",
"n",
")",
")",
"def",
"gamma",
"(",
"n",
")",
":",
"return",
"numpy",
".",
"array",
"(",
"[",
"sqrt",
"(",
"frac",
"(",
"(",
"n",
"-",
"1",
"-",
"k",
")",
"*",
"(",
"n",
"+",
"mu",
"+",
"frac",
"(",
"1",
",",
"2",
")",
")",
"*",
"(",
"n",
"+",
"k",
"+",
"2",
"*",
"mu",
"-",
"1",
")",
",",
"(",
"n",
"-",
"k",
")",
"*",
"(",
"n",
"+",
"mu",
"-",
"frac",
"(",
"3",
",",
"2",
")",
")",
"*",
"(",
"n",
"+",
"k",
"+",
"2",
"*",
"mu",
")",
",",
")",
")",
"for",
"k",
"in",
"range",
"(",
"n",
"-",
"1",
")",
"]",
")",
"def",
"delta",
"(",
"n",
")",
":",
"return",
"sqrt",
"(",
"frac",
"(",
"(",
"n",
"-",
"1",
")",
"*",
"(",
"n",
"+",
"2",
"*",
"mu",
"-",
"2",
")",
"*",
"(",
"n",
"+",
"mu",
"-",
"frac",
"(",
"1",
",",
"2",
")",
")",
"*",
"(",
"n",
"+",
"mu",
"+",
"frac",
"(",
"1",
",",
"2",
")",
")",
",",
"n",
"*",
"(",
"n",
"+",
"2",
"*",
"mu",
"-",
"1",
")",
"*",
"(",
"n",
"+",
"mu",
"-",
"1",
")",
"*",
"(",
"n",
"+",
"mu",
"-",
"2",
")",
",",
")",
")",
"out",
"=",
"[",
"numpy",
".",
"array",
"(",
"[",
"0",
"*",
"X",
"[",
"0",
"]",
"+",
"p0",
"]",
")",
"]",
"one_min_x2",
"=",
"1",
"-",
"X",
"[",
"0",
"]",
"**",
"2",
"for",
"L",
"in",
"range",
"(",
"1",
",",
"n",
"+",
"1",
")",
":",
"out",
".",
"append",
"(",
"numpy",
".",
"concatenate",
"(",
"[",
"out",
"[",
"L",
"-",
"1",
"]",
"*",
"numpy",
".",
"multiply",
".",
"outer",
"(",
"alpha",
"(",
"L",
")",
",",
"X",
"[",
"0",
"]",
")",
",",
"[",
"out",
"[",
"L",
"-",
"1",
"]",
"[",
"L",
"-",
"1",
"]",
"*",
"beta",
"(",
"L",
")",
"*",
"X",
"[",
"1",
"]",
"]",
",",
"]",
")",
")",
"if",
"L",
">",
"1",
":",
"out",
"[",
"-",
"1",
"]",
"[",
":",
"L",
"-",
"1",
"]",
"-=",
"(",
"out",
"[",
"L",
"-",
"2",
"]",
".",
"T",
"*",
"gamma",
"(",
"L",
")",
")",
".",
"T",
"out",
"[",
"-",
"1",
"]",
"[",
"-",
"1",
"]",
"-=",
"out",
"[",
"L",
"-",
"2",
"]",
"[",
"L",
"-",
"2",
"]",
"*",
"delta",
"(",
"L",
")",
"*",
"one_min_x2",
"return",
"out"
] | Evaluates the entire tree of orthogonal polynomials on the unit disk.
The return value is a list of arrays, where `out[k]` hosts the `2*k+1`
values of the `k`th level of the tree
(0, 0)
(0, 1) (1, 1)
(0, 2) (1, 2) (2, 2)
... ... ... | [
"Evaluates",
"the",
"entire",
"tree",
"of",
"orthogonal",
"polynomials",
"on",
"the",
"unit",
"disk",
"."
] | python | train |
aouyar/PyMunin | pymunin/__init__.py | https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pymunin/__init__.py#L768-L791 | def run(self):
"""Implements main entry point for plugin execution."""
if len(self._argv) > 1 and len(self._argv[1]) > 0:
oper = self._argv[1]
else:
oper = 'fetch'
if oper == 'fetch':
ret = self.fetch()
elif oper == 'config':
ret = self.config()
if ret and self._dirtyConfig:
ret = self.fetch()
elif oper == 'autoconf':
ret = self.autoconf()
if ret:
print "yes"
else:
print "no"
ret = True
elif oper == 'suggest':
ret = self.suggest()
else:
raise AttributeError("Invalid command argument: %s" % oper)
return ret | [
"def",
"run",
"(",
"self",
")",
":",
"if",
"len",
"(",
"self",
".",
"_argv",
")",
">",
"1",
"and",
"len",
"(",
"self",
".",
"_argv",
"[",
"1",
"]",
")",
">",
"0",
":",
"oper",
"=",
"self",
".",
"_argv",
"[",
"1",
"]",
"else",
":",
"oper",
"=",
"'fetch'",
"if",
"oper",
"==",
"'fetch'",
":",
"ret",
"=",
"self",
".",
"fetch",
"(",
")",
"elif",
"oper",
"==",
"'config'",
":",
"ret",
"=",
"self",
".",
"config",
"(",
")",
"if",
"ret",
"and",
"self",
".",
"_dirtyConfig",
":",
"ret",
"=",
"self",
".",
"fetch",
"(",
")",
"elif",
"oper",
"==",
"'autoconf'",
":",
"ret",
"=",
"self",
".",
"autoconf",
"(",
")",
"if",
"ret",
":",
"print",
"\"yes\"",
"else",
":",
"print",
"\"no\"",
"ret",
"=",
"True",
"elif",
"oper",
"==",
"'suggest'",
":",
"ret",
"=",
"self",
".",
"suggest",
"(",
")",
"else",
":",
"raise",
"AttributeError",
"(",
"\"Invalid command argument: %s\"",
"%",
"oper",
")",
"return",
"ret"
] | Implements main entry point for plugin execution. | [
"Implements",
"main",
"entry",
"point",
"for",
"plugin",
"execution",
"."
] | python | train |
mrcagney/gtfstk | gtfstk/miscellany.py | https://github.com/mrcagney/gtfstk/blob/c91494e6fefc02523889655a0dc92d1c0eee8d03/gtfstk/miscellany.py#L808-L891 | def restrict_to_polygon(feed: "Feed", polygon: Polygon) -> "Feed":
"""
Build a new feed by restricting this one to only the trips
that have at least one stop intersecting the given Shapely polygon,
then restricting stops, routes, stop times, etc. to those
associated with that subset of trips.
Return the resulting feed.
Requires GeoPandas.
Assume the following feed attributes are not ``None``:
- ``feed.stop_times``
- ``feed.trips``
- ``feed.stops``
- ``feed.routes``
- Those used in :func:`.stops.get_stops_in_polygon`
"""
# Initialize the new feed as the old feed.
# Restrict its DataFrames below.
feed = feed.copy()
# Get IDs of stops within the polygon
stop_ids = feed.get_stops_in_polygon(polygon)["stop_id"]
# Get all trips that stop at at least one of those stops
st = feed.stop_times.copy()
trip_ids = st[st["stop_id"].isin(stop_ids)]["trip_id"]
feed.trips = feed.trips[feed.trips["trip_id"].isin(trip_ids)].copy()
# Get stop times for trips
feed.stop_times = st[st["trip_id"].isin(trip_ids)].copy()
# Get stops for trips
stop_ids = feed.stop_times["stop_id"]
feed.stops = feed.stops[feed.stops["stop_id"].isin(stop_ids)].copy()
# Get routes for trips
route_ids = feed.trips["route_id"]
feed.routes = feed.routes[feed.routes["route_id"].isin(route_ids)].copy()
# Get calendar for trips
service_ids = feed.trips["service_id"]
if feed.calendar is not None:
feed.calendar = feed.calendar[
feed.calendar["service_id"].isin(service_ids)
].copy()
# Get agency for trips
if "agency_id" in feed.routes.columns:
agency_ids = feed.routes["agency_id"]
if len(agency_ids):
feed.agency = feed.agency[
feed.agency["agency_id"].isin(agency_ids)
].copy()
# Now for the optional files.
# Get calendar dates for trips.
cd = feed.calendar_dates
if cd is not None:
feed.calendar_dates = cd[cd["service_id"].isin(service_ids)].copy()
# Get frequencies for trips
if feed.frequencies is not None:
feed.frequencies = feed.frequencies[
feed.frequencies["trip_id"].isin(trip_ids)
].copy()
# Get shapes for trips
if feed.shapes is not None:
shape_ids = feed.trips["shape_id"]
feed.shapes = feed.shapes[
feed.shapes["shape_id"].isin(shape_ids)
].copy()
# Get transfers for stops
if feed.transfers is not None:
t = feed.transfers
feed.transfers = t[
t["from_stop_id"].isin(stop_ids) | t["to_stop_id"].isin(stop_ids)
].copy()
return feed | [
"def",
"restrict_to_polygon",
"(",
"feed",
":",
"\"Feed\"",
",",
"polygon",
":",
"Polygon",
")",
"->",
"\"Feed\"",
":",
"# Initialize the new feed as the old feed.",
"# Restrict its DataFrames below.",
"feed",
"=",
"feed",
".",
"copy",
"(",
")",
"# Get IDs of stops within the polygon",
"stop_ids",
"=",
"feed",
".",
"get_stops_in_polygon",
"(",
"polygon",
")",
"[",
"\"stop_id\"",
"]",
"# Get all trips that stop at at least one of those stops",
"st",
"=",
"feed",
".",
"stop_times",
".",
"copy",
"(",
")",
"trip_ids",
"=",
"st",
"[",
"st",
"[",
"\"stop_id\"",
"]",
".",
"isin",
"(",
"stop_ids",
")",
"]",
"[",
"\"trip_id\"",
"]",
"feed",
".",
"trips",
"=",
"feed",
".",
"trips",
"[",
"feed",
".",
"trips",
"[",
"\"trip_id\"",
"]",
".",
"isin",
"(",
"trip_ids",
")",
"]",
".",
"copy",
"(",
")",
"# Get stop times for trips",
"feed",
".",
"stop_times",
"=",
"st",
"[",
"st",
"[",
"\"trip_id\"",
"]",
".",
"isin",
"(",
"trip_ids",
")",
"]",
".",
"copy",
"(",
")",
"# Get stops for trips",
"stop_ids",
"=",
"feed",
".",
"stop_times",
"[",
"\"stop_id\"",
"]",
"feed",
".",
"stops",
"=",
"feed",
".",
"stops",
"[",
"feed",
".",
"stops",
"[",
"\"stop_id\"",
"]",
".",
"isin",
"(",
"stop_ids",
")",
"]",
".",
"copy",
"(",
")",
"# Get routes for trips",
"route_ids",
"=",
"feed",
".",
"trips",
"[",
"\"route_id\"",
"]",
"feed",
".",
"routes",
"=",
"feed",
".",
"routes",
"[",
"feed",
".",
"routes",
"[",
"\"route_id\"",
"]",
".",
"isin",
"(",
"route_ids",
")",
"]",
".",
"copy",
"(",
")",
"# Get calendar for trips",
"service_ids",
"=",
"feed",
".",
"trips",
"[",
"\"service_id\"",
"]",
"if",
"feed",
".",
"calendar",
"is",
"not",
"None",
":",
"feed",
".",
"calendar",
"=",
"feed",
".",
"calendar",
"[",
"feed",
".",
"calendar",
"[",
"\"service_id\"",
"]",
".",
"isin",
"(",
"service_ids",
")",
"]",
".",
"copy",
"(",
")",
"# Get agency for trips",
"if",
"\"agency_id\"",
"in",
"feed",
".",
"routes",
".",
"columns",
":",
"agency_ids",
"=",
"feed",
".",
"routes",
"[",
"\"agency_id\"",
"]",
"if",
"len",
"(",
"agency_ids",
")",
":",
"feed",
".",
"agency",
"=",
"feed",
".",
"agency",
"[",
"feed",
".",
"agency",
"[",
"\"agency_id\"",
"]",
".",
"isin",
"(",
"agency_ids",
")",
"]",
".",
"copy",
"(",
")",
"# Now for the optional files.",
"# Get calendar dates for trips.",
"cd",
"=",
"feed",
".",
"calendar_dates",
"if",
"cd",
"is",
"not",
"None",
":",
"feed",
".",
"calendar_dates",
"=",
"cd",
"[",
"cd",
"[",
"\"service_id\"",
"]",
".",
"isin",
"(",
"service_ids",
")",
"]",
".",
"copy",
"(",
")",
"# Get frequencies for trips",
"if",
"feed",
".",
"frequencies",
"is",
"not",
"None",
":",
"feed",
".",
"frequencies",
"=",
"feed",
".",
"frequencies",
"[",
"feed",
".",
"frequencies",
"[",
"\"trip_id\"",
"]",
".",
"isin",
"(",
"trip_ids",
")",
"]",
".",
"copy",
"(",
")",
"# Get shapes for trips",
"if",
"feed",
".",
"shapes",
"is",
"not",
"None",
":",
"shape_ids",
"=",
"feed",
".",
"trips",
"[",
"\"shape_id\"",
"]",
"feed",
".",
"shapes",
"=",
"feed",
".",
"shapes",
"[",
"feed",
".",
"shapes",
"[",
"\"shape_id\"",
"]",
".",
"isin",
"(",
"shape_ids",
")",
"]",
".",
"copy",
"(",
")",
"# Get transfers for stops",
"if",
"feed",
".",
"transfers",
"is",
"not",
"None",
":",
"t",
"=",
"feed",
".",
"transfers",
"feed",
".",
"transfers",
"=",
"t",
"[",
"t",
"[",
"\"from_stop_id\"",
"]",
".",
"isin",
"(",
"stop_ids",
")",
"|",
"t",
"[",
"\"to_stop_id\"",
"]",
".",
"isin",
"(",
"stop_ids",
")",
"]",
".",
"copy",
"(",
")",
"return",
"feed"
] | Build a new feed by restricting this one to only the trips
that have at least one stop intersecting the given Shapely polygon,
then restricting stops, routes, stop times, etc. to those
associated with that subset of trips.
Return the resulting feed.
Requires GeoPandas.
Assume the following feed attributes are not ``None``:
- ``feed.stop_times``
- ``feed.trips``
- ``feed.stops``
- ``feed.routes``
- Those used in :func:`.stops.get_stops_in_polygon` | [
"Build",
"a",
"new",
"feed",
"by",
"restricting",
"this",
"one",
"to",
"only",
"the",
"trips",
"that",
"have",
"at",
"least",
"one",
"stop",
"intersecting",
"the",
"given",
"Shapely",
"polygon",
"then",
"restricting",
"stops",
"routes",
"stop",
"times",
"etc",
".",
"to",
"those",
"associated",
"with",
"that",
"subset",
"of",
"trips",
".",
"Return",
"the",
"resulting",
"feed",
"."
] | python | train |
Erotemic/utool | utool/Printable.py | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/Printable.py#L96-L118 | def printableType(val, name=None, parent=None):
"""
Tries to make a nice type string for a value.
Can also pass in a Printable parent object
"""
import numpy as np
if parent is not None and hasattr(parent, 'customPrintableType'):
# Hack for non - trivial preference types
_typestr = parent.customPrintableType(name)
if _typestr is not None:
return _typestr
if isinstance(val, np.ndarray):
info = npArrInfo(val)
_typestr = info.dtypestr
elif isinstance(val, object):
_typestr = val.__class__.__name__
else:
_typestr = str(type(val))
_typestr = _typestr.replace('type', '')
_typestr = re.sub('[\'><]', '', _typestr)
_typestr = re.sub(' *', ' ', _typestr)
_typestr = _typestr.strip()
return _typestr | [
"def",
"printableType",
"(",
"val",
",",
"name",
"=",
"None",
",",
"parent",
"=",
"None",
")",
":",
"import",
"numpy",
"as",
"np",
"if",
"parent",
"is",
"not",
"None",
"and",
"hasattr",
"(",
"parent",
",",
"'customPrintableType'",
")",
":",
"# Hack for non - trivial preference types",
"_typestr",
"=",
"parent",
".",
"customPrintableType",
"(",
"name",
")",
"if",
"_typestr",
"is",
"not",
"None",
":",
"return",
"_typestr",
"if",
"isinstance",
"(",
"val",
",",
"np",
".",
"ndarray",
")",
":",
"info",
"=",
"npArrInfo",
"(",
"val",
")",
"_typestr",
"=",
"info",
".",
"dtypestr",
"elif",
"isinstance",
"(",
"val",
",",
"object",
")",
":",
"_typestr",
"=",
"val",
".",
"__class__",
".",
"__name__",
"else",
":",
"_typestr",
"=",
"str",
"(",
"type",
"(",
"val",
")",
")",
"_typestr",
"=",
"_typestr",
".",
"replace",
"(",
"'type'",
",",
"''",
")",
"_typestr",
"=",
"re",
".",
"sub",
"(",
"'[\\'><]'",
",",
"''",
",",
"_typestr",
")",
"_typestr",
"=",
"re",
".",
"sub",
"(",
"' *'",
",",
"' '",
",",
"_typestr",
")",
"_typestr",
"=",
"_typestr",
".",
"strip",
"(",
")",
"return",
"_typestr"
] | Tries to make a nice type string for a value.
Can also pass in a Printable parent object | [
"Tries",
"to",
"make",
"a",
"nice",
"type",
"string",
"for",
"a",
"value",
".",
"Can",
"also",
"pass",
"in",
"a",
"Printable",
"parent",
"object"
] | python | train |
pyopenapi/pyswagger | pyswagger/scanner/v1_2/validate.py | https://github.com/pyopenapi/pyswagger/blob/333c4ca08e758cd2194943d9904a3eda3fe43977/pyswagger/scanner/v1_2/validate.py#L115-L122 | def _validate_granttype(self, path, obj, _):
""" make sure either implicit or authorization_code is defined """
errs = []
if not obj.implicit and not obj.authorization_code:
errs.append('Either implicit or authorization_code should be defined.')
return path, obj.__class__.__name__, errs | [
"def",
"_validate_granttype",
"(",
"self",
",",
"path",
",",
"obj",
",",
"_",
")",
":",
"errs",
"=",
"[",
"]",
"if",
"not",
"obj",
".",
"implicit",
"and",
"not",
"obj",
".",
"authorization_code",
":",
"errs",
".",
"append",
"(",
"'Either implicit or authorization_code should be defined.'",
")",
"return",
"path",
",",
"obj",
".",
"__class__",
".",
"__name__",
",",
"errs"
] | make sure either implicit or authorization_code is defined | [
"make",
"sure",
"either",
"implicit",
"or",
"authorization_code",
"is",
"defined"
] | python | train |
IAMconsortium/pyam | pyam/core.py | https://github.com/IAMconsortium/pyam/blob/4077929ca6e7be63a0e3ecf882c5f1da97b287bf/pyam/core.py#L749-L801 | def aggregate_region(self, variable, region='World', subregions=None,
components=None, append=False):
"""Compute the aggregate of timeseries over a number of regions
including variable components only defined at the `region` level
Parameters
----------
variable: str
variable for which the aggregate should be computed
region: str, default 'World'
dimension
subregions: list of str
list of subregions, defaults to all regions other than `region`
components: list of str
list of variables, defaults to all sub-categories of `variable`
included in `region` but not in any of `subregions`
append: bool, default False
append the aggregate timeseries to `data` and return None,
else return aggregate timeseries
"""
# default subregions to all regions other than `region`
if subregions is None:
rows = self._apply_filters(variable=variable)
subregions = set(self.data[rows].region) - set([region])
if not len(subregions):
msg = 'cannot aggregate variable `{}` to `{}` because it does not'\
' exist in any subregion'
logger().info(msg.format(variable, region))
return
# compute aggregate over all subregions
subregion_df = self.filter(region=subregions)
cols = ['region', 'variable']
_data = _aggregate(subregion_df.filter(variable=variable).data, cols)
# add components at the `region` level, defaults to all variables one
# level below `variable` that are only present in `region`
region_df = self.filter(region=region)
components = components or (
set(region_df._variable_components(variable)).difference(
subregion_df._variable_components(variable)))
if len(components):
rows = region_df._apply_filters(variable=components)
_data = _data.add(_aggregate(region_df.data[rows], cols),
fill_value=0)
if append is True:
self.append(_data, region=region, variable=variable, inplace=True)
else:
return _data | [
"def",
"aggregate_region",
"(",
"self",
",",
"variable",
",",
"region",
"=",
"'World'",
",",
"subregions",
"=",
"None",
",",
"components",
"=",
"None",
",",
"append",
"=",
"False",
")",
":",
"# default subregions to all regions other than `region`",
"if",
"subregions",
"is",
"None",
":",
"rows",
"=",
"self",
".",
"_apply_filters",
"(",
"variable",
"=",
"variable",
")",
"subregions",
"=",
"set",
"(",
"self",
".",
"data",
"[",
"rows",
"]",
".",
"region",
")",
"-",
"set",
"(",
"[",
"region",
"]",
")",
"if",
"not",
"len",
"(",
"subregions",
")",
":",
"msg",
"=",
"'cannot aggregate variable `{}` to `{}` because it does not'",
"' exist in any subregion'",
"logger",
"(",
")",
".",
"info",
"(",
"msg",
".",
"format",
"(",
"variable",
",",
"region",
")",
")",
"return",
"# compute aggregate over all subregions",
"subregion_df",
"=",
"self",
".",
"filter",
"(",
"region",
"=",
"subregions",
")",
"cols",
"=",
"[",
"'region'",
",",
"'variable'",
"]",
"_data",
"=",
"_aggregate",
"(",
"subregion_df",
".",
"filter",
"(",
"variable",
"=",
"variable",
")",
".",
"data",
",",
"cols",
")",
"# add components at the `region` level, defaults to all variables one",
"# level below `variable` that are only present in `region`",
"region_df",
"=",
"self",
".",
"filter",
"(",
"region",
"=",
"region",
")",
"components",
"=",
"components",
"or",
"(",
"set",
"(",
"region_df",
".",
"_variable_components",
"(",
"variable",
")",
")",
".",
"difference",
"(",
"subregion_df",
".",
"_variable_components",
"(",
"variable",
")",
")",
")",
"if",
"len",
"(",
"components",
")",
":",
"rows",
"=",
"region_df",
".",
"_apply_filters",
"(",
"variable",
"=",
"components",
")",
"_data",
"=",
"_data",
".",
"add",
"(",
"_aggregate",
"(",
"region_df",
".",
"data",
"[",
"rows",
"]",
",",
"cols",
")",
",",
"fill_value",
"=",
"0",
")",
"if",
"append",
"is",
"True",
":",
"self",
".",
"append",
"(",
"_data",
",",
"region",
"=",
"region",
",",
"variable",
"=",
"variable",
",",
"inplace",
"=",
"True",
")",
"else",
":",
"return",
"_data"
] | Compute the aggregate of timeseries over a number of regions
including variable components only defined at the `region` level
Parameters
----------
variable: str
variable for which the aggregate should be computed
region: str, default 'World'
dimension
subregions: list of str
list of subregions, defaults to all regions other than `region`
components: list of str
list of variables, defaults to all sub-categories of `variable`
included in `region` but not in any of `subregions`
append: bool, default False
append the aggregate timeseries to `data` and return None,
else return aggregate timeseries | [
"Compute",
"the",
"aggregate",
"of",
"timeseries",
"over",
"a",
"number",
"of",
"regions",
"including",
"variable",
"components",
"only",
"defined",
"at",
"the",
"region",
"level"
] | python | train |
awslabs/aws-sam-cli | samcli/local/lambdafn/zip.py | https://github.com/awslabs/aws-sam-cli/blob/c05af5e7378c6f05f7d82ad3f0bca17204177db6/samcli/local/lambdafn/zip.py#L94-L130 | def unzip_from_uri(uri, layer_zip_path, unzip_output_dir, progressbar_label):
"""
Download the LayerVersion Zip to the Layer Pkg Cache
Parameters
----------
uri str
Uri to download from
layer_zip_path str
Path to where the content from the uri should be downloaded to
unzip_output_dir str
Path to unzip the zip to
progressbar_label str
Label to use in the Progressbar
"""
try:
get_request = requests.get(uri, stream=True, verify=os.environ.get('AWS_CA_BUNDLE', True))
with open(layer_zip_path, 'wb') as local_layer_file:
file_length = int(get_request.headers['Content-length'])
with progressbar(file_length, progressbar_label) as p_bar:
# Set the chunk size to None. Since we are streaming the request, None will allow the data to be
# read as it arrives in whatever size the chunks are received.
for data in get_request.iter_content(chunk_size=None):
local_layer_file.write(data)
p_bar.update(len(data))
# Forcefully set the permissions to 700 on files and directories. This is to ensure the owner
# of the files is the only one that can read, write, or execute the files.
unzip(layer_zip_path, unzip_output_dir, permission=0o700)
finally:
# Remove the downloaded zip file
path_to_layer = Path(layer_zip_path)
if path_to_layer.exists():
path_to_layer.unlink() | [
"def",
"unzip_from_uri",
"(",
"uri",
",",
"layer_zip_path",
",",
"unzip_output_dir",
",",
"progressbar_label",
")",
":",
"try",
":",
"get_request",
"=",
"requests",
".",
"get",
"(",
"uri",
",",
"stream",
"=",
"True",
",",
"verify",
"=",
"os",
".",
"environ",
".",
"get",
"(",
"'AWS_CA_BUNDLE'",
",",
"True",
")",
")",
"with",
"open",
"(",
"layer_zip_path",
",",
"'wb'",
")",
"as",
"local_layer_file",
":",
"file_length",
"=",
"int",
"(",
"get_request",
".",
"headers",
"[",
"'Content-length'",
"]",
")",
"with",
"progressbar",
"(",
"file_length",
",",
"progressbar_label",
")",
"as",
"p_bar",
":",
"# Set the chunk size to None. Since we are streaming the request, None will allow the data to be",
"# read as it arrives in whatever size the chunks are received.",
"for",
"data",
"in",
"get_request",
".",
"iter_content",
"(",
"chunk_size",
"=",
"None",
")",
":",
"local_layer_file",
".",
"write",
"(",
"data",
")",
"p_bar",
".",
"update",
"(",
"len",
"(",
"data",
")",
")",
"# Forcefully set the permissions to 700 on files and directories. This is to ensure the owner",
"# of the files is the only one that can read, write, or execute the files.",
"unzip",
"(",
"layer_zip_path",
",",
"unzip_output_dir",
",",
"permission",
"=",
"0o700",
")",
"finally",
":",
"# Remove the downloaded zip file",
"path_to_layer",
"=",
"Path",
"(",
"layer_zip_path",
")",
"if",
"path_to_layer",
".",
"exists",
"(",
")",
":",
"path_to_layer",
".",
"unlink",
"(",
")"
] | Download the LayerVersion Zip to the Layer Pkg Cache
Parameters
----------
uri str
Uri to download from
layer_zip_path str
Path to where the content from the uri should be downloaded to
unzip_output_dir str
Path to unzip the zip to
progressbar_label str
Label to use in the Progressbar | [
"Download",
"the",
"LayerVersion",
"Zip",
"to",
"the",
"Layer",
"Pkg",
"Cache"
] | python | train |
d0c-s4vage/pfp | pfp/bitwrap.py | https://github.com/d0c-s4vage/pfp/blob/32f2d34fdec1c70019fa83c7006d5e3be0f92fcd/pfp/bitwrap.py#L70-L80 | def is_eof(self):
"""Return if the stream has reached EOF or not
without discarding any unflushed bits
:returns: True/False
"""
pos = self._stream.tell()
byte = self._stream.read(1)
self._stream.seek(pos, 0)
return utils.binary(byte) == utils.binary("") | [
"def",
"is_eof",
"(",
"self",
")",
":",
"pos",
"=",
"self",
".",
"_stream",
".",
"tell",
"(",
")",
"byte",
"=",
"self",
".",
"_stream",
".",
"read",
"(",
"1",
")",
"self",
".",
"_stream",
".",
"seek",
"(",
"pos",
",",
"0",
")",
"return",
"utils",
".",
"binary",
"(",
"byte",
")",
"==",
"utils",
".",
"binary",
"(",
"\"\"",
")"
] | Return if the stream has reached EOF or not
without discarding any unflushed bits
:returns: True/False | [
"Return",
"if",
"the",
"stream",
"has",
"reached",
"EOF",
"or",
"not",
"without",
"discarding",
"any",
"unflushed",
"bits"
] | python | train |
GNS3/gns3-server | gns3server/controller/__init__.py | https://github.com/GNS3/gns3-server/blob/a221678448fb5d24e977ef562f81d56aacc89ab1/gns3server/controller/__init__.py#L580-L586 | def _project_auto_open(self):
"""
Auto open the project with auto open enabled
"""
for project in self._projects.values():
if project.auto_open:
yield from project.open() | [
"def",
"_project_auto_open",
"(",
"self",
")",
":",
"for",
"project",
"in",
"self",
".",
"_projects",
".",
"values",
"(",
")",
":",
"if",
"project",
".",
"auto_open",
":",
"yield",
"from",
"project",
".",
"open",
"(",
")"
] | Auto open the project with auto open enabled | [
"Auto",
"open",
"the",
"project",
"with",
"auto",
"open",
"enable"
] | python | train |
DLR-RM/RAFCON | source/rafcon/core/states/container_state.py | https://github.com/DLR-RM/RAFCON/blob/24942ef1a904531f49ab8830a1dbb604441be498/source/rafcon/core/states/container_state.py#L1047-L1070 | def change_state_type(self, state, new_state_class):
""" Changes the type of the state to another type
:param state: the state to be changed
:param new_state_class: the new type of the state
:return: the new state having the new state type
:rtype: :py:class:`rafcon.core.states.state.State`
:raises exceptions.ValueError: if the state does not exist in the container state
"""
from rafcon.gui.helpers.state import create_new_state_from_state_with_type
state_id = state.state_id
if state_id not in self.states:
raise ValueError("State '{0}' with id '{1}' does not exist".format(state.name, state_id))
new_state = create_new_state_from_state_with_type(state, new_state_class)
new_state.parent = self
assert new_state.state_id == state_id
self.states[state_id] = new_state
return new_state | [
"def",
"change_state_type",
"(",
"self",
",",
"state",
",",
"new_state_class",
")",
":",
"from",
"rafcon",
".",
"gui",
".",
"helpers",
".",
"state",
"import",
"create_new_state_from_state_with_type",
"state_id",
"=",
"state",
".",
"state_id",
"if",
"state_id",
"not",
"in",
"self",
".",
"states",
":",
"raise",
"ValueError",
"(",
"\"State '{0}' with id '{1}' does not exist\"",
".",
"format",
"(",
"state",
".",
"name",
",",
"state_id",
")",
")",
"new_state",
"=",
"create_new_state_from_state_with_type",
"(",
"state",
",",
"new_state_class",
")",
"new_state",
".",
"parent",
"=",
"self",
"assert",
"new_state",
".",
"state_id",
"==",
"state_id",
"self",
".",
"states",
"[",
"state_id",
"]",
"=",
"new_state",
"return",
"new_state"
] | Changes the type of the state to another type
:param state: the state to be changed
:param new_state_class: the new type of the state
:return: the new state having the new state type
:rtype: :py:class:`rafcon.core.states.state.State`
:raises exceptions.ValueError: if the state does not exist in the container state | [
"Changes",
"the",
"type",
"of",
"the",
"state",
"to",
"another",
"type"
] | python | train |
tdryer/hangups | hangups/conversation.py | https://github.com/tdryer/hangups/blob/85c0bf0a57698d077461283895707260f9dbf931/hangups/conversation.py#L874-L906 | async def _get_or_fetch_conversation(self, conv_id):
"""Get a cached conversation or fetch a missing conversation.
Args:
conv_id: string, conversation identifier
Raises:
NetworkError: If the request to fetch the conversation fails.
Returns:
:class:`.Conversation` with matching ID.
"""
conv = self._conv_dict.get(conv_id, None)
if conv is None:
logger.info('Fetching unknown conversation %s', conv_id)
res = await self._client.get_conversation(
hangouts_pb2.GetConversationRequest(
request_header=self._client.get_request_header(),
conversation_spec=hangouts_pb2.ConversationSpec(
conversation_id=hangouts_pb2.ConversationId(
id=conv_id
)
), include_event=False
)
)
conv_state = res.conversation_state
event_cont_token = None
if conv_state.HasField('event_continuation_token'):
event_cont_token = conv_state.event_continuation_token
return self._add_conversation(conv_state.conversation,
event_cont_token=event_cont_token)
else:
return conv | [
"async",
"def",
"_get_or_fetch_conversation",
"(",
"self",
",",
"conv_id",
")",
":",
"conv",
"=",
"self",
".",
"_conv_dict",
".",
"get",
"(",
"conv_id",
",",
"None",
")",
"if",
"conv",
"is",
"None",
":",
"logger",
".",
"info",
"(",
"'Fetching unknown conversation %s'",
",",
"conv_id",
")",
"res",
"=",
"await",
"self",
".",
"_client",
".",
"get_conversation",
"(",
"hangouts_pb2",
".",
"GetConversationRequest",
"(",
"request_header",
"=",
"self",
".",
"_client",
".",
"get_request_header",
"(",
")",
",",
"conversation_spec",
"=",
"hangouts_pb2",
".",
"ConversationSpec",
"(",
"conversation_id",
"=",
"hangouts_pb2",
".",
"ConversationId",
"(",
"id",
"=",
"conv_id",
")",
")",
",",
"include_event",
"=",
"False",
")",
")",
"conv_state",
"=",
"res",
".",
"conversation_state",
"event_cont_token",
"=",
"None",
"if",
"conv_state",
".",
"HasField",
"(",
"'event_continuation_token'",
")",
":",
"event_cont_token",
"=",
"conv_state",
".",
"event_continuation_token",
"return",
"self",
".",
"_add_conversation",
"(",
"conv_state",
".",
"conversation",
",",
"event_cont_token",
"=",
"event_cont_token",
")",
"else",
":",
"return",
"conv"
] | Get a cached conversation or fetch a missing conversation.
Args:
conv_id: string, conversation identifier
Raises:
NetworkError: If the request to fetch the conversation fails.
Returns:
:class:`.Conversation` with matching ID. | [
"Get",
"a",
"cached",
"conversation",
"or",
"fetch",
"a",
"missing",
"conversation",
"."
] | python | valid |
openvax/mhcnames | mhcnames/class2.py | https://github.com/openvax/mhcnames/blob/71694b9d620db68ceee44da1b8422ff436f15bd3/mhcnames/class2.py#L21-L39 | def infer_alpha_chain(beta):
"""
Given a parsed beta chain of a class II MHC, infer the most frequent
corresponding alpha chain.
"""
if beta.gene.startswith("DRB"):
return AlleleName(species="HLA", gene="DRA1", allele_family="01", allele_code="01")
elif beta.gene.startswith("DPB"):
# Most common alpha chain for DP is DPA*01:03 but we really
# need to change this logic to use a lookup table of pairwise
# frequencies for inferring the alpha-beta pairing
return AlleleName(
species="HLA", gene="DPA1", allele_family="01", allele_code="03")
elif beta.gene.startswith("DQB"):
# Most common DQ alpha (according to wikipedia)
# DQA1*01:02
return AlleleName(
species="HLA", gene="DQA1", allele_family="01", allele_code="02")
return None | [
"def",
"infer_alpha_chain",
"(",
"beta",
")",
":",
"if",
"beta",
".",
"gene",
".",
"startswith",
"(",
"\"DRB\"",
")",
":",
"return",
"AlleleName",
"(",
"species",
"=",
"\"HLA\"",
",",
"gene",
"=",
"\"DRA1\"",
",",
"allele_family",
"=",
"\"01\"",
",",
"allele_code",
"=",
"\"01\"",
")",
"elif",
"beta",
".",
"gene",
".",
"startswith",
"(",
"\"DPB\"",
")",
":",
"# Most common alpha chain for DP is DPA*01:03 but we really",
"# need to change this logic to use a lookup table of pairwise",
"# frequencies for inferring the alpha-beta pairing",
"return",
"AlleleName",
"(",
"species",
"=",
"\"HLA\"",
",",
"gene",
"=",
"\"DPA1\"",
",",
"allele_family",
"=",
"\"01\"",
",",
"allele_code",
"=",
"\"03\"",
")",
"elif",
"beta",
".",
"gene",
".",
"startswith",
"(",
"\"DQB\"",
")",
":",
"# Most common DQ alpha (according to wikipedia)",
"# DQA1*01:02",
"return",
"AlleleName",
"(",
"species",
"=",
"\"HLA\"",
",",
"gene",
"=",
"\"DQA1\"",
",",
"allele_family",
"=",
"\"01\"",
",",
"allele_code",
"=",
"\"02\"",
")",
"return",
"None"
] | Given a parsed beta chain of a class II MHC, infer the most frequent
corresponding alpha chain. | [
"Given",
"a",
"parsed",
"beta",
"chain",
"of",
"a",
"class",
"II",
"MHC",
"infer",
"the",
"most",
"frequent",
"corresponding",
"alpha",
"chain",
"."
] | python | train |
allenai/allennlp | allennlp/common/util.py | https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/common/util.py#L177-L206 | def prepare_environment(params: Params):
"""
Sets random seeds for reproducible experiments. This may not work as expected
if you use this from within a python project in which you have already imported Pytorch.
If you use the scripts/run_model.py entry point to training models with this library,
your experiments should be reasonably reproducible. If you are using this from your own
project, you will want to call this function before importing Pytorch. Complete determinism
is very difficult to achieve with libraries doing optimized linear algebra due to massively
parallel execution, which is exacerbated by using GPUs.
Parameters
----------
params: Params object or dict, required.
A ``Params`` object or dict holding the json parameters.
"""
seed = params.pop_int("random_seed", 13370)
numpy_seed = params.pop_int("numpy_seed", 1337)
torch_seed = params.pop_int("pytorch_seed", 133)
if seed is not None:
random.seed(seed)
if numpy_seed is not None:
numpy.random.seed(numpy_seed)
if torch_seed is not None:
torch.manual_seed(torch_seed)
# Seed all GPUs with the same seed if available.
if torch.cuda.is_available():
torch.cuda.manual_seed_all(torch_seed)
log_pytorch_version_info() | [
"def",
"prepare_environment",
"(",
"params",
":",
"Params",
")",
":",
"seed",
"=",
"params",
".",
"pop_int",
"(",
"\"random_seed\"",
",",
"13370",
")",
"numpy_seed",
"=",
"params",
".",
"pop_int",
"(",
"\"numpy_seed\"",
",",
"1337",
")",
"torch_seed",
"=",
"params",
".",
"pop_int",
"(",
"\"pytorch_seed\"",
",",
"133",
")",
"if",
"seed",
"is",
"not",
"None",
":",
"random",
".",
"seed",
"(",
"seed",
")",
"if",
"numpy_seed",
"is",
"not",
"None",
":",
"numpy",
".",
"random",
".",
"seed",
"(",
"numpy_seed",
")",
"if",
"torch_seed",
"is",
"not",
"None",
":",
"torch",
".",
"manual_seed",
"(",
"torch_seed",
")",
"# Seed all GPUs with the same seed if available.",
"if",
"torch",
".",
"cuda",
".",
"is_available",
"(",
")",
":",
"torch",
".",
"cuda",
".",
"manual_seed_all",
"(",
"torch_seed",
")",
"log_pytorch_version_info",
"(",
")"
] | Sets random seeds for reproducible experiments. This may not work as expected
if you use this from within a python project in which you have already imported Pytorch.
If you use the scripts/run_model.py entry point to train models with this library,
your experiments should be reasonably reproducible. If you are using this from your own
project, you will want to call this function before importing Pytorch. Complete determinism
is very difficult to achieve with libraries doing optimized linear algebra due to massively
parallel execution, which is exacerbated by using GPUs.
Parameters
----------
params: Params object or dict, required.
A ``Params`` object or dict holding the json parameters. | [
"Sets",
"random",
"seeds",
"for",
"reproducible",
"experiments",
".",
"This",
"may",
"not",
"work",
"as",
"expected",
"if",
"you",
"use",
"this",
"from",
"within",
"a",
"python",
"project",
"in",
"which",
"you",
"have",
"already",
"imported",
"Pytorch",
".",
"If",
"you",
"use",
"the",
"scripts",
"/",
"run_model",
".",
"py",
"entry",
"point",
"to",
"training",
"models",
"with",
"this",
"library",
"your",
"experiments",
"should",
"be",
"reasonably",
"reproducible",
".",
"If",
"you",
"are",
"using",
"this",
"from",
"your",
"own",
"project",
"you",
"will",
"want",
"to",
"call",
"this",
"function",
"before",
"importing",
"Pytorch",
".",
"Complete",
"determinism",
"is",
"very",
"difficult",
"to",
"achieve",
"with",
"libraries",
"doing",
"optimized",
"linear",
"algebra",
"due",
"to",
"massively",
"parallel",
"execution",
"which",
"is",
"exacerbated",
"by",
"using",
"GPUs",
"."
] | python | train |
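A minimal sketch of calling `prepare_environment` from the record above. The `Params` import location is an assumption inferred from the package layout; the seed values simply mirror the defaults in the record.

```python
# Illustrative only: seed python, numpy and torch RNGs before building a model.
from allennlp.common.params import Params             # assumed import path
from allennlp.common.util import prepare_environment  # module path from the record

prepare_environment(Params({"random_seed": 13370,
                            "numpy_seed": 1337,
                            "pytorch_seed": 133}))
```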
Qiskit/qiskit-terra | qiskit/compiler/transpiler.py | https://github.com/Qiskit/qiskit-terra/blob/d4f58d903bc96341b816f7c35df936d6421267d1/qiskit/compiler/transpiler.py#L153-L178 | def _transpile_circuit(circuit_config_tuple):
"""Select a PassManager and run a single circuit through it.
Args:
circuit_config_tuple (tuple):
circuit (QuantumCircuit): circuit to transpile
transpile_config (TranspileConfig): configuration dictating how to transpile
Returns:
QuantumCircuit: transpiled circuit
"""
circuit, transpile_config = circuit_config_tuple
# if the pass manager is not already selected, choose an appropriate one.
if transpile_config.pass_manager:
pass_manager = transpile_config.pass_manager
elif transpile_config.coupling_map:
pass_manager = default_pass_manager(transpile_config.basis_gates,
transpile_config.coupling_map,
transpile_config.initial_layout,
transpile_config.seed_transpiler)
else:
pass_manager = default_pass_manager_simulator(transpile_config.basis_gates)
return pass_manager.run(circuit) | [
"def",
"_transpile_circuit",
"(",
"circuit_config_tuple",
")",
":",
"circuit",
",",
"transpile_config",
"=",
"circuit_config_tuple",
"# if the pass manager is not already selected, choose an appropriate one.",
"if",
"transpile_config",
".",
"pass_manager",
":",
"pass_manager",
"=",
"transpile_config",
".",
"pass_manager",
"elif",
"transpile_config",
".",
"coupling_map",
":",
"pass_manager",
"=",
"default_pass_manager",
"(",
"transpile_config",
".",
"basis_gates",
",",
"transpile_config",
".",
"coupling_map",
",",
"transpile_config",
".",
"initial_layout",
",",
"transpile_config",
".",
"seed_transpiler",
")",
"else",
":",
"pass_manager",
"=",
"default_pass_manager_simulator",
"(",
"transpile_config",
".",
"basis_gates",
")",
"return",
"pass_manager",
".",
"run",
"(",
"circuit",
")"
] | Select a PassManager and run a single circuit through it.
Args:
circuit_config_tuple (tuple):
circuit (QuantumCircuit): circuit to transpile
transpile_config (TranspileConfig): configuration dictating how to transpile
Returns:
QuantumCircuit: transpiled circuit | [
"Select",
"a",
"PassManager",
"and",
"run",
"a",
"single",
"circuit",
"through",
"it",
"."
] | python | test |
AustralianSynchrotron/lightflow | lightflow/models/workflow.py | https://github.com/AustralianSynchrotron/lightflow/blob/dc53dbc1d961e20fb144273baca258060705c03e/lightflow/models/workflow.py#L230-L257 | def _queue_dag(self, name, *, data=None):
""" Add a new dag to the queue.
If the stop workflow flag is set, no new dag can be queued.
Args:
name (str): The name of the dag that should be queued.
data (MultiTaskData): The data that should be passed on to the new dag.
Raises:
DagNameUnknown: If the specified dag name does not exist
Returns:
str: The name of the queued dag.
"""
if self._stop_workflow:
return None
if name not in self._dags_blueprint:
raise DagNameUnknown()
new_dag = copy.deepcopy(self._dags_blueprint[name])
new_dag.workflow_name = self.name
self._dags_running[new_dag.name] = self._celery_app.send_task(
JobExecPath.Dag, args=(new_dag, self._workflow_id, data),
queue=new_dag.queue, routing_key=new_dag.queue)
return new_dag.name | [
"def",
"_queue_dag",
"(",
"self",
",",
"name",
",",
"*",
",",
"data",
"=",
"None",
")",
":",
"if",
"self",
".",
"_stop_workflow",
":",
"return",
"None",
"if",
"name",
"not",
"in",
"self",
".",
"_dags_blueprint",
":",
"raise",
"DagNameUnknown",
"(",
")",
"new_dag",
"=",
"copy",
".",
"deepcopy",
"(",
"self",
".",
"_dags_blueprint",
"[",
"name",
"]",
")",
"new_dag",
".",
"workflow_name",
"=",
"self",
".",
"name",
"self",
".",
"_dags_running",
"[",
"new_dag",
".",
"name",
"]",
"=",
"self",
".",
"_celery_app",
".",
"send_task",
"(",
"JobExecPath",
".",
"Dag",
",",
"args",
"=",
"(",
"new_dag",
",",
"self",
".",
"_workflow_id",
",",
"data",
")",
",",
"queue",
"=",
"new_dag",
".",
"queue",
",",
"routing_key",
"=",
"new_dag",
".",
"queue",
")",
"return",
"new_dag",
".",
"name"
] | Add a new dag to the queue.
If the stop workflow flag is set, no new dag can be queued.
Args:
name (str): The name of the dag that should be queued.
data (MultiTaskData): The data that should be passed on to the new dag.
Raises:
DagNameUnknown: If the specified dag name does not exist
Returns:
str: The name of the queued dag. | [
"Add",
"a",
"new",
"dag",
"to",
"the",
"queue",
"."
] | python | train |
rhayes777/PyAutoFit | autofit/tools/pipeline.py | https://github.com/rhayes777/PyAutoFit/blob/a9e6144abb08edfc6a6906c4030d7119bf8d3e14/autofit/tools/pipeline.py#L74-L97 | def from_phase(self, phase_name):
"""
Returns the result of a previous phase by its name
Parameters
----------
phase_name: str
The name of a previous phase
Returns
-------
result: Result
The result of that phase
Raises
------
exc.PipelineException
If no phase with the expected result is found
"""
try:
return self.__result_dict[phase_name]
except KeyError:
raise exc.PipelineException("No previous phase named {} found in results ({})".format(phase_name, ", ".join(
self.__result_dict.keys()))) | [
"def",
"from_phase",
"(",
"self",
",",
"phase_name",
")",
":",
"try",
":",
"return",
"self",
".",
"__result_dict",
"[",
"phase_name",
"]",
"except",
"KeyError",
":",
"raise",
"exc",
".",
"PipelineException",
"(",
"\"No previous phase named {} found in results ({})\"",
".",
"format",
"(",
"phase_name",
",",
"\", \"",
".",
"join",
"(",
"self",
".",
"__result_dict",
".",
"keys",
"(",
")",
")",
")",
")"
] | Returns the result of a previous phase by its name
Parameters
----------
phase_name: str
The name of a previous phase
Returns
-------
result: Result
The result of that phase
Raises
------
exc.PipelineException
If no phase with the expected result is found | [
"Returns",
"the",
"result",
"of",
"a",
"previous",
"phase",
"by",
"its",
"name"
] | python | train |
drslump/pyshould | pyshould/dsl.py | https://github.com/drslump/pyshould/blob/7210859d4c84cfbaa64f91b30c2a541aea788ddf/pyshould/dsl.py#L51-L57 | def none_of(value, *args):
""" None of the items in value should match """
if len(args):
value = (value,) + args
return ExpectationNone(value) | [
"def",
"none_of",
"(",
"value",
",",
"*",
"args",
")",
":",
"if",
"len",
"(",
"args",
")",
":",
"value",
"=",
"(",
"value",
",",
")",
"+",
"args",
"return",
"ExpectationNone",
"(",
"value",
")"
] | None of the items in value should match | [
"None",
"of",
"the",
"items",
"in",
"value",
"should",
"match"
] | python | train |
ContinuumIO/flask-ldap-login | flask_ldap_login/__init__.py | https://github.com/ContinuumIO/flask-ldap-login/blob/09a08be45f861823cb08f95883ee1e092a618c37/flask_ldap_login/__init__.py#L146-L154 | def attrlist(self):
'Transform the KEY_MAP paramiter into an attrlist for ldap filters'
keymap = self.config.get('KEY_MAP')
if keymap:
# https://github.com/ContinuumIO/flask-ldap-login/issues/11
# https://continuumsupport.zendesk.com/agent/tickets/393
return [s.encode('utf-8') for s in keymap.values()]
else:
return None | [
"def",
"attrlist",
"(",
"self",
")",
":",
"keymap",
"=",
"self",
".",
"config",
".",
"get",
"(",
"'KEY_MAP'",
")",
"if",
"keymap",
":",
"# https://github.com/ContinuumIO/flask-ldap-login/issues/11",
"# https://continuumsupport.zendesk.com/agent/tickets/393",
"return",
"[",
"s",
".",
"encode",
"(",
"'utf-8'",
")",
"for",
"s",
"in",
"keymap",
".",
"values",
"(",
")",
"]",
"else",
":",
"return",
"None"
] | Transform the KEY_MAP parameter into an attrlist for ldap filters | [
"Transform",
"the",
"KEY_MAP",
"paramiter",
"into",
"an",
"attrlist",
"for",
"ldap",
"filters"
] | python | train |
fermiPy/fermipy | fermipy/jobs/file_archive.py | https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/jobs/file_archive.py#L96-L119 | def latch_file_info(self, args):
"""Extract the file paths from a set of arguments
"""
self.file_dict.clear()
for key, val in self.file_args.items():
try:
file_path = args[key]
if file_path is None:
continue
# 'args' is special
if key[0:4] == 'args':
if isinstance(file_path, list):
tokens = file_path
elif isinstance(file_path, str):
tokens = file_path.split()
else:
raise TypeError(
"Args has type %s, expect list or str" % type(file_path))
for token in tokens:
self.file_dict[token.replace('.gz', '')] = val
else:
self.file_dict[file_path.replace('.gz', '')] = val
except KeyError:
pass | [
"def",
"latch_file_info",
"(",
"self",
",",
"args",
")",
":",
"self",
".",
"file_dict",
".",
"clear",
"(",
")",
"for",
"key",
",",
"val",
"in",
"self",
".",
"file_args",
".",
"items",
"(",
")",
":",
"try",
":",
"file_path",
"=",
"args",
"[",
"key",
"]",
"if",
"file_path",
"is",
"None",
":",
"continue",
"# 'args' is special",
"if",
"key",
"[",
"0",
":",
"4",
"]",
"==",
"'args'",
":",
"if",
"isinstance",
"(",
"file_path",
",",
"list",
")",
":",
"tokens",
"=",
"file_path",
"elif",
"isinstance",
"(",
"file_path",
",",
"str",
")",
":",
"tokens",
"=",
"file_path",
".",
"split",
"(",
")",
"else",
":",
"raise",
"TypeError",
"(",
"\"Args has type %s, expect list or str\"",
"%",
"type",
"(",
"file_path",
")",
")",
"for",
"token",
"in",
"tokens",
":",
"self",
".",
"file_dict",
"[",
"token",
".",
"replace",
"(",
"'.gz'",
",",
"''",
")",
"]",
"=",
"val",
"else",
":",
"self",
".",
"file_dict",
"[",
"file_path",
".",
"replace",
"(",
"'.gz'",
",",
"''",
")",
"]",
"=",
"val",
"except",
"KeyError",
":",
"pass"
] | Extract the file paths from a set of arguments | [
"Extract",
"the",
"file",
"paths",
"from",
"a",
"set",
"of",
"arguments"
] | python | train |
scopus-api/scopus | scopus/abstract_citations.py | https://github.com/scopus-api/scopus/blob/27ce02dd3095bfdab9d3e8475543d7c17767d1ab/scopus/abstract_citations.py#L28-L35 | def cc(self):
"""List of tuples of yearly number of citations
for specified years."""
_years = range(self._start, self._end+1)
try:
return list(zip(_years, [d.get('$') for d in self._citeInfoMatrix['cc']]))
except AttributeError: # No citations
return list(zip(_years, [0]*len(_years))) | [
"def",
"cc",
"(",
"self",
")",
":",
"_years",
"=",
"range",
"(",
"self",
".",
"_start",
",",
"self",
".",
"_end",
"+",
"1",
")",
"try",
":",
"return",
"list",
"(",
"zip",
"(",
"_years",
",",
"[",
"d",
".",
"get",
"(",
"'$'",
")",
"for",
"d",
"in",
"self",
".",
"_citeInfoMatrix",
"[",
"'cc'",
"]",
"]",
")",
")",
"except",
"AttributeError",
":",
"# No citations",
"return",
"list",
"(",
"zip",
"(",
"_years",
",",
"[",
"0",
"]",
"*",
"len",
"(",
"_years",
")",
")",
")"
] | List of tuples of yearly number of citations
for specified years. | [
"List",
"of",
"tuples",
"of",
"yearly",
"number",
"of",
"citations",
"for",
"specified",
"years",
"."
] | python | train |
PMEAL/OpenPNM | openpnm/core/Base.py | https://github.com/PMEAL/OpenPNM/blob/0547b5724ffedc0a593aae48639d36fe10e0baed/openpnm/core/Base.py#L702-L770 | def pores(self, labels='all', mode='or', asmask=False):
r"""
Returns pore indices where given labels exist, according to the logic
specified by the ``mode`` argument.
Parameters
----------
labels : string or list of strings
The label(s) whose pores locations are requested. This argument
also accepts '*' for wildcard searches.
mode : string
Specifies how the query should be performed. The options are:
**'or', 'union', 'any'** : (default) Pores with *one or more* of
the given labels are returned.
**'and', 'intersection', 'all'** : Pores with *all* of the given
labels are returned.
**'xor', 'exclusive_or'** : Pores with *only one* of the given
labels are returned.
**'nor', 'none', 'not'** : Pores with *none* of the given labels
are returned.
**'nand'** : Pores with *not all* of the given labels are
returned.
**'xnor'** : Pores with *more than one* of the given labels are
returned.
asmask : boolean
If ``True`` then a boolean array of length Np is returned with
``True`` values indicating the pores that satisfy the query.
Returns
-------
A Numpy array containing pore indices filtered by the logic specified
in ``mode``.
See Also
--------
throats
Notes
-----
Technically, *nand* and *xnor* should also return pores with *none* of
the labels but these are not included. This makes the returned list
more useful.
To perform more complex or compound queries, you can opt to receive
the result as a boolean mask (``asmask=True``), then manipulate the
arrays manually.
Examples
--------
>>> import openpnm as op
>>> pn = op.network.Cubic(shape=[5, 5, 5])
>>> Ps = pn.pores(labels=['top', 'front'], mode='union')
>>> Ps[:5] # Look at first 5 pore indices
array([0, 1, 2, 3, 4])
>>> pn.pores(labels=['top', 'front'], mode='xnor')
array([ 4, 9, 14, 19, 24])
"""
ind = self._get_indices(element='pore', labels=labels, mode=mode)
if asmask:
ind = self.tomask(pores=ind)
return ind | [
"def",
"pores",
"(",
"self",
",",
"labels",
"=",
"'all'",
",",
"mode",
"=",
"'or'",
",",
"asmask",
"=",
"False",
")",
":",
"ind",
"=",
"self",
".",
"_get_indices",
"(",
"element",
"=",
"'pore'",
",",
"labels",
"=",
"labels",
",",
"mode",
"=",
"mode",
")",
"if",
"asmask",
":",
"ind",
"=",
"self",
".",
"tomask",
"(",
"pores",
"=",
"ind",
")",
"return",
"ind"
] | r"""
Returns pore indices where given labels exist, according to the logic
specified by the ``mode`` argument.
Parameters
----------
labels : string or list of strings
The label(s) whose pores locations are requested. This argument
also accepts '*' for wildcard searches.
mode : string
Specifies how the query should be performed. The options are:
**'or', 'union', 'any'** : (default) Pores with *one or more* of
the given labels are returned.
**'and', 'intersection', 'all'** : Pores with *all* of the given
labels are returned.
**'xor', 'exclusive_or'** : Pores with *only one* of the given
labels are returned.
**'nor', 'none', 'not'** : Pores with *none* of the given labels
are returned.
**'nand'** : Pores with *not all* of the given labels are
returned.
**'xnor'** : Pores with *more than one* of the given labels are
returned.
asmask : boolean
If ``True`` then a boolean array of length Np is returned with
``True`` values indicating the pores that satisfy the query.
Returns
-------
A Numpy array containing pore indices filtered by the logic specified
in ``mode``.
See Also
--------
throats
Notes
-----
Technically, *nand* and *xnor* should also return pores with *none* of
the labels but these are not included. This makes the returned list
more useful.
To perform more complex or compound queries, you can opt to receive
the result as a boolean mask (``asmask=True``), then manipulate the
arrays manually.
Examples
--------
>>> import openpnm as op
>>> pn = op.network.Cubic(shape=[5, 5, 5])
>>> Ps = pn.pores(labels=['top', 'front'], mode='union')
>>> Ps[:5] # Look at first 5 pore indices
array([0, 1, 2, 3, 4])
>>> pn.pores(labels=['top', 'front'], mode='xnor')
array([ 4, 9, 14, 19, 24]) | [
"r",
"Returns",
"pore",
"indicies",
"where",
"given",
"labels",
"exist",
"according",
"to",
"the",
"logic",
"specified",
"by",
"the",
"mode",
"argument",
"."
] | python | train |
s-m-i-t-a/railroad | railroad/guard.py | https://github.com/s-m-i-t-a/railroad/blob/ddb4afa018b8523b5d8c3a86e55388d1ea0ab37c/railroad/guard.py#L36-L56 | def guard(params, guardian, error_class=GuardError, message=''):
'''
A guard function - check parameters
with guardian function on decorated function
:param tuple or string params: guarded function parameter/s
:param function guardian: verifying the conditions for the selected parameter
:param Exception error_class: raised class when guardian return false
:param string message: error message
'''
params = [params] if isinstance(params, string_types) else params
def guard_decorate(f):
@wraps(f)
def _guard_decorate(*args, **kwargs):
if guardian(**_params(f, args, kwargs, params)):
return f(*args, **kwargs)
else:
raise error_class(message)
return _guard_decorate
return guard_decorate | [
"def",
"guard",
"(",
"params",
",",
"guardian",
",",
"error_class",
"=",
"GuardError",
",",
"message",
"=",
"''",
")",
":",
"params",
"=",
"[",
"params",
"]",
"if",
"isinstance",
"(",
"params",
",",
"string_types",
")",
"else",
"params",
"def",
"guard_decorate",
"(",
"f",
")",
":",
"@",
"wraps",
"(",
"f",
")",
"def",
"_guard_decorate",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"guardian",
"(",
"*",
"*",
"_params",
"(",
"f",
",",
"args",
",",
"kwargs",
",",
"params",
")",
")",
":",
"return",
"f",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"else",
":",
"raise",
"error_class",
"(",
"message",
")",
"return",
"_guard_decorate",
"return",
"guard_decorate"
] | A guard function - check parameters
with guardian function on decorated function
:param tuple or string params: guarded function parameter/s
:param function guardian: verifying the conditions for the selected parameter
:param Exception error_class: raised class when guardian return false
:param string message: error message | [
"A",
"guard",
"function",
"-",
"check",
"parameters",
"with",
"guardian",
"function",
"on",
"decorated",
"function"
] | python | train |
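A hedged usage sketch for the `guard` decorator above. The guardian is called with the guarded parameter as a keyword argument, so its signature must use the same name; `withdraw`, `balance`, `amount` and `is_positive` below are made up for illustration, and the import path is assumed.

```python
# Illustrative only.
from railroad.guard import guard, GuardError   # assumed import path

def is_positive(amount):
    return amount > 0

@guard('amount', is_positive, message='amount must be positive')
def withdraw(balance, amount):
    return balance - amount

print(withdraw(100, 25))   # 75
# withdraw(100, -5)        # would raise GuardError('amount must be positive')
```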
thomasdelaet/python-velbus | velbus/messages/write_module_address_and_serial_number.py | https://github.com/thomasdelaet/python-velbus/blob/af2f8af43f1a24bf854eff9f3126fd7b5c41b3dd/velbus/messages/write_module_address_and_serial_number.py#L31-L45 | def populate(self, priority, address, rtr, data):
"""
:return: None
"""
assert isinstance(data, bytes)
self.needs_firmware_priority(priority)
self.needs_no_rtr(rtr)
self.needs_data(data, 6)
self.set_attributes(priority, address, rtr)
self.module_type = data[0]
prefix = bytes([0, 0])
(self.current_serial,) = struct.unpack(
'>L', prefix + data[1] + data[2])
self.module_address = data[3]
(self.new_serial,) = struct.unpack('>L', prefix + data[4] + data[5]) | [
"def",
"populate",
"(",
"self",
",",
"priority",
",",
"address",
",",
"rtr",
",",
"data",
")",
":",
"assert",
"isinstance",
"(",
"data",
",",
"bytes",
")",
"self",
".",
"needs_firmware_priority",
"(",
"priority",
")",
"self",
".",
"needs_no_rtr",
"(",
"rtr",
")",
"self",
".",
"needs_data",
"(",
"data",
",",
"6",
")",
"self",
".",
"set_attributes",
"(",
"priority",
",",
"address",
",",
"rtr",
")",
"self",
".",
"module_type",
"=",
"data",
"[",
"0",
"]",
"prefix",
"=",
"bytes",
"(",
"[",
"0",
",",
"0",
"]",
")",
"(",
"self",
".",
"current_serial",
",",
")",
"=",
"struct",
".",
"unpack",
"(",
"'>L'",
",",
"prefix",
"+",
"data",
"[",
"1",
"]",
"+",
"data",
"[",
"2",
"]",
")",
"self",
".",
"module_address",
"=",
"data",
"[",
"3",
"]",
"(",
"self",
".",
"new_serial",
",",
")",
"=",
"struct",
".",
"unpack",
"(",
"'>L'",
",",
"prefix",
"+",
"data",
"[",
"4",
"]",
"+",
"data",
"[",
"5",
"]",
")"
] | :return: None | [
":",
"return",
":",
"None"
] | python | train |
mmp2/megaman | megaman/geometry/geometry.py | https://github.com/mmp2/megaman/blob/faccaf267aad0a8b18ec8a705735fd9dd838ca1e/megaman/geometry/geometry.py#L154-L182 | def compute_adjacency_matrix(self, copy=False, **kwargs):
"""
This function will compute the adjacency matrix.
In order to acquire the existing adjacency matrix use
self.adjacency_matrix as compute_adjacency_matrix() will re-compute
the adjacency matrix.
Parameters
----------
copy : boolean, whether to return a copied version of the adjacency matrix
**kwargs : see distance.py documentation for arguments for each method.
Returns
-------
self.adjacency_matrix : sparse matrix (N_obs, N_obs)
Non explicit 0.0 values should be considered not connected.
"""
if self.X is None:
raise ValueError(distance_error_msg)
kwds = self.adjacency_kwds.copy()
kwds.update(kwargs)
self.adjacency_matrix = compute_adjacency_matrix(self.X,
self.adjacency_method,
**kwds)
if copy:
return self.adjacency_matrix.copy()
else:
return self.adjacency_matrix | [
"def",
"compute_adjacency_matrix",
"(",
"self",
",",
"copy",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"self",
".",
"X",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"distance_error_msg",
")",
"kwds",
"=",
"self",
".",
"adjacency_kwds",
".",
"copy",
"(",
")",
"kwds",
".",
"update",
"(",
"kwargs",
")",
"self",
".",
"adjacency_matrix",
"=",
"compute_adjacency_matrix",
"(",
"self",
".",
"X",
",",
"self",
".",
"adjacency_method",
",",
"*",
"*",
"kwds",
")",
"if",
"copy",
":",
"return",
"self",
".",
"adjacency_matrix",
".",
"copy",
"(",
")",
"else",
":",
"return",
"self",
".",
"adjacency_matrix"
] | This function will compute the adjacency matrix.
In order to acquire the existing adjacency matrix use
self.adjacency_matrix as compute_adjacency_matrix() will re-compute
the adjacency matrix.
Parameters
----------
copy : boolean, whether to return a copied version of the adjacency matrix
**kwargs : see distance.py docmuentation for arguments for each method.
Returns
-------
self.adjacency_matrix : sparse matrix (N_obs, N_obs)
Non explicit 0.0 values should be considered not connected. | [
"This",
"function",
"will",
"compute",
"the",
"adjacency",
"matrix",
".",
"In",
"order",
"to",
"acquire",
"the",
"existing",
"adjacency",
"matrix",
"use",
"self",
".",
"adjacency_matrix",
"as",
"comptute_adjacency_matrix",
"()",
"will",
"re",
"-",
"compute",
"the",
"adjacency",
"matrix",
"."
] | python | train |
hyperledger/sawtooth-core | validator/sawtooth_validator/execution/execution_context.py | https://github.com/hyperledger/sawtooth-core/blob/8cf473bc2207e51f02bd182d825158a57d72b098/validator/sawtooth_validator/execution/execution_context.py#L198-L212 | def get_all_if_set(self):
"""Return all the addresses and opaque values set in the context.
Useful in the squash method.
Returns:
(dict of str to bytes): The addresses and bytes that have
been set in the context.
"""
with self._lock:
results = {}
for add, fut in self._state.items():
if self._contains_and_set(add):
results[add] = fut.result()
return results | [
"def",
"get_all_if_set",
"(",
"self",
")",
":",
"with",
"self",
".",
"_lock",
":",
"results",
"=",
"{",
"}",
"for",
"add",
",",
"fut",
"in",
"self",
".",
"_state",
".",
"items",
"(",
")",
":",
"if",
"self",
".",
"_contains_and_set",
"(",
"add",
")",
":",
"results",
"[",
"add",
"]",
"=",
"fut",
".",
"result",
"(",
")",
"return",
"results"
] | Return all the addresses and opaque values set in the context.
Useful in the squash method.
Returns:
(dict of str to bytes): The addresses and bytes that have
been set in the context. | [
"Return",
"all",
"the",
"addresses",
"and",
"opaque",
"values",
"set",
"in",
"the",
"context",
".",
"Useful",
"in",
"the",
"squash",
"method",
"."
] | python | train |
materialsproject/pymatgen | pymatgen/io/vasp/inputs.py | https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/io/vasp/inputs.py#L1081-L1116 | def automatic_gamma_density(structure, kppa):
"""
Returns an automatic Kpoint object based on a structure and a kpoint
density. Uses Gamma centered meshes always. For GW.
Algorithm:
Uses a simple approach scaling the number of divisions along each
reciprocal lattice vector proportional to its length.
Args:
structure:
Input structure
kppa:
Grid density
"""
latt = structure.lattice
lengths = latt.abc
ngrid = kppa / structure.num_sites
mult = (ngrid * lengths[0] * lengths[1] * lengths[2]) ** (1 / 3)
num_div = [int(round(mult / l)) for l in lengths]
# ensure that numDiv[i] > 0
num_div = [i if i > 0 else 1 for i in num_div]
# VASP documentation recommends to use even grids for n <= 8 and odd
# grids for n > 8.
num_div = [i + i % 2 if i <= 8 else i - i % 2 + 1 for i in num_div]
style = Kpoints.supported_modes.Gamma
comment = "pymatgen 4.7.6+ generated KPOINTS with grid density = " + \
"{} / atom".format(kppa)
num_kpts = 0
return Kpoints(comment, num_kpts, style, [num_div], [0, 0, 0]) | [
"def",
"automatic_gamma_density",
"(",
"structure",
",",
"kppa",
")",
":",
"latt",
"=",
"structure",
".",
"lattice",
"lengths",
"=",
"latt",
".",
"abc",
"ngrid",
"=",
"kppa",
"/",
"structure",
".",
"num_sites",
"mult",
"=",
"(",
"ngrid",
"*",
"lengths",
"[",
"0",
"]",
"*",
"lengths",
"[",
"1",
"]",
"*",
"lengths",
"[",
"2",
"]",
")",
"**",
"(",
"1",
"/",
"3",
")",
"num_div",
"=",
"[",
"int",
"(",
"round",
"(",
"mult",
"/",
"l",
")",
")",
"for",
"l",
"in",
"lengths",
"]",
"# ensure that numDiv[i] > 0",
"num_div",
"=",
"[",
"i",
"if",
"i",
">",
"0",
"else",
"1",
"for",
"i",
"in",
"num_div",
"]",
"# VASP documentation recommends to use even grids for n <= 8 and odd",
"# grids for n > 8.",
"num_div",
"=",
"[",
"i",
"+",
"i",
"%",
"2",
"if",
"i",
"<=",
"8",
"else",
"i",
"-",
"i",
"%",
"2",
"+",
"1",
"for",
"i",
"in",
"num_div",
"]",
"style",
"=",
"Kpoints",
".",
"supported_modes",
".",
"Gamma",
"comment",
"=",
"\"pymatgen 4.7.6+ generated KPOINTS with grid density = \"",
"+",
"\"{} / atom\"",
".",
"format",
"(",
"kppa",
")",
"num_kpts",
"=",
"0",
"return",
"Kpoints",
"(",
"comment",
",",
"num_kpts",
",",
"style",
",",
"[",
"num_div",
"]",
",",
"[",
"0",
",",
"0",
",",
"0",
"]",
")"
] | Returns an automatic Kpoint object based on a structure and a kpoint
density. Uses Gamma centered meshes always. For GW.
Algorithm:
Uses a simple approach scaling the number of divisions along each
reciprocal lattice vector proportional to its length.
Args:
structure:
Input structure
kppa:
Grid density | [
"Returns",
"an",
"automatic",
"Kpoint",
"object",
"based",
"on",
"a",
"structure",
"and",
"a",
"kpoint",
"density",
".",
"Uses",
"Gamma",
"centered",
"meshes",
"always",
".",
"For",
"GW",
"."
] | python | train |
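A standalone restatement of the division rule described in the record above: divisions scale with the inverse of each lattice length, then counts of 8 or fewer are forced even and larger counts forced odd. It deliberately skips the `Kpoints` object; the lengths, density and site count are arbitrary.

```python
# Illustrative arithmetic only, detached from pymatgen.
import numpy as np

def gamma_grid_divisions(lengths, kppa, num_sites):
    ngrid = kppa / num_sites
    mult = (ngrid * np.prod(lengths)) ** (1 / 3)
    num_div = [max(1, int(round(mult / l))) for l in lengths]
    # even grids for n <= 8, odd grids for n > 8, mirroring the VASP note above
    return [n + n % 2 if n <= 8 else n - n % 2 + 1 for n in num_div]

print(gamma_grid_divisions((3.9, 3.9, 3.9), kppa=1000, num_sites=5))  # [6, 6, 6]
```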
QuantEcon/QuantEcon.py | quantecon/markov/core.py | https://github.com/QuantEcon/QuantEcon.py/blob/26a66c552f2a73967d7efb6e1f4b4c4985a12643/quantecon/markov/core.py#L441-L527 | def simulate_indices(self, ts_length, init=None, num_reps=None,
random_state=None):
"""
Simulate time series of state transitions, where state indices
are returned.
Parameters
----------
ts_length : scalar(int)
Length of each simulation.
init : int or array_like(int, ndim=1), optional
Initial state(s). If None, the initial state is randomly
drawn.
num_reps : scalar(int), optional(default=None)
Number of repetitions of simulation.
random_state : int or np.random.RandomState, optional
Random seed (integer) or np.random.RandomState instance to
set the initial state of the random number generator for
reproducibility. If None, a randomly initialized RandomState
is used.
Returns
-------
X : ndarray(ndim=1 or 2)
Array containing the state values of the sample path(s). See
the `simulate` method for more information.
"""
random_state = check_random_state(random_state)
dim = 1 # Dimension of the returned array: 1 or 2
msg_out_of_range = 'index {init} is out of the state space'
try:
k = len(init) # init is an array
dim = 2
init_states = np.asarray(init, dtype=int)
# Check init_states are in the state space
if (init_states >= self.n).any() or (init_states < -self.n).any():
idx = np.where(
(init_states >= self.n) + (init_states < -self.n)
)[0][0]
raise ValueError(msg_out_of_range.format(init=idx))
if num_reps is not None:
k *= num_reps
init_states = np.tile(init_states, num_reps)
except TypeError: # init is a scalar(int) or None
k = 1
if num_reps is not None:
dim = 2
k = num_reps
if init is None:
init_states = random_state.randint(self.n, size=k)
elif isinstance(init, numbers.Integral):
# Check init is in the state space
if init >= self.n or init < -self.n:
raise ValueError(msg_out_of_range.format(init=init))
init_states = np.ones(k, dtype=int) * init
else:
raise ValueError(
'init must be int, array_like of ints, or None'
)
# === set up array to store output === #
X = np.empty((k, ts_length), dtype=int)
# Random values, uniformly sampled from [0, 1)
random_values = random_state.random_sample(size=(k, ts_length-1))
# Generate sample paths and store in X
if not self.is_sparse: # Dense
_generate_sample_paths(
self.cdfs, init_states, random_values, out=X
)
else: # Sparse
_generate_sample_paths_sparse(
self.cdfs1d, self.P.indices, self.P.indptr, init_states,
random_values, out=X
)
if dim == 1:
return X[0]
else:
return X | [
"def",
"simulate_indices",
"(",
"self",
",",
"ts_length",
",",
"init",
"=",
"None",
",",
"num_reps",
"=",
"None",
",",
"random_state",
"=",
"None",
")",
":",
"random_state",
"=",
"check_random_state",
"(",
"random_state",
")",
"dim",
"=",
"1",
"# Dimension of the returned array: 1 or 2",
"msg_out_of_range",
"=",
"'index {init} is out of the state space'",
"try",
":",
"k",
"=",
"len",
"(",
"init",
")",
"# init is an array",
"dim",
"=",
"2",
"init_states",
"=",
"np",
".",
"asarray",
"(",
"init",
",",
"dtype",
"=",
"int",
")",
"# Check init_states are in the state space",
"if",
"(",
"init_states",
">=",
"self",
".",
"n",
")",
".",
"any",
"(",
")",
"or",
"(",
"init_states",
"<",
"-",
"self",
".",
"n",
")",
".",
"any",
"(",
")",
":",
"idx",
"=",
"np",
".",
"where",
"(",
"(",
"init_states",
">=",
"self",
".",
"n",
")",
"+",
"(",
"init_states",
"<",
"-",
"self",
".",
"n",
")",
")",
"[",
"0",
"]",
"[",
"0",
"]",
"raise",
"ValueError",
"(",
"msg_out_of_range",
".",
"format",
"(",
"init",
"=",
"idx",
")",
")",
"if",
"num_reps",
"is",
"not",
"None",
":",
"k",
"*=",
"num_reps",
"init_states",
"=",
"np",
".",
"tile",
"(",
"init_states",
",",
"num_reps",
")",
"except",
"TypeError",
":",
"# init is a scalar(int) or None",
"k",
"=",
"1",
"if",
"num_reps",
"is",
"not",
"None",
":",
"dim",
"=",
"2",
"k",
"=",
"num_reps",
"if",
"init",
"is",
"None",
":",
"init_states",
"=",
"random_state",
".",
"randint",
"(",
"self",
".",
"n",
",",
"size",
"=",
"k",
")",
"elif",
"isinstance",
"(",
"init",
",",
"numbers",
".",
"Integral",
")",
":",
"# Check init is in the state space",
"if",
"init",
">=",
"self",
".",
"n",
"or",
"init",
"<",
"-",
"self",
".",
"n",
":",
"raise",
"ValueError",
"(",
"msg_out_of_range",
".",
"format",
"(",
"init",
"=",
"init",
")",
")",
"init_states",
"=",
"np",
".",
"ones",
"(",
"k",
",",
"dtype",
"=",
"int",
")",
"*",
"init",
"else",
":",
"raise",
"ValueError",
"(",
"'init must be int, array_like of ints, or None'",
")",
"# === set up array to store output === #",
"X",
"=",
"np",
".",
"empty",
"(",
"(",
"k",
",",
"ts_length",
")",
",",
"dtype",
"=",
"int",
")",
"# Random values, uniformly sampled from [0, 1)",
"random_values",
"=",
"random_state",
".",
"random_sample",
"(",
"size",
"=",
"(",
"k",
",",
"ts_length",
"-",
"1",
")",
")",
"# Generate sample paths and store in X",
"if",
"not",
"self",
".",
"is_sparse",
":",
"# Dense",
"_generate_sample_paths",
"(",
"self",
".",
"cdfs",
",",
"init_states",
",",
"random_values",
",",
"out",
"=",
"X",
")",
"else",
":",
"# Sparse",
"_generate_sample_paths_sparse",
"(",
"self",
".",
"cdfs1d",
",",
"self",
".",
"P",
".",
"indices",
",",
"self",
".",
"P",
".",
"indptr",
",",
"init_states",
",",
"random_values",
",",
"out",
"=",
"X",
")",
"if",
"dim",
"==",
"1",
":",
"return",
"X",
"[",
"0",
"]",
"else",
":",
"return",
"X"
] | Simulate time series of state transitions, where state indices
are returned.
Parameters
----------
ts_length : scalar(int)
Length of each simulation.
init : int or array_like(int, ndim=1), optional
Initial state(s). If None, the initial state is randomly
drawn.
num_reps : scalar(int), optional(default=None)
Number of repetitions of simulation.
random_state : int or np.random.RandomState, optional
Random seed (integer) or np.random.RandomState instance to
set the initial state of the random number generator for
reproducibility. If None, a randomly initialized RandomState
is used.
Returns
-------
X : ndarray(ndim=1 or 2)
Array containing the state values of the sample path(s). See
the `simulate` method for more information. | [
"Simulate",
"time",
"series",
"of",
"state",
"transitions",
"where",
"state",
"indices",
"are",
"returned",
"."
] | python | train |
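A short usage sketch for `simulate_indices` above. The `MarkovChain` constructor and the top-level import are assumptions about the surrounding QuantEcon API; the transition matrix is arbitrary.

```python
# Illustrative only: sample state-index paths from a 2-state chain.
import numpy as np
from quantecon import MarkovChain   # assumed top-level export

P = np.array([[0.9, 0.1],
              [0.2, 0.8]])
mc = MarkovChain(P)

path = mc.simulate_indices(ts_length=10, init=0, random_state=1234)
print(path)          # 1-D array of 10 state indices

paths = mc.simulate_indices(ts_length=10, init=[0, 1], num_reps=3)
print(paths.shape)   # (6, 10): len(init) * num_reps sample paths
```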
tjcsl/cslbot | cslbot/helpers/handler.py | https://github.com/tjcsl/cslbot/blob/aebe07be47141f61d7c180706bddfb707f19b2b5/cslbot/helpers/handler.py#L186-L213 | def send(self, target, nick, msg, msgtype, ignore_length=False, filters=None):
"""Send a message.
Records the message in the log.
"""
if not isinstance(msg, str):
raise Exception("Trying to send a %s to irc, only strings allowed." % type(msg).__name__)
if filters is None:
filters = self.outputfilter[target]
for i in filters:
if target != self.config['core']['ctrlchan']:
msg = i(msg)
# Avoid spam from commands that produce excessive output.
if not ignore_length:
# Ignore everything after the first 800 chars.
msg = misc.truncate_msg(msg, 800)
# We can't send messages > 512 bytes to irc.
max_len = misc.get_max_length(target, msgtype)
msgs = self.build_split_msg(msg, max_len)
for i in msgs:
self.do_log(target, nick, i, msgtype)
if msgtype == 'action':
self.rate_limited_send('action', target, i)
else:
self.rate_limited_send('privmsg', target, i) | [
"def",
"send",
"(",
"self",
",",
"target",
",",
"nick",
",",
"msg",
",",
"msgtype",
",",
"ignore_length",
"=",
"False",
",",
"filters",
"=",
"None",
")",
":",
"if",
"not",
"isinstance",
"(",
"msg",
",",
"str",
")",
":",
"raise",
"Exception",
"(",
"\"Trying to send a %s to irc, only strings allowed.\"",
"%",
"type",
"(",
"msg",
")",
".",
"__name__",
")",
"if",
"filters",
"is",
"None",
":",
"filters",
"=",
"self",
".",
"outputfilter",
"[",
"target",
"]",
"for",
"i",
"in",
"filters",
":",
"if",
"target",
"!=",
"self",
".",
"config",
"[",
"'core'",
"]",
"[",
"'ctrlchan'",
"]",
":",
"msg",
"=",
"i",
"(",
"msg",
")",
"# Avoid spam from commands that produce excessive output.",
"if",
"not",
"ignore_length",
":",
"# Ignore everything after the first 800 chars.",
"msg",
"=",
"misc",
".",
"truncate_msg",
"(",
"msg",
",",
"800",
")",
"# We can't send messages > 512 bytes to irc.",
"max_len",
"=",
"misc",
".",
"get_max_length",
"(",
"target",
",",
"msgtype",
")",
"msgs",
"=",
"self",
".",
"build_split_msg",
"(",
"msg",
",",
"max_len",
")",
"for",
"i",
"in",
"msgs",
":",
"self",
".",
"do_log",
"(",
"target",
",",
"nick",
",",
"i",
",",
"msgtype",
")",
"if",
"msgtype",
"==",
"'action'",
":",
"self",
".",
"rate_limited_send",
"(",
"'action'",
",",
"target",
",",
"i",
")",
"else",
":",
"self",
".",
"rate_limited_send",
"(",
"'privmsg'",
",",
"target",
",",
"i",
")"
] | Send a message.
Records the message in the log. | [
"Send",
"a",
"message",
"."
] | python | train |
mlperf/training | reinforcement/tensorflow/minigo/mask_flags.py | https://github.com/mlperf/training/blob/1c6ae725a81d15437a2b2df05cac0673fde5c3a4/reinforcement/tensorflow/minigo/mask_flags.py#L50-L64 | def parse_helpfull_output(help_output, regex=FLAG_HELP_RE_PY):
"""Parses the output of --helpfull.
Args:
help_output: str, the full output of --helpfull.
Returns:
A set of flags that are valid flags.
"""
valid_flags = set()
for _, no_prefix, flag_name in regex.findall(help_output):
valid_flags.add('--' + flag_name)
if no_prefix:
valid_flags.add('--no' + flag_name)
return valid_flags | [
"def",
"parse_helpfull_output",
"(",
"help_output",
",",
"regex",
"=",
"FLAG_HELP_RE_PY",
")",
":",
"valid_flags",
"=",
"set",
"(",
")",
"for",
"_",
",",
"no_prefix",
",",
"flag_name",
"in",
"regex",
".",
"findall",
"(",
"help_output",
")",
":",
"valid_flags",
".",
"add",
"(",
"'--'",
"+",
"flag_name",
")",
"if",
"no_prefix",
":",
"valid_flags",
".",
"add",
"(",
"'--no'",
"+",
"flag_name",
")",
"return",
"valid_flags"
] | Parses the output of --helpfull.
Args:
help_output: str, the full output of --helpfull.
Returns:
A set of flags that are valid flags. | [
"Parses",
"the",
"output",
"of",
"--",
"helpfull",
"."
] | python | train |
evhub/coconut | coconut/compiler/compiler.py | https://github.com/evhub/coconut/blob/ff97177344e7604e89a0a98a977a87ed2a56fc6d/coconut/compiler/compiler.py#L457-L469 | def adjust(self, ln):
"""Converts a parsing line number into an original line number."""
adj_ln = ln
need_unskipped = 0
for i in self.skips:
if i <= ln:
need_unskipped += 1
elif adj_ln + need_unskipped < i:
break
else:
need_unskipped -= i - adj_ln - 1
adj_ln = i
return adj_ln + need_unskipped | [
"def",
"adjust",
"(",
"self",
",",
"ln",
")",
":",
"adj_ln",
"=",
"ln",
"need_unskipped",
"=",
"0",
"for",
"i",
"in",
"self",
".",
"skips",
":",
"if",
"i",
"<=",
"ln",
":",
"need_unskipped",
"+=",
"1",
"elif",
"adj_ln",
"+",
"need_unskipped",
"<",
"i",
":",
"break",
"else",
":",
"need_unskipped",
"-=",
"i",
"-",
"adj_ln",
"-",
"1",
"adj_ln",
"=",
"i",
"return",
"adj_ln",
"+",
"need_unskipped"
] | Converts a parsing line number into an original line number. | [
"Converts",
"a",
"parsing",
"line",
"number",
"into",
"an",
"original",
"line",
"number",
"."
] | python | train |
quantopian/alphalens | alphalens/plotting.py | https://github.com/quantopian/alphalens/blob/d43eac871bb061e956df936794d3dd514da99e44/alphalens/plotting.py#L192-L245 | def plot_ic_ts(ic, ax=None):
"""
Plots Spearman Rank Information Coefficient and IC moving
average for a given factor.
Parameters
----------
ic : pd.DataFrame
DataFrame indexed by date, with IC for each forward return.
ax : matplotlib.Axes, optional
Axes upon which to plot.
Returns
-------
ax : matplotlib.Axes
The axes that were plotted on.
"""
ic = ic.copy()
num_plots = len(ic.columns)
if ax is None:
f, ax = plt.subplots(num_plots, 1, figsize=(18, num_plots * 7))
ax = np.asarray([ax]).flatten()
ymin, ymax = (None, None)
for a, (period_num, ic) in zip(ax, ic.iteritems()):
ic.plot(alpha=0.7, ax=a, lw=0.7, color='steelblue')
ic.rolling(window=22).mean().plot(
ax=a,
color='forestgreen',
lw=2,
alpha=0.8
)
a.set(ylabel='IC', xlabel="")
a.set_title(
"{} Period Forward Return Information Coefficient (IC)"
.format(period_num))
a.axhline(0.0, linestyle='-', color='black', lw=1, alpha=0.8)
a.legend(['IC', '1 month moving avg'], loc='upper right')
a.text(.05, .95, "Mean %.3f \n Std. %.3f" % (ic.mean(), ic.std()),
fontsize=16,
bbox={'facecolor': 'white', 'alpha': 1, 'pad': 5},
transform=a.transAxes,
verticalalignment='top')
curr_ymin, curr_ymax = a.get_ylim()
ymin = curr_ymin if ymin is None else min(ymin, curr_ymin)
ymax = curr_ymax if ymax is None else max(ymax, curr_ymax)
for a in ax:
a.set_ylim([ymin, ymax])
return ax | [
"def",
"plot_ic_ts",
"(",
"ic",
",",
"ax",
"=",
"None",
")",
":",
"ic",
"=",
"ic",
".",
"copy",
"(",
")",
"num_plots",
"=",
"len",
"(",
"ic",
".",
"columns",
")",
"if",
"ax",
"is",
"None",
":",
"f",
",",
"ax",
"=",
"plt",
".",
"subplots",
"(",
"num_plots",
",",
"1",
",",
"figsize",
"=",
"(",
"18",
",",
"num_plots",
"*",
"7",
")",
")",
"ax",
"=",
"np",
".",
"asarray",
"(",
"[",
"ax",
"]",
")",
".",
"flatten",
"(",
")",
"ymin",
",",
"ymax",
"=",
"(",
"None",
",",
"None",
")",
"for",
"a",
",",
"(",
"period_num",
",",
"ic",
")",
"in",
"zip",
"(",
"ax",
",",
"ic",
".",
"iteritems",
"(",
")",
")",
":",
"ic",
".",
"plot",
"(",
"alpha",
"=",
"0.7",
",",
"ax",
"=",
"a",
",",
"lw",
"=",
"0.7",
",",
"color",
"=",
"'steelblue'",
")",
"ic",
".",
"rolling",
"(",
"window",
"=",
"22",
")",
".",
"mean",
"(",
")",
".",
"plot",
"(",
"ax",
"=",
"a",
",",
"color",
"=",
"'forestgreen'",
",",
"lw",
"=",
"2",
",",
"alpha",
"=",
"0.8",
")",
"a",
".",
"set",
"(",
"ylabel",
"=",
"'IC'",
",",
"xlabel",
"=",
"\"\"",
")",
"a",
".",
"set_title",
"(",
"\"{} Period Forward Return Information Coefficient (IC)\"",
".",
"format",
"(",
"period_num",
")",
")",
"a",
".",
"axhline",
"(",
"0.0",
",",
"linestyle",
"=",
"'-'",
",",
"color",
"=",
"'black'",
",",
"lw",
"=",
"1",
",",
"alpha",
"=",
"0.8",
")",
"a",
".",
"legend",
"(",
"[",
"'IC'",
",",
"'1 month moving avg'",
"]",
",",
"loc",
"=",
"'upper right'",
")",
"a",
".",
"text",
"(",
".05",
",",
".95",
",",
"\"Mean %.3f \\n Std. %.3f\"",
"%",
"(",
"ic",
".",
"mean",
"(",
")",
",",
"ic",
".",
"std",
"(",
")",
")",
",",
"fontsize",
"=",
"16",
",",
"bbox",
"=",
"{",
"'facecolor'",
":",
"'white'",
",",
"'alpha'",
":",
"1",
",",
"'pad'",
":",
"5",
"}",
",",
"transform",
"=",
"a",
".",
"transAxes",
",",
"verticalalignment",
"=",
"'top'",
")",
"curr_ymin",
",",
"curr_ymax",
"=",
"a",
".",
"get_ylim",
"(",
")",
"ymin",
"=",
"curr_ymin",
"if",
"ymin",
"is",
"None",
"else",
"min",
"(",
"ymin",
",",
"curr_ymin",
")",
"ymax",
"=",
"curr_ymax",
"if",
"ymax",
"is",
"None",
"else",
"max",
"(",
"ymax",
",",
"curr_ymax",
")",
"for",
"a",
"in",
"ax",
":",
"a",
".",
"set_ylim",
"(",
"[",
"ymin",
",",
"ymax",
"]",
")",
"return",
"ax"
] | Plots Spearman Rank Information Coefficient and IC moving
average for a given factor.
Parameters
----------
ic : pd.DataFrame
DataFrame indexed by date, with IC for each forward return.
ax : matplotlib.Axes, optional
Axes upon which to plot.
Returns
-------
ax : matplotlib.Axes
The axes that were plotted on. | [
"Plots",
"Spearman",
"Rank",
"Information",
"Coefficient",
"and",
"IC",
"moving",
"average",
"for",
"a",
"given",
"factor",
"."
] | python | train |
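A minimal sketch of feeding `plot_ic_ts` above a synthetic IC frame. The import follows the record's module path; the IC values are random numbers used purely to exercise the plot.

```python
# Illustrative only: one column of daily ICs per forward-return period.
import numpy as np
import pandas as pd
from alphalens.plotting import plot_ic_ts   # module path from the record

dates = pd.date_range("2020-01-01", periods=120, freq="B")
rng = np.random.RandomState(0)
ic = pd.DataFrame({"1D": rng.uniform(-0.1, 0.2, len(dates)),
                   "5D": rng.uniform(-0.1, 0.2, len(dates))},
                  index=dates)
plot_ic_ts(ic)
```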
RRZE-HPC/kerncraft | kerncraft/iaca.py | https://github.com/RRZE-HPC/kerncraft/blob/c60baf8043e4da8d8d66da7575021c2f4c6c78af/kerncraft/iaca.py#L251-L284 | def userselect_block(blocks, default=None, debug=False):
"""Let user interactively select block."""
print("Blocks found in assembly file:")
print(" block | OPs | pck. | AVX || Registers | ZMM | YMM | XMM | GP ||ptr.inc|\n"
"----------------+-----+------+-----++-----------+----------+----------+----------+---------++-------|")
for idx, b in blocks:
print('{:>2} {b[labels]!r:>12} | {b[ops]:>3} | {b[packed_instr]:>4} | {b[avx_instr]:>3} |'
'| {b[regs][0]:>3} ({b[regs][1]:>3}) | {b[ZMM][0]:>3} ({b[ZMM][1]:>2}) | '
'{b[YMM][0]:>3} ({b[YMM][1]:>2}) | '
'{b[XMM][0]:>3} ({b[XMM][1]:>2}) | {b[GP][0]:>2} ({b[GP][1]:>2}) || '
'{b[pointer_increment]!s:>5} |'.format(idx, b=b))
if debug:
ln = b['first_line']
print(' '*4 + 'Code:')
for l in b['lines']:
print(' '*8 + '{:>5} | {}'.format(ln, l))
ln += 1
print(' '*4 + 'Metadata:')
print(textwrap.indent(
pformat({k: v for k,v in b.items() if k not in ['lines']}),
' '*8))
# Let user select block:
block_idx = -1
while not (0 <= block_idx < len(blocks)):
block_idx = input("Choose block to be marked [" + str(default) + "]: ") or default
try:
block_idx = int(block_idx)
except ValueError:
block_idx = -1
# block = blocks[block_idx][1]
return block_idx | [
"def",
"userselect_block",
"(",
"blocks",
",",
"default",
"=",
"None",
",",
"debug",
"=",
"False",
")",
":",
"print",
"(",
"\"Blocks found in assembly file:\"",
")",
"print",
"(",
"\" block | OPs | pck. | AVX || Registers | ZMM | YMM | XMM | GP ||ptr.inc|\\n\"",
"\"----------------+-----+------+-----++-----------+----------+----------+----------+---------++-------|\"",
")",
"for",
"idx",
",",
"b",
"in",
"blocks",
":",
"print",
"(",
"'{:>2} {b[labels]!r:>12} | {b[ops]:>3} | {b[packed_instr]:>4} | {b[avx_instr]:>3} |'",
"'| {b[regs][0]:>3} ({b[regs][1]:>3}) | {b[ZMM][0]:>3} ({b[ZMM][1]:>2}) | '",
"'{b[YMM][0]:>3} ({b[YMM][1]:>2}) | '",
"'{b[XMM][0]:>3} ({b[XMM][1]:>2}) | {b[GP][0]:>2} ({b[GP][1]:>2}) || '",
"'{b[pointer_increment]!s:>5} |'",
".",
"format",
"(",
"idx",
",",
"b",
"=",
"b",
")",
")",
"if",
"debug",
":",
"ln",
"=",
"b",
"[",
"'first_line'",
"]",
"print",
"(",
"' '",
"*",
"4",
"+",
"'Code:'",
")",
"for",
"l",
"in",
"b",
"[",
"'lines'",
"]",
":",
"print",
"(",
"' '",
"*",
"8",
"+",
"'{:>5} | {}'",
".",
"format",
"(",
"ln",
",",
"l",
")",
")",
"ln",
"+=",
"1",
"print",
"(",
"' '",
"*",
"4",
"+",
"'Metadata:'",
")",
"print",
"(",
"textwrap",
".",
"indent",
"(",
"pformat",
"(",
"{",
"k",
":",
"v",
"for",
"k",
",",
"v",
"in",
"b",
".",
"items",
"(",
")",
"if",
"k",
"not",
"in",
"[",
"'lines'",
"]",
"}",
")",
",",
"' '",
"*",
"8",
")",
")",
"# Let user select block:",
"block_idx",
"=",
"-",
"1",
"while",
"not",
"(",
"0",
"<=",
"block_idx",
"<",
"len",
"(",
"blocks",
")",
")",
":",
"block_idx",
"=",
"input",
"(",
"\"Choose block to be marked [\"",
"+",
"str",
"(",
"default",
")",
"+",
"\"]: \"",
")",
"or",
"default",
"try",
":",
"block_idx",
"=",
"int",
"(",
"block_idx",
")",
"except",
"ValueError",
":",
"block_idx",
"=",
"-",
"1",
"# block = blocks[block_idx][1]",
"return",
"block_idx"
] | Let user interactively select block. | [
"Let",
"user",
"interactively",
"select",
"block",
"."
] | python | test |
llazzaro/analyzerdam | analyzerdam/sqlDAM.py | https://github.com/llazzaro/analyzerdam/blob/c5bc7483dae23bd2e14bbf36147b7a43a0067bc0/analyzerdam/sqlDAM.py#L86-L99 | def readTupleQuotes(self, symbol, start, end):
''' read quotes as tuple '''
if end is None:
end=sys.maxint
session=self.getReadSession()()
try:
rows=session.query(Quote).filter(and_(Quote.symbol == symbol,
Quote.time >= int(start),
Quote.time < int(end)))
finally:
self.getReadSession().remove()
return rows | [
"def",
"readTupleQuotes",
"(",
"self",
",",
"symbol",
",",
"start",
",",
"end",
")",
":",
"if",
"end",
"is",
"None",
":",
"end",
"=",
"sys",
".",
"maxint",
"session",
"=",
"self",
".",
"getReadSession",
"(",
")",
"(",
")",
"try",
":",
"rows",
"=",
"session",
".",
"query",
"(",
"Quote",
")",
".",
"filter",
"(",
"and_",
"(",
"Quote",
".",
"symbol",
"==",
"symbol",
",",
"Quote",
".",
"time",
">=",
"int",
"(",
"start",
")",
",",
"Quote",
".",
"time",
"<",
"int",
"(",
"end",
")",
")",
")",
"finally",
":",
"self",
".",
"getReadSession",
"(",
")",
".",
"remove",
"(",
")",
"return",
"rows"
] | read quotes as tuple | [
"read",
"quotes",
"as",
"tuple"
] | python | train |
aaugustin/websockets | src/websockets/client.py | https://github.com/aaugustin/websockets/blob/17b3f47549b6f752a1be07fa1ba3037cb59c7d56/src/websockets/client.py#L223-L309 | async def handshake(
self,
wsuri: WebSocketURI,
origin: Optional[Origin] = None,
available_extensions: Optional[Sequence[ClientExtensionFactory]] = None,
available_subprotocols: Optional[Sequence[Subprotocol]] = None,
extra_headers: Optional[HeadersLike] = None,
) -> None:
"""
Perform the client side of the opening handshake.
If provided, ``origin`` sets the Origin HTTP header.
If provided, ``available_extensions`` is a list of supported
extensions in the order in which they should be used.
If provided, ``available_subprotocols`` is a list of supported
subprotocols in order of decreasing preference.
If provided, ``extra_headers`` sets additional HTTP request headers.
It must be a :class:`~websockets.http.Headers` instance, a
:class:`~collections.abc.Mapping`, or an iterable of ``(name, value)``
pairs.
Raise :exc:`~websockets.exceptions.InvalidHandshake` if the handshake
fails.
"""
request_headers = Headers()
if wsuri.port == (443 if wsuri.secure else 80): # pragma: no cover
request_headers["Host"] = wsuri.host
else:
request_headers["Host"] = f"{wsuri.host}:{wsuri.port}"
if wsuri.user_info:
request_headers["Authorization"] = build_basic_auth(*wsuri.user_info)
if origin is not None:
request_headers["Origin"] = origin
key = build_request(request_headers)
if available_extensions is not None:
extensions_header = build_extension(
[
(extension_factory.name, extension_factory.get_request_params())
for extension_factory in available_extensions
]
)
request_headers["Sec-WebSocket-Extensions"] = extensions_header
if available_subprotocols is not None:
protocol_header = build_subprotocol(available_subprotocols)
request_headers["Sec-WebSocket-Protocol"] = protocol_header
if extra_headers is not None:
if isinstance(extra_headers, Headers):
extra_headers = extra_headers.raw_items()
elif isinstance(extra_headers, collections.abc.Mapping):
extra_headers = extra_headers.items()
for name, value in extra_headers:
request_headers[name] = value
request_headers.setdefault("User-Agent", USER_AGENT)
self.write_http_request(wsuri.resource_name, request_headers)
status_code, response_headers = await self.read_http_response()
if status_code in (301, 302, 303, 307, 308):
if "Location" not in response_headers:
raise InvalidMessage("Redirect response missing Location")
raise RedirectHandshake(response_headers["Location"])
elif status_code != 101:
raise InvalidStatusCode(status_code)
check_response(response_headers, key)
self.extensions = self.process_extensions(
response_headers, available_extensions
)
self.subprotocol = self.process_subprotocol(
response_headers, available_subprotocols
)
self.connection_open() | [
"async",
"def",
"handshake",
"(",
"self",
",",
"wsuri",
":",
"WebSocketURI",
",",
"origin",
":",
"Optional",
"[",
"Origin",
"]",
"=",
"None",
",",
"available_extensions",
":",
"Optional",
"[",
"Sequence",
"[",
"ClientExtensionFactory",
"]",
"]",
"=",
"None",
",",
"available_subprotocols",
":",
"Optional",
"[",
"Sequence",
"[",
"Subprotocol",
"]",
"]",
"=",
"None",
",",
"extra_headers",
":",
"Optional",
"[",
"HeadersLike",
"]",
"=",
"None",
",",
")",
"->",
"None",
":",
"request_headers",
"=",
"Headers",
"(",
")",
"if",
"wsuri",
".",
"port",
"==",
"(",
"443",
"if",
"wsuri",
".",
"secure",
"else",
"80",
")",
":",
"# pragma: no cover",
"request_headers",
"[",
"\"Host\"",
"]",
"=",
"wsuri",
".",
"host",
"else",
":",
"request_headers",
"[",
"\"Host\"",
"]",
"=",
"f\"{wsuri.host}:{wsuri.port}\"",
"if",
"wsuri",
".",
"user_info",
":",
"request_headers",
"[",
"\"Authorization\"",
"]",
"=",
"build_basic_auth",
"(",
"*",
"wsuri",
".",
"user_info",
")",
"if",
"origin",
"is",
"not",
"None",
":",
"request_headers",
"[",
"\"Origin\"",
"]",
"=",
"origin",
"key",
"=",
"build_request",
"(",
"request_headers",
")",
"if",
"available_extensions",
"is",
"not",
"None",
":",
"extensions_header",
"=",
"build_extension",
"(",
"[",
"(",
"extension_factory",
".",
"name",
",",
"extension_factory",
".",
"get_request_params",
"(",
")",
")",
"for",
"extension_factory",
"in",
"available_extensions",
"]",
")",
"request_headers",
"[",
"\"Sec-WebSocket-Extensions\"",
"]",
"=",
"extensions_header",
"if",
"available_subprotocols",
"is",
"not",
"None",
":",
"protocol_header",
"=",
"build_subprotocol",
"(",
"available_subprotocols",
")",
"request_headers",
"[",
"\"Sec-WebSocket-Protocol\"",
"]",
"=",
"protocol_header",
"if",
"extra_headers",
"is",
"not",
"None",
":",
"if",
"isinstance",
"(",
"extra_headers",
",",
"Headers",
")",
":",
"extra_headers",
"=",
"extra_headers",
".",
"raw_items",
"(",
")",
"elif",
"isinstance",
"(",
"extra_headers",
",",
"collections",
".",
"abc",
".",
"Mapping",
")",
":",
"extra_headers",
"=",
"extra_headers",
".",
"items",
"(",
")",
"for",
"name",
",",
"value",
"in",
"extra_headers",
":",
"request_headers",
"[",
"name",
"]",
"=",
"value",
"request_headers",
".",
"setdefault",
"(",
"\"User-Agent\"",
",",
"USER_AGENT",
")",
"self",
".",
"write_http_request",
"(",
"wsuri",
".",
"resource_name",
",",
"request_headers",
")",
"status_code",
",",
"response_headers",
"=",
"await",
"self",
".",
"read_http_response",
"(",
")",
"if",
"status_code",
"in",
"(",
"301",
",",
"302",
",",
"303",
",",
"307",
",",
"308",
")",
":",
"if",
"\"Location\"",
"not",
"in",
"response_headers",
":",
"raise",
"InvalidMessage",
"(",
"\"Redirect response missing Location\"",
")",
"raise",
"RedirectHandshake",
"(",
"response_headers",
"[",
"\"Location\"",
"]",
")",
"elif",
"status_code",
"!=",
"101",
":",
"raise",
"InvalidStatusCode",
"(",
"status_code",
")",
"check_response",
"(",
"response_headers",
",",
"key",
")",
"self",
".",
"extensions",
"=",
"self",
".",
"process_extensions",
"(",
"response_headers",
",",
"available_extensions",
")",
"self",
".",
"subprotocol",
"=",
"self",
".",
"process_subprotocol",
"(",
"response_headers",
",",
"available_subprotocols",
")",
"self",
".",
"connection_open",
"(",
")"
] | Perform the client side of the opening handshake.
If provided, ``origin`` sets the Origin HTTP header.
If provided, ``available_extensions`` is a list of supported
extensions in the order in which they should be used.
If provided, ``available_subprotocols`` is a list of supported
subprotocols in order of decreasing preference.
If provided, ``extra_headers`` sets additional HTTP request headers.
It must be a :class:`~websockets.http.Headers` instance, a
:class:`~collections.abc.Mapping`, or an iterable of ``(name, value)``
pairs.
Raise :exc:`~websockets.exceptions.InvalidHandshake` if the handshake
fails. | [
"Perform",
"the",
"client",
"side",
"of",
"the",
"opening",
"handshake",
"."
] | python | train |
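The handshake coroutine above is normally driven through the library's public `connect` helper rather than called directly. A hedged end-to-end sketch; the URI and extra header are placeholders.

```python
# Illustrative only: connect() performs the opening handshake shown above
# (Host/Authorization/Origin headers, key check, extension and subprotocol
# negotiation) before yielding an open protocol instance.
import asyncio
import websockets

async def hello():
    async with websockets.connect("ws://localhost:8765",
                                  extra_headers={"X-Demo": "1"}) as ws:
        await ws.send("hello")
        print(await ws.recv())

asyncio.run(hello())
```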
shoebot/shoebot | lib/graph/__init__.py | https://github.com/shoebot/shoebot/blob/d554c1765c1899fa25727c9fc6805d221585562b/lib/graph/__init__.py#L241-L257 | def copy(self, empty=False):
""" Create a copy of the graph (by default with nodes and edges).
"""
g = graph(self.layout.n, self.distance, self.layout.type)
g.layout = self.layout.copy(g)
g.styles = self.styles.copy(g)
g.events = self.events.copy(g)
if not empty:
for n in self.nodes:
g.add_node(n.id, n.r, n.style, n.category, n.label, (n == self.root), n.__dict__)
for e in self.edges:
g.add_edge(e.node1.id, e.node2.id, e.weight, e.length, e.label, e.__dict__)
return g | [
"def",
"copy",
"(",
"self",
",",
"empty",
"=",
"False",
")",
":",
"g",
"=",
"graph",
"(",
"self",
".",
"layout",
".",
"n",
",",
"self",
".",
"distance",
",",
"self",
".",
"layout",
".",
"type",
")",
"g",
".",
"layout",
"=",
"self",
".",
"layout",
".",
"copy",
"(",
"g",
")",
"g",
".",
"styles",
"=",
"self",
".",
"styles",
".",
"copy",
"(",
"g",
")",
"g",
".",
"events",
"=",
"self",
".",
"events",
".",
"copy",
"(",
"g",
")",
"if",
"not",
"empty",
":",
"for",
"n",
"in",
"self",
".",
"nodes",
":",
"g",
".",
"add_node",
"(",
"n",
".",
"id",
",",
"n",
".",
"r",
",",
"n",
".",
"style",
",",
"n",
".",
"category",
",",
"n",
".",
"label",
",",
"(",
"n",
"==",
"self",
".",
"root",
")",
",",
"n",
".",
"__dict__",
")",
"for",
"e",
"in",
"self",
".",
"edges",
":",
"g",
".",
"add_edge",
"(",
"e",
".",
"node1",
".",
"id",
",",
"e",
".",
"node2",
".",
"id",
",",
"e",
".",
"weight",
",",
"e",
".",
"length",
",",
"e",
".",
"label",
",",
"e",
".",
"__dict__",
")",
"return",
"g"
] | Create a copy of the graph (by default with nodes and edges). | [
"Create",
"a",
"copy",
"of",
"the",
"graph",
"(",
"by",
"default",
"with",
"nodes",
"and",
"edges",
")",
"."
] | python | valid |
JohnDoee/thomas | thomas/__main__.py | https://github.com/JohnDoee/thomas/blob/51916dd110098b189a1c2fbcb71794fd9ec94832/thomas/__main__.py#L15-L42 | def query_yes_no(question, default="yes"):
"""Ask a yes/no question via raw_input() and return their answer.
"question" is a string that is presented to the user.
"default" is the presumed answer if the user just hits <Enter>.
It must be "yes" (the default), "no" or None (meaning
an answer is required of the user).
The "answer" return value is True for "yes" or False for "no".
"""
valid = {"yes": True, "y": True, "ye": True,
"no": False, "n": False}
if default is None:
prompt = " [y/n] "
elif default == "yes":
prompt = " [Y/n] "
elif default == "no":
prompt = " [y/N] "
else:
raise ValueError("invalid default answer: '%s'" % default)
while True:
print(question + prompt)
choice = input().lower()
if default is not None and choice == '':
return valid[default]
elif choice in valid:
return valid[choice]
else:
print("Please respond with 'yes' or 'no' (or 'y' or 'n').\n") | [
"def",
"query_yes_no",
"(",
"question",
",",
"default",
"=",
"\"yes\"",
")",
":",
"valid",
"=",
"{",
"\"yes\"",
":",
"True",
",",
"\"y\"",
":",
"True",
",",
"\"ye\"",
":",
"True",
",",
"\"no\"",
":",
"False",
",",
"\"n\"",
":",
"False",
"}",
"if",
"default",
"is",
"None",
":",
"prompt",
"=",
"\" [y/n] \"",
"elif",
"default",
"==",
"\"yes\"",
":",
"prompt",
"=",
"\" [Y/n] \"",
"elif",
"default",
"==",
"\"no\"",
":",
"prompt",
"=",
"\" [y/N] \"",
"else",
":",
"raise",
"ValueError",
"(",
"\"invalid default answer: '%s'\"",
"%",
"default",
")",
"while",
"True",
":",
"print",
"(",
"question",
"+",
"prompt",
")",
"choice",
"=",
"input",
"(",
")",
".",
"lower",
"(",
")",
"if",
"default",
"is",
"not",
"None",
"and",
"choice",
"==",
"''",
":",
"return",
"valid",
"[",
"default",
"]",
"elif",
"choice",
"in",
"valid",
":",
"return",
"valid",
"[",
"choice",
"]",
"else",
":",
"print",
"(",
"\"Please respond with 'yes' or 'no' (or 'y' or 'n').\\n\"",
")"
] | Ask a yes/no question via raw_input() and return their answer.
"question" is a string that is presented to the user.
"default" is the presumed answer if the user just hits <Enter>.
It must be "yes" (the default), "no" or None (meaning
an answer is required of the user).
The "answer" return value is True for "yes" or False for "no". | [
"Ask",
"a",
"yes",
"/",
"no",
"question",
"via",
"raw_input",
"()",
"and",
"return",
"their",
"answer",
".",
"question",
"is",
"a",
"string",
"that",
"is",
"presented",
"to",
"the",
"user",
".",
"default",
"is",
"the",
"presumed",
"answer",
"if",
"the",
"user",
"just",
"hits",
"<Enter",
">",
".",
"It",
"must",
"be",
"yes",
"(",
"the",
"default",
")",
"no",
"or",
"None",
"(",
"meaning",
"an",
"answer",
"is",
"required",
"of",
"the",
"user",
")",
".",
"The",
"answer",
"return",
"value",
"is",
"True",
"for",
"yes",
"or",
"False",
"for",
"no",
"."
] | python | train |
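A minimal usage sketch for the query_yes_no helper in the row above. It assumes the function is importable from the module path shown in that row; importing a package's __main__ module can execute CLI setup code, so in practice copying the helper into your own module may be safer.

    from thomas.__main__ import query_yes_no  # assumed import path, see note above

    # Guard a destructive action; "no" is the safe default here.
    if query_yes_no("Remove the downloaded data?", default="no"):
        print("removing...")
    else:
        print("aborted")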
waqasbhatti/astrobase | astrobase/hatsurveys/hatlc.py | https://github.com/waqasbhatti/astrobase/blob/2922a14619d183fb28005fa7d02027ac436f2265/astrobase/hatsurveys/hatlc.py#L1303-L1369 | def describe_lcc_csv(lcdict, returndesc=False):
'''
This describes the LCC CSV format light curve file.
Parameters
----------
lcdict : dict
The input lcdict to parse for column and metadata info.
returndesc : bool
If True, returns the description string as an str instead of just
printing it to stdout.
Returns
-------
str or None
If returndesc is True, returns the description lines as a str, otherwise
returns nothing.
'''
metadata_lines = []
coldef_lines = []
if 'lcformat' in lcdict and 'lcc-csv' in lcdict['lcformat'].lower():
metadata = lcdict['metadata']
metakeys = lcdict['objectinfo'].keys()
coldefs = lcdict['coldefs']
for mk in metakeys:
metadata_lines.append(
'%20s | %s' % (
mk,
metadata[mk]['desc']
)
)
for ck in lcdict['columns']:
coldef_lines.append('column %02d | %8s | numpy dtype: %3s | %s'
% (coldefs[ck]['colnum'],
ck,
coldefs[ck]['dtype'],
coldefs[ck]['desc']))
desc = LCC_CSVLC_DESCTEMPLATE.format(
objectid=lcdict['objectid'],
metadata_desc='\n'.join(metadata_lines),
metadata=pformat(lcdict['objectinfo']),
columndefs='\n'.join(coldef_lines)
)
print(desc)
if returndesc:
return desc
else:
LOGERROR("this lcdict is not from an LCC CSV, can't figure it out...")
return None | [
"def",
"describe_lcc_csv",
"(",
"lcdict",
",",
"returndesc",
"=",
"False",
")",
":",
"metadata_lines",
"=",
"[",
"]",
"coldef_lines",
"=",
"[",
"]",
"if",
"'lcformat'",
"in",
"lcdict",
"and",
"'lcc-csv'",
"in",
"lcdict",
"[",
"'lcformat'",
"]",
".",
"lower",
"(",
")",
":",
"metadata",
"=",
"lcdict",
"[",
"'metadata'",
"]",
"metakeys",
"=",
"lcdict",
"[",
"'objectinfo'",
"]",
".",
"keys",
"(",
")",
"coldefs",
"=",
"lcdict",
"[",
"'coldefs'",
"]",
"for",
"mk",
"in",
"metakeys",
":",
"metadata_lines",
".",
"append",
"(",
"'%20s | %s'",
"%",
"(",
"mk",
",",
"metadata",
"[",
"mk",
"]",
"[",
"'desc'",
"]",
")",
")",
"for",
"ck",
"in",
"lcdict",
"[",
"'columns'",
"]",
":",
"coldef_lines",
".",
"append",
"(",
"'column %02d | %8s | numpy dtype: %3s | %s'",
"%",
"(",
"coldefs",
"[",
"ck",
"]",
"[",
"'colnum'",
"]",
",",
"ck",
",",
"coldefs",
"[",
"ck",
"]",
"[",
"'dtype'",
"]",
",",
"coldefs",
"[",
"ck",
"]",
"[",
"'desc'",
"]",
")",
")",
"desc",
"=",
"LCC_CSVLC_DESCTEMPLATE",
".",
"format",
"(",
"objectid",
"=",
"lcdict",
"[",
"'objectid'",
"]",
",",
"metadata_desc",
"=",
"'\\n'",
".",
"join",
"(",
"metadata_lines",
")",
",",
"metadata",
"=",
"pformat",
"(",
"lcdict",
"[",
"'objectinfo'",
"]",
")",
",",
"columndefs",
"=",
"'\\n'",
".",
"join",
"(",
"coldef_lines",
")",
")",
"print",
"(",
"desc",
")",
"if",
"returndesc",
":",
"return",
"desc",
"else",
":",
"LOGERROR",
"(",
"\"this lcdict is not from an LCC CSV, can't figure it out...\"",
")",
"return",
"None"
] | This describes the LCC CSV format light curve file.
Parameters
----------
lcdict : dict
The input lcdict to parse for column and metadata info.
returndesc : bool
If True, returns the description string as an str instead of just
printing it to stdout.
Returns
-------
str or None
If returndesc is True, returns the description lines as a str, otherwise
returns nothing. | [
"This",
"describes",
"the",
"LCC",
"CSV",
"format",
"light",
"curve",
"file",
"."
] | python | valid |
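A hedged sketch of how describe_lcc_csv from the row above might be called. The read_csvlc loader name is an assumption about this module's LCC CSV reader and may differ in the actual astrobase API; the input filename is a placeholder.

    from astrobase.hatsurveys import hatlc

    # read_csvlc is assumed to be the module's LCC CSV reader; adjust the name
    # if the actual loader differs.
    lcdict = hatlc.read_csvlc('object-12345_csvlc.gz')

    # Print the metadata/column description and also keep it as a string.
    desc = hatlc.describe_lcc_csv(lcdict, returndesc=True)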
pinterest/pymemcache | pymemcache/client/base.py | https://github.com/pinterest/pymemcache/blob/f3a348f4ce2248cce8b398e93e08d984fb9100e5/pymemcache/client/base.py#L585-L608 | def decr(self, key, value, noreply=False):
"""
The memcached "decr" command.
Args:
key: str, see class docs for details.
value: int, the amount by which to increment the value.
noreply: optional bool, False to wait for the reply (the default).
Returns:
If noreply is True, always returns None. Otherwise returns the new
value of the key, or None if the key wasn't found.
"""
key = self.check_key(key)
cmd = b'decr ' + key + b' ' + six.text_type(value).encode('ascii')
if noreply:
cmd += b' noreply'
cmd += b'\r\n'
results = self._misc_cmd([cmd], b'decr', noreply)
if noreply:
return None
if results[0] == b'NOT_FOUND':
return None
return int(results[0]) | [
"def",
"decr",
"(",
"self",
",",
"key",
",",
"value",
",",
"noreply",
"=",
"False",
")",
":",
"key",
"=",
"self",
".",
"check_key",
"(",
"key",
")",
"cmd",
"=",
"b'decr '",
"+",
"key",
"+",
"b' '",
"+",
"six",
".",
"text_type",
"(",
"value",
")",
".",
"encode",
"(",
"'ascii'",
")",
"if",
"noreply",
":",
"cmd",
"+=",
"b' noreply'",
"cmd",
"+=",
"b'\\r\\n'",
"results",
"=",
"self",
".",
"_misc_cmd",
"(",
"[",
"cmd",
"]",
",",
"b'decr'",
",",
"noreply",
")",
"if",
"noreply",
":",
"return",
"None",
"if",
"results",
"[",
"0",
"]",
"==",
"b'NOT_FOUND'",
":",
"return",
"None",
"return",
"int",
"(",
"results",
"[",
"0",
"]",
")"
] | The memcached "decr" command.
Args:
key: str, see class docs for details.
value: int, the amount by which to increment the value.
noreply: optional bool, False to wait for the reply (the default).
Returns:
If noreply is True, always returns None. Otherwise returns the new
value of the key, or None if the key wasn't found. | [
"The",
"memcached",
"decr",
"command",
"."
] | python | train |
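For context, a small end-to-end sketch of the decr command documented above against a local memcached instance; the server address and key names are placeholders.

    from pymemcache.client.base import Client

    client = Client(('localhost', 11211))

    # Counters must already hold an integer-like value before decr is used.
    client.set('jobs_remaining', '10')

    new_value = client.decr('jobs_remaining', 3)   # -> 7
    missing = client.decr('no_such_key', 1)        # -> None, key not found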
Murali-group/halp | halp/utilities/directed_statistics.py | https://github.com/Murali-group/halp/blob/6eb27466ba84e2281e18f93b62aae5efb21ef8b3/halp/utilities/directed_statistics.py#L240-L256 | def _F_hyperedge_head_cardinality(H, F):
"""Returns the result of a function F applied to the set of cardinalities
of hyperedge heads in the hypergraph.
:param H: the hypergraph whose head cardinalities will be
operated on.
:param F: function to execute on the set of cardinalities in the
hypergraph.
:returns: result of the given function F.
:raises: TypeError -- Algorithm only applicable to directed hypergraphs
"""
if not isinstance(H, DirectedHypergraph):
raise TypeError("Algorithm only applicable to directed hypergraphs")
return F([len(H.get_hyperedge_head(hyperedge_id))
for hyperedge_id in H.get_hyperedge_id_set()]) | [
"def",
"_F_hyperedge_head_cardinality",
"(",
"H",
",",
"F",
")",
":",
"if",
"not",
"isinstance",
"(",
"H",
",",
"DirectedHypergraph",
")",
":",
"raise",
"TypeError",
"(",
"\"Algorithm only applicable to directed hypergraphs\"",
")",
"return",
"F",
"(",
"[",
"len",
"(",
"H",
".",
"get_hyperedge_head",
"(",
"hyperedge_id",
")",
")",
"for",
"hyperedge_id",
"in",
"H",
".",
"get_hyperedge_id_set",
"(",
")",
"]",
")"
] | Returns the result of a function F applied to the set of cardinalities
of hyperedge heads in the hypergraph.
:param H: the hypergraph whose head cardinalities will be
operated on.
:param F: function to execute on the set of cardinalities in the
hypergraph.
:returns: result of the given function F.
:raises: TypeError -- Algorithm only applicable to directed hypergraphs | [
"Returns",
"the",
"result",
"of",
"a",
"function",
"F",
"applied",
"to",
"the",
"set",
"of",
"cardinalities",
"of",
"hyperedge",
"heads",
"in",
"the",
"hypergraph",
"."
] | python | train |
mila-iqia/fuel | fuel/converters/ilsvrc2010.py | https://github.com/mila-iqia/fuel/blob/1d6292dc25e3a115544237e392e61bff6631d23c/fuel/converters/ilsvrc2010.py#L533-L573 | def extract_patch_images(f, which_set):
"""Extracts a dict of the "patch images" for ILSVRC2010.
Parameters
----------
f : str or file-like object
The filename or file-handle to the patch images TAR file.
which_set : str
Which set of images to extract. One of 'train', 'valid', 'test'.
Returns
-------
dict
A dictionary contains a mapping of filenames (without path) to a
bytes object containing the replacement image.
Notes
-----
Certain images in the distributed archives are blank, or display
an "image not available" banner. A separate TAR file of
"patch images" is distributed with the corrected versions of
these. It is this archive that this function is intended to read.
"""
if which_set not in ('train', 'valid', 'test'):
raise ValueError('which_set must be one of train, valid, or test')
which_set = 'val' if which_set == 'valid' else which_set
patch_images = {}
with tar_open(f) as tar:
for info_obj in tar:
if not info_obj.name.endswith('.JPEG'):
continue
# Pretty sure that '/' is used for tarfile regardless of
# os.path.sep, but I officially don't care about Windows.
tokens = info_obj.name.split('/')
file_which_set = tokens[-2]
if file_which_set != which_set:
continue
filename = tokens[-1]
patch_images[filename] = tar.extractfile(info_obj.name).read()
return patch_images | [
"def",
"extract_patch_images",
"(",
"f",
",",
"which_set",
")",
":",
"if",
"which_set",
"not",
"in",
"(",
"'train'",
",",
"'valid'",
",",
"'test'",
")",
":",
"raise",
"ValueError",
"(",
"'which_set must be one of train, valid, or test'",
")",
"which_set",
"=",
"'val'",
"if",
"which_set",
"==",
"'valid'",
"else",
"which_set",
"patch_images",
"=",
"{",
"}",
"with",
"tar_open",
"(",
"f",
")",
"as",
"tar",
":",
"for",
"info_obj",
"in",
"tar",
":",
"if",
"not",
"info_obj",
".",
"name",
".",
"endswith",
"(",
"'.JPEG'",
")",
":",
"continue",
"# Pretty sure that '/' is used for tarfile regardless of",
"# os.path.sep, but I officially don't care about Windows.",
"tokens",
"=",
"info_obj",
".",
"name",
".",
"split",
"(",
"'/'",
")",
"file_which_set",
"=",
"tokens",
"[",
"-",
"2",
"]",
"if",
"file_which_set",
"!=",
"which_set",
":",
"continue",
"filename",
"=",
"tokens",
"[",
"-",
"1",
"]",
"patch_images",
"[",
"filename",
"]",
"=",
"tar",
".",
"extractfile",
"(",
"info_obj",
".",
"name",
")",
".",
"read",
"(",
")",
"return",
"patch_images"
] | Extracts a dict of the "patch images" for ILSVRC2010.
Parameters
----------
f : str or file-like object
The filename or file-handle to the patch images TAR file.
which_set : str
Which set of images to extract. One of 'train', 'valid', 'test'.
Returns
-------
dict
A dictionary contains a mapping of filenames (without path) to a
bytes object containing the replacement image.
Notes
-----
Certain images in the distributed archives are blank, or display
an "image not available" banner. A separate TAR file of
"patch images" is distributed with the corrected versions of
these. It is this archive that this function is intended to read. | [
"Extracts",
"a",
"dict",
"of",
"the",
"patch",
"images",
"for",
"ILSVRC2010",
"."
] | python | train |
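A usage sketch for extract_patch_images above; the archive filename is an assumed local path to the ILSVRC2010 patch-images TAR distributed alongside the main archives.

    from fuel.converters.ilsvrc2010 import extract_patch_images

    # Maps 'nXXXXXXXX_NNNN.JPEG'-style filenames to replacement JPEG bytes
    # for the training set.
    patches = extract_patch_images('patch_images.tar', 'train')
    print(len(patches), 'patched training images')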
gwastro/pycbc | pycbc/frame/frame.py | https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/frame/frame.py#L142-L246 | def read_frame(location, channels, start_time=None,
end_time=None, duration=None, check_integrity=True,
sieve=None):
"""Read time series from frame data.
Using the `location`, which can either be a frame file ".gwf" or a
frame cache ".gwf", read in the data for the given channel(s) and output
as a TimeSeries or list of TimeSeries.
Parameters
----------
location : string
A source of gravitational wave frames. Either a frame filename
(can include pattern), a list of frame files, or frame cache file.
channels : string or list of strings
Either a string that contains the channel name or a list of channel
name strings.
start_time : {None, LIGOTimeGPS}, optional
The gps start time of the time series. Defaults to reading from the
beginning of the available frame(s).
end_time : {None, LIGOTimeGPS}, optional
The gps end time of the time series. Defaults to the end of the frame.
Note, this argument is incompatible with `duration`.
duration : {None, float}, optional
The amount of data to read in seconds. Note, this argument is
incompatible with `end`.
check_integrity : {True, bool}, optional
Test the frame files for internal integrity.
sieve : string, optional
Selects only frames where the frame URL matches the regular
expression sieve
Returns
-------
Frame Data: TimeSeries or list of TimeSeries
A TimeSeries or a list of TimeSeries, corresponding to the data from
the frame file/cache for a given channel or channels.
"""
if end_time and duration:
raise ValueError("end time and duration are mutually exclusive")
if type(location) is list:
locations = location
else:
locations = [location]
cum_cache = locations_to_cache(locations)
if sieve:
logging.info("Using frames that match regexp: %s", sieve)
lal.CacheSieve(cum_cache, 0, 0, None, None, sieve)
stream = lalframe.FrStreamCacheOpen(cum_cache)
stream.mode = lalframe.FR_STREAM_VERBOSE_MODE
if check_integrity:
stream.mode = (stream.mode | lalframe.FR_STREAM_CHECKSUM_MODE)
lalframe.FrSetMode(stream.mode, stream)
# determine duration of data
if type(channels) is list:
first_channel = channels[0]
else:
first_channel = channels
data_length = lalframe.FrStreamGetVectorLength(first_channel, stream)
channel_type = lalframe.FrStreamGetTimeSeriesType(first_channel, stream)
create_series_func = _fr_type_map[channel_type][2]
get_series_metadata_func = _fr_type_map[channel_type][3]
series = create_series_func(first_channel, stream.epoch, 0, 0,
lal.ADCCountUnit, 0)
get_series_metadata_func(series, stream)
data_duration = data_length * series.deltaT
if start_time is None:
start_time = stream.epoch*1
if end_time is None:
end_time = start_time + data_duration
if type(start_time) is not lal.LIGOTimeGPS:
start_time = lal.LIGOTimeGPS(start_time)
if type(end_time) is not lal.LIGOTimeGPS:
end_time = lal.LIGOTimeGPS(end_time)
if duration is None:
duration = float(end_time - start_time)
else:
duration = float(duration)
# lalframe behaves dangerously with invalid duration so catch it here
if duration <= 0:
raise ValueError("Negative or null duration")
#if duration > data_duration:
# raise ValueError("Requested duration longer than available data")
if type(channels) is list:
all_data = []
for channel in channels:
channel_data = _read_channel(channel, stream, start_time, duration)
lalframe.FrStreamSeek(stream, start_time)
all_data.append(channel_data)
return all_data
else:
return _read_channel(channels, stream, start_time, duration) | [
"def",
"read_frame",
"(",
"location",
",",
"channels",
",",
"start_time",
"=",
"None",
",",
"end_time",
"=",
"None",
",",
"duration",
"=",
"None",
",",
"check_integrity",
"=",
"True",
",",
"sieve",
"=",
"None",
")",
":",
"if",
"end_time",
"and",
"duration",
":",
"raise",
"ValueError",
"(",
"\"end time and duration are mutually exclusive\"",
")",
"if",
"type",
"(",
"location",
")",
"is",
"list",
":",
"locations",
"=",
"location",
"else",
":",
"locations",
"=",
"[",
"location",
"]",
"cum_cache",
"=",
"locations_to_cache",
"(",
"locations",
")",
"if",
"sieve",
":",
"logging",
".",
"info",
"(",
"\"Using frames that match regexp: %s\"",
",",
"sieve",
")",
"lal",
".",
"CacheSieve",
"(",
"cum_cache",
",",
"0",
",",
"0",
",",
"None",
",",
"None",
",",
"sieve",
")",
"stream",
"=",
"lalframe",
".",
"FrStreamCacheOpen",
"(",
"cum_cache",
")",
"stream",
".",
"mode",
"=",
"lalframe",
".",
"FR_STREAM_VERBOSE_MODE",
"if",
"check_integrity",
":",
"stream",
".",
"mode",
"=",
"(",
"stream",
".",
"mode",
"|",
"lalframe",
".",
"FR_STREAM_CHECKSUM_MODE",
")",
"lalframe",
".",
"FrSetMode",
"(",
"stream",
".",
"mode",
",",
"stream",
")",
"# determine duration of data",
"if",
"type",
"(",
"channels",
")",
"is",
"list",
":",
"first_channel",
"=",
"channels",
"[",
"0",
"]",
"else",
":",
"first_channel",
"=",
"channels",
"data_length",
"=",
"lalframe",
".",
"FrStreamGetVectorLength",
"(",
"first_channel",
",",
"stream",
")",
"channel_type",
"=",
"lalframe",
".",
"FrStreamGetTimeSeriesType",
"(",
"first_channel",
",",
"stream",
")",
"create_series_func",
"=",
"_fr_type_map",
"[",
"channel_type",
"]",
"[",
"2",
"]",
"get_series_metadata_func",
"=",
"_fr_type_map",
"[",
"channel_type",
"]",
"[",
"3",
"]",
"series",
"=",
"create_series_func",
"(",
"first_channel",
",",
"stream",
".",
"epoch",
",",
"0",
",",
"0",
",",
"lal",
".",
"ADCCountUnit",
",",
"0",
")",
"get_series_metadata_func",
"(",
"series",
",",
"stream",
")",
"data_duration",
"=",
"data_length",
"*",
"series",
".",
"deltaT",
"if",
"start_time",
"is",
"None",
":",
"start_time",
"=",
"stream",
".",
"epoch",
"*",
"1",
"if",
"end_time",
"is",
"None",
":",
"end_time",
"=",
"start_time",
"+",
"data_duration",
"if",
"type",
"(",
"start_time",
")",
"is",
"not",
"lal",
".",
"LIGOTimeGPS",
":",
"start_time",
"=",
"lal",
".",
"LIGOTimeGPS",
"(",
"start_time",
")",
"if",
"type",
"(",
"end_time",
")",
"is",
"not",
"lal",
".",
"LIGOTimeGPS",
":",
"end_time",
"=",
"lal",
".",
"LIGOTimeGPS",
"(",
"end_time",
")",
"if",
"duration",
"is",
"None",
":",
"duration",
"=",
"float",
"(",
"end_time",
"-",
"start_time",
")",
"else",
":",
"duration",
"=",
"float",
"(",
"duration",
")",
"# lalframe behaves dangerously with invalid duration so catch it here",
"if",
"duration",
"<=",
"0",
":",
"raise",
"ValueError",
"(",
"\"Negative or null duration\"",
")",
"#if duration > data_duration:",
"# raise ValueError(\"Requested duration longer than available data\")",
"if",
"type",
"(",
"channels",
")",
"is",
"list",
":",
"all_data",
"=",
"[",
"]",
"for",
"channel",
"in",
"channels",
":",
"channel_data",
"=",
"_read_channel",
"(",
"channel",
",",
"stream",
",",
"start_time",
",",
"duration",
")",
"lalframe",
".",
"FrStreamSeek",
"(",
"stream",
",",
"start_time",
")",
"all_data",
".",
"append",
"(",
"channel_data",
")",
"return",
"all_data",
"else",
":",
"return",
"_read_channel",
"(",
"channels",
",",
"stream",
",",
"start_time",
",",
"duration",
")"
] | Read time series from frame data.
Using the `location`, which can either be a frame file ".gwf" or a
frame cache ".gwf", read in the data for the given channel(s) and output
as a TimeSeries or list of TimeSeries.
Parameters
----------
location : string
A source of gravitational wave frames. Either a frame filename
(can include pattern), a list of frame files, or frame cache file.
channels : string or list of strings
Either a string that contains the channel name or a list of channel
name strings.
start_time : {None, LIGOTimeGPS}, optional
The gps start time of the time series. Defaults to reading from the
beginning of the available frame(s).
end_time : {None, LIGOTimeGPS}, optional
The gps end time of the time series. Defaults to the end of the frame.
Note, this argument is incompatible with `duration`.
duration : {None, float}, optional
The amount of data to read in seconds. Note, this argument is
incompatible with `end`.
check_integrity : {True, bool}, optional
Test the frame files for internal integrity.
sieve : string, optional
Selects only frames where the frame URL matches the regular
expression sieve
Returns
-------
Frame Data: TimeSeries or list of TimeSeries
A TimeSeries or a list of TimeSeries, corresponding to the data from
the frame file/cache for a given channel or channels. | [
"Read",
"time",
"series",
"from",
"frame",
"data",
"."
] | python | train |
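A hedged sketch of calling read_frame above on a single GWF file. The frame filename and channel string are illustrative placeholders, not files shipped with PyCBC, and the import path assumes the package re-exports read_frame from pycbc.frame.

    from pycbc.frame import read_frame

    # Read 32 seconds of one channel from a local frame file (placeholder names).
    strain = read_frame('H-H1_EXAMPLE-1126259446-64.gwf',
                        'H1:LOSC-STRAIN',
                        start_time=1126259446,
                        duration=32)

    # Passing a list of channel names instead returns a list of TimeSeries.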
saltstack/salt | salt/modules/pcs.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/pcs.py#L214-L235 | def cluster_node_add(node, extra_args=None):
'''
Add a node to the pacemaker cluster via pcs command
node
node that should be added
extra_args
list of extra option for the \'pcs cluster node add\' command
CLI Example:
.. code-block:: bash
salt '*' pcs.cluster_node_add node=node2.example.org
'''
cmd = ['pcs', 'cluster', 'node', 'add']
cmd += [node]
if isinstance(extra_args, (list, tuple)):
cmd += extra_args
return __salt__['cmd.run_all'](cmd, output_loglevel='trace', python_shell=False) | [
"def",
"cluster_node_add",
"(",
"node",
",",
"extra_args",
"=",
"None",
")",
":",
"cmd",
"=",
"[",
"'pcs'",
",",
"'cluster'",
",",
"'node'",
",",
"'add'",
"]",
"cmd",
"+=",
"[",
"node",
"]",
"if",
"isinstance",
"(",
"extra_args",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"cmd",
"+=",
"extra_args",
"return",
"__salt__",
"[",
"'cmd.run_all'",
"]",
"(",
"cmd",
",",
"output_loglevel",
"=",
"'trace'",
",",
"python_shell",
"=",
"False",
")"
] | Add a node to the pacemaker cluster via pcs command
node
node that should be added
extra_args
list of extra option for the \'pcs cluster node add\' command
CLI Example:
.. code-block:: bash
salt '*' pcs.cluster_node_add node=node2.example.org | [
"Add",
"a",
"node",
"to",
"the",
"pacemaker",
"cluster",
"via",
"pcs",
"command"
] | python | train |
fermiPy/fermipy | fermipy/utils.py | https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/utils.py#L1068-L1075 | def val_to_edge(edges, x):
"""Convert axis coordinate to bin index."""
edges = np.array(edges)
w = edges[1:] - edges[:-1]
w = np.insert(w, 0, w[0])
ibin = np.digitize(np.array(x, ndmin=1), edges - 0.5 * w) - 1
ibin[ibin < 0] = 0
return ibin | [
"def",
"val_to_edge",
"(",
"edges",
",",
"x",
")",
":",
"edges",
"=",
"np",
".",
"array",
"(",
"edges",
")",
"w",
"=",
"edges",
"[",
"1",
":",
"]",
"-",
"edges",
"[",
":",
"-",
"1",
"]",
"w",
"=",
"np",
".",
"insert",
"(",
"w",
",",
"0",
",",
"w",
"[",
"0",
"]",
")",
"ibin",
"=",
"np",
".",
"digitize",
"(",
"np",
".",
"array",
"(",
"x",
",",
"ndmin",
"=",
"1",
")",
",",
"edges",
"-",
"0.5",
"*",
"w",
")",
"-",
"1",
"ibin",
"[",
"ibin",
"<",
"0",
"]",
"=",
"0",
"return",
"ibin"
] | Convert axis coordinate to bin index. | [
"Convert",
"axis",
"coordinate",
"to",
"bin",
"index",
"."
] | python | train |
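A short worked example of val_to_edge above. Because each value is shifted by half a bin width before digitizing, the function effectively snaps a coordinate to the nearest edge index; the import path is taken from the row's path column.

    import numpy as np
    from fermipy.utils import val_to_edge

    edges = np.array([0.0, 1.0, 2.0, 3.0])

    # 0.2 is nearest edge 0, 1.7 is nearest edge 2, 2.9 is nearest edge 3.
    print(val_to_edge(edges, [0.2, 1.7, 2.9]))   # expected: [0 2 3]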
docker/docker-py | docker/models/containers.py | https://github.com/docker/docker-py/blob/613d6aad83acc9931ff2ecfd6a6c7bd8061dc125/docker/models/containers.py#L349-L360 | def rename(self, name):
"""
Rename this container. Similar to the ``docker rename`` command.
Args:
name (str): New name for the container
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
return self.client.api.rename(self.id, name) | [
"def",
"rename",
"(",
"self",
",",
"name",
")",
":",
"return",
"self",
".",
"client",
".",
"api",
".",
"rename",
"(",
"self",
".",
"id",
",",
"name",
")"
] | Rename this container. Similar to the ``docker rename`` command.
Args:
name (str): New name for the container
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error. | [
"Rename",
"this",
"container",
".",
"Similar",
"to",
"the",
"docker",
"rename",
"command",
"."
] | python | train |
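A usage sketch for Container.rename above through the high-level docker client; the container names are placeholders.

    import docker

    client = docker.from_env()

    # Look up an existing container by name or ID and rename it.
    container = client.containers.get('old_name')
    container.rename('new_name')
    container.reload()   # refresh cached attrs so .name reflects the change
    print(container.name)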
alex-kostirin/pyatomac | atomac/ldtpd/menu.py | https://github.com/alex-kostirin/pyatomac/blob/3f46f6feb4504315eec07abb18bb41be4d257aeb/atomac/ldtpd/menu.py#L93-L114 | def menuitemenabled(self, window_name, object_name):
"""
Verify a menu item is enabled
@param window_name: Window name to look for, either full name,
LDTP's name convention, or a Unix glob.
@type window_name: string
@param object_name: Object name to look for, either full name,
LDTP's name convention, or a Unix glob. Or menu heirarchy
@type object_name: string
@return: 1 on success.
@rtype: integer
"""
try:
menu_handle = self._get_menu_handle(window_name, object_name,
False)
if menu_handle.AXEnabled:
return 1
except LdtpServerException:
pass
return 0 | [
"def",
"menuitemenabled",
"(",
"self",
",",
"window_name",
",",
"object_name",
")",
":",
"try",
":",
"menu_handle",
"=",
"self",
".",
"_get_menu_handle",
"(",
"window_name",
",",
"object_name",
",",
"False",
")",
"if",
"menu_handle",
".",
"AXEnabled",
":",
"return",
"1",
"except",
"LdtpServerException",
":",
"pass",
"return",
"0"
] | Verify a menu item is enabled
@param window_name: Window name to look for, either full name,
LDTP's name convention, or a Unix glob.
@type window_name: string
@param object_name: Object name to look for, either full name,
LDTP's name convention, or a Unix glob. Or menu heirarchy
@type object_name: string
@return: 1 on success.
@rtype: integer | [
"Verify",
"a",
"menu",
"item",
"is",
"enabled"
] | python | valid |
pandas-dev/pandas | pandas/core/generic.py | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/generic.py#L4006-L4096 | def sort_values(self, by=None, axis=0, ascending=True, inplace=False,
kind='quicksort', na_position='last'):
"""
Sort by the values along either axis.
Parameters
----------%(optional_by)s
axis : %(axes_single_arg)s, default 0
Axis to be sorted.
ascending : bool or list of bool, default True
Sort ascending vs. descending. Specify list for multiple sort
orders. If this is a list of bools, must match the length of
the by.
inplace : bool, default False
If True, perform operation in-place.
kind : {'quicksort', 'mergesort', 'heapsort'}, default 'quicksort'
Choice of sorting algorithm. See also ndarray.np.sort for more
information. `mergesort` is the only stable algorithm. For
DataFrames, this option is only applied when sorting on a single
column or label.
na_position : {'first', 'last'}, default 'last'
Puts NaNs at the beginning if `first`; `last` puts NaNs at the
end.
Returns
-------
sorted_obj : DataFrame or None
DataFrame with sorted values if inplace=False, None otherwise.
Examples
--------
>>> df = pd.DataFrame({
... 'col1': ['A', 'A', 'B', np.nan, 'D', 'C'],
... 'col2': [2, 1, 9, 8, 7, 4],
... 'col3': [0, 1, 9, 4, 2, 3],
... })
>>> df
col1 col2 col3
0 A 2 0
1 A 1 1
2 B 9 9
3 NaN 8 4
4 D 7 2
5 C 4 3
Sort by col1
>>> df.sort_values(by=['col1'])
col1 col2 col3
0 A 2 0
1 A 1 1
2 B 9 9
5 C 4 3
4 D 7 2
3 NaN 8 4
Sort by multiple columns
>>> df.sort_values(by=['col1', 'col2'])
col1 col2 col3
1 A 1 1
0 A 2 0
2 B 9 9
5 C 4 3
4 D 7 2
3 NaN 8 4
Sort Descending
>>> df.sort_values(by='col1', ascending=False)
col1 col2 col3
4 D 7 2
5 C 4 3
2 B 9 9
0 A 2 0
1 A 1 1
3 NaN 8 4
Putting NAs first
>>> df.sort_values(by='col1', ascending=False, na_position='first')
col1 col2 col3
3 NaN 8 4
4 D 7 2
5 C 4 3
2 B 9 9
0 A 2 0
1 A 1 1
"""
raise NotImplementedError("sort_values has not been implemented "
"on Panel or Panel4D objects.") | [
"def",
"sort_values",
"(",
"self",
",",
"by",
"=",
"None",
",",
"axis",
"=",
"0",
",",
"ascending",
"=",
"True",
",",
"inplace",
"=",
"False",
",",
"kind",
"=",
"'quicksort'",
",",
"na_position",
"=",
"'last'",
")",
":",
"raise",
"NotImplementedError",
"(",
"\"sort_values has not been implemented \"",
"\"on Panel or Panel4D objects.\"",
")"
] | Sort by the values along either axis.
Parameters
----------%(optional_by)s
axis : %(axes_single_arg)s, default 0
Axis to be sorted.
ascending : bool or list of bool, default True
Sort ascending vs. descending. Specify list for multiple sort
orders. If this is a list of bools, must match the length of
the by.
inplace : bool, default False
If True, perform operation in-place.
kind : {'quicksort', 'mergesort', 'heapsort'}, default 'quicksort'
Choice of sorting algorithm. See also ndarray.np.sort for more
information. `mergesort` is the only stable algorithm. For
DataFrames, this option is only applied when sorting on a single
column or label.
na_position : {'first', 'last'}, default 'last'
Puts NaNs at the beginning if `first`; `last` puts NaNs at the
end.
Returns
-------
sorted_obj : DataFrame or None
DataFrame with sorted values if inplace=False, None otherwise.
Examples
--------
>>> df = pd.DataFrame({
... 'col1': ['A', 'A', 'B', np.nan, 'D', 'C'],
... 'col2': [2, 1, 9, 8, 7, 4],
... 'col3': [0, 1, 9, 4, 2, 3],
... })
>>> df
col1 col2 col3
0 A 2 0
1 A 1 1
2 B 9 9
3 NaN 8 4
4 D 7 2
5 C 4 3
Sort by col1
>>> df.sort_values(by=['col1'])
col1 col2 col3
0 A 2 0
1 A 1 1
2 B 9 9
5 C 4 3
4 D 7 2
3 NaN 8 4
Sort by multiple columns
>>> df.sort_values(by=['col1', 'col2'])
col1 col2 col3
1 A 1 1
0 A 2 0
2 B 9 9
5 C 4 3
4 D 7 2
3 NaN 8 4
Sort Descending
>>> df.sort_values(by='col1', ascending=False)
col1 col2 col3
4 D 7 2
5 C 4 3
2 B 9 9
0 A 2 0
1 A 1 1
3 NaN 8 4
Putting NAs first
>>> df.sort_values(by='col1', ascending=False, na_position='first')
col1 col2 col3
3 NaN 8 4
4 D 7 2
5 C 4 3
2 B 9 9
0 A 2 0
1 A 1 1 | [
"Sort",
"by",
"the",
"values",
"along",
"either",
"axis",
"."
] | python | train |
fastai/fastai | fastai/callbacks/tensorboard.py | https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/callbacks/tensorboard.py#L387-L392 | def write(self)->None:
"Writes original, generated and real(target) images to Tensorboard."
orig_images, gen_images, real_images = self._get_image_tensors()
self._write_images(name='orig images', images=orig_images)
self._write_images(name='gen images', images=gen_images)
self._write_images(name='real images', images=real_images) | [
"def",
"write",
"(",
"self",
")",
"->",
"None",
":",
"orig_images",
",",
"gen_images",
",",
"real_images",
"=",
"self",
".",
"_get_image_tensors",
"(",
")",
"self",
".",
"_write_images",
"(",
"name",
"=",
"'orig images'",
",",
"images",
"=",
"orig_images",
")",
"self",
".",
"_write_images",
"(",
"name",
"=",
"'gen images'",
",",
"images",
"=",
"gen_images",
")",
"self",
".",
"_write_images",
"(",
"name",
"=",
"'real images'",
",",
"images",
"=",
"real_images",
")"
] | Writes original, generated and real(target) images to Tensorboard. | [
"Writes",
"original",
"generated",
"and",
"real",
"(",
"target",
")",
"images",
"to",
"Tensorboard",
"."
] | python | train |
biolink/ontobio | ontobio/sim/api/interfaces.py | https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/sim/api/interfaces.py#L44-L55 | def filtered_search(self,
id_list: Iterable,
negated_classes: Iterable,
limit: Optional[int],
taxon_filter: Optional,
category_filter: Optional,
method: Optional) -> SimResult:
"""
Given an input iterable of classes or individuals,
provides a ranking of similar profiles
"""
pass | [
"def",
"filtered_search",
"(",
"self",
",",
"id_list",
":",
"Iterable",
",",
"negated_classes",
":",
"Iterable",
",",
"limit",
":",
"Optional",
"[",
"int",
"]",
",",
"taxon_filter",
":",
"Optional",
",",
"category_filter",
":",
"Optional",
",",
"method",
":",
"Optional",
")",
"->",
"SimResult",
":",
"pass"
] | Given an input iterable of classes or individuals,
provides a ranking of similar profiles | [
"Given",
"an",
"input",
"iterable",
"of",
"classes",
"or",
"individuals",
"provides",
"a",
"ranking",
"of",
"similar",
"profiles"
] | python | train |
internetarchive/brozzler | brozzler/ydl.py | https://github.com/internetarchive/brozzler/blob/411b3f266a38b9bb942021c0121ebd8e5ca66447/brozzler/ydl.py#L357-L387 | def do_youtube_dl(worker, site, page):
'''
Runs youtube-dl configured for `worker` and `site` to download videos from
`page`.
Args:
worker (brozzler.BrozzlerWorker): the calling brozzler worker
site (brozzler.Site): the site we are brozzling
page (brozzler.Page): the page we are brozzling
Returns:
tuple with two entries:
`list` of `dict`: with info about urls fetched:
[{
'url': ...,
'method': ...,
'response_code': ...,
'response_headers': ...,
}, ...]
`list` of `str`: outlink urls
'''
with tempfile.TemporaryDirectory(prefix='brzl-ydl-') as tempdir:
ydl = _build_youtube_dl(worker, tempdir, site)
ie_result = _try_youtube_dl(worker, ydl, site, page)
outlinks = set()
if ie_result and ie_result.get('extractor') == 'youtube:playlist':
# youtube watch pages as outlinks
outlinks = {'https://www.youtube.com/watch?v=%s' % e['id']
for e in ie_result.get('entries_no_dl', [])}
# any outlinks for other cases?
return ydl.fetch_spy.fetches, outlinks | [
"def",
"do_youtube_dl",
"(",
"worker",
",",
"site",
",",
"page",
")",
":",
"with",
"tempfile",
".",
"TemporaryDirectory",
"(",
"prefix",
"=",
"'brzl-ydl-'",
")",
"as",
"tempdir",
":",
"ydl",
"=",
"_build_youtube_dl",
"(",
"worker",
",",
"tempdir",
",",
"site",
")",
"ie_result",
"=",
"_try_youtube_dl",
"(",
"worker",
",",
"ydl",
",",
"site",
",",
"page",
")",
"outlinks",
"=",
"set",
"(",
")",
"if",
"ie_result",
"and",
"ie_result",
".",
"get",
"(",
"'extractor'",
")",
"==",
"'youtube:playlist'",
":",
"# youtube watch pages as outlinks",
"outlinks",
"=",
"{",
"'https://www.youtube.com/watch?v=%s'",
"%",
"e",
"[",
"'id'",
"]",
"for",
"e",
"in",
"ie_result",
".",
"get",
"(",
"'entries_no_dl'",
",",
"[",
"]",
")",
"}",
"# any outlinks for other cases?",
"return",
"ydl",
".",
"fetch_spy",
".",
"fetches",
",",
"outlinks"
] | Runs youtube-dl configured for `worker` and `site` to download videos from
`page`.
Args:
worker (brozzler.BrozzlerWorker): the calling brozzler worker
site (brozzler.Site): the site we are brozzling
page (brozzler.Page): the page we are brozzling
Returns:
tuple with two entries:
`list` of `dict`: with info about urls fetched:
[{
'url': ...,
'method': ...,
'response_code': ...,
'response_headers': ...,
}, ...]
`list` of `str`: outlink urls | [
"Runs",
"youtube",
"-",
"dl",
"configured",
"for",
"worker",
"and",
"site",
"to",
"download",
"videos",
"from",
"page",
"."
] | python | train |
tensorflow/probability | tensorflow_probability/python/internal/nest_util.py | https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/internal/nest_util.py#L76-L79 | def expand_as_args(args):
"""Returns `True` if `args` should be expanded as `*args`."""
return (isinstance(args, collections.Sequence) and
not _is_namedtuple(args) and not _force_leaf(args)) | [
"def",
"expand_as_args",
"(",
"args",
")",
":",
"return",
"(",
"isinstance",
"(",
"args",
",",
"collections",
".",
"Sequence",
")",
"and",
"not",
"_is_namedtuple",
"(",
"args",
")",
"and",
"not",
"_force_leaf",
"(",
"args",
")",
")"
] | Returns `True` if `args` should be expanded as `*args`. | [
"Returns",
"True",
"if",
"args",
"should",
"be",
"expanded",
"as",
"*",
"args",
"."
] | python | test |
saltstack/salt | salt/modules/nspawn.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/nspawn.py#L248-L268 | def _ensure_systemd(version):
'''
Raises an exception if the systemd version is not greater than the
passed version.
'''
try:
version = int(version)
except ValueError:
raise CommandExecutionError('Invalid version \'{0}\''.format(version))
try:
installed = _sd_version()
log.debug('nspawn: detected systemd %s', installed)
except (IndexError, ValueError):
raise CommandExecutionError('nspawn: Unable to get systemd version')
if installed < version:
raise CommandExecutionError(
'This function requires systemd >= {0} '
'(Detected version: {1}).'.format(version, installed)
) | [
"def",
"_ensure_systemd",
"(",
"version",
")",
":",
"try",
":",
"version",
"=",
"int",
"(",
"version",
")",
"except",
"ValueError",
":",
"raise",
"CommandExecutionError",
"(",
"'Invalid version \\'{0}\\''",
".",
"format",
"(",
"version",
")",
")",
"try",
":",
"installed",
"=",
"_sd_version",
"(",
")",
"log",
".",
"debug",
"(",
"'nspawn: detected systemd %s'",
",",
"installed",
")",
"except",
"(",
"IndexError",
",",
"ValueError",
")",
":",
"raise",
"CommandExecutionError",
"(",
"'nspawn: Unable to get systemd version'",
")",
"if",
"installed",
"<",
"version",
":",
"raise",
"CommandExecutionError",
"(",
"'This function requires systemd >= {0} '",
"'(Detected version: {1}).'",
".",
"format",
"(",
"version",
",",
"installed",
")",
")"
] | Raises an exception if the systemd version is not greater than the
passed version. | [
"Raises",
"an",
"exception",
"if",
"the",
"systemd",
"version",
"is",
"not",
"greater",
"than",
"the",
"passed",
"version",
"."
] | python | train |
bigchaindb/bigchaindb | bigchaindb/common/schema/__init__.py | https://github.com/bigchaindb/bigchaindb/blob/835fdfcf598918f76139e3b88ee33dd157acaaa7/bigchaindb/common/schema/__init__.py#L71-L81 | def validate_transaction_schema(tx):
"""Validate a transaction dict.
TX_SCHEMA_COMMON contains properties that are common to all types of
transaction. TX_SCHEMA_[TRANSFER|CREATE] add additional constraints on top.
"""
_validate_schema(TX_SCHEMA_COMMON, tx)
if tx['operation'] == 'TRANSFER':
_validate_schema(TX_SCHEMA_TRANSFER, tx)
else:
_validate_schema(TX_SCHEMA_CREATE, tx) | [
"def",
"validate_transaction_schema",
"(",
"tx",
")",
":",
"_validate_schema",
"(",
"TX_SCHEMA_COMMON",
",",
"tx",
")",
"if",
"tx",
"[",
"'operation'",
"]",
"==",
"'TRANSFER'",
":",
"_validate_schema",
"(",
"TX_SCHEMA_TRANSFER",
",",
"tx",
")",
"else",
":",
"_validate_schema",
"(",
"TX_SCHEMA_CREATE",
",",
"tx",
")"
] | Validate a transaction dict.
TX_SCHEMA_COMMON contains properties that are common to all types of
transaction. TX_SCHEMA_[TRANSFER|CREATE] add additional constraints on top. | [
"Validate",
"a",
"transaction",
"dict",
"."
] | python | train |
neovim/pynvim | pynvim/api/nvim.py | https://github.com/neovim/pynvim/blob/5e577188e6d7133f597ad0ce60dc6a4b1314064a/pynvim/api/nvim.py#L301-L320 | def exec_lua(self, code, *args, **kwargs):
"""Execute lua code.
Additional parameters are available as `...` inside the lua chunk.
Only statements are executed. To evaluate an expression, prefix it
with `return`: `return my_function(...)`
There is a shorthand syntax to call lua functions with arguments:
nvim.lua.func(1,2)
nvim.lua.mymod.myfunction(data, async_=True)
is equivalent to
nvim.exec_lua("return func(...)", 1, 2)
nvim.exec_lua("mymod.myfunction(...)", data, async_=True)
Note that with `async_=True` there is no return value.
"""
return self.request('nvim_execute_lua', code, args, **kwargs) | [
"def",
"exec_lua",
"(",
"self",
",",
"code",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"self",
".",
"request",
"(",
"'nvim_execute_lua'",
",",
"code",
",",
"args",
",",
"*",
"*",
"kwargs",
")"
] | Execute lua code.
Additional parameters are available as `...` inside the lua chunk.
Only statements are executed. To evaluate an expression, prefix it
with `return`: `return my_function(...)`
There is a shorthand syntax to call lua functions with arguments:
nvim.lua.func(1,2)
nvim.lua.mymod.myfunction(data, async_=True)
is equivalent to
nvim.exec_lua("return func(...)", 1, 2)
nvim.exec_lua("mymod.myfunction(...)", data, async_=True)
Note that with `async_=True` there is no return value. | [
"Execute",
"lua",
"code",
"."
] | python | train |
jashandeep-sohi/python-blowfish | blowfish.py | https://github.com/jashandeep-sohi/python-blowfish/blob/5ce7f6d54dcef7efd715b26f9a9ffee0d543047e/blowfish.py#L604-L655 | def decrypt_ecb_cts(self, data):
"""
Return an iterator that decrypts `data` using the Electronic Codebook with
Ciphertext Stealing (ECB-CTS) mode of operation.
ECB-CTS mode can only operate on `data` that is greater than 8 bytes in
length.
Each iteration, except the last, always returns a block-sized :obj:`bytes`
object (i.e. 8 bytes). The last iteration may return a :obj:`bytes` object
with a length less than the block-size, if `data` is not a multiple of the
block-size in length.
`data` should be a :obj:`bytes`-like object that is greater than 8 bytes in
length.
If it is not, a :exc:`ValueError` exception is raised.
"""
data_len = len(data)
if data_len <= 8:
raise ValueError("data is not greater than 8 bytes in length")
S1, S2, S3, S4 = self.S
P = self.P
u4_1_pack = self._u4_1_pack
u1_4_unpack = self._u1_4_unpack
u4_2_pack = self._u4_2_pack
u4_2_unpack = self._u4_2_unpack
decrypt = self._decrypt
extra_bytes = data_len % 8
last_block_stop_i = data_len - extra_bytes
cipher_L, cipher_R = u4_2_unpack(data[0:8])
plain_block = u4_2_pack(
*decrypt(cipher_L, cipher_R, P, S1, S2, S3, S4, u4_1_pack, u1_4_unpack)
)
for cipher_L, cipher_R in self._u4_2_iter_unpack(data[8:last_block_stop_i]):
yield plain_block
plain_block = u4_2_pack(
*decrypt(cipher_L, cipher_R, P, S1, S2, S3, S4, u4_1_pack, u1_4_unpack)
)
cipher_L, cipher_R = u4_2_unpack(
data[last_block_stop_i:] + plain_block[extra_bytes:]
)
yield u4_2_pack(
*decrypt(cipher_L, cipher_R, P, S1, S2, S3, S4, u4_1_pack, u1_4_unpack)
)
yield plain_block[:extra_bytes] | [
"def",
"decrypt_ecb_cts",
"(",
"self",
",",
"data",
")",
":",
"data_len",
"=",
"len",
"(",
"data",
")",
"if",
"data_len",
"<=",
"8",
":",
"raise",
"ValueError",
"(",
"\"data is not greater than 8 bytes in length\"",
")",
"S1",
",",
"S2",
",",
"S3",
",",
"S4",
"=",
"self",
".",
"S",
"P",
"=",
"self",
".",
"P",
"u4_1_pack",
"=",
"self",
".",
"_u4_1_pack",
"u1_4_unpack",
"=",
"self",
".",
"_u1_4_unpack",
"u4_2_pack",
"=",
"self",
".",
"_u4_2_pack",
"u4_2_unpack",
"=",
"self",
".",
"_u4_2_unpack",
"decrypt",
"=",
"self",
".",
"_decrypt",
"extra_bytes",
"=",
"data_len",
"%",
"8",
"last_block_stop_i",
"=",
"data_len",
"-",
"extra_bytes",
"cipher_L",
",",
"cipher_R",
"=",
"u4_2_unpack",
"(",
"data",
"[",
"0",
":",
"8",
"]",
")",
"plain_block",
"=",
"u4_2_pack",
"(",
"*",
"decrypt",
"(",
"cipher_L",
",",
"cipher_R",
",",
"P",
",",
"S1",
",",
"S2",
",",
"S3",
",",
"S4",
",",
"u4_1_pack",
",",
"u1_4_unpack",
")",
")",
"for",
"cipher_L",
",",
"cipher_R",
"in",
"self",
".",
"_u4_2_iter_unpack",
"(",
"data",
"[",
"8",
":",
"last_block_stop_i",
"]",
")",
":",
"yield",
"plain_block",
"plain_block",
"=",
"u4_2_pack",
"(",
"*",
"decrypt",
"(",
"cipher_L",
",",
"cipher_R",
",",
"P",
",",
"S1",
",",
"S2",
",",
"S3",
",",
"S4",
",",
"u4_1_pack",
",",
"u1_4_unpack",
")",
")",
"cipher_L",
",",
"cipher_R",
"=",
"u4_2_unpack",
"(",
"data",
"[",
"last_block_stop_i",
":",
"]",
"+",
"plain_block",
"[",
"extra_bytes",
":",
"]",
")",
"yield",
"u4_2_pack",
"(",
"*",
"decrypt",
"(",
"cipher_L",
",",
"cipher_R",
",",
"P",
",",
"S1",
",",
"S2",
",",
"S3",
",",
"S4",
",",
"u4_1_pack",
",",
"u1_4_unpack",
")",
")",
"yield",
"plain_block",
"[",
":",
"extra_bytes",
"]"
] | Return an iterator that decrypts `data` using the Electronic Codebook with
Ciphertext Stealing (ECB-CTS) mode of operation.
ECB-CTS mode can only operate on `data` that is greater than 8 bytes in
length.
Each iteration, except the last, always returns a block-sized :obj:`bytes`
object (i.e. 8 bytes). The last iteration may return a :obj:`bytes` object
with a length less than the block-size, if `data` is not a multiple of the
block-size in length.
`data` should be a :obj:`bytes`-like object that is greater than 8 bytes in
length.
If it is not, a :exc:`ValueError` exception is raised. | [
"Return",
"an",
"iterator",
"that",
"decrypts",
"data",
"using",
"the",
"Electronic",
"Codebook",
"with",
"Ciphertext",
"Stealing",
"(",
"ECB",
"-",
"CTS",
")",
"mode",
"of",
"operation",
".",
"ECB",
"-",
"CTS",
"mode",
"can",
"only",
"operate",
"on",
"data",
"that",
"is",
"greater",
"than",
"8",
"bytes",
"in",
"length",
".",
"Each",
"iteration",
"except",
"the",
"last",
"always",
"returns",
"a",
"block",
"-",
"sized",
":",
"obj",
":",
"bytes",
"object",
"(",
"i",
".",
"e",
".",
"8",
"bytes",
")",
".",
"The",
"last",
"iteration",
"may",
"return",
"a",
":",
"obj",
":",
"bytes",
"object",
"with",
"a",
"length",
"less",
"than",
"the",
"block",
"-",
"size",
"if",
"data",
"is",
"not",
"a",
"multiple",
"of",
"the",
"block",
"-",
"size",
"in",
"length",
".",
"data",
"should",
"be",
"a",
":",
"obj",
":",
"bytes",
"-",
"like",
"object",
"that",
"is",
"greater",
"than",
"8",
"bytes",
"in",
"length",
".",
"If",
"it",
"is",
"not",
"a",
":",
"exc",
":",
"ValueError",
"exception",
"is",
"raised",
"."
] | python | train |
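A round-trip sketch for the ECB-CTS decryption iterator above, assuming the package's Cipher class and a matching encrypt_ecb_cts method as published in python-blowfish; the key and message are placeholders.

    import blowfish

    cipher = blowfish.Cipher(b"this is a key")

    # ECB-CTS needs more than 8 bytes; the length need not be a multiple of 8.
    plaintext = b"exactly twenty bytes"
    ciphertext = b"".join(cipher.encrypt_ecb_cts(plaintext))

    recovered = b"".join(cipher.decrypt_ecb_cts(ciphertext))
    assert recovered == plaintext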
mathiasertl/xmpp-backends | xmpp_backends/base.py | https://github.com/mathiasertl/xmpp-backends/blob/214ef0664dbf90fa300c2483b9b3416559e5d171/xmpp_backends/base.py#L194-L224 | def datetime_to_timestamp(self, dt):
"""Helper function to convert a datetime object to a timestamp.
If datetime instance ``dt`` is naive, it is assumed that it is in UTC.
In Python 3, this just calls ``datetime.timestamp()``, in Python 2, it substracts any timezone offset
and returns the difference since 1970-01-01 00:00:00.
Note that the function always returns an int, even in Python 3.
>>> XmppBackendBase().datetime_to_timestamp(datetime(2017, 9, 17, 19, 59))
1505678340
>>> XmppBackendBase().datetime_to_timestamp(datetime(1984, 11, 6, 13, 21))
468595260
:param dt: The datetime object to convert. If ``None``, returns the current time.
:type dt: datetime
:return: The seconds in UTC.
:rtype: int
"""
if dt is None:
return int(time.time())
if six.PY3:
if not dt.tzinfo:
dt = pytz.utc.localize(dt)
return int(dt.timestamp())
else:
if dt.tzinfo:
dt = dt.replace(tzinfo=None) - dt.utcoffset()
return int((dt - datetime(1970, 1, 1)).total_seconds()) | [
"def",
"datetime_to_timestamp",
"(",
"self",
",",
"dt",
")",
":",
"if",
"dt",
"is",
"None",
":",
"return",
"int",
"(",
"time",
".",
"time",
"(",
")",
")",
"if",
"six",
".",
"PY3",
":",
"if",
"not",
"dt",
".",
"tzinfo",
":",
"dt",
"=",
"pytz",
".",
"utc",
".",
"localize",
"(",
"dt",
")",
"return",
"int",
"(",
"dt",
".",
"timestamp",
"(",
")",
")",
"else",
":",
"if",
"dt",
".",
"tzinfo",
":",
"dt",
"=",
"dt",
".",
"replace",
"(",
"tzinfo",
"=",
"None",
")",
"-",
"dt",
".",
"utcoffset",
"(",
")",
"return",
"int",
"(",
"(",
"dt",
"-",
"datetime",
"(",
"1970",
",",
"1",
",",
"1",
")",
")",
".",
"total_seconds",
"(",
")",
")"
] | Helper function to convert a datetime object to a timestamp.
If datetime instance ``dt`` is naive, it is assumed that it is in UTC.
In Python 3, this just calls ``datetime.timestamp()``, in Python 2, it substracts any timezone offset
and returns the difference since 1970-01-01 00:00:00.
Note that the function always returns an int, even in Python 3.
>>> XmppBackendBase().datetime_to_timestamp(datetime(2017, 9, 17, 19, 59))
1505678340
>>> XmppBackendBase().datetime_to_timestamp(datetime(1984, 11, 6, 13, 21))
468595260
:param dt: The datetime object to convert. If ``None``, returns the current time.
:type dt: datetime
:return: The seconds in UTC.
:rtype: int | [
"Helper",
"function",
"to",
"convert",
"a",
"datetime",
"object",
"to",
"a",
"timestamp",
"."
] | python | train |
Calysto/calysto | calysto/ai/conx.py | https://github.com/Calysto/calysto/blob/20813c0f48096317aa775d03a5c6b20f12fafc93/calysto/ai/conx.py#L1137-L1144 | def getActivationsDict(self, nameList):
"""
Returns a dictionary of layer names that map to a list of activations.
"""
retval = {}
for name in nameList:
retval[name] = self.layersByName[name].getActivationsList()
return retval | [
"def",
"getActivationsDict",
"(",
"self",
",",
"nameList",
")",
":",
"retval",
"=",
"{",
"}",
"for",
"name",
"in",
"nameList",
":",
"retval",
"[",
"name",
"]",
"=",
"self",
".",
"layersByName",
"[",
"name",
"]",
".",
"getActivationsList",
"(",
")",
"return",
"retval"
] | Returns a dictionary of layer names that map to a list of activations. | [
"Returns",
"a",
"dictionary",
"of",
"layer",
"names",
"that",
"map",
"to",
"a",
"list",
"of",
"activations",
"."
] | python | train |