repo (string, 7-54 chars) | path (string, 4-192 chars) | url (string, 87-284 chars) | code (string, 78-104k chars) | code_tokens (list) | docstring (string, 1-46.9k chars) | docstring_tokens (list) | language (1 class: python) | partition (3 classes: train/valid/test)
---|---|---|---|---|---|---|---|---
biolink/ontobio | ontobio/assocmodel.py | https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/assocmodel.py#L319-L331 | def label(self, id):
"""
return label for a subject id
Will make use of both the ontology and the association set
"""
if self.ontology is not None:
label = self.ontology.label(id)
if label is not None:
return label
if self.subject_label_map is not None and id in self.subject_label_map:
return self.subject_label_map[id]
return None | [
"def",
"label",
"(",
"self",
",",
"id",
")",
":",
"if",
"self",
".",
"ontology",
"is",
"not",
"None",
":",
"label",
"=",
"self",
".",
"ontology",
".",
"label",
"(",
"id",
")",
"if",
"label",
"is",
"not",
"None",
":",
"return",
"label",
"if",
"self",
".",
"subject_label_map",
"is",
"not",
"None",
"and",
"id",
"in",
"self",
".",
"subject_label_map",
":",
"return",
"self",
".",
"subject_label_map",
"[",
"id",
"]",
"return",
"None"
] | return label for a subject id
Will make use of both the ontology and the association set | [
"return",
"label",
"for",
"a",
"subject",
"id"
] | python | train |
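A minimal, runnable sketch of the two-tier lookup this `label` method implements: try the ontology first, then fall back to the association set's subject label map. `FakeOntology` and the sample identifiers are hypothetical stand-ins, not part of ontobio.

```python
# Sketch of the ontology-first, map-fallback lookup above.
# FakeOntology and the sample data are hypothetical stand-ins.
class FakeOntology:
    def __init__(self, labels):
        self._labels = labels

    def label(self, id):
        return self._labels.get(id)  # None when unknown

def label(ontology, subject_label_map, id):
    if ontology is not None:
        found = ontology.label(id)
        if found is not None:
            return found
    if subject_label_map is not None and id in subject_label_map:
        return subject_label_map[id]
    return None

onto = FakeOntology({"GO:0008150": "biological_process"})
fallback = {"UniProtKB:P12345": "some protein"}
print(label(onto, fallback, "GO:0008150"))        # from the ontology
print(label(onto, fallback, "UniProtKB:P12345"))  # from the fallback map
print(label(onto, fallback, "missing:id"))        # None
```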
klahnakoski/mo-logs | mo_logs/__init__.py | https://github.com/klahnakoski/mo-logs/blob/0971277ac9caf28a755b766b70621916957d4fea/mo_logs/__init__.py#L42-L98 | def start(cls, settings=None):
"""
RUN ME FIRST TO SETUP THE THREADED LOGGING
http://victorlin.me/2012/08/good-logging-practice-in-python/
log - LIST OF PARAMETERS FOR LOGGER(S)
trace - SHOW MORE DETAILS IN EVERY LOG LINE (default False)
cprofile - True==ENABLE THE C-PROFILER THAT COMES WITH PYTHON (default False)
USE THE LONG FORM TO SET THE FILENAME {"enabled": True, "filename": "cprofile.tab"}
profile - True==ENABLE pyLibrary SIMPLE PROFILING (default False) (eg with Profiler("some description"):)
USE THE LONG FORM TO SET FILENAME {"enabled": True, "filename": "profile.tab"}
constants - UPDATE MODULE CONSTANTS AT STARTUP (PRIMARILY INTENDED TO CHANGE DEBUG STATE)
"""
global _Thread
if not settings:
return
settings = wrap(settings)
Log.stop()
cls.settings = settings
cls.trace = coalesce(settings.trace, False)
if cls.trace:
from mo_threads import Thread as _Thread
_ = _Thread
# ENABLE CPROFILE
if settings.cprofile is False:
settings.cprofile = {"enabled": False}
elif settings.cprofile is True:
if isinstance(settings.cprofile, bool):
settings.cprofile = {"enabled": True, "filename": "cprofile.tab"}
if settings.cprofile.enabled:
from mo_threads import profiles
profiles.enable_profilers(settings.cprofile.filename)
if settings.profile is True or (is_data(settings.profile) and settings.profile.enabled):
Log.error("REMOVED 2018-09-02, Activedata revision 3f30ff46f5971776f8ba18")
# from mo_logs import profiles
#
# if isinstance(settings.profile, bool):
# profiles.ON = True
# settings.profile = {"enabled": True, "filename": "profile.tab"}
#
# if settings.profile.enabled:
# profiles.ON = True
if settings.constants:
constants.set(settings.constants)
if settings.log:
cls.logging_multi = StructuredLogger_usingMulti()
for log in listwrap(settings.log):
Log.add_log(Log.new_instance(log))
from mo_logs.log_usingThread import StructuredLogger_usingThread
cls.main_log = StructuredLogger_usingThread(cls.logging_multi) | [
"def",
"start",
"(",
"cls",
",",
"settings",
"=",
"None",
")",
":",
"global",
"_Thread",
"if",
"not",
"settings",
":",
"return",
"settings",
"=",
"wrap",
"(",
"settings",
")",
"Log",
".",
"stop",
"(",
")",
"cls",
".",
"settings",
"=",
"settings",
"cls",
".",
"trace",
"=",
"coalesce",
"(",
"settings",
".",
"trace",
",",
"False",
")",
"if",
"cls",
".",
"trace",
":",
"from",
"mo_threads",
"import",
"Thread",
"as",
"_Thread",
"_",
"=",
"_Thread",
"# ENABLE CPROFILE",
"if",
"settings",
".",
"cprofile",
"is",
"False",
":",
"settings",
".",
"cprofile",
"=",
"{",
"\"enabled\"",
":",
"False",
"}",
"elif",
"settings",
".",
"cprofile",
"is",
"True",
":",
"if",
"isinstance",
"(",
"settings",
".",
"cprofile",
",",
"bool",
")",
":",
"settings",
".",
"cprofile",
"=",
"{",
"\"enabled\"",
":",
"True",
",",
"\"filename\"",
":",
"\"cprofile.tab\"",
"}",
"if",
"settings",
".",
"cprofile",
".",
"enabled",
":",
"from",
"mo_threads",
"import",
"profiles",
"profiles",
".",
"enable_profilers",
"(",
"settings",
".",
"cprofile",
".",
"filename",
")",
"if",
"settings",
".",
"profile",
"is",
"True",
"or",
"(",
"is_data",
"(",
"settings",
".",
"profile",
")",
"and",
"settings",
".",
"profile",
".",
"enabled",
")",
":",
"Log",
".",
"error",
"(",
"\"REMOVED 2018-09-02, Activedata revision 3f30ff46f5971776f8ba18\"",
")",
"# from mo_logs import profiles",
"#",
"# if isinstance(settings.profile, bool):",
"# profiles.ON = True",
"# settings.profile = {\"enabled\": True, \"filename\": \"profile.tab\"}",
"#",
"# if settings.profile.enabled:",
"# profiles.ON = True",
"if",
"settings",
".",
"constants",
":",
"constants",
".",
"set",
"(",
"settings",
".",
"constants",
")",
"if",
"settings",
".",
"log",
":",
"cls",
".",
"logging_multi",
"=",
"StructuredLogger_usingMulti",
"(",
")",
"for",
"log",
"in",
"listwrap",
"(",
"settings",
".",
"log",
")",
":",
"Log",
".",
"add_log",
"(",
"Log",
".",
"new_instance",
"(",
"log",
")",
")",
"from",
"mo_logs",
".",
"log_usingThread",
"import",
"StructuredLogger_usingThread",
"cls",
".",
"main_log",
"=",
"StructuredLogger_usingThread",
"(",
"cls",
".",
"logging_multi",
")"
] | RUN ME FIRST TO SETUP THE THREADED LOGGING
http://victorlin.me/2012/08/good-logging-practice-in-python/
log - LIST OF PARAMETERS FOR LOGGER(S)
trace - SHOW MORE DETAILS IN EVERY LOG LINE (default False)
cprofile - True==ENABLE THE C-PROFILER THAT COMES WITH PYTHON (default False)
USE THE LONG FORM TO SET THE FILENAME {"enabled": True, "filename": "cprofile.tab"}
profile - True==ENABLE pyLibrary SIMPLE PROFILING (default False) (eg with Profiler("some description"):)
USE THE LONG FORM TO SET FILENAME {"enabled": True, "filename": "profile.tab"}
constants - UPDATE MODULE CONSTANTS AT STARTUP (PRIMARILY INTENDED TO CHANGE DEBUG STATE) | [
"RUN",
"ME",
"FIRST",
"TO",
"SETUP",
"THE",
"THREADED",
"LOGGING",
"http",
":",
"//",
"victorlin",
".",
"me",
"/",
"2012",
"/",
"08",
"/",
"good",
"-",
"logging",
"-",
"practice",
"-",
"in",
"-",
"python",
"/"
] | python | train |
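A hedged usage sketch for `Log.start`. The top-level settings keys follow the docstring above; the shape of the `log` entry (`{"log_type": "console"}`) and the `Log.note` call are assumptions about mo-logs' API, not verified signatures.

```python
# Hedged sketch: settings keys follow the docstring above; the
# "log" entry shape ("log_type": "console") is an assumption
# about mo-logs' handler config, not a verified API.
from mo_logs import Log

settings = {
    "trace": True,                     # richer detail in every log line
    "cprofile": {"enabled": False},    # long form documented above
    "log": [{"log_type": "console"}],  # assumed console logger spec
}

Log.start(settings)
Log.note("logging is up")  # assumption: Log.note(template, **params)
Log.stop()
```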
QuantEcon/QuantEcon.py | quantecon/kalman.py | https://github.com/QuantEcon/QuantEcon.py/blob/26a66c552f2a73967d7efb6e1f4b4c4985a12643/quantecon/kalman.py#L213-L226 | def filtered_to_forecast(self):
"""
Updates the moments of the time t filtering distribution to the
moments of the predictive distribution, which becomes the time
t+1 prior
"""
# === simplify notation === #
A, C = self.ss.A, self.ss.C
Q = np.dot(C, C.T)
# === and then update === #
self.x_hat = dot(A, self.x_hat)
self.Sigma = dot(A, dot(self.Sigma, A.T)) + Q | [
"def",
"filtered_to_forecast",
"(",
"self",
")",
":",
"# === simplify notation === #",
"A",
",",
"C",
"=",
"self",
".",
"ss",
".",
"A",
",",
"self",
".",
"ss",
".",
"C",
"Q",
"=",
"np",
".",
"dot",
"(",
"C",
",",
"C",
".",
"T",
")",
"# === and then update === #",
"self",
".",
"x_hat",
"=",
"dot",
"(",
"A",
",",
"self",
".",
"x_hat",
")",
"self",
".",
"Sigma",
"=",
"dot",
"(",
"A",
",",
"dot",
"(",
"self",
".",
"Sigma",
",",
"A",
".",
"T",
")",
")",
"+",
"Q"
] | Updates the moments of the time t filtering distribution to the
moments of the predictive distribution, which becomes the time
t+1 prior | [
"Updates",
"the",
"moments",
"of",
"the",
"time",
"t",
"filtering",
"distribution",
"to",
"the",
"moments",
"of",
"the",
"predictive",
"distribution",
"which",
"becomes",
"the",
"time",
"t",
"+",
"1",
"prior"
] | python | train |
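The update above is just x_hat <- A x_hat and Sigma <- A Sigma A^T + Q with Q = C C^T; a standalone NumPy rendering of the same step (the matrices here are illustrative, not from QuantEcon):

```python
import numpy as np

# Standalone rendering of the predictive-step update above:
# x_hat <- A x_hat,   Sigma <- A Sigma A^T + Q,   with Q = C C^T.
A = np.array([[1.0, 1.0], [0.0, 1.0]])  # illustrative state transition
C = np.array([[0.5], [0.2]])            # illustrative noise loading
Q = C @ C.T

x_hat = np.array([[1.0], [0.0]])
Sigma = np.eye(2)

x_hat = A @ x_hat
Sigma = A @ Sigma @ A.T + Q
print(x_hat.ravel())
print(Sigma)
```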
openclimatedata/pymagicc | pymagicc/definitions/__init__.py | https://github.com/openclimatedata/pymagicc/blob/d896014832cf458d1e95e5878fd6d5961f3e2e05/pymagicc/definitions/__init__.py#L212-L369 | def get_magicc7_to_openscm_variable_mapping(inverse=False):
"""Get the mappings from MAGICC7 to OpenSCM variables.
Parameters
----------
inverse : bool
If True, return the inverse mappings i.e. OpenSCM to MAGICC7 mappings
Returns
-------
dict
Dictionary of mappings
"""
def get_openscm_replacement(in_var):
if in_var.endswith("_INVERSE_EMIS"):
prefix = "Inverse Emissions"
elif in_var.endswith("_EMIS"):
prefix = "Emissions"
elif in_var.endswith("_CONC"):
prefix = "Atmospheric Concentrations"
elif in_var.endswith("_RF"):
prefix = "Radiative Forcing"
elif in_var.endswith("_OT"):
prefix = "Optical Thickness"
else:
raise ValueError("This shouldn't happen")
variable = in_var.split("_")[0]
# I hate edge cases
if variable.endswith("EQ"):
variable = variable.replace("EQ", " Equivalent")
if "GHG" in variable:
variable = variable.replace("GHG", "Greenhouse Gases")
if "BIOMASSAER" in variable:
variable = variable.replace("BIOMASSAER", "Aerosols|MAGICC AFOLU")
if "CO2CH4N2O" in variable:
variable = variable.replace("CO2CH4N2O", "CO2, CH4 and N2O")
aggregate_indicators = {
"KYOTO": "Kyoto Gases",
"FGASSUM": "F Gases",
"MHALOSUM": "Montreal Protocol Halogen Gases",
}
for agg_indicator, long_name in aggregate_indicators.items():
if variable.startswith(agg_indicator):
stripped_var = variable.replace(agg_indicator, "")
if stripped_var:
variable = DATA_HIERARCHY_SEPARATOR.join([stripped_var, long_name])
else:
variable = long_name
edge_case_B = variable.upper() in ("HCFC141B", "HCFC142B")
if variable.endswith("I"):
variable = DATA_HIERARCHY_SEPARATOR.join(
[variable[:-1], "MAGICC Fossil and Industrial"]
)
elif variable.endswith("B") and not edge_case_B:
variable = DATA_HIERARCHY_SEPARATOR.join([variable[:-1], "MAGICC AFOLU"])
case_adjustments = {
"SOX": "SOx",
"NOX": "NOx",
"HFC134A": "HFC134a",
"HFC143A": "HFC143a",
"HFC152A": "HFC152a",
"HFC227EA": "HFC227ea",
"HFC236FA": "HFC236fa",
"HFC245FA": "HFC245fa",
"HFC365MFC": "HFC365mfc",
"HCFC141B": "HCFC141b",
"HCFC142B": "HCFC142b",
"CH3CCL3": "CH3CCl3",
"CCL4": "CCl4",
"CH3CL": "CH3Cl",
"CH2CL2": "CH2Cl2",
"CHCL3": "CHCl3",
"CH3BR": "CH3Br",
"HALON1211": "Halon1211",
"HALON1301": "Halon1301",
"HALON2402": "Halon2402",
"HALON1202": "Halon1202",
"SOLAR": "Solar",
"VOLCANIC": "Volcanic",
"EXTRA": "Extra",
}
variable = apply_string_substitutions(variable, case_adjustments)
return DATA_HIERARCHY_SEPARATOR.join([prefix, variable])
magicc7_suffixes = ["_EMIS", "_CONC", "_RF", "_OT", "_INVERSE_EMIS"]
magicc7_base_vars = MAGICC7_EMISSIONS_UNITS.magicc_variable.tolist() + [
"SOLAR",
"VOLCANIC",
"CO2EQ",
"KYOTOCO2EQ",
"FGASSUMHFC134AEQ",
"MHALOSUMCFC12EQ",
"GHG",
"KYOTOGHG",
"FGASSUM",
"MHALOSUM",
"BIOMASSAER",
"CO2CH4N2O",
"EXTRA",
]
magicc7_vars = [
base_var + suffix
for base_var in magicc7_base_vars
for suffix in magicc7_suffixes
]
replacements = {m7v: get_openscm_replacement(m7v) for m7v in magicc7_vars}
replacements.update(
{
"SURFACE_TEMP": "Surface Temperature",
"TOTAL_INCLVOLCANIC_RF": "Radiative Forcing",
"VOLCANIC_ANNUAL_RF": "Radiative Forcing|Volcanic",
"TOTAL_ANTHRO_RF": "Radiative Forcing|Anthropogenic",
"TOTAER_DIR_RF": "Radiative Forcing|Aerosols|Direct Effect",
"CLOUD_TOT_RF": "Radiative Forcing|Aerosols|Indirect Effect",
"MINERALDUST_RF": "Radiative Forcing|Mineral Dust",
"STRATOZ_RF": "Radiative Forcing|Stratospheric Ozone",
"TROPOZ_RF": "Radiative Forcing|Tropospheric Ozone",
"CH4OXSTRATH2O_RF": "Radiative Forcing|CH4 Oxidation Stratospheric H2O", # what is this
"LANDUSE_RF": "Radiative Forcing|Land-use Change",
"BCSNOW_RF": "Radiative Forcing|Black Carbon on Snow",
"CO2PF_EMIS": "Land to Air Flux|CO2|MAGICC Permafrost",
# "CH4PF_EMIS": "Land to Air Flux|CH4|MAGICC Permafrost", # TODO: test and then add when needed
}
)
agg_ocean_heat_top = "Aggregated Ocean Heat Content"
heat_content_aggreg_depths = {
"HEATCONTENT_AGGREG_DEPTH{}".format(i): "{}{}Depth {}".format(
agg_ocean_heat_top, DATA_HIERARCHY_SEPARATOR, i
)
for i in range(1, 4)
}
replacements.update(heat_content_aggreg_depths)
replacements.update({"HEATCONTENT_AGGREG_TOTAL": agg_ocean_heat_top})
ocean_temp_layer = {
"OCEAN_TEMP_LAYER_{0:03d}".format(i): "Ocean Temperature{}Layer {}".format(
DATA_HIERARCHY_SEPARATOR, i
)
for i in range(1, 999)
}
replacements.update(ocean_temp_layer)
if inverse:
return {v: k for k, v in replacements.items()}
else:
return replacements | [
"def",
"get_magicc7_to_openscm_variable_mapping",
"(",
"inverse",
"=",
"False",
")",
":",
"def",
"get_openscm_replacement",
"(",
"in_var",
")",
":",
"if",
"in_var",
".",
"endswith",
"(",
"\"_INVERSE_EMIS\"",
")",
":",
"prefix",
"=",
"\"Inverse Emissions\"",
"elif",
"in_var",
".",
"endswith",
"(",
"\"_EMIS\"",
")",
":",
"prefix",
"=",
"\"Emissions\"",
"elif",
"in_var",
".",
"endswith",
"(",
"\"_CONC\"",
")",
":",
"prefix",
"=",
"\"Atmospheric Concentrations\"",
"elif",
"in_var",
".",
"endswith",
"(",
"\"_RF\"",
")",
":",
"prefix",
"=",
"\"Radiative Forcing\"",
"elif",
"in_var",
".",
"endswith",
"(",
"\"_OT\"",
")",
":",
"prefix",
"=",
"\"Optical Thickness\"",
"else",
":",
"raise",
"ValueError",
"(",
"\"This shouldn't happen\"",
")",
"variable",
"=",
"in_var",
".",
"split",
"(",
"\"_\"",
")",
"[",
"0",
"]",
"# I hate edge cases",
"if",
"variable",
".",
"endswith",
"(",
"\"EQ\"",
")",
":",
"variable",
"=",
"variable",
".",
"replace",
"(",
"\"EQ\"",
",",
"\" Equivalent\"",
")",
"if",
"\"GHG\"",
"in",
"variable",
":",
"variable",
"=",
"variable",
".",
"replace",
"(",
"\"GHG\"",
",",
"\"Greenhouse Gases\"",
")",
"if",
"\"BIOMASSAER\"",
"in",
"variable",
":",
"variable",
"=",
"variable",
".",
"replace",
"(",
"\"BIOMASSAER\"",
",",
"\"Aerosols|MAGICC AFOLU\"",
")",
"if",
"\"CO2CH4N2O\"",
"in",
"variable",
":",
"variable",
"=",
"variable",
".",
"replace",
"(",
"\"CO2CH4N2O\"",
",",
"\"CO2, CH4 and N2O\"",
")",
"aggregate_indicators",
"=",
"{",
"\"KYOTO\"",
":",
"\"Kyoto Gases\"",
",",
"\"FGASSUM\"",
":",
"\"F Gases\"",
",",
"\"MHALOSUM\"",
":",
"\"Montreal Protocol Halogen Gases\"",
",",
"}",
"for",
"agg_indicator",
",",
"long_name",
"in",
"aggregate_indicators",
".",
"items",
"(",
")",
":",
"if",
"variable",
".",
"startswith",
"(",
"agg_indicator",
")",
":",
"stripped_var",
"=",
"variable",
".",
"replace",
"(",
"agg_indicator",
",",
"\"\"",
")",
"if",
"stripped_var",
":",
"variable",
"=",
"DATA_HIERARCHY_SEPARATOR",
".",
"join",
"(",
"[",
"stripped_var",
",",
"long_name",
"]",
")",
"else",
":",
"variable",
"=",
"long_name",
"edge_case_B",
"=",
"variable",
".",
"upper",
"(",
")",
"in",
"(",
"\"HCFC141B\"",
",",
"\"HCFC142B\"",
")",
"if",
"variable",
".",
"endswith",
"(",
"\"I\"",
")",
":",
"variable",
"=",
"DATA_HIERARCHY_SEPARATOR",
".",
"join",
"(",
"[",
"variable",
"[",
":",
"-",
"1",
"]",
",",
"\"MAGICC Fossil and Industrial\"",
"]",
")",
"elif",
"variable",
".",
"endswith",
"(",
"\"B\"",
")",
"and",
"not",
"edge_case_B",
":",
"variable",
"=",
"DATA_HIERARCHY_SEPARATOR",
".",
"join",
"(",
"[",
"variable",
"[",
":",
"-",
"1",
"]",
",",
"\"MAGICC AFOLU\"",
"]",
")",
"case_adjustments",
"=",
"{",
"\"SOX\"",
":",
"\"SOx\"",
",",
"\"NOX\"",
":",
"\"NOx\"",
",",
"\"HFC134A\"",
":",
"\"HFC134a\"",
",",
"\"HFC143A\"",
":",
"\"HFC143a\"",
",",
"\"HFC152A\"",
":",
"\"HFC152a\"",
",",
"\"HFC227EA\"",
":",
"\"HFC227ea\"",
",",
"\"HFC236FA\"",
":",
"\"HFC236fa\"",
",",
"\"HFC245FA\"",
":",
"\"HFC245fa\"",
",",
"\"HFC365MFC\"",
":",
"\"HFC365mfc\"",
",",
"\"HCFC141B\"",
":",
"\"HCFC141b\"",
",",
"\"HCFC142B\"",
":",
"\"HCFC142b\"",
",",
"\"CH3CCL3\"",
":",
"\"CH3CCl3\"",
",",
"\"CCL4\"",
":",
"\"CCl4\"",
",",
"\"CH3CL\"",
":",
"\"CH3Cl\"",
",",
"\"CH2CL2\"",
":",
"\"CH2Cl2\"",
",",
"\"CHCL3\"",
":",
"\"CHCl3\"",
",",
"\"CH3BR\"",
":",
"\"CH3Br\"",
",",
"\"HALON1211\"",
":",
"\"Halon1211\"",
",",
"\"HALON1301\"",
":",
"\"Halon1301\"",
",",
"\"HALON2402\"",
":",
"\"Halon2402\"",
",",
"\"HALON1202\"",
":",
"\"Halon1202\"",
",",
"\"SOLAR\"",
":",
"\"Solar\"",
",",
"\"VOLCANIC\"",
":",
"\"Volcanic\"",
",",
"\"EXTRA\"",
":",
"\"Extra\"",
",",
"}",
"variable",
"=",
"apply_string_substitutions",
"(",
"variable",
",",
"case_adjustments",
")",
"return",
"DATA_HIERARCHY_SEPARATOR",
".",
"join",
"(",
"[",
"prefix",
",",
"variable",
"]",
")",
"magicc7_suffixes",
"=",
"[",
"\"_EMIS\"",
",",
"\"_CONC\"",
",",
"\"_RF\"",
",",
"\"_OT\"",
",",
"\"_INVERSE_EMIS\"",
"]",
"magicc7_base_vars",
"=",
"MAGICC7_EMISSIONS_UNITS",
".",
"magicc_variable",
".",
"tolist",
"(",
")",
"+",
"[",
"\"SOLAR\"",
",",
"\"VOLCANIC\"",
",",
"\"CO2EQ\"",
",",
"\"KYOTOCO2EQ\"",
",",
"\"FGASSUMHFC134AEQ\"",
",",
"\"MHALOSUMCFC12EQ\"",
",",
"\"GHG\"",
",",
"\"KYOTOGHG\"",
",",
"\"FGASSUM\"",
",",
"\"MHALOSUM\"",
",",
"\"BIOMASSAER\"",
",",
"\"CO2CH4N2O\"",
",",
"\"EXTRA\"",
",",
"]",
"magicc7_vars",
"=",
"[",
"base_var",
"+",
"suffix",
"for",
"base_var",
"in",
"magicc7_base_vars",
"for",
"suffix",
"in",
"magicc7_suffixes",
"]",
"replacements",
"=",
"{",
"m7v",
":",
"get_openscm_replacement",
"(",
"m7v",
")",
"for",
"m7v",
"in",
"magicc7_vars",
"}",
"replacements",
".",
"update",
"(",
"{",
"\"SURFACE_TEMP\"",
":",
"\"Surface Temperature\"",
",",
"\"TOTAL_INCLVOLCANIC_RF\"",
":",
"\"Radiative Forcing\"",
",",
"\"VOLCANIC_ANNUAL_RF\"",
":",
"\"Radiative Forcing|Volcanic\"",
",",
"\"TOTAL_ANTHRO_RF\"",
":",
"\"Radiative Forcing|Anthropogenic\"",
",",
"\"TOTAER_DIR_RF\"",
":",
"\"Radiative Forcing|Aerosols|Direct Effect\"",
",",
"\"CLOUD_TOT_RF\"",
":",
"\"Radiative Forcing|Aerosols|Indirect Effect\"",
",",
"\"MINERALDUST_RF\"",
":",
"\"Radiative Forcing|Mineral Dust\"",
",",
"\"STRATOZ_RF\"",
":",
"\"Radiative Forcing|Stratospheric Ozone\"",
",",
"\"TROPOZ_RF\"",
":",
"\"Radiative Forcing|Tropospheric Ozone\"",
",",
"\"CH4OXSTRATH2O_RF\"",
":",
"\"Radiative Forcing|CH4 Oxidation Stratospheric H2O\"",
",",
"# what is this",
"\"LANDUSE_RF\"",
":",
"\"Radiative Forcing|Land-use Change\"",
",",
"\"BCSNOW_RF\"",
":",
"\"Radiative Forcing|Black Carbon on Snow\"",
",",
"\"CO2PF_EMIS\"",
":",
"\"Land to Air Flux|CO2|MAGICC Permafrost\"",
",",
"# \"CH4PF_EMIS\": \"Land to Air Flux|CH4|MAGICC Permafrost\", # TODO: test and then add when needed",
"}",
")",
"agg_ocean_heat_top",
"=",
"\"Aggregated Ocean Heat Content\"",
"heat_content_aggreg_depths",
"=",
"{",
"\"HEATCONTENT_AGGREG_DEPTH{}\"",
".",
"format",
"(",
"i",
")",
":",
"\"{}{}Depth {}\"",
".",
"format",
"(",
"agg_ocean_heat_top",
",",
"DATA_HIERARCHY_SEPARATOR",
",",
"i",
")",
"for",
"i",
"in",
"range",
"(",
"1",
",",
"4",
")",
"}",
"replacements",
".",
"update",
"(",
"heat_content_aggreg_depths",
")",
"replacements",
".",
"update",
"(",
"{",
"\"HEATCONTENT_AGGREG_TOTAL\"",
":",
"agg_ocean_heat_top",
"}",
")",
"ocean_temp_layer",
"=",
"{",
"\"OCEAN_TEMP_LAYER_{0:03d}\"",
".",
"format",
"(",
"i",
")",
":",
"\"Ocean Temperature{}Layer {}\"",
".",
"format",
"(",
"DATA_HIERARCHY_SEPARATOR",
",",
"i",
")",
"for",
"i",
"in",
"range",
"(",
"1",
",",
"999",
")",
"}",
"replacements",
".",
"update",
"(",
"ocean_temp_layer",
")",
"if",
"inverse",
":",
"return",
"{",
"v",
":",
"k",
"for",
"k",
",",
"v",
"in",
"replacements",
".",
"items",
"(",
")",
"}",
"else",
":",
"return",
"replacements"
] | Get the mappings from MAGICC7 to OpenSCM variables.
Parameters
----------
inverse : bool
If True, return the inverse mappings i.e. OpenSCM to MAGICC7 mappings
Returns
-------
dict
Dictionary of mappings | [
"Get",
"the",
"mappings",
"from",
"MAGICC7",
"to",
"OpenSCM",
"variables",
"."
] | python | train |
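A compact, runnable sketch of the suffix-to-prefix dispatch inside `get_openscm_replacement`, reproducing only a small subset of the real substitutions (the case adjustments and aggregate indicators are omitted):

```python
# Standalone sketch of the suffix -> prefix dispatch used above
# (only a subset of the real substitutions is reproduced here).
PREFIXES = [
    ("_INVERSE_EMIS", "Inverse Emissions"),  # longest suffix first
    ("_EMIS", "Emissions"),
    ("_CONC", "Atmospheric Concentrations"),
    ("_RF", "Radiative Forcing"),
    ("_OT", "Optical Thickness"),
]

def to_openscm(magicc_var, sep="|"):
    for suffix, prefix in PREFIXES:
        if magicc_var.endswith(suffix):
            name = magicc_var[: -len(suffix)]
            if name.endswith("I"):  # fossil/industrial marker
                name = sep.join([name[:-1], "MAGICC Fossil and Industrial"])
            return sep.join([prefix, name])
    raise ValueError(magicc_var)

print(to_openscm("CO2I_EMIS"))  # Emissions|CO2|MAGICC Fossil and Industrial
print(to_openscm("CH4_CONC"))   # Atmospheric Concentrations|CH4
```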
materialsproject/pymatgen | pymatgen/util/num.py | https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/util/num.py#L156-L169 | def make_symmetric_matrix_from_upper_tri(val):
"""
Given a symmetric matrix in upper triangular matrix form as flat array indexes as:
[A_xx,A_yy,A_zz,A_xy,A_xz,A_yz]
This will generate the full matrix:
[[A_xx,A_xy,A_xz],[A_xy,A_yy,A_yz],[A_xz,A_yz,A_zz]]
"""
idx = [0,3,4,1,5,2]
val = np.array(val)[idx]
mask = ~np.tri(3,k=-1,dtype=bool)
out = np.zeros((3,3),dtype=val.dtype)
out[mask] = val
out.T[mask] = val
return out | [
"def",
"make_symmetric_matrix_from_upper_tri",
"(",
"val",
")",
":",
"idx",
"=",
"[",
"0",
",",
"3",
",",
"4",
",",
"1",
",",
"5",
",",
"2",
"]",
"val",
"=",
"np",
".",
"array",
"(",
"val",
")",
"[",
"idx",
"]",
"mask",
"=",
"~",
"np",
".",
"tri",
"(",
"3",
",",
"k",
"=",
"-",
"1",
",",
"dtype",
"=",
"bool",
")",
"out",
"=",
"np",
".",
"zeros",
"(",
"(",
"3",
",",
"3",
")",
",",
"dtype",
"=",
"val",
".",
"dtype",
")",
"out",
"[",
"mask",
"]",
"=",
"val",
"out",
".",
"T",
"[",
"mask",
"]",
"=",
"val",
"return",
"out"
] | Given a symmetric matrix in upper triangular matrix form as flat array indexes as:
[A_xx,A_yy,A_zz,A_xy,A_xz,A_yz]
This will generate the full matrix:
[[A_xx,A_xy,A_xz],[A_xy,A_yy,A_yz],[A_xz,A_yz,A_zz]] | [
"Given",
"a",
"symmetric",
"matrix",
"in",
"upper",
"triangular",
"matrix",
"form",
"as",
"flat",
"array",
"indexes",
"as",
":",
"[",
"A_xx",
"A_yy",
"A_zz",
"A_xy",
"A_xz",
"A_yz",
"]",
"This",
"will",
"generate",
"the",
"full",
"matrix",
":",
"[[",
"A_xx",
"A_xy",
"A_xz",
"]",
"[",
"A_xy",
"A_yy",
"A_yz",
"]",
"[",
"A_xz",
"A_yz",
"A_zz",
"]"
] | python | train |
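The boolean-mask trick above is worth seeing in isolation; a runnable NumPy demonstration with concrete values:

```python
import numpy as np

# The six Voigt-ordered values [A_xx, A_yy, A_zz, A_xy, A_xz, A_yz]
val = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])

idx = [0, 3, 4, 1, 5, 2]             # reorder to row-major upper triangle
val = val[idx]                       # -> [A_xx, A_xy, A_xz, A_yy, A_yz, A_zz]
mask = ~np.tri(3, k=-1, dtype=bool)  # True on and above the diagonal
out = np.zeros((3, 3))
out[mask] = val                      # fill the upper triangle
out.T[mask] = val                    # the transposed view mirrors it below
print(out)
# [[1. 4. 5.]
#  [4. 2. 6.]
#  [5. 6. 3.]]
```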
unt-libraries/codalib | codalib/bagatom.py | https://github.com/unt-libraries/codalib/blob/458d117bb48938c1a0e26d9161cb5f730461b4c7/codalib/bagatom.py#L157-L172 | def getNodeByName(node, name):
"""
Get the first child node matching a given local name
"""
if node is None:
raise Exception(
"Cannot search for a child '%s' in a None object" % (name,)
)
if not name:
raise Exception("Unspecified name to find node for.")
try:
childNode = node.xpath("*[local-name() = '%s']" % name)[0]
except:
return None
return childNode | [
"def",
"getNodeByName",
"(",
"node",
",",
"name",
")",
":",
"if",
"node",
"is",
"None",
":",
"raise",
"Exception",
"(",
"\"Cannot search for a child '%s' in a None object\"",
"%",
"(",
"name",
",",
")",
")",
"if",
"not",
"name",
":",
"raise",
"Exception",
"(",
"\"Unspecified name to find node for.\"",
")",
"try",
":",
"childNode",
"=",
"node",
".",
"xpath",
"(",
"\"*[local-name() = '%s']\"",
"%",
"name",
")",
"[",
"0",
"]",
"except",
":",
"return",
"None",
"return",
"childNode"
] | Get the first child node matching a given local name | [
"Get",
"the",
"first",
"child",
"node",
"matching",
"a",
"given",
"local",
"name"
] | python | train |
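A runnable demonstration of the namespace-agnostic `local-name()` lookup used by `getNodeByName`, assuming lxml is installed (the sample XML is made up):

```python
# local-name() matches a child element regardless of its namespace,
# which is the whole point of the helper above. Assumes lxml.
from lxml import etree

doc = etree.fromstring(
    b'<bag xmlns="http://example.org/ns"><title>My Bag</title></bag>'
)
child = doc.xpath("*[local-name() = 'title']")[0]
print(child.text)  # My Bag
```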
bwohlberg/sporco | sporco/admm/parcbpdn.py | https://github.com/bwohlberg/sporco/blob/8946a04331106f4e39904fbdf2dc7351900baa04/sporco/admm/parcbpdn.py#L838-L848 | def init_pool(self):
"""Initialize multiprocessing pool if necessary."""
# initialize the pool if needed
if self.pool is None:
if self.nproc > 1:
self.pool = mp.Pool(processes=self.nproc)
else:
self.pool = None
else:
print('pool already initialized?') | [
"def",
"init_pool",
"(",
"self",
")",
":",
"# initialize the pool if needed",
"if",
"self",
".",
"pool",
"is",
"None",
":",
"if",
"self",
".",
"nproc",
">",
"1",
":",
"self",
".",
"pool",
"=",
"mp",
".",
"Pool",
"(",
"processes",
"=",
"self",
".",
"nproc",
")",
"else",
":",
"self",
".",
"pool",
"=",
"None",
"else",
":",
"print",
"(",
"'pool already initialized?'",
")"
] | Initialize multiprocessing pool if necessary. | [
"Initialize",
"multiprocessing",
"pool",
"if",
"necessary",
"."
] | python | train |
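A self-contained sketch of the lazy pool-initialization pattern above; the `Solver` class and worker function are illustrative stand-ins for the real sporco machinery:

```python
# Self-contained sketch of the lazy pool-initialization pattern above.
import multiprocessing as mp

class Solver:
    def __init__(self, nproc):
        self.nproc = nproc
        self.pool = None

    def init_pool(self):
        if self.pool is None:
            # Only pay the fork/spawn cost when parallelism is wanted.
            self.pool = mp.Pool(processes=self.nproc) if self.nproc > 1 else None

def square(x):  # illustrative worker
    return x * x

if __name__ == "__main__":
    s = Solver(nproc=2)
    s.init_pool()
    if s.pool is not None:
        print(s.pool.map(square, range(5)))  # [0, 1, 4, 9, 16]
        s.pool.close()
        s.pool.join()
```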
openstack/python-monascaclient | monascaclient/v2_0/shell.py | https://github.com/openstack/python-monascaclient/blob/03b07534145928eb2debad938da033c232dda105/monascaclient/v2_0/shell.py#L220-L243 | def do_dimension_name_list(mc, args):
'''List names of metric dimensions.'''
fields = {}
if args.metric_name:
fields['metric_name'] = args.metric_name
if args.limit:
fields['limit'] = args.limit
if args.offset:
fields['offset'] = args.offset
if args.tenant_id:
fields['tenant_id'] = args.tenant_id
try:
dimension_names = mc.metrics.list_dimension_names(**fields)
except (osc_exc.ClientException, k_exc.HttpError) as he:
raise osc_exc.CommandError('%s\n%s' % (he.message, he.details))
if args.json:
print(utils.json_formatter(dimension_names))
return
if isinstance(dimension_names, list):
utils.print_list(dimension_names, ['Dimension Names'], formatters={
'Dimension Names': lambda x: x['dimension_name']}) | [
"def",
"do_dimension_name_list",
"(",
"mc",
",",
"args",
")",
":",
"fields",
"=",
"{",
"}",
"if",
"args",
".",
"metric_name",
":",
"fields",
"[",
"'metric_name'",
"]",
"=",
"args",
".",
"metric_name",
"if",
"args",
".",
"limit",
":",
"fields",
"[",
"'limit'",
"]",
"=",
"args",
".",
"limit",
"if",
"args",
".",
"offset",
":",
"fields",
"[",
"'offset'",
"]",
"=",
"args",
".",
"offset",
"if",
"args",
".",
"tenant_id",
":",
"fields",
"[",
"'tenant_id'",
"]",
"=",
"args",
".",
"tenant_id",
"try",
":",
"dimension_names",
"=",
"mc",
".",
"metrics",
".",
"list_dimension_names",
"(",
"*",
"*",
"fields",
")",
"except",
"(",
"osc_exc",
".",
"ClientException",
",",
"k_exc",
".",
"HttpError",
")",
"as",
"he",
":",
"raise",
"osc_exc",
".",
"CommandError",
"(",
"'%s\\n%s'",
"%",
"(",
"he",
".",
"message",
",",
"he",
".",
"details",
")",
")",
"if",
"args",
".",
"json",
":",
"print",
"(",
"utils",
".",
"json_formatter",
"(",
"dimension_names",
")",
")",
"return",
"if",
"isinstance",
"(",
"dimension_names",
",",
"list",
")",
":",
"utils",
".",
"print_list",
"(",
"dimension_names",
",",
"[",
"'Dimension Names'",
"]",
",",
"formatters",
"=",
"{",
"'Dimension Names'",
":",
"lambda",
"x",
":",
"x",
"[",
"'dimension_name'",
"]",
"}",
")"
] | List names of metric dimensions. | [
"List",
"names",
"of",
"metric",
"dimensions",
"."
] | python | train |
ellmetha/django-machina | machina/core/db/models.py | https://github.com/ellmetha/django-machina/blob/89ac083c1eaf1cfdeae6686ee094cc86362e8c69/machina/core/db/models.py#L49-L57 | def model_factory(abstract_class):
""" Given an abstract class, constructs the model that inherits from this class only if a model
with the same (app label, model name) was not already in the app registry.
"""
app_label = abstract_class.Meta.app_label
model_name = abstract_class.__name__.replace('Abstract', '')
if not is_model_registered(app_label, model_name):
return type(str(model_name), (abstract_class, ), {'__module__': __name__, }) | [
"def",
"model_factory",
"(",
"abstract_class",
")",
":",
"app_label",
"=",
"abstract_class",
".",
"Meta",
".",
"app_label",
"model_name",
"=",
"abstract_class",
".",
"__name__",
".",
"replace",
"(",
"'Abstract'",
",",
"''",
")",
"if",
"not",
"is_model_registered",
"(",
"app_label",
",",
"model_name",
")",
":",
"return",
"type",
"(",
"str",
"(",
"model_name",
")",
",",
"(",
"abstract_class",
",",
")",
",",
"{",
"'__module__'",
":",
"__name__",
",",
"}",
")"
] | Given an abstract class, constructs the model that inherits from this class only if a model
with the same (app label, model name) was not already in the app registry. | [
"Given",
"an",
"abstract",
"class",
"constructs",
"the",
"model",
"that",
"inherits",
"from",
"this",
"class",
"only",
"if",
"a",
"model",
"with",
"the",
"same",
"(",
"app",
"label",
"model",
"name",
")",
"was",
"not",
"already",
"in",
"the",
"app",
"registry",
"."
] | python | train |
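The interesting part is the three-argument `type()` call; a framework-free demonstration of the same dynamic class construction (class and attribute names are illustrative, not django-machina's):

```python
# Framework-free demonstration of the dynamic class creation used above.
class AbstractForum:
    app_label = "forum"  # stands in for Meta.app_label here

    def greet(self):
        return "hello from " + type(self).__name__

model_name = AbstractForum.__name__.replace("Abstract", "")  # "Forum"
Forum = type(str(model_name), (AbstractForum,), {"__module__": __name__})

print(Forum().greet())  # hello from Forum
```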
ecederstrand/exchangelib | exchangelib/folders.py | https://github.com/ecederstrand/exchangelib/blob/736347b337c239fcd6d592db5b29e819f753c1ba/exchangelib/folders.py#L146-L161 | def view(self, start, end, max_items=None, *args, **kwargs):
""" Implements the CalendarView option to FindItem. The difference between filter() and view() is that filter()
only returns the master CalendarItem for recurring items, while view() unfolds recurring items and returns all
CalendarItem occurrences as one would normally expect when presenting a calendar.
Supports the same semantics as filter, except for 'start' and 'end' keyword attributes which are both required
and behave differently than filter. Here, they denote the start and end of the timespan of the view. All items
the overlap the timespan are returned (items that end exactly on 'start' are also returned, for some reason).
EWS does not allow combining CalendarView with search restrictions (filter and exclude).
'max_items' defines the maximum number of items returned in this view. Optional.
"""
qs = QuerySet(self).filter(*args, **kwargs)
qs.calendar_view = CalendarView(start=start, end=end, max_items=max_items)
return qs | [
"def",
"view",
"(",
"self",
",",
"start",
",",
"end",
",",
"max_items",
"=",
"None",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"qs",
"=",
"QuerySet",
"(",
"self",
")",
".",
"filter",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"qs",
".",
"calendar_view",
"=",
"CalendarView",
"(",
"start",
"=",
"start",
",",
"end",
"=",
"end",
",",
"max_items",
"=",
"max_items",
")",
"return",
"qs"
] | Implements the CalendarView option to FindItem. The difference between filter() and view() is that filter()
only returns the master CalendarItem for recurring items, while view() unfolds recurring items and returns all
CalendarItem occurrences as one would normally expect when presenting a calendar.
Supports the same semantics as filter, except for 'start' and 'end' keyword attributes which are both required
and behave differently than filter. Here, they denote the start and end of the timespan of the view. All items
the overlap the timespan are returned (items that end exactly on 'start' are also returned, for some reason).
EWS does not allow combining CalendarView with search restrictions (filter and exclude).
'max_items' defines the maximum number of items returned in this view. Optional. | [
"Implements",
"the",
"CalendarView",
"option",
"to",
"FindItem",
".",
"The",
"difference",
"between",
"filter",
"()",
"and",
"view",
"()",
"is",
"that",
"filter",
"()",
"only",
"returns",
"the",
"master",
"CalendarItem",
"for",
"recurring",
"items",
"while",
"view",
"()",
"unfolds",
"recurring",
"items",
"and",
"returns",
"all",
"CalendarItem",
"occurrences",
"as",
"one",
"would",
"normally",
"expect",
"when",
"presenting",
"a",
"calendar",
"."
] | python | train |
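A hedged usage sketch: `account` is assumed to be an authenticated exchangelib `Account` and `start`/`end` timezone-aware datetimes; only the `view(start, end, max_items)` call itself comes from the method above, and this cannot run without a live Exchange server.

```python
# Hedged sketch -- `account`, `start` and `end` are assumed to exist.
for occurrence in account.calendar.view(start=start, end=end, max_items=10):
    # Unlike filter(), recurring items arrive unfolded, one per occurrence.
    print(occurrence.subject, occurrence.start)
```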
wrobstory/vincent | vincent/data.py | https://github.com/wrobstory/vincent/blob/c5a06e50179015fbb788a7a42e4570ff4467a9e9/vincent/data.py#L294-L339 | def from_mult_iters(cls, name=None, idx=None, **kwargs):
"""Load values from multiple iters
Parameters
----------
name : string, default None
Name of the data set. If None (default), the name will be set to
``'table'``.
idx: string, default None
Iterable to use for the data index
**kwargs : dict of iterables
The ``values`` field will contain dictionaries with keys for
each of the iterables provided. For example,
d = Data.from_mult_iters(idx='x', x=[0, 1, 5], y=(10, 20, 30))
would result in ``d`` having a ``values`` field with
[{'idx': 0, 'col': 'y', 'val': 10},
{'idx': 1, 'col': 'y', 'val': 20}]
If the iterables are not the same length, then ValueError is
raised.
"""
if not name:
name = 'table'
lengths = [len(v) for v in kwargs.values()]
if len(set(lengths)) != 1:
raise ValueError('Iterables must all be same length')
if not idx:
raise ValueError('Must provide iter name index reference')
index = kwargs.pop(idx)
vega_vals = []
for k, v in sorted(kwargs.items()):
for idx, val in zip(index, v):
value = {}
value['idx'] = idx
value['col'] = k
value['val'] = val
vega_vals.append(value)
return cls(name, values=vega_vals) | [
"def",
"from_mult_iters",
"(",
"cls",
",",
"name",
"=",
"None",
",",
"idx",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"not",
"name",
":",
"name",
"=",
"'table'",
"lengths",
"=",
"[",
"len",
"(",
"v",
")",
"for",
"v",
"in",
"kwargs",
".",
"values",
"(",
")",
"]",
"if",
"len",
"(",
"set",
"(",
"lengths",
")",
")",
"!=",
"1",
":",
"raise",
"ValueError",
"(",
"'Iterables must all be same length'",
")",
"if",
"not",
"idx",
":",
"raise",
"ValueError",
"(",
"'Must provide iter name index reference'",
")",
"index",
"=",
"kwargs",
".",
"pop",
"(",
"idx",
")",
"vega_vals",
"=",
"[",
"]",
"for",
"k",
",",
"v",
"in",
"sorted",
"(",
"kwargs",
".",
"items",
"(",
")",
")",
":",
"for",
"idx",
",",
"val",
"in",
"zip",
"(",
"index",
",",
"v",
")",
":",
"value",
"=",
"{",
"}",
"value",
"[",
"'idx'",
"]",
"=",
"idx",
"value",
"[",
"'col'",
"]",
"=",
"k",
"value",
"[",
"'val'",
"]",
"=",
"val",
"vega_vals",
".",
"append",
"(",
"value",
")",
"return",
"cls",
"(",
"name",
",",
"values",
"=",
"vega_vals",
")"
] | Load values from multiple iters
Parameters
----------
name : string, default None
Name of the data set. If None (default), the name will be set to
``'table'``.
idx: string, default None
Iterable to use for the data index
**kwargs : dict of iterables
The ``values`` field will contain dictionaries with keys for
each of the iterables provided. For example,
d = Data.from_iters(idx='x', x=[0, 1, 5], y=(10, 20, 30))
would result in ``d`` having a ``values`` field with
[{'idx': 0, 'col': 'y', 'val': 10},
{'idx': 1, 'col': 'y', 'val': 20}
If the iterables are not the same length, then ValueError is
raised. | [
"Load",
"values",
"from",
"multiple",
"iters"
] | python | train |
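A hedged usage sketch of the classmethod above; the import path is inferred from this row's `vincent/data.py` and is an assumption:

```python
# Hedged sketch: import path inferred from vincent/data.py.
from vincent.data import Data

d = Data.from_mult_iters(name='table', idx='x',
                         x=[0, 1, 5], y=(10, 20, 30))
# Assumption: d.values holds the long-form records built above,
# e.g. {'idx': 0, 'col': 'y', 'val': 10}, one per (index, column) pair.
print(d.values[:2])
```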
Metatab/metatab | metatab/doc.py | https://github.com/Metatab/metatab/blob/8336ec3e4bd8da84a9a5cb86de1c1086e14b8b22/metatab/doc.py#L524-L543 | def cleanse(self):
"""Clean up some terms, like ensuring that the name is a slug"""
from .util import slugify
self.ensure_identifier()
try:
self.update_name()
except MetatabError:
identifier = self['Root'].find_first('Root.Identifier')
name = self['Root'].find_first('Root.Name')
if name and name.value:
name.value = slugify(name.value)
elif name:
name.value = slugify(identifier.value)
else:
self['Root'].get_or_new_term('Root.Name').value = slugify(identifier.value) | [
"def",
"cleanse",
"(",
"self",
")",
":",
"from",
".",
"util",
"import",
"slugify",
"self",
".",
"ensure_identifier",
"(",
")",
"try",
":",
"self",
".",
"update_name",
"(",
")",
"except",
"MetatabError",
":",
"identifier",
"=",
"self",
"[",
"'Root'",
"]",
".",
"find_first",
"(",
"'Root.Identifier'",
")",
"name",
"=",
"self",
"[",
"'Root'",
"]",
".",
"find_first",
"(",
"'Root.Name'",
")",
"if",
"name",
"and",
"name",
".",
"value",
":",
"name",
".",
"value",
"=",
"slugify",
"(",
"name",
".",
"value",
")",
"elif",
"name",
":",
"name",
".",
"value",
"=",
"slugify",
"(",
"identifier",
".",
"value",
")",
"else",
":",
"self",
"[",
"'Root'",
"]",
".",
"get_or_new_term",
"(",
"'Root.Name'",
")",
".",
"value",
"=",
"slugify",
"(",
"identifier",
".",
"value",
")"
] | Clean up some terms, like ensuring that the name is a slug | [
"Clean",
"up",
"some",
"terms",
"like",
"ensuring",
"that",
"the",
"name",
"is",
"a",
"slug"
] | python | train |
bitesofcode/projexui | projexui/widgets/xchart/xchart.py | https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xchart/xchart.py#L517-L530 | def setDatasets(self, datasets):
"""
Sets the dataset list for this chart to the input data.
:param datasets | [<XChartDataset>, ..]
"""
self.clearDatasets()
self._datasets = datasets
for dataset in datasets:
self._addDatasetAction(dataset)
self._dataChanged = True
self.recalculate() | [
"def",
"setDatasets",
"(",
"self",
",",
"datasets",
")",
":",
"self",
".",
"clearDatasets",
"(",
")",
"self",
".",
"_datasets",
"=",
"datasets",
"for",
"dataset",
"in",
"datasets",
":",
"self",
".",
"_addDatasetAction",
"(",
"dataset",
")",
"self",
".",
"_dataChanged",
"=",
"True",
"self",
".",
"recalculate",
"(",
")"
] | Sets the dataset list for this chart to the input data.
:param datasets | [<XChartDataset>, ..] | [
"Sets",
"the",
"dataset",
"list",
"for",
"this",
"chart",
"to",
"the",
"inputed",
"data",
".",
":",
"param",
"datasets",
"|",
"[",
"<XChartDataset",
">",
"..",
"]"
] | python | train |
joshspeagle/dynesty | dynesty/bounding.py | https://github.com/joshspeagle/dynesty/blob/9e482aafeb5cf84bedb896fa6f07a761d917983e/dynesty/bounding.py#L884-L890 | def scale_to_vol(self, vol):
"""Scale cube to encompass a target volume."""
f = (vol / self.vol_cube) ** (1.0 / self.n) # linear factor
self.expand *= f
self.hside *= f
self.vol_cube = vol | [
"def",
"scale_to_vol",
"(",
"self",
",",
"vol",
")",
":",
"f",
"=",
"(",
"vol",
"/",
"self",
".",
"vol_cube",
")",
"**",
"(",
"1.0",
"/",
"self",
".",
"n",
")",
"# linear factor",
"self",
".",
"expand",
"*=",
"f",
"self",
".",
"hside",
"*=",
"f",
"self",
".",
"vol_cube",
"=",
"vol"
] | Scale cube to encompass a target volume. | [
"Scale",
"cube",
"to",
"encompass",
"a",
"target",
"volume",
"."
] | python | train |
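The scaling factor is purely geometric: multiplying every side of an n-cube by f multiplies its volume by f**n, so f = (vol_target / vol_current)**(1/n). A standalone check with illustrative numbers:

```python
# Standalone check of the linear scale factor used above.
n = 3
hside = 2.0                  # half-side length
vol_cube = (2 * hside) ** n  # current volume: 64.0
target = 8.0

f = (target / vol_cube) ** (1.0 / n)
hside *= f
print(f, (2 * hside) ** n)   # ~0.5  ~8.0
```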
OpenKMIP/PyKMIP | kmip/services/server/crypto/engine.py | https://github.com/OpenKMIP/PyKMIP/blob/b51c5b044bd05f8c85a1d65d13a583a4d8fc1b0e/kmip/services/server/crypto/engine.py#L1191-L1217 | def _create_RSA_private_key(self,
bytes):
"""
Instantiates an RSA key from bytes.
Args:
bytes (byte string): Bytes of RSA private key.
Returns:
private_key
(cryptography.hazmat.primitives.asymmetric.rsa.RSAPrivateKey):
RSA private key created from key bytes.
"""
try:
private_key = serialization.load_pem_private_key(
bytes,
password=None,
backend=default_backend()
)
return private_key
except Exception:
private_key = serialization.load_der_private_key(
bytes,
password=None,
backend=default_backend()
)
return private_key | [
"def",
"_create_RSA_private_key",
"(",
"self",
",",
"bytes",
")",
":",
"try",
":",
"private_key",
"=",
"serialization",
".",
"load_pem_private_key",
"(",
"bytes",
",",
"password",
"=",
"None",
",",
"backend",
"=",
"default_backend",
"(",
")",
")",
"return",
"private_key",
"except",
"Exception",
":",
"private_key",
"=",
"serialization",
".",
"load_der_private_key",
"(",
"bytes",
",",
"password",
"=",
"None",
",",
"backend",
"=",
"default_backend",
"(",
")",
")",
"return",
"private_key"
] | Instantiates an RSA key from bytes.
Args:
bytes (byte string): Bytes of RSA private key.
Returns:
private_key
(cryptography.hazmat.primitives.asymmetric.rsa.RSAPrivateKey):
RSA private key created from key bytes. | [
"Instantiates",
"an",
"RSA",
"key",
"from",
"bytes",
"."
] | python | test |
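A runnable round trip using the same `cryptography` calls: generate a key, dump it to PEM, then reload it with `load_pem_private_key` exactly as the helper above does.

```python
# Runnable round trip using the same `cryptography` calls as above.
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import rsa

key = rsa.generate_private_key(public_exponent=65537, key_size=2048)
pem = key.private_bytes(
    encoding=serialization.Encoding.PEM,
    format=serialization.PrivateFormat.PKCS8,
    encryption_algorithm=serialization.NoEncryption(),
)

# The backend kwarg mirrors the helper above; newer cryptography ignores it.
reloaded = serialization.load_pem_private_key(
    pem, password=None, backend=default_backend()
)
print(reloaded.key_size)  # 2048
```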
noobermin/lspreader | lspreader/lspreader.py | https://github.com/noobermin/lspreader/blob/903b9d6427513b07986ffacf76cbca54e18d8be6/lspreader/lspreader.py#L43-L59 | def get_list(file,fmt):
'''makes a list out of the fmt from the LspOutput f using the format
i for int
f for float
d for double
s for string'''
out=[]
for i in fmt:
if i == 'i':
out.append(get_int(file));
elif i == 'f' or i == 'd':
out.append(get_float(file));
elif i == 's':
out.append(get_str(file));
else:
raise ValueError("Unexpected flag '{}'".format(i));
return out; | [
"def",
"get_list",
"(",
"file",
",",
"fmt",
")",
":",
"out",
"=",
"[",
"]",
"for",
"i",
"in",
"fmt",
":",
"if",
"i",
"==",
"'i'",
":",
"out",
".",
"append",
"(",
"get_int",
"(",
"file",
")",
")",
"elif",
"i",
"==",
"'f'",
"or",
"i",
"==",
"'d'",
":",
"out",
".",
"append",
"(",
"get_float",
"(",
"file",
")",
")",
"elif",
"i",
"==",
"'s'",
":",
"out",
".",
"append",
"(",
"get_str",
"(",
"file",
")",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"Unexpected flag '{}'\"",
".",
"format",
"(",
"i",
")",
")",
"return",
"out"
] | makes a list out of the fmt from the LspOutput f using the format
i for int
f for float
d for double
s for string | [
"makes",
"a",
"list",
"out",
"of",
"the",
"fmt",
"from",
"the",
"LspOutput",
"f",
"using",
"the",
"format",
"i",
"for",
"int",
"f",
"for",
"float",
"d",
"for",
"double",
"s",
"for",
"string"
] | python | train |
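A self-contained sketch of the same format-character dispatch over an in-memory stream. The binary layouts chosen here (little-endian int32/float32, length-prefixed string) are illustrative assumptions, not the actual .lsp file layout:

```python
# Self-contained sketch of the format-character dispatch above.
# The layouts (little-endian i4/f4, u4 length-prefixed str) are
# illustrative assumptions, not the actual .lsp layout.
import io
import struct

def get_int(f):   return struct.unpack('<i', f.read(4))[0]
def get_float(f): return struct.unpack('<f', f.read(4))[0]
def get_str(f):
    n = struct.unpack('<I', f.read(4))[0]
    return f.read(n).decode('ascii')

def get_list(f, fmt):
    out = []
    for c in fmt:
        if c == 'i':
            out.append(get_int(f))
        elif c in ('f', 'd'):
            out.append(get_float(f))
        elif c == 's':
            out.append(get_str(f))
        else:
            raise ValueError("Unexpected flag '{}'".format(c))
    return out

buf = io.BytesIO(struct.pack('<if', 7, 1.5) + struct.pack('<I', 2) + b'ok')
print(get_list(buf, 'ifs'))  # [7, 1.5, 'ok']
```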
delph-in/pydelphin | delphin/tsql.py | https://github.com/delph-in/pydelphin/blob/7bd2cd63ab7cf74803e1d6547b9ebc014b382abd/delphin/tsql.py#L121-L150 | def query(query, ts, **kwargs):
"""
Perform *query* on the testsuite *ts*.
Note: currently only 'select' queries are supported.
Args:
query (str): TSQL query string
ts (:class:`delphin.itsdb.TestSuite`): testsuite to query over
kwargs: keyword arguments passed to the more specific query
function (e.g., :func:`select`)
Example:
>>> list(tsql.query('select i-id where i-length < 4', ts))
[[142], [1061]]
"""
queryobj = _parse_query(query)
if queryobj['querytype'] in ('select', 'retrieve'):
return _select(
queryobj['projection'],
queryobj['tables'],
queryobj['where'],
ts,
mode=kwargs.get('mode', 'list'),
cast=kwargs.get('cast', True))
else:
# not really a syntax error; replace with TSQLError or something
# when the proper exception class exists
raise TSQLSyntaxError(queryobj['querytype'] +
' queries are not supported') | [
"def",
"query",
"(",
"query",
",",
"ts",
",",
"*",
"*",
"kwargs",
")",
":",
"queryobj",
"=",
"_parse_query",
"(",
"query",
")",
"if",
"queryobj",
"[",
"'querytype'",
"]",
"in",
"(",
"'select'",
",",
"'retrieve'",
")",
":",
"return",
"_select",
"(",
"queryobj",
"[",
"'projection'",
"]",
",",
"queryobj",
"[",
"'tables'",
"]",
",",
"queryobj",
"[",
"'where'",
"]",
",",
"ts",
",",
"mode",
"=",
"kwargs",
".",
"get",
"(",
"'mode'",
",",
"'list'",
")",
",",
"cast",
"=",
"kwargs",
".",
"get",
"(",
"'cast'",
",",
"True",
")",
")",
"else",
":",
"# not really a syntax error; replace with TSQLError or something",
"# when the proper exception class exists",
"raise",
"TSQLSyntaxError",
"(",
"queryobj",
"[",
"'querytype'",
"]",
"+",
"' queries are not supported'",
")"
] | Perform *query* on the testsuite *ts*.
Note: currently only 'select' queries are supported.
Args:
query (str): TSQL query string
ts (:class:`delphin.itsdb.TestSuite`): testsuite to query over
kwargs: keyword arguments passed to the more specific query
function (e.g., :func:`select`)
Example:
>>> list(tsql.query('select i-id where i-length < 4', ts))
[[142], [1061]] | [
"Perform",
"*",
"query",
"*",
"on",
"the",
"testsuite",
"*",
"ts",
"*",
"."
] | python | train |
spyder-ide/spyder | spyder/plugins/editor/plugin.py | https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/plugins/editor/plugin.py#L1941-L1946 | def close_file_from_name(self, filename):
"""Close file from its name"""
filename = osp.abspath(to_text_string(filename))
index = self.editorstacks[0].has_filename(filename)
if index is not None:
self.editorstacks[0].close_file(index) | [
"def",
"close_file_from_name",
"(",
"self",
",",
"filename",
")",
":",
"filename",
"=",
"osp",
".",
"abspath",
"(",
"to_text_string",
"(",
"filename",
")",
")",
"index",
"=",
"self",
".",
"editorstacks",
"[",
"0",
"]",
".",
"has_filename",
"(",
"filename",
")",
"if",
"index",
"is",
"not",
"None",
":",
"self",
".",
"editorstacks",
"[",
"0",
"]",
".",
"close_file",
"(",
"index",
")"
] | Close file from its name | [
"Close",
"file",
"from",
"its",
"name"
] | python | train |
SatelliteQE/nailgun | nailgun/entities.py | https://github.com/SatelliteQE/nailgun/blob/c36d8c20862e87bf6975bd48ac1ca40a9e634eaa/nailgun/entities.py#L4588-L4599 | def create_payload(self):
"""Rename the payload key "prior_id" to "prior".
For more information, see `Bugzilla #1238757
<https://bugzilla.redhat.com/show_bug.cgi?id=1238757>`_.
"""
payload = super(LifecycleEnvironment, self).create_payload()
if (_get_version(self._server_config) < Version('6.1') and
'prior_id' in payload):
payload['prior'] = payload.pop('prior_id')
return payload | [
"def",
"create_payload",
"(",
"self",
")",
":",
"payload",
"=",
"super",
"(",
"LifecycleEnvironment",
",",
"self",
")",
".",
"create_payload",
"(",
")",
"if",
"(",
"_get_version",
"(",
"self",
".",
"_server_config",
")",
"<",
"Version",
"(",
"'6.1'",
")",
"and",
"'prior_id'",
"in",
"payload",
")",
":",
"payload",
"[",
"'prior'",
"]",
"=",
"payload",
".",
"pop",
"(",
"'prior_id'",
")",
"return",
"payload"
] | Rename the payload key "prior_id" to "prior".
For more information, see `Bugzilla #1238757
<https://bugzilla.redhat.com/show_bug.cgi?id=1238757>`_. | [
"Rename",
"the",
"payload",
"key",
"prior_id",
"to",
"prior",
"."
] | python | train |
apache/incubator-mxnet | python/mxnet/symbol/symbol.py | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/symbol/symbol.py#L2744-L2789 | def pow(base, exp):
"""Returns element-wise result of base element raised to powers from exp element.
Both inputs can be Symbol or scalar number.
Broadcasting is not supported. Use `broadcast_pow` instead.
`sym.pow` is being deprecated, please use `sym.power` instead.
Parameters
---------
base : Symbol or scalar
The base symbol
exp : Symbol or scalar
The exponent symbol
Returns
-------
Symbol or scalar
The bases in x raised to the exponents in y.
Examples
--------
>>> mx.sym.pow(2, 3)
8
>>> x = mx.sym.Variable('x')
>>> y = mx.sym.Variable('y')
>>> z = mx.sym.pow(x, 2)
>>> z.eval(x=mx.nd.array([1,2]))[0].asnumpy()
array([ 1., 4.], dtype=float32)
>>> z = mx.sym.pow(3, y)
>>> z.eval(y=mx.nd.array([2,3]))[0].asnumpy()
array([ 9., 27.], dtype=float32)
>>> z = mx.sym.pow(x, y)
>>> z.eval(x=mx.nd.array([3,4]), y=mx.nd.array([2,3]))[0].asnumpy()
array([ 9., 64.], dtype=float32)
"""
if isinstance(base, Symbol) and isinstance(exp, Symbol):
return _internal._Power(base, exp)
if isinstance(base, Symbol) and isinstance(exp, Number):
return _internal._PowerScalar(base, scalar=exp)
if isinstance(base, Number) and isinstance(exp, Symbol):
return _internal._RPowerScalar(exp, scalar=base)
if isinstance(base, Number) and isinstance(exp, Number):
return base**exp
else:
raise TypeError('types (%s, %s) not supported' % (str(type(base)), str(type(exp)))) | [
"def",
"pow",
"(",
"base",
",",
"exp",
")",
":",
"if",
"isinstance",
"(",
"base",
",",
"Symbol",
")",
"and",
"isinstance",
"(",
"exp",
",",
"Symbol",
")",
":",
"return",
"_internal",
".",
"_Power",
"(",
"base",
",",
"exp",
")",
"if",
"isinstance",
"(",
"base",
",",
"Symbol",
")",
"and",
"isinstance",
"(",
"exp",
",",
"Number",
")",
":",
"return",
"_internal",
".",
"_PowerScalar",
"(",
"base",
",",
"scalar",
"=",
"exp",
")",
"if",
"isinstance",
"(",
"base",
",",
"Number",
")",
"and",
"isinstance",
"(",
"exp",
",",
"Symbol",
")",
":",
"return",
"_internal",
".",
"_RPowerScalar",
"(",
"exp",
",",
"scalar",
"=",
"base",
")",
"if",
"isinstance",
"(",
"base",
",",
"Number",
")",
"and",
"isinstance",
"(",
"exp",
",",
"Number",
")",
":",
"return",
"base",
"**",
"exp",
"else",
":",
"raise",
"TypeError",
"(",
"'types (%s, %s) not supported'",
"%",
"(",
"str",
"(",
"type",
"(",
"base",
")",
")",
",",
"str",
"(",
"type",
"(",
"exp",
")",
")",
")",
")"
] | Returns element-wise result of base element raised to powers from exp element.
Both inputs can be Symbol or scalar number.
Broadcasting is not supported. Use `broadcast_pow` instead.
`sym.pow` is being deprecated, please use `sym.power` instead.
Parameters
---------
base : Symbol or scalar
The base symbol
exp : Symbol or scalar
The exponent symbol
Returns
-------
Symbol or scalar
The bases in x raised to the exponents in y.
Examples
--------
>>> mx.sym.pow(2, 3)
8
>>> x = mx.sym.Variable('x')
>>> y = mx.sym.Variable('y')
>>> z = mx.sym.pow(x, 2)
>>> z.eval(x=mx.nd.array([1,2]))[0].asnumpy()
array([ 1., 4.], dtype=float32)
>>> z = mx.sym.pow(3, y)
>>> z.eval(y=mx.nd.array([2,3]))[0].asnumpy()
array([ 9., 27.], dtype=float32)
>>> z = mx.sym.pow(x, y)
>>> z.eval(x=mx.nd.array([3,4]), y=mx.nd.array([2,3]))[0].asnumpy()
array([ 9., 64.], dtype=float32) | [
"Returns",
"element",
"-",
"wise",
"result",
"of",
"base",
"element",
"raised",
"to",
"powers",
"from",
"exp",
"element",
"."
] | python | train |
corydodt/Crosscap | crosscap/openapi.py | https://github.com/corydodt/Crosscap/blob/388a2ec36b8aa85e8f1ed692bb6e43474ba76c8e/crosscap/openapi.py#L154-L165 | def representCleanOpenAPIPathItem(dumper, data):
"""
Unpack operation key/values before representing an OpenAPIPathItem
"""
dct = _orderedCleanDict(data)
if '_operations' in dct:
items = sorted(data._operations.items())
for k, op in items:
dct[k] = op
del dct['_operations']
return dumper.yaml_representers[type(dct)](dumper, dct) | [
"def",
"representCleanOpenAPIPathItem",
"(",
"dumper",
",",
"data",
")",
":",
"dct",
"=",
"_orderedCleanDict",
"(",
"data",
")",
"if",
"'_operations'",
"in",
"dct",
":",
"items",
"=",
"sorted",
"(",
"data",
".",
"_operations",
".",
"items",
"(",
")",
")",
"for",
"k",
",",
"op",
"in",
"items",
":",
"dct",
"[",
"k",
"]",
"=",
"op",
"del",
"dct",
"[",
"'_operations'",
"]",
"return",
"dumper",
".",
"yaml_representers",
"[",
"type",
"(",
"dct",
")",
"]",
"(",
"dumper",
",",
"dct",
")"
] | Unpack operation key/values before representing an OpenAPIPathItem | [
"Unpack",
"operation",
"key",
"/",
"values",
"before",
"representing",
"an",
"OpenAPIPathItem"
] | python | train |
zetaops/zengine | zengine/messaging/views.py | https://github.com/zetaops/zengine/blob/b5bc32d3b37bca799f8985be916f04528ac79e4a/zengine/messaging/views.py#L952-L987 | def list_favorites(current):
"""
List user's favorites. If "channel_key" given, will return favorites belong to that channel.
.. code-block:: python
# request:
{
'view':'_zops_list_favorites,
'channel_key': key,
}
# response:
{
'status': 'OK',
'code': 200
'favorites':[{'key': key,
'channel_key': key,
'message_key': key,
'message_summary': string, # max 60 char
'channel_name': string,
},]
}
"""
current.output = {'status': 'OK', 'code': 200, 'favorites': []}
query_set = Favorite(current).objects.filter(user_id=current.user_id)
if current.input['channel_key']:
query_set = query_set.filter(channel_id=current.input['channel_key'])
current.output['favorites'] = [{
'key': fav.key,
'channel_key': fav.channel.key,
'message_key': fav.message.key,
'message_summary': fav.summary,
'channel_name': fav.channel_name
} for fav in query_set] | [
"def",
"list_favorites",
"(",
"current",
")",
":",
"current",
".",
"output",
"=",
"{",
"'status'",
":",
"'OK'",
",",
"'code'",
":",
"200",
",",
"'favorites'",
":",
"[",
"]",
"}",
"query_set",
"=",
"Favorite",
"(",
"current",
")",
".",
"objects",
".",
"filter",
"(",
"user_id",
"=",
"current",
".",
"user_id",
")",
"if",
"current",
".",
"input",
"[",
"'channel_key'",
"]",
":",
"query_set",
"=",
"query_set",
".",
"filter",
"(",
"channel_id",
"=",
"current",
".",
"input",
"[",
"'channel_key'",
"]",
")",
"current",
".",
"output",
"[",
"'favorites'",
"]",
"=",
"[",
"{",
"'key'",
":",
"fav",
".",
"key",
",",
"'channel_key'",
":",
"fav",
".",
"channel",
".",
"key",
",",
"'message_key'",
":",
"fav",
".",
"message",
".",
"key",
",",
"'message_summary'",
":",
"fav",
".",
"summary",
",",
"'channel_name'",
":",
"fav",
".",
"channel_name",
"}",
"for",
"fav",
"in",
"query_set",
"]"
] | List user's favorites. If "channel_key" given, will return favorites belong to that channel.
.. code-block:: python
# request:
{
'view':'_zops_list_favorites,
'channel_key': key,
}
# response:
{
'status': 'OK',
'code': 200
'favorites':[{'key': key,
'channel_key': key,
'message_key': key,
'message_summary': string, # max 60 char
'channel_name': string,
},]
} | [
"List",
"user",
"s",
"favorites",
".",
"If",
"channel_key",
"given",
"will",
"return",
"favorites",
"belong",
"to",
"that",
"channel",
"."
] | python | train |
ARMmbed/yotta | yotta/lib/git_access.py | https://github.com/ARMmbed/yotta/blob/56bc1e56c602fa20307b23fe27518e9cd6c11af1/yotta/lib/git_access.py#L48-L62 | def availableVersions(self):
''' return a list of GitCloneVersion objects for tags which are valid
semantic version idenfitifiers.
'''
r = []
for t in self.vcs.tags():
logger.debug("available version tag: %s", t)
# ignore empty tags:
if not len(t.strip()):
continue
try:
r.append(GitCloneVersion(t, t, self))
except ValueError:
logger.debug('invalid version tag: %s', t)
return r | [
"def",
"availableVersions",
"(",
"self",
")",
":",
"r",
"=",
"[",
"]",
"for",
"t",
"in",
"self",
".",
"vcs",
".",
"tags",
"(",
")",
":",
"logger",
".",
"debug",
"(",
"\"available version tag: %s\"",
",",
"t",
")",
"# ignore empty tags:",
"if",
"not",
"len",
"(",
"t",
".",
"strip",
"(",
")",
")",
":",
"continue",
"try",
":",
"r",
".",
"append",
"(",
"GitCloneVersion",
"(",
"t",
",",
"t",
",",
"self",
")",
")",
"except",
"ValueError",
":",
"logger",
".",
"debug",
"(",
"'invalid version tag: %s'",
",",
"t",
")",
"return",
"r"
] | return a list of GitCloneVersion objects for tags which are valid
semantic version identifiers. | [
"return",
"a",
"list",
"of",
"GitCloneVersion",
"objects",
"for",
"tags",
"which",
"are",
"valid",
"semantic",
"version",
"idenfitifiers",
"."
] | python | valid |
poppy-project/pypot | pypot/kinematics.py | https://github.com/poppy-project/pypot/blob/d9c6551bbc87d45d9d1f0bc15e35b616d0002afd/pypot/kinematics.py#L51-L73 | def forward_kinematics(self, q):
""" Computes the homogeneous transformation matrix of the end effector of the chain.
:param vector q: vector of the joint angles (theta 1, theta 2, ..., theta n)
"""
q = numpy.array(q).flatten()
if len(q) != len(self.links):
raise ValueError('q must contain as element as the number of links')
tr = self.base.copy()
l = []
for link, theta in zip(self.links, q):
tr = tr * link.get_transformation_matrix(theta)
l.append(tr)
tr = tr * self.tool
l.append(tr)
return tr, numpy.asarray(l) | [
"def",
"forward_kinematics",
"(",
"self",
",",
"q",
")",
":",
"q",
"=",
"numpy",
".",
"array",
"(",
"q",
")",
".",
"flatten",
"(",
")",
"if",
"len",
"(",
"q",
")",
"!=",
"len",
"(",
"self",
".",
"links",
")",
":",
"raise",
"ValueError",
"(",
"'q must contain as element as the number of links'",
")",
"tr",
"=",
"self",
".",
"base",
".",
"copy",
"(",
")",
"l",
"=",
"[",
"]",
"for",
"link",
",",
"theta",
"in",
"zip",
"(",
"self",
".",
"links",
",",
"q",
")",
":",
"tr",
"=",
"tr",
"*",
"link",
".",
"get_transformation_matrix",
"(",
"theta",
")",
"l",
".",
"append",
"(",
"tr",
")",
"tr",
"=",
"tr",
"*",
"self",
".",
"tool",
"l",
".",
"append",
"(",
"tr",
")",
"return",
"tr",
",",
"numpy",
".",
"asarray",
"(",
"l",
")"
] | Computes the homogeneous transformation matrix of the end effector of the chain.
:param vector q: vector of the joint angles (theta 1, theta 2, ..., theta n) | [
"Computes",
"the",
"homogeneous",
"transformation",
"matrix",
"of",
"the",
"end",
"effector",
"of",
"the",
"chain",
"."
] | python | train |
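The chain walk above is a running product of homogeneous transforms; a standalone two-link planar example using plain rotate-then-translate matrices (link lengths and joint angles are illustrative):

```python
import numpy as np

# Standalone 2-link planar forward kinematics: each link contributes
# a rotation by its joint angle followed by a translation along x.
def link_T(theta, length):
    c, s = np.cos(theta), np.sin(theta)
    return np.array([[c, -s, length * c],
                     [s,  c, length * s],
                     [0,  0, 1.0]])

def forward(q, lengths):
    T = np.eye(3)  # base frame
    for theta, L in zip(q, lengths):
        T = T @ link_T(theta, L)  # accumulate, like tr * link matrix above
    return T

T = forward([np.pi / 2, -np.pi / 2], [1.0, 1.0])
print(T[:2, 2])  # end-effector position, ~[1. 1.]
```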
csaez/wishlib | wishlib/si/utils.py | https://github.com/csaez/wishlib/blob/c212fa7875006a332a4cefbf69885ced9647bc2f/wishlib/si/utils.py#L38-L47 | def cmd_wrapper(cmd_name, **kwds):
"""Wrap and execute a softimage command accepting named arguments"""
cmd = si.Commands(cmd_name)
if not cmd:
raise Exception(cmd_name + " doesnt found!")
for arg in cmd.Arguments:
value = kwds.get(arg.Name)
if value:
arg.Value = value
return cmd.Execute() | [
"def",
"cmd_wrapper",
"(",
"cmd_name",
",",
"*",
"*",
"kwds",
")",
":",
"cmd",
"=",
"si",
".",
"Commands",
"(",
"cmd_name",
")",
"if",
"not",
"cmd",
":",
"raise",
"Exception",
"(",
"cmd_name",
"+",
"\" doesnt found!\"",
")",
"for",
"arg",
"in",
"cmd",
".",
"Arguments",
":",
"value",
"=",
"kwds",
".",
"get",
"(",
"arg",
".",
"Name",
")",
"if",
"value",
":",
"arg",
".",
"Value",
"=",
"value",
"return",
"cmd",
".",
"Execute",
"(",
")"
] | Wrap and execute a softimage command accepting named arguments | [
"Wrap",
"and",
"execute",
"a",
"softimage",
"command",
"accepting",
"named",
"arguments"
] | python | train |
watson-developer-cloud/python-sdk | ibm_watson/compare_comply_v1.py | https://github.com/watson-developer-cloud/python-sdk/blob/4c2c9df4466fcde88975da9ecd834e6ba95eb353/ibm_watson/compare_comply_v1.py#L3174-L3187 | def _to_dict(self):
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'feedback_id') and self.feedback_id is not None:
_dict['feedback_id'] = self.feedback_id
if hasattr(self, 'user_id') and self.user_id is not None:
_dict['user_id'] = self.user_id
if hasattr(self, 'comment') and self.comment is not None:
_dict['comment'] = self.comment
if hasattr(self, 'created') and self.created is not None:
_dict['created'] = datetime_to_string(self.created)
if hasattr(self, 'feedback_data') and self.feedback_data is not None:
_dict['feedback_data'] = self.feedback_data._to_dict()
return _dict | [
"def",
"_to_dict",
"(",
"self",
")",
":",
"_dict",
"=",
"{",
"}",
"if",
"hasattr",
"(",
"self",
",",
"'feedback_id'",
")",
"and",
"self",
".",
"feedback_id",
"is",
"not",
"None",
":",
"_dict",
"[",
"'feedback_id'",
"]",
"=",
"self",
".",
"feedback_id",
"if",
"hasattr",
"(",
"self",
",",
"'user_id'",
")",
"and",
"self",
".",
"user_id",
"is",
"not",
"None",
":",
"_dict",
"[",
"'user_id'",
"]",
"=",
"self",
".",
"user_id",
"if",
"hasattr",
"(",
"self",
",",
"'comment'",
")",
"and",
"self",
".",
"comment",
"is",
"not",
"None",
":",
"_dict",
"[",
"'comment'",
"]",
"=",
"self",
".",
"comment",
"if",
"hasattr",
"(",
"self",
",",
"'created'",
")",
"and",
"self",
".",
"created",
"is",
"not",
"None",
":",
"_dict",
"[",
"'created'",
"]",
"=",
"datetime_to_string",
"(",
"self",
".",
"created",
")",
"if",
"hasattr",
"(",
"self",
",",
"'feedback_data'",
")",
"and",
"self",
".",
"feedback_data",
"is",
"not",
"None",
":",
"_dict",
"[",
"'feedback_data'",
"]",
"=",
"self",
".",
"feedback_data",
".",
"_to_dict",
"(",
")",
"return",
"_dict"
] | Return a json dictionary representing this model. | [
"Return",
"a",
"json",
"dictionary",
"representing",
"this",
"model",
"."
] | python | train |
codeinthehole/purl | purl/url.py | https://github.com/codeinthehole/purl/blob/e70ed132f1fdc17d00c78199cedb1e3adcb2bc55/purl/url.py#L35-L44 | def to_utf8(string):
"""
Encode a string as a UTF8 bytestring. This function could be passed a
bytestring or unicode string so must distinguish between the two.
"""
if isinstance(string, six.text_type):
return string.encode('utf8')
if isinstance(string, six.binary_type):
return string
return str(string) | [
"def",
"to_utf8",
"(",
"string",
")",
":",
"if",
"isinstance",
"(",
"string",
",",
"six",
".",
"text_type",
")",
":",
"return",
"string",
".",
"encode",
"(",
"'utf8'",
")",
"if",
"isinstance",
"(",
"string",
",",
"six",
".",
"binary_type",
")",
":",
"return",
"string",
"return",
"str",
"(",
"string",
")"
] | Encode a string as a UTF8 bytestring. This function could be passed a
bytestring or unicode string so must distinguish between the two. | [
"Encode",
"a",
"string",
"as",
"a",
"UTF8",
"bytestring",
".",
"This",
"function",
"could",
"be",
"passed",
"a",
"bytestring",
"or",
"unicode",
"string",
"so",
"must",
"distinguish",
"between",
"the",
"two",
"."
] | python | train |
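A quick sketch of the three branches above on Python 3 (note the original returns str(string) un-encoded in the fallback case, so non-string inputs come back as text, not bytes):

    def to_utf8_sketch(value):
        # Python-3-only rendering of the helper's branches
        if isinstance(value, str):
            return value.encode('utf8')
        if isinstance(value, bytes):
            return value
        return str(value)  # fallback mirrors the original: stringified, not encoded

    print(to_utf8_sketch(u'caf\xe9'))  # b'caf\xc3\xa9'
    print(to_utf8_sketch(b'raw'))      # b'raw' passes through unchanged
    print(to_utf8_sketch(42))          # '42'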
dereneaton/ipyrad | ipyrad/analysis/tree.py | https://github.com/dereneaton/ipyrad/blob/5eeb8a178160f45faf71bf47cec4abe998a575d1/ipyrad/analysis/tree.py#L92-L127 | def draw(
self,
show_tip_labels=True,
show_node_support=False,
use_edge_lengths=False,
orient="right",
print_args=False,
*args,
**kwargs):
"""
plot the tree using toyplot.graph.
Parameters:
-----------
show_tip_labels: bool
Show tip names from tree.
use_edge_lengths: bool
Use edge lengths from newick tree.
show_node_support: bool
Show support values at nodes using a set of default
options.
...
"""
## re-decompose tree for new orient and edges args
self._decompose_tree(orient=orient, use_edge_lengths=use_edge_lengths)
## update kwargs with entered args and all other kwargs
dwargs = {}
dwargs["show_tip_labels"] = show_tip_labels
dwargs["show_node_support"] = show_node_support
dwargs.update(kwargs)
## pass to panel plotter
canvas, axes, panel = tree_panel_plot(self, print_args, **dwargs)
return canvas, axes, panel | [
"def",
"draw",
"(",
"self",
",",
"show_tip_labels",
"=",
"True",
",",
"show_node_support",
"=",
"False",
",",
"use_edge_lengths",
"=",
"False",
",",
"orient",
"=",
"\"right\"",
",",
"print_args",
"=",
"False",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"## re-decompose tree for new orient and edges args",
"self",
".",
"_decompose_tree",
"(",
"orient",
"=",
"orient",
",",
"use_edge_lengths",
"=",
"use_edge_lengths",
")",
"## update kwargs with entered args and all other kwargs",
"dwargs",
"=",
"{",
"}",
"dwargs",
"[",
"\"show_tip_labels\"",
"]",
"=",
"show_tip_labels",
"dwargs",
"[",
"\"show_node_support\"",
"]",
"=",
"show_node_support",
"dwargs",
".",
"update",
"(",
"kwargs",
")",
"## pass to panel plotter",
"canvas",
",",
"axes",
",",
"panel",
"=",
"tree_panel_plot",
"(",
"self",
",",
"print_args",
",",
"*",
"*",
"dwargs",
")",
"return",
"canvas",
",",
"axes",
",",
"panel"
] | plot the tree using toyplot.graph.
Parameters:
-----------
show_tip_labels: bool
Show tip names from tree.
use_edge_lengths: bool
Use edge lengths from newick tree.
show_node_support: bool
Show support values at nodes using a set of default
options.
... | [
"plot",
"the",
"tree",
"using",
"toyplot",
".",
"graph",
"."
] | python | valid |
Microsoft/ApplicationInsights-Python | applicationinsights/channel/contracts/ExceptionData.py | https://github.com/Microsoft/ApplicationInsights-Python/blob/8452ab7126f9bb6964637d4aa1258c2af17563d6/applicationinsights/channel/contracts/ExceptionData.py#L114-L123 | def properties(self):
"""The properties property.
Returns:
(hash). the property value. (defaults to: {})
"""
if 'properties' in self._values:
return self._values['properties']
self._values['properties'] = copy.deepcopy(self._defaults['properties'])
return self._values['properties'] | [
"def",
"properties",
"(",
"self",
")",
":",
"if",
"'properties'",
"in",
"self",
".",
"_values",
":",
"return",
"self",
".",
"_values",
"[",
"'properties'",
"]",
"self",
".",
"_values",
"[",
"'properties'",
"]",
"=",
"copy",
".",
"deepcopy",
"(",
"self",
".",
"_defaults",
"[",
"'properties'",
"]",
")",
"return",
"self",
".",
"_values",
"[",
"'properties'",
"]"
] | The properties property.
Returns:
(hash). the property value. (defaults to: {}) | [
"The",
"properties",
"property",
".",
"Returns",
":",
"(",
"hash",
")",
".",
"the",
"property",
"value",
".",
"(",
"defaults",
"to",
":",
"{}",
")"
] | python | train |
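The copy.deepcopy of the class-level default is the load-bearing detail above: without it, every instance would share (and mutate) one dict. A standalone sketch of the same lazy-default pattern -- the Telemetry class and its fields are hypothetical stand-ins:

    import copy

    class Telemetry:
        _defaults = {'properties': {}}

        def __init__(self):
            self._values = {}

        @property
        def properties(self):
            if 'properties' not in self._values:
                # deepcopy so instances never alias the shared class default
                self._values['properties'] = copy.deepcopy(self._defaults['properties'])
            return self._values['properties']

    a, b = Telemetry(), Telemetry()
    a.properties['key'] = 'value'
    print(b.properties)  # {} - b is unaffected thanks to the deepcopy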
Vital-Fernandez/dazer | bin/lib/Astro_Libraries/cosmics.py | https://github.com/Vital-Fernandez/dazer/blob/3c9ae8ae6d40ea33f22cc20dc11365d6d6e65244/bin/lib/Astro_Libraries/cosmics.py#L353-L364 | def getsatstars(self, verbose = None):
"""
Returns the mask of saturated stars after finding them if not yet done.
Intended mainly for external use.
"""
if verbose == None:
verbose = self.verbose
if not self.satlevel > 0:
raise RuntimeError, "Cannot determine satstars : you gave satlevel <= 0 !"
if self.satstars == None:
self.findsatstars(verbose = verbose)
return self.satstars | [
"def",
"getsatstars",
"(",
"self",
",",
"verbose",
"=",
"None",
")",
":",
"if",
"verbose",
"==",
"None",
":",
"verbose",
"=",
"self",
".",
"verbose",
"if",
"not",
"self",
".",
"satlevel",
">",
"0",
":",
"raise",
"RuntimeError",
",",
"\"Cannot determine satstars : you gave satlevel <= 0 !\"",
"if",
"self",
".",
"satstars",
"==",
"None",
":",
"self",
".",
"findsatstars",
"(",
"verbose",
"=",
"verbose",
")",
"return",
"self",
".",
"satstars"
] | Returns the mask of saturated stars after finding them if not yet done.
Intended mainly for external use. | [
"Returns",
"the",
"mask",
"of",
"saturated",
"stars",
"after",
"finding",
"them",
"if",
"not",
"yet",
"done",
".",
"Intended",
"mainly",
"for",
"external",
"use",
"."
] | python | train |
Scoppio/RagnarokEngine3 | Tutorials/Platforming Block - PyGame Release/Game/Code/Ragnarok.py | https://github.com/Scoppio/RagnarokEngine3/blob/4395d419ccd64fe9327c41f200b72ee0176ad896/Tutorials/Platforming Block - PyGame Release/Game/Code/Ragnarok.py#L2077-L2090 | def query_all_collisions(collision_object):
"""
Check for and return the full list of objects colliding with collision_object
"""
global collidable_objects
colliding = []
for obj in collidable_objects:
#Make sure we don't check ourself against ourself.
if obj is not collision_object:
if collision_object.is_colliding(obj):
#A collision has been detected. Add the object that we are colliding with.
colliding.append(obj)
return colliding | [
"def",
"query_all_collisions",
"(",
"collision_object",
")",
":",
"global",
"collidable_objects",
"colliding",
"=",
"[",
"]",
"for",
"obj",
"in",
"collidable_objects",
":",
"#Make sure we don't check ourself against ourself.",
"if",
"obj",
"is",
"not",
"collision_object",
":",
"if",
"collision_object",
".",
"is_colliding",
"(",
"obj",
")",
":",
"#A collision has been detected. Add the object that we are colliding with.",
"colliding",
".",
"append",
"(",
"obj",
")",
"return",
"colliding"
] | Check for and return the full list of objects colliding with collision_object | [
"Check",
"for",
"and",
"return",
"the",
"full",
"list",
"of",
"objects",
"colliding",
"with",
"collision_object"
] | python | train |
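A self-contained sketch of the same all-pairs query against a module-level registry, with axis-aligned boxes standing in for the engine's collidables (the real is_colliding and registration code are not in the record, so both are assumed here):

    collidable_objects = []

    class Box:
        def __init__(self, x, y, w, h):
            self.x, self.y, self.w, self.h = x, y, w, h
            collidable_objects.append(self)  # self-register, as the engine presumably does

        def is_colliding(self, other):
            # simple AABB overlap test
            return (self.x < other.x + other.w and other.x < self.x + self.w and
                    self.y < other.y + other.h and other.y < self.y + self.h)

    def query_all_collisions(obj):
        return [o for o in collidable_objects if o is not obj and obj.is_colliding(o)]

    a, b, c = Box(0, 0, 2, 2), Box(1, 1, 2, 2), Box(10, 10, 1, 1)
    print(query_all_collisions(a))  # contains b only; c is far away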
sorgerlab/indra | indra/tools/reading/readers.py | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/tools/reading/readers.py#L773-L780 | def get_reader_class(reader_name):
"""Get a particular reader class by name."""
for reader_class in get_reader_classes():
if reader_class.name.lower() == reader_name.lower():
return reader_class
else:
logger.error("No such reader: %s" % reader_name)
return None | [
"def",
"get_reader_class",
"(",
"reader_name",
")",
":",
"for",
"reader_class",
"in",
"get_reader_classes",
"(",
")",
":",
"if",
"reader_class",
".",
"name",
".",
"lower",
"(",
")",
"==",
"reader_name",
".",
"lower",
"(",
")",
":",
"return",
"reader_class",
"else",
":",
"logger",
".",
"error",
"(",
"\"No such reader: %s\"",
"%",
"reader_name",
")",
"return",
"None"
] | Get a particular reader class by name. | [
"Get",
"a",
"particular",
"reader",
"class",
"by",
"name",
"."
] | python | train |
OpenHydrology/floodestimation | floodestimation/analysis.py | https://github.com/OpenHydrology/floodestimation/blob/782da7c5abd1348923129efe89fb70003ebb088c/floodestimation/analysis.py#L602-L614 | def _vec_lnqmed_residuals(self, catchments):
"""
Return ln(QMED) model errors for a list of catchments
:param catchments: List of gauged catchments
:type catchments: list of :class:`Catchment`
:return: Model errors
:rtype: list of float
"""
result = np.empty(len(catchments))
for index, donor in enumerate(catchments):
result[index] = self._lnqmed_residual(donor)
return result | [
"def",
"_vec_lnqmed_residuals",
"(",
"self",
",",
"catchments",
")",
":",
"result",
"=",
"np",
".",
"empty",
"(",
"len",
"(",
"catchments",
")",
")",
"for",
"index",
",",
"donor",
"in",
"enumerate",
"(",
"catchments",
")",
":",
"result",
"[",
"index",
"]",
"=",
"self",
".",
"_lnqmed_residual",
"(",
"donor",
")",
"return",
"result"
] | Return ln(QMED) model errors for a list of catchments
:param catchments: List of gauged catchments
:type catchments: list of :class:`Catchment`
:return: Model errors
:rtype: list of float | [
"Return",
"ln",
"(",
"QMED",
")",
"model",
"errors",
"for",
"a",
"list",
"of",
"catchments"
] | python | train |
jorgeecardona/dynect | dynect/__init__.py | https://github.com/jorgeecardona/dynect/blob/d2cd85bc510f00108a3a5bfe515f45daae15a482/dynect/__init__.py#L27-L62 | def token(self):
" Get token when needed."
if hasattr(self, '_token'):
return getattr(self, '_token')
# Json formatted auth.
data = json.dumps({'customer_name': self.customer,
'user_name': self.username,
'password': self.password})
# Start session.
response = requests.post(
'https://api2.dynect.net/REST/Session/', data=data,
headers={'Content-Type': 'application/json'})
# convert to data.
content = json.loads(response.content)
if response.status_code != 200:
# Check for errors.
if self.check_error(content, 'failure', 'INVALID_DATA'):
raise self.CredentialsError(
self.response_message(content, 'ERROR'))
raise self.Failure(self.response_message(content, 'ERROR'),
'Unhandled failure')
# Extract token from content
if 'data' in content and 'token' in content['data']:
token = content['data']['token']
else:
raise self.AuthenticationError(response)
setattr(self, '_token', token)
return token | [
"def",
"token",
"(",
"self",
")",
":",
"if",
"hasattr",
"(",
"self",
",",
"'_token'",
")",
":",
"return",
"getattr",
"(",
"self",
",",
"'_token'",
")",
"# Json formatted auth.",
"data",
"=",
"json",
".",
"dumps",
"(",
"{",
"'customer_name'",
":",
"self",
".",
"customer",
",",
"'user_name'",
":",
"self",
".",
"username",
",",
"'password'",
":",
"self",
".",
"password",
"}",
")",
"# Start session.",
"response",
"=",
"requests",
".",
"post",
"(",
"'https://api2.dynect.net/REST/Session/'",
",",
"data",
"=",
"data",
",",
"headers",
"=",
"{",
"'Content-Type'",
":",
"'application/json'",
"}",
")",
"# convert to data.",
"content",
"=",
"json",
".",
"loads",
"(",
"response",
".",
"content",
")",
"if",
"response",
".",
"status_code",
"!=",
"200",
":",
"# Check for errors.",
"if",
"self",
".",
"check_error",
"(",
"content",
",",
"'failure'",
",",
"'INVALID_DATA'",
")",
":",
"raise",
"self",
".",
"CredentialsError",
"(",
"self",
".",
"response_message",
"(",
"content",
",",
"'ERROR'",
")",
")",
"raise",
"self",
".",
"Failure",
"(",
"self",
".",
"response_message",
"(",
"content",
",",
"'ERROR'",
")",
",",
"'Unhandled failure'",
")",
"# Extract token from content",
"if",
"'data'",
"in",
"content",
"and",
"'token'",
"in",
"content",
"[",
"'data'",
"]",
":",
"token",
"=",
"content",
"[",
"'data'",
"]",
"[",
"'token'",
"]",
"else",
":",
"raise",
"self",
".",
"AuthenticationError",
"(",
"response",
")",
"setattr",
"(",
"self",
",",
"'_token'",
",",
"token",
")",
"return",
"token"
] | Get token when needed. | [
"Get",
"token",
"when",
"needed",
"."
] | python | train |
pantsbuild/pants | src/python/pants/build_graph/build_configuration.py | https://github.com/pantsbuild/pants/blob/b72e650da0df685824ffdcc71988b8c282d0962d/src/python/pants/build_graph/build_configuration.py#L61-L80 | def register_aliases(self, aliases):
"""Registers the given aliases to be exposed in parsed BUILD files.
:param aliases: The BuildFileAliases to register.
:type aliases: :class:`pants.build_graph.build_file_aliases.BuildFileAliases`
"""
if not isinstance(aliases, BuildFileAliases):
raise TypeError('The aliases must be a BuildFileAliases, given {}'.format(aliases))
for alias, target_type in aliases.target_types.items():
self._register_target_alias(alias, target_type)
for alias, target_macro_factory in aliases.target_macro_factories.items():
self._register_target_macro_factory_alias(alias, target_macro_factory)
for alias, obj in aliases.objects.items():
self._register_exposed_object(alias, obj)
for alias, context_aware_object_factory in aliases.context_aware_object_factories.items():
self._register_exposed_context_aware_object_factory(alias, context_aware_object_factory) | [
"def",
"register_aliases",
"(",
"self",
",",
"aliases",
")",
":",
"if",
"not",
"isinstance",
"(",
"aliases",
",",
"BuildFileAliases",
")",
":",
"raise",
"TypeError",
"(",
"'The aliases must be a BuildFileAliases, given {}'",
".",
"format",
"(",
"aliases",
")",
")",
"for",
"alias",
",",
"target_type",
"in",
"aliases",
".",
"target_types",
".",
"items",
"(",
")",
":",
"self",
".",
"_register_target_alias",
"(",
"alias",
",",
"target_type",
")",
"for",
"alias",
",",
"target_macro_factory",
"in",
"aliases",
".",
"target_macro_factories",
".",
"items",
"(",
")",
":",
"self",
".",
"_register_target_macro_factory_alias",
"(",
"alias",
",",
"target_macro_factory",
")",
"for",
"alias",
",",
"obj",
"in",
"aliases",
".",
"objects",
".",
"items",
"(",
")",
":",
"self",
".",
"_register_exposed_object",
"(",
"alias",
",",
"obj",
")",
"for",
"alias",
",",
"context_aware_object_factory",
"in",
"aliases",
".",
"context_aware_object_factories",
".",
"items",
"(",
")",
":",
"self",
".",
"_register_exposed_context_aware_object_factory",
"(",
"alias",
",",
"context_aware_object_factory",
")"
] | Registers the given aliases to be exposed in parsed BUILD files.
:param aliases: The BuildFileAliases to register.
:type aliases: :class:`pants.build_graph.build_file_aliases.BuildFileAliases` | [
"Registers",
"the",
"given",
"aliases",
"to",
"be",
"exposed",
"in",
"parsed",
"BUILD",
"files",
"."
] | python | train |
adafruit/Adafruit_Blinka | src/adafruit_blinka/microcontroller/bcm283x/pulseio/PulseIn.py | https://github.com/adafruit/Adafruit_Blinka/blob/b4a2b3bf7d8cc88477027b827bd0a8e9b19588ff/src/adafruit_blinka/microcontroller/bcm283x/pulseio/PulseIn.py#L99-L105 | def resume(self, trigger_duration=0):
"""Resumes pulse capture after an optional trigger pulse."""
if trigger_duration != 0:
self._mq.send("t%d" % trigger_duration, True, type=1)
else:
self._mq.send("r", True, type=1)
self._paused = False | [
"def",
"resume",
"(",
"self",
",",
"trigger_duration",
"=",
"0",
")",
":",
"if",
"trigger_duration",
"!=",
"0",
":",
"self",
".",
"_mq",
".",
"send",
"(",
"\"t%d\"",
"%",
"trigger_duration",
",",
"True",
",",
"type",
"=",
"1",
")",
"else",
":",
"self",
".",
"_mq",
".",
"send",
"(",
"\"r\"",
",",
"True",
",",
"type",
"=",
"1",
")",
"self",
".",
"_paused",
"=",
"False"
] | Resumes pulse capture after an optional trigger pulse. | [
"Resumes",
"pulse",
"capture",
"after",
"an",
"optional",
"trigger",
"pulse",
"."
] | python | train |
vanheeringen-lab/gimmemotifs | gimmemotifs/moap.py | https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/moap.py#L87-L94 | def register_predictor(cls, name):
"""Register method to keep list of predictors."""
def decorator(subclass):
"""Register as decorator function."""
cls._predictors[name.lower()] = subclass
subclass.name = name.lower()
return subclass
return decorator | [
"def",
"register_predictor",
"(",
"cls",
",",
"name",
")",
":",
"def",
"decorator",
"(",
"subclass",
")",
":",
"\"\"\"Register as decorator function.\"\"\"",
"cls",
".",
"_predictors",
"[",
"name",
".",
"lower",
"(",
")",
"]",
"=",
"subclass",
"subclass",
".",
"name",
"=",
"name",
".",
"lower",
"(",
")",
"return",
"subclass",
"return",
"decorator"
] | Register method to keep list of predictors. | [
"Register",
"method",
"to",
"keep",
"list",
"of",
"predictors",
"."
] | python | train |
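The classmethod above is a plain class-registry decorator; a runnable sketch of the pattern with a hypothetical base class and predictor:

    class Moap:
        _predictors = {}

        @classmethod
        def register_predictor(cls, name):
            def decorator(subclass):
                cls._predictors[name.lower()] = subclass
                subclass.name = name.lower()
                return subclass
            return decorator

        @classmethod
        def create(cls, name):
            # case-insensitive lookup into the registry
            return cls._predictors[name.lower()]()

    @Moap.register_predictor('Lasso')
    class LassoMoap:
        pass

    print(Moap.create('LASSO').name)  # 'lasso'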
ewels/MultiQC | multiqc/modules/fastp/fastp.py | https://github.com/ewels/MultiQC/blob/2037d6322b2554146a74efbf869156ad20d4c4ec/multiqc/modules/fastp/fastp.py#L368-L383 | def fastp_read_gc_plot(self):
""" Make the read GC plot for Fastp """
data_labels, pdata = self.filter_pconfig_pdata_subplots(self.fastp_gc_content_data, 'Base Content Percent')
pconfig = {
'id': 'fastp-seq-content-gc-plot',
'title': 'Fastp: Read GC Content',
'xlab': 'Read Position',
'ylab': 'R1 Before filtering: Base Content Percent',
'ymax': 100,
'ymin': 0,
'xDecimals': False,
'yLabelFormat': '{value}%',
'tt_label': '{point.x}: {point.y:.2f}%',
'data_labels': data_labels
}
return linegraph.plot(pdata, pconfig) | [
"def",
"fastp_read_gc_plot",
"(",
"self",
")",
":",
"data_labels",
",",
"pdata",
"=",
"self",
".",
"filter_pconfig_pdata_subplots",
"(",
"self",
".",
"fastp_gc_content_data",
",",
"'Base Content Percent'",
")",
"pconfig",
"=",
"{",
"'id'",
":",
"'fastp-seq-content-gc-plot'",
",",
"'title'",
":",
"'Fastp: Read GC Content'",
",",
"'xlab'",
":",
"'Read Position'",
",",
"'ylab'",
":",
"'R1 Before filtering: Base Content Percent'",
",",
"'ymax'",
":",
"100",
",",
"'ymin'",
":",
"0",
",",
"'xDecimals'",
":",
"False",
",",
"'yLabelFormat'",
":",
"'{value}%'",
",",
"'tt_label'",
":",
"'{point.x}: {point.y:.2f}%'",
",",
"'data_labels'",
":",
"data_labels",
"}",
"return",
"linegraph",
".",
"plot",
"(",
"pdata",
",",
"pconfig",
")"
] | Make the read GC plot for Fastp | [
"Make",
"the",
"read",
"GC",
"plot",
"for",
"Fastp"
] | python | train |
nikcub/floyd | floyd/util/object.py | https://github.com/nikcub/floyd/blob/5772d0047efb11c9ce5f7d234a9da4576ce24edc/floyd/util/object.py#L39-L47 | def getmethattr(obj, meth):
"""
Returns either the variable value or method invocation
"""
if hasmethod(obj, meth):
return getattr(obj, meth)()
elif hasvar(obj, meth):
return getattr(obj, meth)
return None | [
"def",
"getmethattr",
"(",
"obj",
",",
"meth",
")",
":",
"if",
"hasmethod",
"(",
"obj",
",",
"meth",
")",
":",
"return",
"getattr",
"(",
"obj",
",",
"meth",
")",
"(",
")",
"elif",
"hasvar",
"(",
"obj",
",",
"meth",
")",
":",
"return",
"getattr",
"(",
"obj",
",",
"meth",
")",
"return",
"None"
] | Returns either the variable value or method invocation | [
"Returns",
"either",
"the",
"variable",
"value",
"or",
"method",
"invocation"
] | python | train |
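hasmethod and hasvar are helpers not shown in this record; assuming they test for a callable attribute and a plain attribute respectively, the behaviour can be exercised like this:

    def hasmethod(obj, name):
        return callable(getattr(obj, name, None))      # assumed helper

    def hasvar(obj, name):
        return hasattr(obj, name) and not callable(getattr(obj, name))  # assumed helper

    def getmethattr(obj, meth):
        if hasmethod(obj, meth):
            return getattr(obj, meth)()  # invoke methods
        elif hasvar(obj, meth):
            return getattr(obj, meth)    # return plain values as-is
        return None

    class Page:
        title = 'About'
        def slug(self):
            return 'about'

    p = Page()
    print(getmethattr(p, 'slug'), getmethattr(p, 'title'), getmethattr(p, 'missing'))
    # about About None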
ellmetha/django-machina | machina/apps/forum_moderation/views.py | https://github.com/ellmetha/django-machina/blob/89ac083c1eaf1cfdeae6686ee094cc86362e8c69/machina/apps/forum_moderation/views.py#L42-L49 | def lock(self, request, *args, **kwargs):
""" Locks the considered topic and retirects the user to the success URL. """
self.object = self.get_object()
success_url = self.get_success_url()
self.object.status = Topic.TOPIC_LOCKED
self.object.save()
messages.success(self.request, self.success_message)
return HttpResponseRedirect(success_url) | [
"def",
"lock",
"(",
"self",
",",
"request",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"self",
".",
"object",
"=",
"self",
".",
"get_object",
"(",
")",
"success_url",
"=",
"self",
".",
"get_success_url",
"(",
")",
"self",
".",
"object",
".",
"status",
"=",
"Topic",
".",
"TOPIC_LOCKED",
"self",
".",
"object",
".",
"save",
"(",
")",
"messages",
".",
"success",
"(",
"self",
".",
"request",
",",
"self",
".",
"success_message",
")",
"return",
"HttpResponseRedirect",
"(",
"success_url",
")"
] | Locks the considered topic and redirects the user to the success URL. | [
"Locks",
"the",
"considered",
"topic",
"and",
"retirects",
"the",
"user",
"to",
"the",
"success",
"URL",
"."
] | python | train |
dw/mitogen | mitogen/__init__.py | https://github.com/dw/mitogen/blob/a7fdb55e1300a7e0a5e404b09eb730cf9a525da7/mitogen/__init__.py#L67-L120 | def main(log_level='INFO', profiling=_default_profiling):
"""
Convenience decorator primarily useful for writing discardable test
scripts.
In the master process, when `func` is defined in the :mod:`__main__`
module, arranges for `func(router)` to be invoked immediately, with
:py:class:`mitogen.master.Router` construction and destruction handled just
as in :py:func:`mitogen.utils.run_with_router`. In slaves, this function
does nothing.
:param str log_level:
Logging package level to configure via
:py:func:`mitogen.utils.log_to_file`.
:param bool profiling:
If :py:data:`True`, equivalent to setting
:py:attr:`mitogen.master.Router.profiling` prior to router
construction. This causes ``/tmp`` files to be created everywhere at
the end of a successful run with :py:mod:`cProfile` output for every
thread.
Example:
::
import mitogen
import requests
def get_url(url):
return requests.get(url).text
@mitogen.main()
def main(router):
z = router.ssh(hostname='k3')
print(z.call(get_url, 'https://example.org/'))
"""
def wrapper(func):
if func.__module__ != '__main__':
return func
import mitogen.parent
import mitogen.utils
if profiling:
mitogen.core.enable_profiling()
mitogen.master.Router.profiling = profiling
utils.log_to_file(level=log_level)
return mitogen.core._profile_hook(
'app.main',
utils.run_with_router,
func,
)
return wrapper | [
"def",
"main",
"(",
"log_level",
"=",
"'INFO'",
",",
"profiling",
"=",
"_default_profiling",
")",
":",
"def",
"wrapper",
"(",
"func",
")",
":",
"if",
"func",
".",
"__module__",
"!=",
"'__main__'",
":",
"return",
"func",
"import",
"mitogen",
".",
"parent",
"import",
"mitogen",
".",
"utils",
"if",
"profiling",
":",
"mitogen",
".",
"core",
".",
"enable_profiling",
"(",
")",
"mitogen",
".",
"master",
".",
"Router",
".",
"profiling",
"=",
"profiling",
"utils",
".",
"log_to_file",
"(",
"level",
"=",
"log_level",
")",
"return",
"mitogen",
".",
"core",
".",
"_profile_hook",
"(",
"'app.main'",
",",
"utils",
".",
"run_with_router",
",",
"func",
",",
")",
"return",
"wrapper"
] | Convenience decorator primarily useful for writing discardable test
scripts.
In the master process, when `func` is defined in the :mod:`__main__`
module, arranges for `func(router)` to be invoked immediately, with
:py:class:`mitogen.master.Router` construction and destruction handled just
as in :py:func:`mitogen.utils.run_with_router`. In slaves, this function
does nothing.
:param str log_level:
Logging package level to configure via
:py:func:`mitogen.utils.log_to_file`.
:param bool profiling:
If :py:data:`True`, equivalent to setting
:py:attr:`mitogen.master.Router.profiling` prior to router
construction. This causes ``/tmp`` files to be created everywhere at
the end of a successful run with :py:mod:`cProfile` output for every
thread.
Example:
::
import mitogen
import requests
def get_url(url):
return requests.get(url).text
@mitogen.main()
def main(router):
z = router.ssh(hostname='k3')
print(z.call(get_url, 'https://example.org/'))
"Convenience",
"decorator",
"primarily",
"useful",
"for",
"writing",
"discardable",
"test",
"scripts",
"."
] | python | train |
brainiak/brainiak | brainiak/searchlight/searchlight.py | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/searchlight/searchlight.py#L523-L557 | def _singlenode_searchlight(l, msk, mysl_rad, bcast_var, extra_params):
"""Run searchlight function on block data in parallel.
`extra_params` contains:
- Searchlight function.
- `Shape` mask.
- Minimum active voxels proportion required to run the searchlight
function.
"""
voxel_fn = extra_params[0]
shape_mask = extra_params[1]
min_active_voxels_proportion = extra_params[2]
outmat = np.empty(msk.shape, dtype=np.object)[mysl_rad:-mysl_rad,
mysl_rad:-mysl_rad,
mysl_rad:-mysl_rad]
for i in range(0, outmat.shape[0]):
for j in range(0, outmat.shape[1]):
for k in range(0, outmat.shape[2]):
if msk[i+mysl_rad, j+mysl_rad, k+mysl_rad]:
searchlight_slice = np.s_[
i:i+2*mysl_rad+1,
j:j+2*mysl_rad+1,
k:k+2*mysl_rad+1]
voxel_fn_mask = msk[searchlight_slice] * shape_mask
if (min_active_voxels_proportion == 0
or np.count_nonzero(voxel_fn_mask) / voxel_fn_mask.size
> min_active_voxels_proportion):
outmat[i, j, k] = voxel_fn(
[ll[searchlight_slice] for ll in l],
msk[searchlight_slice] * shape_mask,
mysl_rad,
bcast_var)
return outmat | [
"def",
"_singlenode_searchlight",
"(",
"l",
",",
"msk",
",",
"mysl_rad",
",",
"bcast_var",
",",
"extra_params",
")",
":",
"voxel_fn",
"=",
"extra_params",
"[",
"0",
"]",
"shape_mask",
"=",
"extra_params",
"[",
"1",
"]",
"min_active_voxels_proportion",
"=",
"extra_params",
"[",
"2",
"]",
"outmat",
"=",
"np",
".",
"empty",
"(",
"msk",
".",
"shape",
",",
"dtype",
"=",
"np",
".",
"object",
")",
"[",
"mysl_rad",
":",
"-",
"mysl_rad",
",",
"mysl_rad",
":",
"-",
"mysl_rad",
",",
"mysl_rad",
":",
"-",
"mysl_rad",
"]",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"outmat",
".",
"shape",
"[",
"0",
"]",
")",
":",
"for",
"j",
"in",
"range",
"(",
"0",
",",
"outmat",
".",
"shape",
"[",
"1",
"]",
")",
":",
"for",
"k",
"in",
"range",
"(",
"0",
",",
"outmat",
".",
"shape",
"[",
"2",
"]",
")",
":",
"if",
"msk",
"[",
"i",
"+",
"mysl_rad",
",",
"j",
"+",
"mysl_rad",
",",
"k",
"+",
"mysl_rad",
"]",
":",
"searchlight_slice",
"=",
"np",
".",
"s_",
"[",
"i",
":",
"i",
"+",
"2",
"*",
"mysl_rad",
"+",
"1",
",",
"j",
":",
"j",
"+",
"2",
"*",
"mysl_rad",
"+",
"1",
",",
"k",
":",
"k",
"+",
"2",
"*",
"mysl_rad",
"+",
"1",
"]",
"voxel_fn_mask",
"=",
"msk",
"[",
"searchlight_slice",
"]",
"*",
"shape_mask",
"if",
"(",
"min_active_voxels_proportion",
"==",
"0",
"or",
"np",
".",
"count_nonzero",
"(",
"voxel_fn_mask",
")",
"/",
"voxel_fn_mask",
".",
"size",
">",
"min_active_voxels_proportion",
")",
":",
"outmat",
"[",
"i",
",",
"j",
",",
"k",
"]",
"=",
"voxel_fn",
"(",
"[",
"ll",
"[",
"searchlight_slice",
"]",
"for",
"ll",
"in",
"l",
"]",
",",
"msk",
"[",
"searchlight_slice",
"]",
"*",
"shape_mask",
",",
"mysl_rad",
",",
"bcast_var",
")",
"return",
"outmat"
] | Run searchlight function on block data in parallel.
`extra_params` contains:
- Searchlight function.
- `Shape` mask.
- Minimum active voxels proportion required to run the searchlight
function. | [
"Run",
"searchlight",
"function",
"on",
"block",
"data",
"in",
"parallel",
"."
] | python | train |
reillysiemens/layabout | layabout.py | https://github.com/reillysiemens/layabout/blob/a146c47f2558e66bb51cf708d39909b93eaea7f4/layabout.py#L78-L94 | def connect_with_retry(self) -> None:
""" Attempt to connect to the Slack API. Retry on failures. """
if self.is_connected():
log.debug('Already connected to the Slack API')
return
for retry in range(1, self.retries + 1):
self.connect()
if self.is_connected():
log.debug('Connected to the Slack API')
return
else:
interval = self.backoff(retry)
log.debug("Waiting %.3fs before retrying", interval)
time.sleep(interval)
raise FailedConnection('Failed to connect to the Slack API') | [
"def",
"connect_with_retry",
"(",
"self",
")",
"->",
"None",
":",
"if",
"self",
".",
"is_connected",
"(",
")",
":",
"log",
".",
"debug",
"(",
"'Already connected to the Slack API'",
")",
"return",
"for",
"retry",
"in",
"range",
"(",
"1",
",",
"self",
".",
"retries",
"+",
"1",
")",
":",
"self",
".",
"connect",
"(",
")",
"if",
"self",
".",
"is_connected",
"(",
")",
":",
"log",
".",
"debug",
"(",
"'Connected to the Slack API'",
")",
"return",
"else",
":",
"interval",
"=",
"self",
".",
"backoff",
"(",
"retry",
")",
"log",
".",
"debug",
"(",
"\"Waiting %.3fs before retrying\"",
",",
"interval",
")",
"time",
".",
"sleep",
"(",
"interval",
")",
"raise",
"FailedConnection",
"(",
"'Failed to connect to the Slack API'",
")"
] | Attempt to connect to the Slack API. Retry on failures. | [
"Attempt",
"to",
"connect",
"to",
"the",
"Slack",
"API",
".",
"Retry",
"on",
"failures",
"."
] | python | train |
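Stripped of the Slack specifics, the loop above is a bounded retry with backoff. A generic, runnable sketch (the backoff schedule and retry count are illustrative, not layabout's defaults):

    import time

    def connect_with_retry(connect, is_connected, retries=3,
                           backoff=lambda n: 0.1 * 2 ** n):
        for attempt in range(1, retries + 1):
            connect()
            if is_connected():
                return
            time.sleep(backoff(attempt))  # wait longer after each failure
        raise ConnectionError('gave up after %d attempts' % retries)

    state = {'tries': 0}
    def connect():
        state['tries'] += 1
    def is_connected():
        return state['tries'] >= 2  # succeed on the second attempt

    connect_with_retry(connect, is_connected)
    print(state['tries'])  # 2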
googlefonts/fontbakery | Lib/fontbakery/profiles/name.py | https://github.com/googlefonts/fontbakery/blob/b355aea2e619a4477769e060d24c32448aa65399/Lib/fontbakery/profiles/name.py#L421-L445 | def com_adobe_fonts_check_family_max_4_fonts_per_family_name(ttFonts):
"""Verify that each group of fonts with the same nameID 1
has a maximum of 4 fonts"""
from collections import Counter
from fontbakery.utils import get_name_entry_strings
failed = False
family_names = list()
for ttFont in ttFonts:
names_list = get_name_entry_strings(ttFont, NameID.FONT_FAMILY_NAME)
# names_list will likely contain multiple entries, e.g. multiple copies
# of the same name in the same language for different platforms, but
# also different names in different languages, we use set() below
# to remove the duplicates and only store the unique family name(s)
# used for a given font
names_set = set(names_list)
family_names.extend(names_set)
counter = Counter(family_names)
for family_name, count in counter.items():
if count > 4:
failed = True
yield FAIL, ("Family '{}' has {} fonts (should be 4 or fewer)."
).format(family_name, count)
if not failed:
yield PASS, ("There were no more than 4 fonts per family name.") | [
"def",
"com_adobe_fonts_check_family_max_4_fonts_per_family_name",
"(",
"ttFonts",
")",
":",
"from",
"collections",
"import",
"Counter",
"from",
"fontbakery",
".",
"utils",
"import",
"get_name_entry_strings",
"failed",
"=",
"False",
"family_names",
"=",
"list",
"(",
")",
"for",
"ttFont",
"in",
"ttFonts",
":",
"names_list",
"=",
"get_name_entry_strings",
"(",
"ttFont",
",",
"NameID",
".",
"FONT_FAMILY_NAME",
")",
"# names_list will likely contain multiple entries, e.g. multiple copies",
"# of the same name in the same language for different platforms, but",
"# also different names in different languages, we use set() below",
"# to remove the duplicates and only store the unique family name(s)",
"# used for a given font",
"names_set",
"=",
"set",
"(",
"names_list",
")",
"family_names",
".",
"extend",
"(",
"names_set",
")",
"counter",
"=",
"Counter",
"(",
"family_names",
")",
"for",
"family_name",
",",
"count",
"in",
"counter",
".",
"items",
"(",
")",
":",
"if",
"count",
">",
"4",
":",
"failed",
"=",
"True",
"yield",
"FAIL",
",",
"(",
"\"Family '{}' has {} fonts (should be 4 or fewer).\"",
")",
".",
"format",
"(",
"family_name",
",",
"count",
")",
"if",
"not",
"failed",
":",
"yield",
"PASS",
",",
"(",
"\"There were no more than 4 fonts per family name.\"",
")"
] | Verify that each group of fonts with the same nameID 1
has a maximum of 4 fonts | [
"Verify",
"that",
"each",
"group",
"of",
"fonts",
"with",
"the",
"same",
"nameID",
"1",
"has",
"maximum",
"of",
"4",
"fonts"
] | python | train |
seb-m/pyinotify | python3/pyinotify.py | https://github.com/seb-m/pyinotify/blob/0f3f8950d12e4a6534320153eed1a90a778da4ae/python3/pyinotify.py#L417-L433 | def maskname(mask):
"""
Returns the event name associated to mask. IN_ISDIR is appended to
the result when appropriate. Note: only one event is returned, because
only one event can be raised at a given time.
@param mask: mask.
@type mask: int
@return: event name.
@rtype: str
"""
ms = mask
name = '%s'
if mask & IN_ISDIR:
ms = mask - IN_ISDIR
name = '%s|IN_ISDIR'
return name % EventsCodes.ALL_VALUES[ms] | [
"def",
"maskname",
"(",
"mask",
")",
":",
"ms",
"=",
"mask",
"name",
"=",
"'%s'",
"if",
"mask",
"&",
"IN_ISDIR",
":",
"ms",
"=",
"mask",
"-",
"IN_ISDIR",
"name",
"=",
"'%s|IN_ISDIR'",
"return",
"name",
"%",
"EventsCodes",
".",
"ALL_VALUES",
"[",
"ms",
"]"
] | Returns the event name associated to mask. IN_ISDIR is appended to
the result when appropriate. Note: only one event is returned, because
only one event can be raised at a given time.
@param mask: mask.
@type mask: int
@return: event name.
@rtype: str | [
"Returns",
"the",
"event",
"name",
"associated",
"to",
"mask",
".",
"IN_ISDIR",
"is",
"appended",
"to",
"the",
"result",
"when",
"appropriate",
".",
"Note",
":",
"only",
"one",
"event",
"is",
"returned",
"because",
"only",
"one",
"event",
"can",
"be",
"raised",
"at",
"a",
"given",
"time",
"."
] | python | train |
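A self-contained sketch of the mask-to-name lookup; the two flag values follow the Linux inotify header, and the one-entry table stands in for pyinotify's full EventsCodes.ALL_VALUES:

    IN_CREATE = 0x00000100   # values as in <sys/inotify.h>
    IN_ISDIR = 0x40000000
    ALL_VALUES = {IN_CREATE: 'IN_CREATE'}  # stand-in for the full table

    def maskname(mask):
        ms, name = mask, '%s'
        if mask & IN_ISDIR:
            ms = mask - IN_ISDIR       # strip the directory bit before lookup
            name = '%s|IN_ISDIR'
        return name % ALL_VALUES[ms]

    print(maskname(IN_CREATE))              # IN_CREATE
    print(maskname(IN_CREATE | IN_ISDIR))   # IN_CREATE|IN_ISDIR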
cs01/pygdbmi | pygdbmi/gdbmiparser.py | https://github.com/cs01/pygdbmi/blob/709c781794d3c3b903891f83da011d2d995895d1/pygdbmi/gdbmiparser.py#L288-L299 | def _parse_key(stream):
"""Parse key, value combination
returns :
Parsed key (string)
"""
logger.debug("parsing key")
key = stream.advance_past_chars(["="])
logger.debug("parsed key:")
logger.debug("%s", fmt_green(key))
return key | [
"def",
"_parse_key",
"(",
"stream",
")",
":",
"logger",
".",
"debug",
"(",
"\"parsing key\"",
")",
"key",
"=",
"stream",
".",
"advance_past_chars",
"(",
"[",
"\"=\"",
"]",
")",
"logger",
".",
"debug",
"(",
"\"parsed key:\"",
")",
"logger",
".",
"debug",
"(",
"\"%s\"",
",",
"fmt_green",
"(",
"key",
")",
")",
"return",
"key"
] | Parse key, value combination
returns :
Parsed key (string) | [
"Parse",
"key",
"value",
"combination",
"returns",
":",
"Parsed",
"key",
"(",
"string",
")"
] | python | valid |
HiPERCAM/hcam_widgets | hcam_widgets/hcam.py | https://github.com/HiPERCAM/hcam_widgets/blob/7219f0d96dd3a8ebe3139c7f542a72c02d02fce8/hcam_widgets/hcam.py#L751-L797 | def getRtplotWins(self):
""""
Returns a string suitable to sending off to rtplot when
it asks for window parameters. Returns null string '' if
the windows are not OK. This operates on the basis of
trying to send something back, even if it might not be
OK as a window setup. Note that we have to take care
here not to update any GUI components because this is
called outside of the main thread.
"""
try:
if self.isFF():
return 'fullframe\r\n'
elif self.isDrift():
xbin = self.wframe.xbin.value()
ybin = self.wframe.ybin.value()
nwin = 2*self.wframe.npair.value()
ret = str(xbin) + ' ' + str(ybin) + ' ' + str(nwin) + '\r\n'
for xsl, xsr, ys, nx, ny in self.wframe:
ret += '{:d} {:d} {:d} {:d}\r\n'.format(
xsl, ys, nx, ny
)
ret += '{:d} {:d} {:d} {:d}'.format(
xsr, ys, nx, ny
)
return ret
else:
xbin = self.wframe.xbin.value()
ybin = self.wframe.ybin.value()
nwin = 4*self.wframe.nquad.value()
ret = str(xbin) + ' ' + str(ybin) + ' ' + str(nwin) + '\r\n'
for xsll, xsul, xslr, xsur, ys, nx, ny in self.wframe:
ret += '{:d} {:d} {:d} {:d}\r\n'.format(
xsll, ys, nx, ny
)
ret += '{:d} {:d} {:d} {:d}\r\n'.format(
xsul, 1025 - ys - ny, nx, ny
)
ret += '{:d} {:d} {:d} {:d}\r\n'.format(
xslr, ys, nx, ny
)
ret += '{:d} {:d} {:d} {:d}\r\n'.format(
xsur, 1025 - ys - ny, nx, ny
)
return ret
except:
return '' | [
"def",
"getRtplotWins",
"(",
"self",
")",
":",
"try",
":",
"if",
"self",
".",
"isFF",
"(",
")",
":",
"return",
"'fullframe\\r\\n'",
"elif",
"self",
".",
"isDrift",
"(",
")",
":",
"xbin",
"=",
"self",
".",
"wframe",
".",
"xbin",
".",
"value",
"(",
")",
"ybin",
"=",
"self",
".",
"wframe",
".",
"ybin",
".",
"value",
"(",
")",
"nwin",
"=",
"2",
"*",
"self",
".",
"wframe",
".",
"npair",
".",
"value",
"(",
")",
"ret",
"=",
"str",
"(",
"xbin",
")",
"+",
"' '",
"+",
"str",
"(",
"ybin",
")",
"+",
"' '",
"+",
"str",
"(",
"nwin",
")",
"+",
"'\\r\\n'",
"for",
"xsl",
",",
"xsr",
",",
"ys",
",",
"nx",
",",
"ny",
"in",
"self",
".",
"wframe",
":",
"ret",
"+=",
"'{:d} {:d} {:d} {:d}\\r\\n'",
".",
"format",
"(",
"xsl",
",",
"ys",
",",
"nx",
",",
"ny",
")",
"ret",
"+=",
"'{:d} {:d} {:d} {:d}'",
".",
"format",
"(",
"xsr",
",",
"ys",
",",
"nx",
",",
"ny",
")",
"return",
"ret",
"else",
":",
"xbin",
"=",
"self",
".",
"wframe",
".",
"xbin",
".",
"value",
"(",
")",
"ybin",
"=",
"self",
".",
"wframe",
".",
"ybin",
".",
"value",
"(",
")",
"nwin",
"=",
"4",
"*",
"self",
".",
"wframe",
".",
"nquad",
".",
"value",
"(",
")",
"ret",
"=",
"str",
"(",
"xbin",
")",
"+",
"' '",
"+",
"str",
"(",
"ybin",
")",
"+",
"' '",
"+",
"str",
"(",
"nwin",
")",
"+",
"'\\r\\n'",
"for",
"xsll",
",",
"xsul",
",",
"xslr",
",",
"xsur",
",",
"ys",
",",
"nx",
",",
"ny",
"in",
"self",
".",
"wframe",
":",
"ret",
"+=",
"'{:d} {:d} {:d} {:d}\\r\\n'",
".",
"format",
"(",
"xsll",
",",
"ys",
",",
"nx",
",",
"ny",
")",
"ret",
"+=",
"'{:d} {:d} {:d} {:d}\\r\\n'",
".",
"format",
"(",
"xsul",
",",
"1025",
"-",
"ys",
"-",
"ny",
",",
"nx",
",",
"ny",
")",
"ret",
"+=",
"'{:d} {:d} {:d} {:d}\\r\\n'",
".",
"format",
"(",
"xslr",
",",
"ys",
",",
"nx",
",",
"ny",
")",
"ret",
"+=",
"'{:d} {:d} {:d} {:d}\\r\\n'",
".",
"format",
"(",
"xsur",
",",
"1025",
"-",
"ys",
"-",
"ny",
",",
"nx",
",",
"ny",
")",
"return",
"ret",
"except",
":",
"return",
"''"
] | Returns a string suitable for sending off to rtplot when
it asks for window parameters. Returns null string '' if
the windows are not OK. This operates on the basis of
trying to send something back, even if it might not be
OK as a window setup. Note that we have to take care
here not to update any GUI components because this is
called outside of the main thread. | [
"Returns",
"a",
"string",
"suitable",
"to",
"sending",
"off",
"to",
"rtplot",
"when",
"it",
"asks",
"for",
"window",
"parameters",
".",
"Returns",
"null",
"string",
"if",
"the",
"windows",
"are",
"not",
"OK",
".",
"This",
"operates",
"on",
"the",
"basis",
"of",
"trying",
"to",
"send",
"something",
"back",
"even",
"if",
"it",
"might",
"not",
"be",
"OK",
"as",
"a",
"window",
"setup",
".",
"Note",
"that",
"we",
"have",
"to",
"take",
"care",
"here",
"not",
"to",
"update",
"any",
"GUI",
"components",
"because",
"this",
"is",
"called",
"outside",
"of",
"the",
"main",
"thread",
"."
] | python | train |
nccgroup/Scout2 | AWSScout2/rules/preprocessing.py | https://github.com/nccgroup/Scout2/blob/5d86d46d7ed91a92000496189e9cfa6b98243937/AWSScout2/rules/preprocessing.py#L13-L35 | def preprocessing(aws_config, ip_ranges = [], ip_ranges_name_key = None):
"""
Tweak the AWS config to match cross-service resources and clean any fetching artifacts
:param aws_config:
:return:
"""
map_all_sgs(aws_config)
map_all_subnets(aws_config)
set_emr_vpc_ids(aws_config)
#parse_elb_policies(aws_config)
# Various data processing calls
add_security_group_name_to_ec2_grants(aws_config['services']['ec2'], aws_config['aws_account_id'])
process_cloudtrail_trails(aws_config['services']['cloudtrail'])
add_cidr_display_name(aws_config, ip_ranges, ip_ranges_name_key)
merge_route53_and_route53domains(aws_config)
match_instances_and_roles(aws_config)
match_iam_policies_and_buckets(aws_config)
# Preprocessing dictated by metadata
process_metadata_callbacks(aws_config) | [
"def",
"preprocessing",
"(",
"aws_config",
",",
"ip_ranges",
"=",
"[",
"]",
",",
"ip_ranges_name_key",
"=",
"None",
")",
":",
"map_all_sgs",
"(",
"aws_config",
")",
"map_all_subnets",
"(",
"aws_config",
")",
"set_emr_vpc_ids",
"(",
"aws_config",
")",
"#parse_elb_policies(aws_config)",
"# Various data processing calls",
"add_security_group_name_to_ec2_grants",
"(",
"aws_config",
"[",
"'services'",
"]",
"[",
"'ec2'",
"]",
",",
"aws_config",
"[",
"'aws_account_id'",
"]",
")",
"process_cloudtrail_trails",
"(",
"aws_config",
"[",
"'services'",
"]",
"[",
"'cloudtrail'",
"]",
")",
"add_cidr_display_name",
"(",
"aws_config",
",",
"ip_ranges",
",",
"ip_ranges_name_key",
")",
"merge_route53_and_route53domains",
"(",
"aws_config",
")",
"match_instances_and_roles",
"(",
"aws_config",
")",
"match_iam_policies_and_buckets",
"(",
"aws_config",
")",
"# Preprocessing dictated by metadata",
"process_metadata_callbacks",
"(",
"aws_config",
")"
] | Tweak the AWS config to match cross-service resources and clean any fetching artifacts
:param aws_config:
:return: | [
"Tweak",
"the",
"AWS",
"config",
"to",
"match",
"cross",
"-",
"service",
"resources",
"and",
"clean",
"any",
"fetching",
"artifacts"
] | python | train |
gbiggs/rtctree | rtctree/component.py | https://github.com/gbiggs/rtctree/blob/bd725a47ac87c259c8bce06156ccc9ab71111c26/rtctree/component.py#L817-L851 | def state(self):
'''The merged state of all the execution context states, which can be
used as the overall state of this component.
The order of precedence is:
Error > Active > Inactive > Created > Unknown
'''
def merge_state(current, new):
if new == self.ERROR:
return self.ERROR
elif new == self.ACTIVE and current != self.ERROR:
return self.ACTIVE
elif new == self.INACTIVE and \
current not in [self.ACTIVE, self.ERROR]:
return self.INACTIVE
elif new == self.CREATED and \
current not in [self.ACTIVE, self.ERROR, self.INACTIVE]:
return self.CREATED
elif current not in [self.ACTIVE, self.ERROR, self.INACTIVE,
self.CREATED]:
return self.UNKNOWN
return current
with self._mutex:
if not self.owned_ec_states and not self.participating_ec_states:
return self.UNKNOWN
merged_state = self.CREATED
if self.owned_ec_states:
for ec_state in self.owned_ec_states:
merged_state = merge_state(merged_state, ec_state)
if self.participating_ec_states:
for ec_state in self.participating_ec_states:
merged_state = merge_state(merged_state, ec_state)
return merged_state | [
"def",
"state",
"(",
"self",
")",
":",
"def",
"merge_state",
"(",
"current",
",",
"new",
")",
":",
"if",
"new",
"==",
"self",
".",
"ERROR",
":",
"return",
"self",
".",
"ERROR",
"elif",
"new",
"==",
"self",
".",
"ACTIVE",
"and",
"current",
"!=",
"self",
".",
"ERROR",
":",
"return",
"self",
".",
"ACTIVE",
"elif",
"new",
"==",
"self",
".",
"INACTIVE",
"and",
"current",
"not",
"in",
"[",
"self",
".",
"ACTIVE",
",",
"self",
".",
"ERROR",
"]",
":",
"return",
"self",
".",
"INACTIVE",
"elif",
"new",
"==",
"self",
".",
"CREATED",
"and",
"current",
"not",
"in",
"[",
"self",
".",
"ACTIVE",
",",
"self",
".",
"ERROR",
",",
"self",
".",
"INACTIVE",
"]",
":",
"return",
"self",
".",
"CREATED",
"elif",
"current",
"not",
"in",
"[",
"self",
".",
"ACTIVE",
",",
"self",
".",
"ERROR",
",",
"self",
".",
"INACTIVE",
",",
"self",
".",
"CREATED",
"]",
":",
"return",
"self",
".",
"UNKNOWN",
"return",
"current",
"with",
"self",
".",
"_mutex",
":",
"if",
"not",
"self",
".",
"owned_ec_states",
"and",
"not",
"self",
".",
"participating_ec_states",
":",
"return",
"self",
".",
"UNKNOWN",
"merged_state",
"=",
"self",
".",
"CREATED",
"if",
"self",
".",
"owned_ec_states",
":",
"for",
"ec_state",
"in",
"self",
".",
"owned_ec_states",
":",
"merged_state",
"=",
"merge_state",
"(",
"merged_state",
",",
"ec_state",
")",
"if",
"self",
".",
"participating_ec_states",
":",
"for",
"ec_state",
"in",
"self",
".",
"participating_ec_states",
":",
"merged_state",
"=",
"merge_state",
"(",
"merged_state",
",",
"ec_state",
")",
"return",
"merged_state"
] | The merged state of all the execution context states, which can be
used as the overall state of this component.
The order of precedence is:
Error > Active > Inactive > Created > Unknown | [
"The",
"merged",
"state",
"of",
"all",
"the",
"execution",
"context",
"states",
"which",
"can",
"be",
"used",
"as",
"the",
"overall",
"state",
"of",
"this",
"component",
"."
] | python | train |
wakatime/wakatime | wakatime/packages/pytz/tzinfo.py | https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/pytz/tzinfo.py#L16-L23 | def memorized_timedelta(seconds):
'''Create only one instance of each distinct timedelta'''
try:
return _timedelta_cache[seconds]
except KeyError:
delta = timedelta(seconds=seconds)
_timedelta_cache[seconds] = delta
return delta | [
"def",
"memorized_timedelta",
"(",
"seconds",
")",
":",
"try",
":",
"return",
"_timedelta_cache",
"[",
"seconds",
"]",
"except",
"KeyError",
":",
"delta",
"=",
"timedelta",
"(",
"seconds",
"=",
"seconds",
")",
"_timedelta_cache",
"[",
"seconds",
"]",
"=",
"delta",
"return",
"delta"
] | Create only one instance of each distinct timedelta | [
"Create",
"only",
"one",
"instance",
"of",
"each",
"distinct",
"timedelta"
] | python | train |
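The point of the cache above is object identity, not just speed: callers can then compare offsets with `is`. A runnable sketch of the same memoization:

    from datetime import timedelta

    _timedelta_cache = {}

    def memorized_timedelta(seconds):
        try:
            return _timedelta_cache[seconds]
        except KeyError:
            delta = _timedelta_cache[seconds] = timedelta(seconds=seconds)
            return delta

    print(memorized_timedelta(3600) is memorized_timedelta(3600))  # True - one shared object
    print(timedelta(seconds=3600) is timedelta(seconds=3600))      # False - two fresh objects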
bwohlberg/sporco | sporco/fista/fista.py | https://github.com/bwohlberg/sporco/blob/8946a04331106f4e39904fbdf2dc7351900baa04/sporco/fista/fista.py#L860-L873 | def combination_step(self):
"""Update auxiliary state by a smart combination of previous
updates in the frequency domain (standard FISTA
:cite:`beck-2009-fast`).
"""
# Update t step
tprv = self.t
self.t = 0.5 * float(1. + np.sqrt(1. + 4. * tprv**2))
# Update Y
if not self.opt['FastSolve']:
self.Yfprv = self.Yf.copy()
self.Yf = self.Xf + ((tprv - 1.) / self.t) * (self.Xf - self.Xfprv) | [
"def",
"combination_step",
"(",
"self",
")",
":",
"# Update t step",
"tprv",
"=",
"self",
".",
"t",
"self",
".",
"t",
"=",
"0.5",
"*",
"float",
"(",
"1.",
"+",
"np",
".",
"sqrt",
"(",
"1.",
"+",
"4.",
"*",
"tprv",
"**",
"2",
")",
")",
"# Update Y",
"if",
"not",
"self",
".",
"opt",
"[",
"'FastSolve'",
"]",
":",
"self",
".",
"Yfprv",
"=",
"self",
".",
"Yf",
".",
"copy",
"(",
")",
"self",
".",
"Yf",
"=",
"self",
".",
"Xf",
"+",
"(",
"(",
"tprv",
"-",
"1.",
")",
"/",
"self",
".",
"t",
")",
"*",
"(",
"self",
".",
"Xf",
"-",
"self",
".",
"Xfprv",
")"
] | Update auxiliary state by a smart combination of previous
updates in the frequency domain (standard FISTA
:cite:`beck-2009-fast`). | [
"Update",
"auxiliary",
"state",
"by",
"a",
"smart",
"combination",
"of",
"previous",
"updates",
"in",
"the",
"frequency",
"domain",
"(",
"standard",
"FISTA",
":",
"cite",
":",
"beck",
"-",
"2009",
"-",
"fast",
")",
"."
] | python | train |
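The update above is the standard FISTA momentum sequence, t_{k+1} = (1 + sqrt(1 + 4*t_k^2)) / 2, applied with extrapolation weight (t_k - 1) / t_{k+1}. A few iterations make the growing momentum visible:

    import numpy as np

    t = 1.0
    for k in range(5):
        tprv = t
        t = 0.5 * (1.0 + np.sqrt(1.0 + 4.0 * tprv ** 2))
        # the weight multiplying (X_k - X_{k-1}) climbs from 0 toward 1
        print('k=%d  t=%.4f  weight=%.4f' % (k, t, (tprv - 1.0) / t))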
SmokinCaterpillar/pypet | pypet/trajectory.py | https://github.com/SmokinCaterpillar/pypet/blob/97ad3e80d46dbdea02deeb98ea41f05a19565826/pypet/trajectory.py#L1429-L1459 | def _add_run_info(self, idx, name='', timestamp=42.0, finish_timestamp=1.337,
runtime='forever and ever', time='>>Maybe time`s gone on strike',
completed=0, parameter_summary='Not yet my friend!',
short_environment_hexsha='N/A'):
"""Adds a new run to the `_run_information` dict."""
if idx in self._single_run_ids:
# Delete old entries, they might be replaced by a new name
old_name = self._single_run_ids[idx]
del self._single_run_ids[old_name]
del self._single_run_ids[idx]
del self._run_information[old_name]
if name == '':
name = self.f_wildcard('$', idx)
# The `_single_run_ids` dict is bidirectional and maps indices to run names and vice versa
self._single_run_ids[name] = idx
self._single_run_ids[idx] = name
info_dict = {'idx': idx,
'timestamp': timestamp,
'finish_timestamp': finish_timestamp,
'runtime': runtime,
'time': time,
'completed': completed,
'name': name,
'parameter_summary': parameter_summary,
'short_environment_hexsha': short_environment_hexsha}
self._run_information[name] = info_dict
self._length = len(self._run_information) | [
"def",
"_add_run_info",
"(",
"self",
",",
"idx",
",",
"name",
"=",
"''",
",",
"timestamp",
"=",
"42.0",
",",
"finish_timestamp",
"=",
"1.337",
",",
"runtime",
"=",
"'forever and ever'",
",",
"time",
"=",
"'>>Maybe time`s gone on strike'",
",",
"completed",
"=",
"0",
",",
"parameter_summary",
"=",
"'Not yet my friend!'",
",",
"short_environment_hexsha",
"=",
"'N/A'",
")",
":",
"if",
"idx",
"in",
"self",
".",
"_single_run_ids",
":",
"# Delete old entries, they might be replaced by a new name",
"old_name",
"=",
"self",
".",
"_single_run_ids",
"[",
"idx",
"]",
"del",
"self",
".",
"_single_run_ids",
"[",
"old_name",
"]",
"del",
"self",
".",
"_single_run_ids",
"[",
"idx",
"]",
"del",
"self",
".",
"_run_information",
"[",
"old_name",
"]",
"if",
"name",
"==",
"''",
":",
"name",
"=",
"self",
".",
"f_wildcard",
"(",
"'$'",
",",
"idx",
")",
"# The `_single_run_ids` dict is bidirectional and maps indices to run names and vice versa",
"self",
".",
"_single_run_ids",
"[",
"name",
"]",
"=",
"idx",
"self",
".",
"_single_run_ids",
"[",
"idx",
"]",
"=",
"name",
"info_dict",
"=",
"{",
"'idx'",
":",
"idx",
",",
"'timestamp'",
":",
"timestamp",
",",
"'finish_timestamp'",
":",
"finish_timestamp",
",",
"'runtime'",
":",
"runtime",
",",
"'time'",
":",
"time",
",",
"'completed'",
":",
"completed",
",",
"'name'",
":",
"name",
",",
"'parameter_summary'",
":",
"parameter_summary",
",",
"'short_environment_hexsha'",
":",
"short_environment_hexsha",
"}",
"self",
".",
"_run_information",
"[",
"name",
"]",
"=",
"info_dict",
"self",
".",
"_length",
"=",
"len",
"(",
"self",
".",
"_run_information",
")"
] | Adds a new run to the `_run_information` dict. | [
"Adds",
"a",
"new",
"run",
"to",
"the",
"_run_information",
"dict",
"."
] | python | test |
csparpa/pyowm | pyowm/weatherapi25/cityidregistry.py | https://github.com/csparpa/pyowm/blob/cdd59eb72f32f7238624ceef9b2e2329a5ebd472/pyowm/weatherapi25/cityidregistry.py#L232-L244 | def _match_line(self, city_name, lines):
"""
The lookup is case insensitive and returns the first matching line,
stripped.
:param city_name: str
:param lines: list of str
:return: str
"""
for line in lines:
toponym = line.split(',')[0]
if toponym.lower() == city_name.lower():
return line.strip()
return None | [
"def",
"_match_line",
"(",
"self",
",",
"city_name",
",",
"lines",
")",
":",
"for",
"line",
"in",
"lines",
":",
"toponym",
"=",
"line",
".",
"split",
"(",
"','",
")",
"[",
"0",
"]",
"if",
"toponym",
".",
"lower",
"(",
")",
"==",
"city_name",
".",
"lower",
"(",
")",
":",
"return",
"line",
".",
"strip",
"(",
")",
"return",
"None"
] | The lookup is case insensitive and returns the first matching line,
stripped.
:param city_name: str
:param lines: list of str
:return: str | [
"The",
"lookup",
"is",
"case",
"insensitive",
"and",
"returns",
"the",
"first",
"matching",
"line",
"stripped",
".",
":",
"param",
"city_name",
":",
"str",
":",
"param",
"lines",
":",
"list",
"of",
"str",
":",
"return",
":",
"str"
] | python | train |
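The lookup above scans CSV-like 'name,id,...' rows and compares only the leading toponym, case-insensitively, returning the first hit stripped. A standalone sketch with illustrative rows:

    def match_line(city_name, lines):
        for line in lines:
            toponym = line.split(',')[0]
            if toponym.lower() == city_name.lower():
                return line.strip()
        return None

    rows = ['London,2643743,51.50853,-0.12574,GB\n',
            'Paris,2988507,48.85341,2.3488,FR\n']
    print(match_line('london', rows))  # 'London,2643743,...' - first match, stripped
    print(match_line('Berlin', rows))  # None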
jobovy/galpy | galpy/potential/DiskSCFPotential.py | https://github.com/jobovy/galpy/blob/9c5b9fe65d58835624dffe432be282060918ee08/galpy/potential/DiskSCFPotential.py#L409-L433 | def _Rzderiv(self,R,z,phi=0.,t=0.): #pragma: no cover
"""
NAME:
_Rzderiv
PURPOSE:
evaluate the mixed R,z derivative for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
d2phi/dR/dz
HISTORY:
2016-12-26 - Written - Bovy (UofT/CCA)
"""
raise AttributeError
# Implementation above does not work bc SCF.Rzderiv is not implemented
r= numpy.sqrt(R**2.+z**2.)
out= self._scf.Rzderiv(R,z,phi=phi,use_physical=False)
for a,ds,d2s,H,dH in zip(self._Sigma_amp,self._dsigmadR,
self._d2SigmadR2,self._Hz,self._dHzdz):
out+= 4.*numpy.pi*a*(H(z)*R*z/r**2.*(d2s(r)-ds(r)/r)
+ds(r)*dH(z)*R/r)
return out | [
"def",
"_Rzderiv",
"(",
"self",
",",
"R",
",",
"z",
",",
"phi",
"=",
"0.",
",",
"t",
"=",
"0.",
")",
":",
"#pragma: no cover",
"raise",
"AttributeError",
"# Implementation above does not work bc SCF.Rzderiv is not implemented",
"r",
"=",
"numpy",
".",
"sqrt",
"(",
"R",
"**",
"2.",
"+",
"z",
"**",
"2.",
")",
"out",
"=",
"self",
".",
"_scf",
".",
"Rzderiv",
"(",
"R",
",",
"z",
",",
"phi",
"=",
"phi",
",",
"use_physical",
"=",
"False",
")",
"for",
"a",
",",
"ds",
",",
"d2s",
",",
"H",
",",
"dH",
"in",
"zip",
"(",
"self",
".",
"_Sigma_amp",
",",
"self",
".",
"_dsigmadR",
",",
"self",
".",
"_d2SigmadR2",
",",
"self",
".",
"_Hz",
",",
"self",
".",
"_dHzdz",
")",
":",
"out",
"+=",
"4.",
"*",
"numpy",
".",
"pi",
"*",
"a",
"*",
"(",
"H",
"(",
"z",
")",
"*",
"R",
"*",
"z",
"/",
"r",
"**",
"2.",
"*",
"(",
"d2s",
"(",
"r",
")",
"-",
"ds",
"(",
"r",
")",
"/",
"r",
")",
"+",
"ds",
"(",
"r",
")",
"*",
"dH",
"(",
"z",
")",
"*",
"R",
"/",
"r",
")",
"return",
"out"
] | NAME:
_Rzderiv
PURPOSE:
evaluate the mixed R,z derivative for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
d2phi/dR/dz
HISTORY:
2016-12-26 - Written - Bovy (UofT/CCA) | [
"NAME",
":",
"_Rzderiv",
"PURPOSE",
":",
"evaluate",
"the",
"mixed",
"R",
"z",
"derivative",
"for",
"this",
"potential",
"INPUT",
":",
"R",
"-",
"Galactocentric",
"cylindrical",
"radius",
"z",
"-",
"vertical",
"height",
"phi",
"-",
"azimuth",
"t",
"-",
"time",
"OUTPUT",
":",
"d2phi",
"/",
"dR",
"/",
"dz",
"HISTORY",
":",
"2016",
"-",
"12",
"-",
"26",
"-",
"Written",
"-",
"Bovy",
"(",
"UofT",
"/",
"CCA",
")"
] | python | train |
FNNDSC/pfmisc | pfmisc/other.py | https://github.com/FNNDSC/pfmisc/blob/960b4d6135fcc50bed0a8e55db2ab1ddad9b99d8/pfmisc/other.py#L1050-L1055 | def touch(fname, times=None):
'''
Emulates the UNIX touch command.
'''
with io.open(fname, 'a'):
os.utime(fname, times) | [
"def",
"touch",
"(",
"fname",
",",
"times",
"=",
"None",
")",
":",
"with",
"io",
".",
"open",
"(",
"fname",
",",
"'a'",
")",
":",
"os",
".",
"utime",
"(",
"fname",
",",
"times",
")"
] | Emulates the UNIX touch command. | [
"Emulates",
"the",
"UNIX",
"touch",
"command",
"."
] | python | train |
theislab/scanpy | scanpy/plotting/_preprocessing.py | https://github.com/theislab/scanpy/blob/9e4e5ee02e04cf618872d9b098e24f0542e8b227/scanpy/plotting/_preprocessing.py#L66-L83 | def filter_genes_dispersion(result, log=False, show=None, save=None):
"""Plot dispersions versus means for genes.
Produces Supp. Fig. 5c of Zheng et al. (2017) and MeanVarPlot() of Seurat.
Parameters
----------
result : `np.recarray`
Result of :func:`~scanpy.api.pp.filter_genes_dispersion`.
log : `bool`
Plot on logarithmic axes.
show : bool, optional (default: `None`)
Show the plot, do not return axis.
save : `bool` or `str`, optional (default: `None`)
If `True` or a `str`, save the figure. A string is appended to the
default filename. Infer the filetype if ending on {{'.pdf', '.png', '.svg'}}.
"""
highly_variable_genes(result, log=log, show=show, save=save, highly_variable_genes=False)
"def",
"filter_genes_dispersion",
"(",
"result",
",",
"log",
"=",
"False",
",",
"show",
"=",
"None",
",",
"save",
"=",
"None",
")",
":",
"highly_variable_genes",
"(",
"result",
",",
"log",
"=",
"False",
",",
"show",
"=",
"None",
",",
"save",
"=",
"None",
",",
"highly_variable_genes",
"=",
"False",
")"
] | Plot dispersions versus means for genes.
Produces Supp. Fig. 5c of Zheng et al. (2017) and MeanVarPlot() of Seurat.
Parameters
----------
result : `np.recarray`
Result of :func:`~scanpy.api.pp.filter_genes_dispersion`.
log : `bool`
Plot on logarithmic axes.
show : bool, optional (default: `None`)
Show the plot, do not return axis.
save : `bool` or `str`, optional (default: `None`)
If `True` or a `str`, save the figure. A string is appended to the
default filename. Infer the filetype if ending on {{'.pdf', '.png', '.svg'}}. | [
"Plot",
"dispersions",
"versus",
"means",
"for",
"genes",
"."
] | python | train |
wummel/linkchecker | linkcheck/director/logger.py | https://github.com/wummel/linkchecker/blob/c2ce810c3fb00b895a841a7be6b2e78c64e7b042/linkcheck/director/logger.py#L57-L64 | def log_url (self, url_data):
"""Send new url to all configured loggers."""
self.check_active_loggers()
do_print = self.do_print(url_data)
# Only send a transport object to the loggers, not the complete
# object instance.
for log in self.loggers:
log.log_filter_url(url_data, do_print) | [
"def",
"log_url",
"(",
"self",
",",
"url_data",
")",
":",
"self",
".",
"check_active_loggers",
"(",
")",
"do_print",
"=",
"self",
".",
"do_print",
"(",
"url_data",
")",
"# Only send a transport object to the loggers, not the complete",
"# object instance.",
"for",
"log",
"in",
"self",
".",
"loggers",
":",
"log",
".",
"log_filter_url",
"(",
"url_data",
",",
"do_print",
")"
] | Send new url to all configured loggers. | [
"Send",
"new",
"url",
"to",
"all",
"configured",
"loggers",
"."
] | python | train |
gesellkammer/sndfileio | sndfileio/dsp.py | https://github.com/gesellkammer/sndfileio/blob/8e2b264cadb652f09d2e775f54090c0a3cb2ced2/sndfileio/dsp.py#L63-L85 | def filter_butter_coeffs(filtertype, freq, samplerate, order=5):
# type: (str, Union[float, Tuple[float, float]], int, int) -> Tuple[np.ndarray, np.ndarray]
"""
calculates the coefficients for a digital butterworth filter
filtertype: 'low', 'high', 'band'
freq : cutoff freq.
in the case of 'band': (low, high)
Returns --> (b, a)
"""
assert filtertype in ('low', 'high', 'band')
nyq = 0.5 * samplerate
if isinstance(freq, tuple):
assert filtertype == 'band'
low, high = freq
low /= nyq
high /= nyq
b, a = signal.butter(order, [low, high], btype='band')
else:
freq = freq / nyq
b, a = signal.butter(order, freq, btype=filtertype)
return b, a | [
"def",
"filter_butter_coeffs",
"(",
"filtertype",
",",
"freq",
",",
"samplerate",
",",
"order",
"=",
"5",
")",
":",
"# type: (str, Union[float, Tuple[float, float]], int, int) -> Tuple[np.ndarray, np.ndarray]",
"assert",
"filtertype",
"in",
"(",
"'low'",
",",
"'high'",
",",
"'band'",
")",
"nyq",
"=",
"0.5",
"*",
"samplerate",
"if",
"isinstance",
"(",
"freq",
",",
"tuple",
")",
":",
"assert",
"filtertype",
"==",
"'band'",
"low",
",",
"high",
"=",
"freq",
"low",
"/=",
"nyq",
"high",
"/=",
"nyq",
"b",
",",
"a",
"=",
"signal",
".",
"butter",
"(",
"order",
",",
"[",
"low",
",",
"high",
"]",
",",
"btype",
"=",
"'band'",
")",
"else",
":",
"freq",
"=",
"freq",
"/",
"nyq",
"b",
",",
"a",
"=",
"signal",
".",
"butter",
"(",
"order",
",",
"freq",
",",
"btype",
"=",
"filtertype",
")",
"return",
"b",
",",
"a"
] | calculates the coefficients for a digital butterworth filter
filtertype: 'low', 'high', 'band'
freq : cutoff freq.
in the case of 'band': (low, high)
Returns --> (b, a) | [
"calculates",
"the",
"coefficients",
"for",
"a",
"digital",
"butterworth",
"filter"
] | python | train |
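Under the hood the helper normalizes cutoffs by the Nyquist rate and defers to scipy.signal.butter; a minimal sketch of the two branches (assumes scipy is installed):

    from scipy import signal

    samplerate = 44100
    nyq = 0.5 * samplerate

    b, a = signal.butter(5, 1000 / nyq, btype='low')                  # low-pass at 1 kHz
    b2, a2 = signal.butter(5, [300 / nyq, 3000 / nyq], btype='band')  # band-pass 300 Hz-3 kHz
    print(len(b), len(b2))  # 6 and 11: the band-pass doubles the effective order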
tornadoweb/tornado | tornado/wsgi.py | https://github.com/tornadoweb/tornado/blob/b8b481770bcdb333a69afde5cce7eaa449128326/tornado/wsgi.py#L148-L183 | def environ(request: httputil.HTTPServerRequest) -> Dict[Text, Any]:
"""Converts a `tornado.httputil.HTTPServerRequest` to a WSGI environment.
"""
hostport = request.host.split(":")
if len(hostport) == 2:
host = hostport[0]
port = int(hostport[1])
else:
host = request.host
port = 443 if request.protocol == "https" else 80
environ = {
"REQUEST_METHOD": request.method,
"SCRIPT_NAME": "",
"PATH_INFO": to_wsgi_str(
escape.url_unescape(request.path, encoding=None, plus=False)
),
"QUERY_STRING": request.query,
"REMOTE_ADDR": request.remote_ip,
"SERVER_NAME": host,
"SERVER_PORT": str(port),
"SERVER_PROTOCOL": request.version,
"wsgi.version": (1, 0),
"wsgi.url_scheme": request.protocol,
"wsgi.input": BytesIO(escape.utf8(request.body)),
"wsgi.errors": sys.stderr,
"wsgi.multithread": False,
"wsgi.multiprocess": True,
"wsgi.run_once": False,
}
if "Content-Type" in request.headers:
environ["CONTENT_TYPE"] = request.headers.pop("Content-Type")
if "Content-Length" in request.headers:
environ["CONTENT_LENGTH"] = request.headers.pop("Content-Length")
for key, value in request.headers.items():
environ["HTTP_" + key.replace("-", "_").upper()] = value
return environ | [
"def",
"environ",
"(",
"request",
":",
"httputil",
".",
"HTTPServerRequest",
")",
"->",
"Dict",
"[",
"Text",
",",
"Any",
"]",
":",
"hostport",
"=",
"request",
".",
"host",
".",
"split",
"(",
"\":\"",
")",
"if",
"len",
"(",
"hostport",
")",
"==",
"2",
":",
"host",
"=",
"hostport",
"[",
"0",
"]",
"port",
"=",
"int",
"(",
"hostport",
"[",
"1",
"]",
")",
"else",
":",
"host",
"=",
"request",
".",
"host",
"port",
"=",
"443",
"if",
"request",
".",
"protocol",
"==",
"\"https\"",
"else",
"80",
"environ",
"=",
"{",
"\"REQUEST_METHOD\"",
":",
"request",
".",
"method",
",",
"\"SCRIPT_NAME\"",
":",
"\"\"",
",",
"\"PATH_INFO\"",
":",
"to_wsgi_str",
"(",
"escape",
".",
"url_unescape",
"(",
"request",
".",
"path",
",",
"encoding",
"=",
"None",
",",
"plus",
"=",
"False",
")",
")",
",",
"\"QUERY_STRING\"",
":",
"request",
".",
"query",
",",
"\"REMOTE_ADDR\"",
":",
"request",
".",
"remote_ip",
",",
"\"SERVER_NAME\"",
":",
"host",
",",
"\"SERVER_PORT\"",
":",
"str",
"(",
"port",
")",
",",
"\"SERVER_PROTOCOL\"",
":",
"request",
".",
"version",
",",
"\"wsgi.version\"",
":",
"(",
"1",
",",
"0",
")",
",",
"\"wsgi.url_scheme\"",
":",
"request",
".",
"protocol",
",",
"\"wsgi.input\"",
":",
"BytesIO",
"(",
"escape",
".",
"utf8",
"(",
"request",
".",
"body",
")",
")",
",",
"\"wsgi.errors\"",
":",
"sys",
".",
"stderr",
",",
"\"wsgi.multithread\"",
":",
"False",
",",
"\"wsgi.multiprocess\"",
":",
"True",
",",
"\"wsgi.run_once\"",
":",
"False",
",",
"}",
"if",
"\"Content-Type\"",
"in",
"request",
".",
"headers",
":",
"environ",
"[",
"\"CONTENT_TYPE\"",
"]",
"=",
"request",
".",
"headers",
".",
"pop",
"(",
"\"Content-Type\"",
")",
"if",
"\"Content-Length\"",
"in",
"request",
".",
"headers",
":",
"environ",
"[",
"\"CONTENT_LENGTH\"",
"]",
"=",
"request",
".",
"headers",
".",
"pop",
"(",
"\"Content-Length\"",
")",
"for",
"key",
",",
"value",
"in",
"request",
".",
"headers",
".",
"items",
"(",
")",
":",
"environ",
"[",
"\"HTTP_\"",
"+",
"key",
".",
"replace",
"(",
"\"-\"",
",",
"\"_\"",
")",
".",
"upper",
"(",
")",
"]",
"=",
"value",
"return",
"environ"
] | Converts a `tornado.httputil.HTTPServerRequest` to a WSGI environment. | [
"Converts",
"a",
"tornado",
".",
"httputil",
".",
"HTTPServerRequest",
"to",
"a",
"WSGI",
"environment",
"."
] | python | train |
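
As a rough illustration of the mapping above (the request values are assumptions), a GET to https://example.com:8443/a%20b?q=1 would produce an environ along these lines:

environ = {
    "REQUEST_METHOD": "GET",
    "SCRIPT_NAME": "",
    "PATH_INFO": "/a b",                 # URL-unescaped, re-encoded for WSGI
    "QUERY_STRING": "q=1",
    "REMOTE_ADDR": "203.0.113.7",        # example client address
    "SERVER_NAME": "example.com",
    "SERVER_PORT": "8443",
    "SERVER_PROTOCOL": "HTTP/1.1",
    "wsgi.url_scheme": "https",
    "wsgi.version": (1, 0),
    # plus wsgi.input/wsgi.errors, the multithread/multiprocess/run_once
    # flags, CONTENT_TYPE/CONTENT_LENGTH when sent, and HTTP_* headers
}
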
pydata/xarray | xarray/core/variable.py | https://github.com/pydata/xarray/blob/6d93a95d05bdbfc33fff24064f67d29dd891ab58/xarray/core/variable.py#L846-L898 | def chunk(self, chunks=None, name=None, lock=False):
"""Coerce this array's data into a dask arrays with the given chunks.
If this variable is a non-dask array, it will be converted to dask
array. If it's a dask array, it will be rechunked to the given chunk
sizes.
If chunks are not provided for one or more dimensions, chunk
sizes along that dimension will not be updated; non-dask arrays will be
converted into dask arrays with a single block.
Parameters
----------
chunks : int, tuple or dict, optional
Chunk sizes along each dimension, e.g., ``5``, ``(5, 5)`` or
``{'x': 5, 'y': 5}``.
name : str, optional
Used to generate the name for this array in the internal dask
graph. Does not need to be unique.
lock : optional
Passed on to :py:func:`dask.array.from_array`, if the array is not
already as dask array.
Returns
-------
chunked : xarray.Variable
"""
import dask.array as da
if utils.is_dict_like(chunks):
chunks = dict((self.get_axis_num(dim), chunk)
for dim, chunk in chunks.items())
if chunks is None:
chunks = self.chunks or self.shape
data = self._data
if isinstance(data, da.Array):
data = data.rechunk(chunks)
else:
if utils.is_dict_like(chunks):
chunks = tuple(chunks.get(n, s)
for n, s in enumerate(self.shape))
# da.from_array works by using lazily indexing with a tuple of
# slices. Using OuterIndexer is a pragmatic choice: dask does not
# yet handle different indexing types in an explicit way:
# https://github.com/dask/dask/issues/2883
data = indexing.ImplicitToExplicitIndexingAdapter(
data, indexing.OuterIndexer)
data = da.from_array(data, chunks, name=name, lock=lock)
return type(self)(self.dims, data, self._attrs, self._encoding,
fastpath=True) | [
"def",
"chunk",
"(",
"self",
",",
"chunks",
"=",
"None",
",",
"name",
"=",
"None",
",",
"lock",
"=",
"False",
")",
":",
"import",
"dask",
".",
"array",
"as",
"da",
"if",
"utils",
".",
"is_dict_like",
"(",
"chunks",
")",
":",
"chunks",
"=",
"dict",
"(",
"(",
"self",
".",
"get_axis_num",
"(",
"dim",
")",
",",
"chunk",
")",
"for",
"dim",
",",
"chunk",
"in",
"chunks",
".",
"items",
"(",
")",
")",
"if",
"chunks",
"is",
"None",
":",
"chunks",
"=",
"self",
".",
"chunks",
"or",
"self",
".",
"shape",
"data",
"=",
"self",
".",
"_data",
"if",
"isinstance",
"(",
"data",
",",
"da",
".",
"Array",
")",
":",
"data",
"=",
"data",
".",
"rechunk",
"(",
"chunks",
")",
"else",
":",
"if",
"utils",
".",
"is_dict_like",
"(",
"chunks",
")",
":",
"chunks",
"=",
"tuple",
"(",
"chunks",
".",
"get",
"(",
"n",
",",
"s",
")",
"for",
"n",
",",
"s",
"in",
"enumerate",
"(",
"self",
".",
"shape",
")",
")",
"# da.from_array works by using lazily indexing with a tuple of",
"# slices. Using OuterIndexer is a pragmatic choice: dask does not",
"# yet handle different indexing types in an explicit way:",
"# https://github.com/dask/dask/issues/2883",
"data",
"=",
"indexing",
".",
"ImplicitToExplicitIndexingAdapter",
"(",
"data",
",",
"indexing",
".",
"OuterIndexer",
")",
"data",
"=",
"da",
".",
"from_array",
"(",
"data",
",",
"chunks",
",",
"name",
"=",
"name",
",",
"lock",
"=",
"lock",
")",
"return",
"type",
"(",
"self",
")",
"(",
"self",
".",
"dims",
",",
"data",
",",
"self",
".",
"_attrs",
",",
"self",
".",
"_encoding",
",",
"fastpath",
"=",
"True",
")"
] | Coerce this array's data into a dask array with the given chunks.
If this variable is a non-dask array, it will be converted to dask
array. If it's a dask array, it will be rechunked to the given chunk
sizes.
If chunks are not provided for one or more dimensions, chunk
sizes along that dimension will not be updated; non-dask arrays will be
converted into dask arrays with a single block.
Parameters
----------
chunks : int, tuple or dict, optional
Chunk sizes along each dimension, e.g., ``5``, ``(5, 5)`` or
``{'x': 5, 'y': 5}``.
name : str, optional
Used to generate the name for this array in the internal dask
graph. Does not need to be unique.
lock : optional
Passed on to :py:func:`dask.array.from_array`, if the array is not
already as dask array.
Returns
-------
chunked : xarray.Variable | [
"Coerce",
"this",
"array",
"s",
"data",
"into",
"a",
"dask",
"array",
"with",
"the",
"given",
"chunks",
"."
] | python | train |
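
A short usage sketch of the chunking behaviour described above (dimension names and sizes are made up; dask must be installed):

import numpy as np
import xarray as xr

var = xr.Variable(('x', 'y'), np.zeros((100, 200)))
chunked = var.chunk({'x': 10})   # dict keys are dimension names
print(chunked.chunks)            # ((10, ..., 10), (200,)) - y stays one block
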
PaulHancock/Aegean | AegeanTools/catalogs.py | https://github.com/PaulHancock/Aegean/blob/185d2b4a51b48441a1df747efc9a5271c79399fd/AegeanTools/catalogs.py#L121-L142 | def update_meta_data(meta=None):
"""
Modify the metadata dictionary.
DATE, PROGRAM, and PROGVER are added/modified.
Parameters
----------
meta : dict
The dictionary to be modified, default = None (empty)
Returns
-------
An updated dictionary.
"""
if meta is None:
meta = {}
if 'DATE' not in meta:
meta['DATE'] = strftime("%Y-%m-%d %H:%M:%S", gmtime())
if 'PROGRAM' not in meta:
meta['PROGRAM'] = "AegeanTools.catalogs"
meta['PROGVER'] = "{0}-({1})".format(__version__, __date__)
return meta | [
"def",
"update_meta_data",
"(",
"meta",
"=",
"None",
")",
":",
"if",
"meta",
"is",
"None",
":",
"meta",
"=",
"{",
"}",
"if",
"'DATE'",
"not",
"in",
"meta",
":",
"meta",
"[",
"'DATE'",
"]",
"=",
"strftime",
"(",
"\"%Y-%m-%d %H:%M:%S\"",
",",
"gmtime",
"(",
")",
")",
"if",
"'PROGRAM'",
"not",
"in",
"meta",
":",
"meta",
"[",
"'PROGRAM'",
"]",
"=",
"\"AegeanTools.catalogs\"",
"meta",
"[",
"'PROGVER'",
"]",
"=",
"\"{0}-({1})\"",
".",
"format",
"(",
"__version__",
",",
"__date__",
")",
"return",
"meta"
] | Modify the metadata dictionary.
DATE, PROGRAM, and PROGVER are added/modified.
Parameters
----------
meta : dict
The dictionary to be modified, default = None (empty)
Returns
-------
An updated dictionary. | [
"Modify",
"the",
"metadata",
"dictionary",
".",
"DATE",
"PROGRAM",
"and",
"PROGVER",
"are",
"added",
"/",
"modified",
"."
] | python | train |
cjdrake/pyeda | pyeda/boolalg/bfarray.py | https://github.com/cjdrake/pyeda/blob/554ee53aa678f4b61bcd7e07ba2c74ddc749d665/pyeda/boolalg/bfarray.py#L807-L825 | def arsh(self, num):
"""Arithmetically right shift the farray by *num* places.
The *num* argument must be a non-negative ``int``.
The carry-in will be the value of the most significant bit.
Returns a two-tuple of farrays: the shifted array and the carry-out.
"""
if num < 0 or num > self.size:
raise ValueError("expected 0 <= num <= {0.size}".format(self))
if num == 0:
return self, self.__class__([], ftype=self.ftype)
else:
sign = self._items[-1]
fs = self.__class__(self._items[num:] + [sign] * num,
ftype=self.ftype)
cout = self.__class__(self._items[:num], ftype=self.ftype)
return fs, cout | [
"def",
"arsh",
"(",
"self",
",",
"num",
")",
":",
"if",
"num",
"<",
"0",
"or",
"num",
">",
"self",
".",
"size",
":",
"raise",
"ValueError",
"(",
"\"expected 0 <= num <= {0.size}\"",
".",
"format",
"(",
"self",
")",
")",
"if",
"num",
"==",
"0",
":",
"return",
"self",
",",
"self",
".",
"__class__",
"(",
"[",
"]",
",",
"ftype",
"=",
"self",
".",
"ftype",
")",
"else",
":",
"sign",
"=",
"self",
".",
"_items",
"[",
"-",
"1",
"]",
"fs",
"=",
"self",
".",
"__class__",
"(",
"self",
".",
"_items",
"[",
"num",
":",
"]",
"+",
"[",
"sign",
"]",
"*",
"num",
",",
"ftype",
"=",
"self",
".",
"ftype",
")",
"cout",
"=",
"self",
".",
"__class__",
"(",
"self",
".",
"_items",
"[",
":",
"num",
"]",
",",
"ftype",
"=",
"self",
".",
"ftype",
")",
"return",
"fs",
",",
"cout"
] | Arithmetically right shift the farray by *num* places.
The *num* argument must be a non-negative ``int``.
The carry-in will be the value of the most significant bit.
Returns a two-tuple of farrays: the shifted array and the carry-out. | [
"Arithmetically",
"right",
"shift",
"the",
"farray",
"by",
"*",
"num",
"*",
"places",
"."
] | python | train |
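
A plain-list sketch of the shift logic above (it mirrors the slicing only and is not the pyeda API; items are least-significant first, so items[-1] is the sign bit):

def arsh_sketch(items, num):
    sign = items[-1]                      # replicated into vacated positions
    shifted = items[num:] + [sign] * num
    carry_out = items[:num]               # the bits shifted out
    return shifted, carry_out

print(arsh_sketch([1, 0, 0, 1], 1))       # ([0, 0, 1, 1], [1])
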
project-ncl/pnc-cli | pnc_cli/buildconfigurationsets.py | https://github.com/project-ncl/pnc-cli/blob/3dc149bf84928f60a8044ac50b58bbaddd451902/pnc_cli/buildconfigurationsets.py#L35-L41 | def list_build_configuration_sets(page_size=200, page_index=0, sort="", q=""):
"""
List all build configuration sets
"""
data = list_build_configuration_sets_raw(page_size, page_index, sort, q)
if data:
return utils.format_json_list(data) | [
"def",
"list_build_configuration_sets",
"(",
"page_size",
"=",
"200",
",",
"page_index",
"=",
"0",
",",
"sort",
"=",
"\"\"",
",",
"q",
"=",
"\"\"",
")",
":",
"data",
"=",
"list_build_configuration_sets_raw",
"(",
"page_size",
",",
"page_index",
",",
"sort",
",",
"q",
")",
"if",
"data",
":",
"return",
"utils",
".",
"format_json_list",
"(",
"data",
")"
] | List all build configuration sets | [
"List",
"all",
"build",
"configuration",
"sets"
] | python | train |
python-rope/rope | rope/contrib/generate.py | https://github.com/python-rope/rope/blob/1c9f9cd5964b099a99a9111e998f0dc728860688/rope/contrib/generate.py#L8-L16 | def create_generate(kind, project, resource, offset):
"""A factory for creating `Generate` objects
`kind` can be 'variable', 'function', 'class', 'module' or
'package'.
"""
generate = eval('Generate' + kind.title())
return generate(project, resource, offset) | [
"def",
"create_generate",
"(",
"kind",
",",
"project",
",",
"resource",
",",
"offset",
")",
":",
"generate",
"=",
"eval",
"(",
"'Generate'",
"+",
"kind",
".",
"title",
"(",
")",
")",
"return",
"generate",
"(",
"project",
",",
"resource",
",",
"offset",
")"
] | A factory for creating `Generate` objects
`kind` can be 'variable', 'function', 'class', 'module' or
'package'. | [
"A",
"factory",
"for",
"creating",
"Generate",
"objects"
] | python | train |
brocade/pynos | pynos/versions/ver_7/ver_7_1_0/yang/brocade_vcs.py | https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_7/ver_7_1_0/yang/brocade_vcs.py#L837-L846 | def vcsNodeState_nodeRbridgeid(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
vcsNodeState = ET.SubElement(config, "vcsNodeState", xmlns="urn:brocade.com:mgmt:brocade-vcs")
nodeRbridgeid = ET.SubElement(vcsNodeState, "nodeRbridgeid")
nodeRbridgeid.text = kwargs.pop('nodeRbridgeid')
callback = kwargs.pop('callback', self._callback)
return callback(config) | [
"def",
"vcsNodeState_nodeRbridgeid",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"config",
"=",
"ET",
".",
"Element",
"(",
"\"config\"",
")",
"vcsNodeState",
"=",
"ET",
".",
"SubElement",
"(",
"config",
",",
"\"vcsNodeState\"",
",",
"xmlns",
"=",
"\"urn:brocade.com:mgmt:brocade-vcs\"",
")",
"nodeRbridgeid",
"=",
"ET",
".",
"SubElement",
"(",
"vcsNodeState",
",",
"\"nodeRbridgeid\"",
")",
"nodeRbridgeid",
".",
"text",
"=",
"kwargs",
".",
"pop",
"(",
"'nodeRbridgeid'",
")",
"callback",
"=",
"kwargs",
".",
"pop",
"(",
"'callback'",
",",
"self",
".",
"_callback",
")",
"return",
"callback",
"(",
"config",
")"
] | Auto Generated Code | [
"Auto",
"Generated",
"Code"
] | python | train |
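
For reference, a standalone sketch of the XML this auto-generated builder emits (the rbridge id '1' is an assumed value):

import xml.etree.ElementTree as ET

config = ET.Element("config")
state = ET.SubElement(config, "vcsNodeState",
                      xmlns="urn:brocade.com:mgmt:brocade-vcs")
ET.SubElement(state, "nodeRbridgeid").text = "1"
print(ET.tostring(config).decode())
# <config><vcsNodeState xmlns="urn:brocade.com:mgmt:brocade-vcs">
#   <nodeRbridgeid>1</nodeRbridgeid></vcsNodeState></config>
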
Hackerfleet/hfos | modules/maps/hfos/map/TileTools.py | https://github.com/Hackerfleet/hfos/blob/b6df14eacaffb6be5c844108873ff8763ec7f0c9/modules/maps/hfos/map/TileTools.py#L47-L63 | def convertLatLngToPixelXY(self, lat, lng, level):
'''
returns the x and y values of the pixel corresponding to a latitude
and longitude.
'''
mapSize = self.getMapDimensionsByZoomLevel(level)
lat = self.clipValue(lat, self.min_lat, self.max_lat)
lng = self.clipValue(lng, self.min_lng, self.max_lng)
x = (lng + 180) / 360
sinlat = math.sin(lat * math.pi / 180)
y = 0.5 - math.log((1 + sinlat) / (1 - sinlat)) / (4 * math.pi)
pixelX = int(self.clipValue(x * mapSize + 0.5, 0, mapSize - 1))
pixelY = int(self.clipValue(y * mapSize + 0.5, 0, mapSize - 1))
return (pixelX, pixelY) | [
"def",
"convertLatLngToPixelXY",
"(",
"self",
",",
"lat",
",",
"lng",
",",
"level",
")",
":",
"mapSize",
"=",
"self",
".",
"getMapDimensionsByZoomLevel",
"(",
"level",
")",
"lat",
"=",
"self",
".",
"clipValue",
"(",
"lat",
",",
"self",
".",
"min_lat",
",",
"self",
".",
"max_lat",
")",
"lng",
"=",
"self",
".",
"clipValue",
"(",
"lng",
",",
"self",
".",
"min_lng",
",",
"self",
".",
"max_lng",
")",
"x",
"=",
"(",
"lng",
"+",
"180",
")",
"/",
"360",
"sinlat",
"=",
"math",
".",
"sin",
"(",
"lat",
"*",
"math",
".",
"pi",
"/",
"180",
")",
"y",
"=",
"0.5",
"-",
"math",
".",
"log",
"(",
"(",
"1",
"+",
"sinlat",
")",
"/",
"(",
"1",
"-",
"sinlat",
")",
")",
"/",
"(",
"4",
"*",
"math",
".",
"pi",
")",
"pixelX",
"=",
"int",
"(",
"self",
".",
"clipValue",
"(",
"x",
"*",
"mapSize",
"+",
"0.5",
",",
"0",
",",
"mapSize",
"-",
"1",
")",
")",
"pixelY",
"=",
"int",
"(",
"self",
".",
"clipValue",
"(",
"y",
"*",
"mapSize",
"+",
"0.5",
",",
"0",
",",
"mapSize",
"-",
"1",
")",
")",
"return",
"(",
"pixelX",
",",
"pixelY",
")"
] | returns the x and y values of the pixel corresponding to a latitude
and longitude. | [
"returns",
"the",
"x",
"and",
"y",
"values",
"of",
"the",
"pixel",
"corresponding",
"to",
"a",
"latitude",
"and",
"longitude",
"."
] | python | train |
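
The same Web-Mercator projection as a self-contained sketch; the 256 << level map size is an assumption about getMapDimensionsByZoomLevel, and the ±85.05112878° clip matches the usual Mercator latitude limits:

import math

def latlng_to_pixel(lat, lng, level):
    map_size = 256 << level                         # assumed tile-pyramid size
    lat = max(-85.05112878, min(85.05112878, lat))
    lng = max(-180.0, min(180.0, lng))
    x = (lng + 180) / 360
    sin_lat = math.sin(math.radians(lat))
    y = 0.5 - math.log((1 + sin_lat) / (1 - sin_lat)) / (4 * math.pi)
    clip = lambda v: int(max(0, min(map_size - 1, v * map_size + 0.5)))
    return clip(x), clip(y)

print(latlng_to_pixel(0.0, 0.0, 1))                 # centre of the map: (256, 256)
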
agermanidis/autosub | autosub/__init__.py | https://github.com/agermanidis/autosub/blob/d32389cb76e63ec6959111c3f989a72f36f726fe/autosub/__init__.py#L175-L191 | def extract_audio(filename, channels=1, rate=16000):
"""
Extract audio from an input file to a temporary WAV file.
"""
temp = tempfile.NamedTemporaryFile(suffix='.wav', delete=False)
if not os.path.isfile(filename):
print("The given file does not exist: {}".format(filename))
raise Exception("Invalid filepath: {}".format(filename))
if not which("ffmpeg"):
print("ffmpeg: Executable not found on machine.")
raise Exception("Dependency not found: ffmpeg")
command = ["ffmpeg", "-y", "-i", filename,
"-ac", str(channels), "-ar", str(rate),
"-loglevel", "error", temp.name]
use_shell = True if os.name == "nt" else False
subprocess.check_output(command, stdin=open(os.devnull), shell=use_shell)
return temp.name, rate | [
"def",
"extract_audio",
"(",
"filename",
",",
"channels",
"=",
"1",
",",
"rate",
"=",
"16000",
")",
":",
"temp",
"=",
"tempfile",
".",
"NamedTemporaryFile",
"(",
"suffix",
"=",
"'.wav'",
",",
"delete",
"=",
"False",
")",
"if",
"not",
"os",
".",
"path",
".",
"isfile",
"(",
"filename",
")",
":",
"print",
"(",
"\"The given file does not exist: {}\"",
".",
"format",
"(",
"filename",
")",
")",
"raise",
"Exception",
"(",
"\"Invalid filepath: {}\"",
".",
"format",
"(",
"filename",
")",
")",
"if",
"not",
"which",
"(",
"\"ffmpeg\"",
")",
":",
"print",
"(",
"\"ffmpeg: Executable not found on machine.\"",
")",
"raise",
"Exception",
"(",
"\"Dependency not found: ffmpeg\"",
")",
"command",
"=",
"[",
"\"ffmpeg\"",
",",
"\"-y\"",
",",
"\"-i\"",
",",
"filename",
",",
"\"-ac\"",
",",
"str",
"(",
"channels",
")",
",",
"\"-ar\"",
",",
"str",
"(",
"rate",
")",
",",
"\"-loglevel\"",
",",
"\"error\"",
",",
"temp",
".",
"name",
"]",
"use_shell",
"=",
"True",
"if",
"os",
".",
"name",
"==",
"\"nt\"",
"else",
"False",
"subprocess",
".",
"check_output",
"(",
"command",
",",
"stdin",
"=",
"open",
"(",
"os",
".",
"devnull",
")",
",",
"shell",
"=",
"use_shell",
")",
"return",
"temp",
".",
"name",
",",
"rate"
] | Extract audio from an input file to a temporary WAV file. | [
"Extract",
"audio",
"from",
"an",
"input",
"file",
"to",
"a",
"temporary",
"WAV",
"file",
"."
] | python | train |
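
Usage sketch (the input path is an assumption; ffmpeg must be on PATH). The subprocess call above is equivalent to roughly:

# ffmpeg -y -i lecture.mp4 -ac 1 -ar 16000 -loglevel error /tmp/tmpXXXX.wav
wav_path, rate = extract_audio("lecture.mp4")
# the caller is responsible for deleting wav_path (delete=False above)
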
senaite/senaite.core | bika/lims/browser/samplinground/printform.py | https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/browser/samplinground/printform.py#L100-L120 | def getFormTemplate(self):
"""Returns the current samplinground rendered with the template
specified in the request (param 'template').
Moves the iterator to the next samplinground available.
"""
templates_dir = self._TEMPLATES_DIR
embedt = self.request.get('template', self._DEFAULT_TEMPLATE)
if embedt.find(':') >= 0:
prefix, embedt = embedt.split(':')
templates_dir = queryResourceDirectory(self._TEMPLATES_ADDON_DIR, prefix).directory
embed = ViewPageTemplateFile(os.path.join(templates_dir, embedt))
reptemplate = ""
try:
reptemplate = embed(self)
except:
tbex = traceback.format_exc()
wsid = self._samplingrounds[self._current_sr_index].id
reptemplate = "<div class='error-print'>%s - %s '%s':<pre>%s</pre></div>" % (wsid, _("Unable to load the template"), embedt, tbex)
if self._current_sr_index < len(self._samplingrounds):
self._current_sr_index += 1
return reptemplate | [
"def",
"getFormTemplate",
"(",
"self",
")",
":",
"templates_dir",
"=",
"self",
".",
"_TEMPLATES_DIR",
"embedt",
"=",
"self",
".",
"request",
".",
"get",
"(",
"'template'",
",",
"self",
".",
"_DEFAULT_TEMPLATE",
")",
"if",
"embedt",
".",
"find",
"(",
"':'",
")",
">=",
"0",
":",
"prefix",
",",
"embedt",
"=",
"embedt",
".",
"split",
"(",
"':'",
")",
"templates_dir",
"=",
"queryResourceDirectory",
"(",
"self",
".",
"_TEMPLATES_ADDON_DIR",
",",
"prefix",
")",
".",
"directory",
"embed",
"=",
"ViewPageTemplateFile",
"(",
"os",
".",
"path",
".",
"join",
"(",
"templates_dir",
",",
"embedt",
")",
")",
"reptemplate",
"=",
"\"\"",
"try",
":",
"reptemplate",
"=",
"embed",
"(",
"self",
")",
"except",
":",
"tbex",
"=",
"traceback",
".",
"format_exc",
"(",
")",
"wsid",
"=",
"self",
".",
"_samplingrounds",
"[",
"self",
".",
"_current_sr_index",
"]",
".",
"id",
"reptemplate",
"=",
"\"<div class='error-print'>%s - %s '%s':<pre>%s</pre></div>\"",
"%",
"(",
"wsid",
",",
"_",
"(",
"\"Unable to load the template\"",
")",
",",
"embedt",
",",
"tbex",
")",
"if",
"self",
".",
"_current_sr_index",
"<",
"len",
"(",
"self",
".",
"_samplingrounds",
")",
":",
"self",
".",
"_current_sr_index",
"+=",
"1",
"return",
"reptemplate"
] | Returns the current samplinground rendered with the template
specified in the request (param 'template').
Moves the iterator to the next samplinground available. | [
"Returns",
"the",
"current",
"samplinground",
"rendered",
"with",
"the",
"template",
"specified",
"in",
"the",
"request",
"(",
"param",
"template",
")",
".",
"Moves",
"the",
"iterator",
"to",
"the",
"next",
"samplinground",
"available",
"."
] | python | train |
richardkiss/pycoin | pycoin/coins/Tx.py | https://github.com/richardkiss/pycoin/blob/1e8d0d9fe20ce0347b97847bb529cd1bd84c7442/pycoin/coins/Tx.py#L62-L71 | def as_bin(self, *args, **kwargs):
"""Returns a binary blob containing the streamed transaction.
For information about the parameters, see :func:`Tx.stream <stream>`
:return: binary blob that would parse to the given transaction
"""
f = io.BytesIO()
self.stream(f, *args, **kwargs)
return f.getvalue() | [
"def",
"as_bin",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"f",
"=",
"io",
".",
"BytesIO",
"(",
")",
"self",
".",
"stream",
"(",
"f",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"f",
".",
"getvalue",
"(",
")"
] | Returns a binary blob containing the streamed transaction.
For information about the parameters, see :func:`Tx.stream <stream>`
:return: binary blob that would parse to the given transaction | [
"Returns",
"a",
"binary",
"blob",
"containing",
"the",
"streamed",
"transaction",
"."
] | python | train |
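
The stream-to-bytes idiom above, as a generic runnable sketch:

import io

def to_bytes(stream_fn, *args, **kwargs):
    buf = io.BytesIO()          # capture anything written by a stream(f, ...) API
    stream_fn(buf, *args, **kwargs)
    return buf.getvalue()

print(to_bytes(lambda f: f.write(b"\x01\x00")))   # b'\x01\x00'
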
jbasko/configmanager | configmanager/persistence.py | https://github.com/jbasko/configmanager/blob/1d7229ce367143c7210d8e5f0782de03945a1721/configmanager/persistence.py#L35-L55 | def load(self, source, as_defaults=False):
"""
Load configuration values from the specified source.
Args:
source: a path, a list/tuple of paths, or an open file object
as_defaults (bool): if ``True``, contents of ``source`` will be treated as schema of configuration items.
"""
if isinstance(source, six.string_types):
source = os.path.expanduser(source)
with open(source, encoding='utf-8') as f:
self._rw.load_config_from_file(self._config, f, as_defaults=as_defaults)
elif isinstance(source, (list, tuple)):
for s in source:
with open(s, encoding='utf-8') as f:
self._rw.load_config_from_file(self._config, f, as_defaults=as_defaults)
else:
self._rw.load_config_from_file(self._config, source, as_defaults=as_defaults) | [
"def",
"load",
"(",
"self",
",",
"source",
",",
"as_defaults",
"=",
"False",
")",
":",
"if",
"isinstance",
"(",
"source",
",",
"six",
".",
"string_types",
")",
":",
"source",
"=",
"os",
".",
"path",
".",
"expanduser",
"(",
"source",
")",
"with",
"open",
"(",
"source",
",",
"encoding",
"=",
"'utf-8'",
")",
"as",
"f",
":",
"self",
".",
"_rw",
".",
"load_config_from_file",
"(",
"self",
".",
"_config",
",",
"f",
",",
"as_defaults",
"=",
"as_defaults",
")",
"elif",
"isinstance",
"(",
"source",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"for",
"s",
"in",
"source",
":",
"with",
"open",
"(",
"s",
",",
"encoding",
"=",
"'utf-8'",
")",
"as",
"f",
":",
"self",
".",
"_rw",
".",
"load_config_from_file",
"(",
"self",
".",
"_config",
",",
"f",
",",
"as_defaults",
"=",
"as_defaults",
")",
"else",
":",
"self",
".",
"_rw",
".",
"load_config_from_file",
"(",
"self",
".",
"_config",
",",
"source",
",",
"as_defaults",
"=",
"as_defaults",
")"
] | Load configuration values from the specified source.
Args:
source: a path, a list/tuple of paths, or an open file object
as_defaults (bool): if ``True``, contents of ``source`` will be treated as schema of configuration items. | [
"Load",
"configuration",
"values",
"from",
"the",
"specified",
"source",
"."
] | python | train |
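
The three accepted source shapes, shown as a hedged usage sketch ('adapter' and the file names are assumptions):

adapter.load('~/.myapp/config.json')            # single path; '~' is expanded
adapter.load(['defaults.json', 'local.json'])   # several files, loaded in order
with open('schema.json', encoding='utf-8') as f:
    adapter.load(f, as_defaults=True)           # an already-open file object
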
praekelt/panya | panya/models.py | https://github.com/praekelt/panya/blob/0fd621e15a7c11a2716a9554a2f820d6259818e5/panya/models.py#L207-L231 | def can_vote(self, request):
"""
Determines whether or not the current user can vote.
Returns a bool as well as a string indicating the current vote status,
with vote status being one of: 'closed', 'disabled', 'auth_required', 'can_vote', 'voted'
"""
modelbase_obj = self.modelbase_obj
# can't vote if liking is closed
if modelbase_obj.likes_closed:
return False, 'closed'
# can't vote if liking is disabled
if not modelbase_obj.likes_enabled:
return False, 'disabled'
# anonymous users can't vote if anonymous likes are disabled
if not request.user.is_authenticated() and not modelbase_obj.anonymous_likes:
return False, 'auth_required'
# return false if existing votes are found
if Vote.objects.filter(object_id=modelbase_obj.id, token=request.secretballot_token).count() == 0:
return True, 'can_vote'
else:
return False, 'voted' | [
"def",
"can_vote",
"(",
"self",
",",
"request",
")",
":",
"modelbase_obj",
"=",
"self",
".",
"modelbase_obj",
"# can't vote if liking is closed",
"if",
"modelbase_obj",
".",
"likes_closed",
":",
"return",
"False",
",",
"'closed'",
"# can't vote if liking is disabled",
"if",
"not",
"modelbase_obj",
".",
"likes_enabled",
":",
"return",
"False",
",",
"'disabled'",
"# anonymous users can't vote if anonymous likes are disabled",
"if",
"not",
"request",
".",
"user",
".",
"is_authenticated",
"(",
")",
"and",
"not",
"modelbase_obj",
".",
"anonymous_likes",
":",
"return",
"False",
",",
"'auth_required'",
"# return false if existing votes are found",
"if",
"Vote",
".",
"objects",
".",
"filter",
"(",
"object_id",
"=",
"modelbase_obj",
".",
"id",
",",
"token",
"=",
"request",
".",
"secretballot_token",
")",
".",
"count",
"(",
")",
"==",
"0",
":",
"return",
"True",
",",
"'can_vote'",
"else",
":",
"return",
"False",
",",
"'voted'"
] | Determines whether or not the current user can vote.
Returns a bool as well as a string indicating the current vote status,
with vote status being one of: 'closed', 'disabled', 'auth_required', 'can_vote', 'voted' | [
"Determines",
"whether",
"or",
"not",
"the",
"current",
"user",
"can",
"vote",
".",
"Returns",
"a",
"bool",
"as",
"well",
"as",
"a",
"string",
"indicating",
"the",
"current",
"vote",
"status",
"with",
"vote",
"status",
"being",
"one",
"of",
":",
"closed",
"disabled",
"auth_required",
"can_vote",
"voted"
] | python | train |
f3at/feat | src/feat/common/fiber.py | https://github.com/f3at/feat/blob/15da93fc9d6ec8154f52a9172824e25821195ef8/src/feat/common/fiber.py#L207-L216 | def woven(fun):
'''Decorator that will initialize and eventually start nested fibers.'''
def wrapper(*args, **kwargs):
section = WovenSection()
section.enter()
result = fun(*args, **kwargs)
return section.exit(result)
return wrapper | [
"def",
"woven",
"(",
"fun",
")",
":",
"def",
"wrapper",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"section",
"=",
"WovenSection",
"(",
")",
"section",
".",
"enter",
"(",
")",
"result",
"=",
"fun",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"section",
".",
"exit",
"(",
"result",
")",
"return",
"wrapper"
] | Decorator that will initialize and eventually start nested fibers. | [
"Decorator",
"that",
"will",
"initialize",
"and",
"eventually",
"start",
"nested",
"fibers",
"."
] | python | train |
DarkEnergySurvey/ugali | ugali/utils/fileio.py | https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/utils/fileio.py#L44-L67 | def write(filename,data,**kwargs):
""" Write a recarray to a specific format.
Accepted file formats: [.fits,.fz,.npy,.csv,.txt,.dat]
Parameters:
filename : output file name
data : the recarray data
kwargs : keyword arguments for the writer
Returns:
ret : writer return (usually None)
"""
base,ext = os.path.splitext(filename)
if ext in ('.fits','.fz'):
# Abstract fits here...
return fitsio.write(filename,data,**kwargs)
elif ext in ('.npy'):
return np.save(filename,data,**kwargs)
elif ext in ('.csv'):
return np.savetxt(filename,data,header=','.join(data.dtype.names),delimiter=',',**kwargs)
elif ext in ('.txt','.dat'):
return np.savetxt(filename,data,**kwargs)
msg = "Unrecognized file type: %s"%filename
raise ValueError(msg) | [
"def",
"write",
"(",
"filename",
",",
"data",
",",
"*",
"*",
"kwargs",
")",
":",
"base",
",",
"ext",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"filename",
")",
"if",
"ext",
"in",
"(",
"'.fits'",
",",
"'.fz'",
")",
":",
"# Abstract fits here...",
"return",
"fitsio",
".",
"write",
"(",
"filename",
",",
"data",
",",
"*",
"*",
"kwargs",
")",
"elif",
"ext",
"in",
"(",
"'.npy'",
")",
":",
"return",
"np",
".",
"save",
"(",
"filename",
",",
"data",
",",
"*",
"*",
"kwargs",
")",
"elif",
"ext",
"in",
"(",
"'.csv'",
")",
":",
"return",
"np",
".",
"savetxt",
"(",
"filename",
",",
"data",
",",
"header",
"=",
"','",
".",
"join",
"(",
"data",
".",
"dtype",
".",
"names",
")",
",",
"delimiter",
"=",
"','",
",",
"*",
"*",
"kwargs",
")",
"elif",
"ext",
"in",
"(",
"'.txt'",
",",
"'.dat'",
")",
":",
"return",
"np",
".",
"savetxt",
"(",
"filename",
",",
"data",
",",
"*",
"*",
"kwargs",
")",
"msg",
"=",
"\"Unrecognized file type: %s\"",
"%",
"filename",
"raise",
"ValueError",
"(",
"msg",
")"
] | Write a recarray to a specific format.
Accepted file formats: [.fits,.fz,.npy,.csv,.txt,.dat]
Parameters:
filename : output file name
data : the recarray data
kwargs : keyword arguments for the writer
Returns:
ret : writer return (usually None) | [
"Write",
"a",
"recarray",
"to",
"a",
"specific",
"format",
".",
"Accepted",
"file",
"formats",
":",
"[",
".",
"fits",
".",
"fz",
".",
"npy",
".",
"csv",
".",
"txt",
".",
"dat",
"]",
"Parameters",
":",
"filename",
":",
"output",
"file",
"name",
"data",
":",
"the",
"recarray",
"data",
"kwargs",
":",
"keyword",
"arguments",
"for",
"the",
"writer",
"Returns",
":",
"ret",
":",
"writer",
"return",
"(",
"usually",
"None",
")"
] | python | train |
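
Usage sketch of the extension dispatch above (the array contents are made up):

import numpy as np

data = np.array([(1, 2.5), (2, 3.5)], dtype=[('id', 'i4'), ('val', 'f8')])
# write('out.npy', data)  -> np.save
# write('out.csv', data)  -> np.savetxt with an 'id,val' header row
# write('out.txt', data)  -> plain np.savetxt
# write('out.xyz', data)  -> ValueError: Unrecognized file type: out.xyz
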
cocaine/cocaine-tools | cocaine/tools/dispatch.py | https://github.com/cocaine/cocaine-tools/blob/d8834f8e04ca42817d5f4e368d471484d4b3419f/cocaine/tools/dispatch.py#L1736-L1745 | def auth_add(name, service, **kwargs):
"""
Adds a member of an authorization group.
"""
ctx = Context(**kwargs)
ctx.execute_action('auth:group:members:add', **{
'storage': ctx.repo.create_secure_service('storage'),
'name': name,
'service': service,
}) | [
"def",
"auth_add",
"(",
"name",
",",
"service",
",",
"*",
"*",
"kwargs",
")",
":",
"ctx",
"=",
"Context",
"(",
"*",
"*",
"kwargs",
")",
"ctx",
".",
"execute_action",
"(",
"'auth:group:members:add'",
",",
"*",
"*",
"{",
"'storage'",
":",
"ctx",
".",
"repo",
".",
"create_secure_service",
"(",
"'storage'",
")",
",",
"'name'",
":",
"name",
",",
"'service'",
":",
"service",
",",
"}",
")"
] | Adds a member of an authorization group. | [
"Adds",
"a",
"member",
"of",
"an",
"authorization",
"group",
"."
] | python | train |
pavoni/pyvera | pyvera/__init__.py | https://github.com/pavoni/pyvera/blob/e05e3d13f76153444787d31948feb5419d77a8c8/pyvera/__init__.py#L1157-L1167 | def get_last_scene_id(self, refresh=False):
"""Get last scene id.
Refresh data from Vera if refresh is True, otherwise use local cache.
Refresh is only needed if you're not using subscriptions.
"""
if refresh:
self.refresh_complex_value('LastSceneID')
self.refresh_complex_value('sl_CentralScene')
val = self.get_complex_value('LastSceneID') or self.get_complex_value('sl_CentralScene')
return val | [
"def",
"get_last_scene_id",
"(",
"self",
",",
"refresh",
"=",
"False",
")",
":",
"if",
"refresh",
":",
"self",
".",
"refresh_complex_value",
"(",
"'LastSceneID'",
")",
"self",
".",
"refresh_complex_value",
"(",
"'sl_CentralScene'",
")",
"val",
"=",
"self",
".",
"get_complex_value",
"(",
"'LastSceneID'",
")",
"or",
"self",
".",
"get_complex_value",
"(",
"'sl_CentralScene'",
")",
"return",
"val"
] | Get last scene id.
Refresh data from Vera if refresh is True, otherwise use local cache.
Refresh is only needed if you're not using subscriptions. | [
"Get",
"last",
"scene",
"id",
"."
] | python | train |
soynatan/django-easy-audit | easyaudit/settings.py | https://github.com/soynatan/django-easy-audit/blob/03e05bc94beb29fc3e4ff86e313a6fef4b766b4b/easyaudit/settings.py#L15-L24 | def get_model_list(class_list):
"""
Receives a list of strings with app_name.model_name format
and turns them into classes. If an item is already a class
it ignores it.
"""
for idx, item in enumerate(class_list):
if isinstance(item, six.string_types):
model_class = apps.get_model(item)
class_list[idx] = model_class | [
"def",
"get_model_list",
"(",
"class_list",
")",
":",
"for",
"idx",
",",
"item",
"in",
"enumerate",
"(",
"class_list",
")",
":",
"if",
"isinstance",
"(",
"item",
",",
"six",
".",
"string_types",
")",
":",
"model_class",
"=",
"apps",
".",
"get_model",
"(",
"item",
")",
"class_list",
"[",
"idx",
"]",
"=",
"model_class"
] | Receives a list of strings with app_name.model_name format
and turns them into classes. If an item is already a class
it ignores it. | [
"Receives",
"a",
"list",
"of",
"strings",
"with",
"app_name",
".",
"model_name",
"format",
"and",
"turns",
"them",
"into",
"classes",
".",
"If",
"an",
"item",
"is",
"already",
"a",
"class",
"it",
"ignores",
"it",
"."
] | python | train |
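
Illustrative call (the app labels are assumptions; this runs inside a configured Django project) showing the in-place string-to-class conversion:

models = ['auth.User', 'easyaudit.CRUDEvent']
get_model_list(models)
# models is mutated in place; each 'app_label.ModelName' string is now a class,
# while any item that is already a class is left untouched
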
Diaoul/subliminal | subliminal/providers/addic7ed.py | https://github.com/Diaoul/subliminal/blob/a952dfb2032eb0fd6eb1eb89f04080923c11c4cf/subliminal/providers/addic7ed.py#L126-L145 | def _get_show_ids(self):
"""Get the ``dict`` of show ids per series by querying the `shows.php` page.
:return: show id per series, lower case and without quotes.
:rtype: dict
"""
# get the show page
logger.info('Getting show ids')
r = self.session.get(self.server_url + 'shows.php', timeout=10)
r.raise_for_status()
soup = ParserBeautifulSoup(r.content, ['lxml', 'html.parser'])
# populate the show ids
show_ids = {}
for show in soup.select('td.version > h3 > a[href^="/show/"]'):
show_ids[sanitize(show.text)] = int(show['href'][6:])
logger.debug('Found %d show ids', len(show_ids))
return show_ids | [
"def",
"_get_show_ids",
"(",
"self",
")",
":",
"# get the show page",
"logger",
".",
"info",
"(",
"'Getting show ids'",
")",
"r",
"=",
"self",
".",
"session",
".",
"get",
"(",
"self",
".",
"server_url",
"+",
"'shows.php'",
",",
"timeout",
"=",
"10",
")",
"r",
".",
"raise_for_status",
"(",
")",
"soup",
"=",
"ParserBeautifulSoup",
"(",
"r",
".",
"content",
",",
"[",
"'lxml'",
",",
"'html.parser'",
"]",
")",
"# populate the show ids",
"show_ids",
"=",
"{",
"}",
"for",
"show",
"in",
"soup",
".",
"select",
"(",
"'td.version > h3 > a[href^=\"/show/\"]'",
")",
":",
"show_ids",
"[",
"sanitize",
"(",
"show",
".",
"text",
")",
"]",
"=",
"int",
"(",
"show",
"[",
"'href'",
"]",
"[",
"6",
":",
"]",
")",
"logger",
".",
"debug",
"(",
"'Found %d show ids'",
",",
"len",
"(",
"show_ids",
")",
")",
"return",
"show_ids"
] | Get the ``dict`` of show ids per series by querying the `shows.php` page.
:return: show id per series, lower case and without quotes.
:rtype: dict | [
"Get",
"the",
"dict",
"of",
"show",
"ids",
"per",
"series",
"by",
"querying",
"the",
"shows",
".",
"php",
"page",
"."
] | python | train |
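
The scrape-and-index pattern above as a standalone sketch (placeholder URL; str.strip().lower() stands in for the project's sanitize helper):

import requests
from bs4 import BeautifulSoup

r = requests.get('https://example.org/shows.php', timeout=10)
r.raise_for_status()
soup = BeautifulSoup(r.content, 'html.parser')
show_ids = {a.text.strip().lower(): int(a['href'][6:])    # drop '/show/'
            for a in soup.select('td.version > h3 > a[href^="/show/"]')}
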
WhyNotHugo/python-barcode | barcode/base.py | https://github.com/WhyNotHugo/python-barcode/blob/0b237016f32b4d0f3425dab10d52e291070c0558/barcode/base.py#L76-L92 | def write(self, fp, options=None, text=None):
"""Renders the barcode and writes it to the file like object
`fp`.
:parameters:
fp : File like object
Object to write the raw data in.
options : Dict
The same as in `self.render`.
text : str (unicode on Python 2)
Text to render under the barcode.
"""
output = self.render(options, text)
if hasattr(output, 'tostring'):
output.save(fp, format=self.writer.format)
else:
fp.write(output) | [
"def",
"write",
"(",
"self",
",",
"fp",
",",
"options",
"=",
"None",
",",
"text",
"=",
"None",
")",
":",
"output",
"=",
"self",
".",
"render",
"(",
"options",
",",
"text",
")",
"if",
"hasattr",
"(",
"output",
",",
"'tostring'",
")",
":",
"output",
".",
"save",
"(",
"fp",
",",
"format",
"=",
"self",
".",
"writer",
".",
"format",
")",
"else",
":",
"fp",
".",
"write",
"(",
"output",
")"
] | Renders the barcode and writes it to the file like object
`fp`.
:parameters:
fp : File like object
Object to write the raw data in.
options : Dict
The same as in `self.render`.
text : str (unicode on Python 2)
Text to render under the barcode. | [
"Renders",
"the",
"barcode",
"and",
"writes",
"it",
"to",
"the",
"file",
"like",
"object",
"fp",
"."
] | python | train |
alefnula/tea | tea/shell/__init__.py | https://github.com/alefnula/tea/blob/f5a0a724a425ec4f9dd2c7fe966ef06faf3a15a3/tea/shell/__init__.py#L190-L211 | def __copyfile2(source, destination):
"""Copy data and all stat info ("cp -p source destination").
The destination may be a directory.
Args:
source (str): Source file (file to copy).
destination (str): Destination file or directory (where to copy).
Returns:
bool: True if the operation is successful, False otherwise.
"""
logger.info("copyfile2: %s -> %s" % (source, destination))
try:
__create_destdir(destination)
shutil.copy2(source, destination)
return True
except Exception as e:
logger.error(
"copyfile2: %s -> %s failed! Error: %s", source, destination, e
)
return False | [
"def",
"__copyfile2",
"(",
"source",
",",
"destination",
")",
":",
"logger",
".",
"info",
"(",
"\"copyfile2: %s -> %s\"",
"%",
"(",
"source",
",",
"destination",
")",
")",
"try",
":",
"__create_destdir",
"(",
"destination",
")",
"shutil",
".",
"copy2",
"(",
"source",
",",
"destination",
")",
"return",
"True",
"except",
"Exception",
"as",
"e",
":",
"logger",
".",
"error",
"(",
"\"copyfile2: %s -> %s failed! Error: %s\"",
",",
"source",
",",
"destination",
",",
"e",
")",
"return",
"False"
] | Copy data and all stat info ("cp -p source destination").
The destination may be a directory.
Args:
source (str): Source file (file to copy).
destination (str): Destination file or directory (where to copy).
Returns:
bool: True if the operation is successful, False otherwise. | [
"Copy",
"data",
"and",
"all",
"stat",
"info",
"(",
"cp",
"-",
"p",
"source",
"destination",
")",
"."
] | python | train |
cloud9ers/gurumate | environment/lib/python2.7/site-packages/nose/util.py | https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/nose/util.py#L163-L168 | def isclass(obj):
"""Is obj a class? Inspect's isclass is too liberal and returns True
for objects that can't be subclasses of anything.
"""
obj_type = type(obj)
return obj_type in class_types or issubclass(obj_type, type) | [
"def",
"isclass",
"(",
"obj",
")",
":",
"obj_type",
"=",
"type",
"(",
"obj",
")",
"return",
"obj_type",
"in",
"class_types",
"or",
"issubclass",
"(",
"obj_type",
",",
"type",
")"
] | Is obj a class? Inspect's isclass is too liberal and returns True
for objects that can't be subclasses of anything. | [
"Is",
"obj",
"a",
"class?",
"Inspect",
"s",
"isclass",
"is",
"too",
"liberal",
"and",
"returns",
"True",
"for",
"objects",
"that",
"can",
"t",
"be",
"subclasses",
"of",
"anything",
"."
] | python | test |
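
A runnable illustration of the stricter check (class_types is reduced to (type,) here, since Python 3 has no old-style classes):

class_types = (type,)   # stand-in for nose.util's class_types on Python 3

def isclass(obj):
    obj_type = type(obj)
    return obj_type in class_types or issubclass(obj_type, type)

class A: pass
print(isclass(A), isclass(A()), isclass(int), isclass(3))  # True False True False
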
saltstack/salt | salt/modules/linux_lvm.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/linux_lvm.py#L226-L278 | def pvcreate(devices, override=True, **kwargs):
'''
Set a physical device to be used as an LVM physical volume
override
Skip devices, if they are already LVM physical volumes
CLI Examples:
.. code-block:: bash
salt mymachine lvm.pvcreate /dev/sdb1,/dev/sdb2
salt mymachine lvm.pvcreate /dev/sdb1 dataalignmentoffset=7s
'''
if not devices:
return 'Error: at least one device is required'
if isinstance(devices, six.string_types):
devices = devices.split(',')
cmd = ['pvcreate', '-y']
for device in devices:
if not os.path.exists(device):
raise CommandExecutionError('{0} does not exist'.format(device))
if not pvdisplay(device, quiet=True):
cmd.append(device)
elif not override:
raise CommandExecutionError('Device "{0}" is already an LVM physical volume.'.format(device))
if not cmd[2:]:
# All specified devices are already LVM volumes
return True
valid = ('metadatasize', 'dataalignment', 'dataalignmentoffset',
'pvmetadatacopies', 'metadatacopies', 'metadataignore',
'restorefile', 'norestorefile', 'labelsector',
'setphysicalvolumesize')
no_parameter = ('force', 'norestorefile')
for var in kwargs:
if kwargs[var] and var in valid:
cmd.extend(['--{0}'.format(var), kwargs[var]])
elif kwargs[var] and var in no_parameter:
cmd.append('--{0}'.format(var))
out = __salt__['cmd.run_all'](cmd, python_shell=False)
if out.get('retcode'):
raise CommandExecutionError(out.get('stderr'))
# Verify pvcreate was successful
for device in devices:
if not pvdisplay(device):
raise CommandExecutionError('Device "{0}" was not affected.'.format(device))
return True | [
"def",
"pvcreate",
"(",
"devices",
",",
"override",
"=",
"True",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"not",
"devices",
":",
"return",
"'Error: at least one device is required'",
"if",
"isinstance",
"(",
"devices",
",",
"six",
".",
"string_types",
")",
":",
"devices",
"=",
"devices",
".",
"split",
"(",
"','",
")",
"cmd",
"=",
"[",
"'pvcreate'",
",",
"'-y'",
"]",
"for",
"device",
"in",
"devices",
":",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"device",
")",
":",
"raise",
"CommandExecutionError",
"(",
"'{0} does not exist'",
".",
"format",
"(",
"device",
")",
")",
"if",
"not",
"pvdisplay",
"(",
"device",
",",
"quiet",
"=",
"True",
")",
":",
"cmd",
".",
"append",
"(",
"device",
")",
"elif",
"not",
"override",
":",
"raise",
"CommandExecutionError",
"(",
"'Device \"{0}\" is already an LVM physical volume.'",
".",
"format",
"(",
"device",
")",
")",
"if",
"not",
"cmd",
"[",
"2",
":",
"]",
":",
"# All specified devices are already LVM volumes",
"return",
"True",
"valid",
"=",
"(",
"'metadatasize'",
",",
"'dataalignment'",
",",
"'dataalignmentoffset'",
",",
"'pvmetadatacopies'",
",",
"'metadatacopies'",
",",
"'metadataignore'",
",",
"'restorefile'",
",",
"'norestorefile'",
",",
"'labelsector'",
",",
"'setphysicalvolumesize'",
")",
"no_parameter",
"=",
"(",
"'force'",
",",
"'norestorefile'",
")",
"for",
"var",
"in",
"kwargs",
":",
"if",
"kwargs",
"[",
"var",
"]",
"and",
"var",
"in",
"valid",
":",
"cmd",
".",
"extend",
"(",
"[",
"'--{0}'",
".",
"format",
"(",
"var",
")",
",",
"kwargs",
"[",
"var",
"]",
"]",
")",
"elif",
"kwargs",
"[",
"var",
"]",
"and",
"var",
"in",
"no_parameter",
":",
"cmd",
".",
"append",
"(",
"'--{0}'",
".",
"format",
"(",
"var",
")",
")",
"out",
"=",
"__salt__",
"[",
"'cmd.run_all'",
"]",
"(",
"cmd",
",",
"python_shell",
"=",
"False",
")",
"if",
"out",
".",
"get",
"(",
"'retcode'",
")",
":",
"raise",
"CommandExecutionError",
"(",
"out",
".",
"get",
"(",
"'stderr'",
")",
")",
"# Verify pvcreate was successful",
"for",
"device",
"in",
"devices",
":",
"if",
"not",
"pvdisplay",
"(",
"device",
")",
":",
"raise",
"CommandExecutionError",
"(",
"'Device \"{0}\" was not affected.'",
".",
"format",
"(",
"device",
")",
")",
"return",
"True"
] | Set a physical device to be used as an LVM physical volume
override
Skip devices, if they are already LVM physical volumes
CLI Examples:
.. code-block:: bash
salt mymachine lvm.pvcreate /dev/sdb1,/dev/sdb2
salt mymachine lvm.pvcreate /dev/sdb1 dataalignmentoffset=7s | [
"Set",
"a",
"physical",
"device",
"to",
"be",
"used",
"as",
"an",
"LVM",
"physical",
"volume"
] | python | train |
rwl/godot | godot/node.py | https://github.com/rwl/godot/blob/013687c9e8983d2aa2ceebb8a76c5c4f1e37c90f/godot/node.py#L569-L576 | def _component_default(self):
""" Trait initialiser.
"""
component = Container(fit_window=False, auto_size=True,
bgcolor="green")#, position=list(self.pos) )
component.tools.append( MoveTool(component) )
# component.tools.append( TraitsTool(component) )
return component | [
"def",
"_component_default",
"(",
"self",
")",
":",
"component",
"=",
"Container",
"(",
"fit_window",
"=",
"False",
",",
"auto_size",
"=",
"True",
",",
"bgcolor",
"=",
"\"green\"",
")",
"#, position=list(self.pos) )",
"component",
".",
"tools",
".",
"append",
"(",
"MoveTool",
"(",
"component",
")",
")",
"# component.tools.append( TraitsTool(component) )",
"return",
"component"
] | Trait initialiser. | [
"Trait",
"initialiser",
"."
] | python | test |
fracpete/python-weka-wrapper3 | python/weka/flow/control.py | https://github.com/fracpete/python-weka-wrapper3/blob/d850ab1bdb25fbd5a8d86e99f34a397975425838/python/weka/flow/control.py#L540-L631 | def do_execute(self):
"""
Actual execution of the director.
:return: None if successful, otherwise error message
:rtype: str
"""
self._stopped = False
self._stopping = False
not_finished_actor = self.owner.first_active
pending_actors = []
finished = False
actor_result = None
while not (self.is_stopping() or self.is_stopped()) and not finished:
# determining starting point of next iteration
if len(pending_actors) > 0:
start_index = self.owner.index_of(pending_actors[-1].name)
else:
start_index = self.owner.index_of(not_finished_actor.name)
not_finished_actor = None
# iterate over actors
token = None
last_active = -1
if self.owner.active > 0:
last_active = self.owner.last_active.index
for i in range(start_index, last_active + 1):
# do we have to stop the execution?
if self.is_stopped() or self.is_stopping():
break
curr = self.owner.actors[i]
if curr.skip:
continue
# no token? get pending one or produce new one
if token is None:
if isinstance(curr, OutputProducer) and curr.has_output():
pending_actors.pop()
else:
actor_result = curr.execute()
if actor_result is not None:
self.owner.logger.error(
curr.full_name + " generated following error output:\n" + actor_result)
break
if isinstance(curr, OutputProducer) and curr.has_output():
token = curr.output()
else:
token = None
# still more to come?
if isinstance(curr, OutputProducer) and curr.has_output():
pending_actors.append(curr)
else:
# process token
curr.input = token
actor_result = curr.execute()
if actor_result is not None:
self.owner.logger.error(
curr.full_name + " generated following error output:\n" + actor_result)
break
# was a new token produced?
if isinstance(curr, OutputProducer):
if curr.has_output():
token = curr.output()
else:
token = None
# still more to come?
if curr.has_output():
pending_actors.append(curr)
else:
token = None
# token from last actor generated? -> store
if (i == self.owner.last_active.index) and (token is not None):
if self._record_output:
self._recorded_output.append(token)
# no token produced, ignore rest of actors
if isinstance(curr, OutputProducer) and (token is None):
break
# all actors finished?
finished = (not_finished_actor is None) and (len(pending_actors) == 0)
return actor_result | [
"def",
"do_execute",
"(",
"self",
")",
":",
"self",
".",
"_stopped",
"=",
"False",
"self",
".",
"_stopping",
"=",
"False",
"not_finished_actor",
"=",
"self",
".",
"owner",
".",
"first_active",
"pending_actors",
"=",
"[",
"]",
"finished",
"=",
"False",
"actor_result",
"=",
"None",
"while",
"not",
"(",
"self",
".",
"is_stopping",
"(",
")",
"or",
"self",
".",
"is_stopped",
"(",
")",
")",
"and",
"not",
"finished",
":",
"# determing starting point of next iteration",
"if",
"len",
"(",
"pending_actors",
")",
">",
"0",
":",
"start_index",
"=",
"self",
".",
"owner",
".",
"index_of",
"(",
"pending_actors",
"[",
"-",
"1",
"]",
".",
"name",
")",
"else",
":",
"start_index",
"=",
"self",
".",
"owner",
".",
"index_of",
"(",
"not_finished_actor",
".",
"name",
")",
"not_finished_actor",
"=",
"None",
"# iterate over actors",
"token",
"=",
"None",
"last_active",
"=",
"-",
"1",
"if",
"self",
".",
"owner",
".",
"active",
">",
"0",
":",
"last_active",
"=",
"self",
".",
"owner",
".",
"last_active",
".",
"index",
"for",
"i",
"in",
"range",
"(",
"start_index",
",",
"last_active",
"+",
"1",
")",
":",
"# do we have to stop the execution?",
"if",
"self",
".",
"is_stopped",
"(",
")",
"or",
"self",
".",
"is_stopping",
"(",
")",
":",
"break",
"curr",
"=",
"self",
".",
"owner",
".",
"actors",
"[",
"i",
"]",
"if",
"curr",
".",
"skip",
":",
"continue",
"# no token? get pending one or produce new one",
"if",
"token",
"is",
"None",
":",
"if",
"isinstance",
"(",
"curr",
",",
"OutputProducer",
")",
"and",
"curr",
".",
"has_output",
"(",
")",
":",
"pending_actors",
".",
"pop",
"(",
")",
"else",
":",
"actor_result",
"=",
"curr",
".",
"execute",
"(",
")",
"if",
"actor_result",
"is",
"not",
"None",
":",
"self",
".",
"owner",
".",
"logger",
".",
"error",
"(",
"curr",
".",
"full_name",
"+",
"\" generated following error output:\\n\"",
"+",
"actor_result",
")",
"break",
"if",
"isinstance",
"(",
"curr",
",",
"OutputProducer",
")",
"and",
"curr",
".",
"has_output",
"(",
")",
":",
"token",
"=",
"curr",
".",
"output",
"(",
")",
"else",
":",
"token",
"=",
"None",
"# still more to come?",
"if",
"isinstance",
"(",
"curr",
",",
"OutputProducer",
")",
"and",
"curr",
".",
"has_output",
"(",
")",
":",
"pending_actors",
".",
"append",
"(",
"curr",
")",
"else",
":",
"# process token",
"curr",
".",
"input",
"=",
"token",
"actor_result",
"=",
"curr",
".",
"execute",
"(",
")",
"if",
"actor_result",
"is",
"not",
"None",
":",
"self",
".",
"owner",
".",
"logger",
".",
"error",
"(",
"curr",
".",
"full_name",
"+",
"\" generated following error output:\\n\"",
"+",
"actor_result",
")",
"break",
"# was a new token produced?",
"if",
"isinstance",
"(",
"curr",
",",
"OutputProducer",
")",
":",
"if",
"curr",
".",
"has_output",
"(",
")",
":",
"token",
"=",
"curr",
".",
"output",
"(",
")",
"else",
":",
"token",
"=",
"None",
"# still more to come?",
"if",
"curr",
".",
"has_output",
"(",
")",
":",
"pending_actors",
".",
"append",
"(",
"curr",
")",
"else",
":",
"token",
"=",
"None",
"# token from last actor generated? -> store",
"if",
"(",
"i",
"==",
"self",
".",
"owner",
".",
"last_active",
".",
"index",
")",
"and",
"(",
"token",
"is",
"not",
"None",
")",
":",
"if",
"self",
".",
"_record_output",
":",
"self",
".",
"_recorded_output",
".",
"append",
"(",
"token",
")",
"# no token produced, ignore rest of actors",
"if",
"isinstance",
"(",
"curr",
",",
"OutputProducer",
")",
"and",
"(",
"token",
"is",
"None",
")",
":",
"break",
"# all actors finished?",
"finished",
"=",
"(",
"not_finished_actor",
"is",
"None",
")",
"and",
"(",
"len",
"(",
"pending_actors",
")",
"==",
"0",
")",
"return",
"actor_result"
] | Actual execution of the director.
:return: None if successful, otherwise error message
:rtype: str | [
"Actual",
"execution",
"of",
"the",
"director",
"."
] | python | train |
ClimateImpactLab/DataFS | datafs/core/data_archive.py | https://github.com/ClimateImpactLab/DataFS/blob/0d32c2b4e18d300a11b748a552f6adbc3dd8f59d/datafs/core/data_archive.py#L762-L778 | def get_dependencies(self, version=None):
'''
Parameters
----------
version: str
string representing version number whose dependencies you are
looking up
'''
version = _process_version(self, version)
history = self.get_history()
for v in reversed(history):
if BumpableVersion(v['version']) == version:
return v['dependencies']
raise ValueError('Version {} not found'.format(version)) | [
"def",
"get_dependencies",
"(",
"self",
",",
"version",
"=",
"None",
")",
":",
"version",
"=",
"_process_version",
"(",
"self",
",",
"version",
")",
"history",
"=",
"self",
".",
"get_history",
"(",
")",
"for",
"v",
"in",
"reversed",
"(",
"history",
")",
":",
"if",
"BumpableVersion",
"(",
"v",
"[",
"'version'",
"]",
")",
"==",
"version",
":",
"return",
"v",
"[",
"'dependencies'",
"]",
"raise",
"ValueError",
"(",
"'Version {} not found'",
".",
"format",
"(",
"version",
")",
")"
] | Parameters
----------
version: str
string representing version number whose dependencies you are
looking up | [
"Parameters",
"----------",
"version",
":",
"str",
"string",
"representing",
"version",
"number",
"whose",
"dependencies",
"you",
"are",
"looking",
"up"
] | python | train |
janpipek/physt | physt/plotting/__init__.py | https://github.com/janpipek/physt/blob/6dd441b073514e7728235f50b2352d56aacf38d4/physt/plotting/__init__.py#L139-L146 | def set_default_backend(name: str):
"""Choose a default backend."""
global _default_backend
if name == "bokeh":
raise RuntimeError("Support for bokeh has been discontinued. At some point, we may return to support holoviews.")
if not name in backends:
raise RuntimeError("Backend {0} is not supported and cannot be set as default.".format(name))
_default_backend = name | [
"def",
"set_default_backend",
"(",
"name",
":",
"str",
")",
":",
"global",
"_default_backend",
"if",
"name",
"==",
"\"bokeh\"",
":",
"raise",
"RuntimeError",
"(",
"\"Support for bokeh has been discontinued. At some point, we may return to support holoviews.\"",
")",
"if",
"not",
"name",
"in",
"backends",
":",
"raise",
"RuntimeError",
"(",
"\"Backend {0} is not supported and cannot be set as default.\"",
".",
"format",
"(",
"name",
")",
")",
"_default_backend",
"=",
"name"
] | Choose a default backend. | [
"Choose",
"a",
"default",
"backend",
"."
] | python | train |
faxir/faxir-python | faxir/api/archives_api.py | https://github.com/faxir/faxir-python/blob/75ed2ea487a6be537342baea1077a02b0c8e70c1/faxir/api/archives_api.py#L234-L258 | def list_faxes(self, user_id, **kwargs): # noqa: E501
"""Get fax records # noqa: E501
With this API call you will be able to retrieve a collection of faxes (either sent or received or spam based on the category selected). If you want to filter your archive please provide the `category` parameter # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.list_faxes(user_id, async=True)
>>> result = thread.get()
:param async bool
:param str user_id: 'self' or user id of a corporate member (required)
:param str category: Category parameter can be one of these values: **inbox**, **sent**, **spam**
:param str after: Start date to get records from that date
:param str before: End date to get records before that date
:param int limit: Limit of fax records you want to get per request
:return: ResponseArchive
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.list_faxes_with_http_info(user_id, **kwargs) # noqa: E501
else:
(data) = self.list_faxes_with_http_info(user_id, **kwargs) # noqa: E501
return data | [
"def",
"list_faxes",
"(",
"self",
",",
"user_id",
",",
"*",
"*",
"kwargs",
")",
":",
"# noqa: E501",
"kwargs",
"[",
"'_return_http_data_only'",
"]",
"=",
"True",
"if",
"kwargs",
".",
"get",
"(",
"'async'",
")",
":",
"return",
"self",
".",
"list_faxes_with_http_info",
"(",
"user_id",
",",
"*",
"*",
"kwargs",
")",
"# noqa: E501",
"else",
":",
"(",
"data",
")",
"=",
"self",
".",
"list_faxes_with_http_info",
"(",
"user_id",
",",
"*",
"*",
"kwargs",
")",
"# noqa: E501",
"return",
"data"
] | Get fax records # noqa: E501
With this API call you will be able to retrieve a collection of faxes (either sent or received or spam based on the category selected). If you want to filter your archive please provide the `category` parameter # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.list_faxes(user_id, async=True)
>>> result = thread.get()
:param async bool
:param str user_id: 'self' or user id of a corporate member (required)
:param str category: Category parameter can be one of these values: **inbox**, **sent**, **spam**
:param str after: Start date to get records from that date
:param str before: End date to get records before that date
:param int limit: Limit of fax records you want to get per request
:return: ResponseArchive
If the method is called asynchronously,
returns the request thread. | [
"Get",
"fax",
"records",
"#",
"noqa",
":",
"E501"
] | python | train |
bpsmith/tia | tia/bbg/bbg_com.py | https://github.com/bpsmith/tia/blob/a7043b6383e557aeea8fc7112bbffd6e36a230e9/tia/bbg/bbg_com.py#L96-L104 | def get_child_value(parent, name, allow_missing=0):
""" return the value of the child element with name in the parent Element """
if not parent.HasElement(name):
if allow_missing:
return np.nan
else:
raise Exception('failed to find child element %s in parent' % name)
else:
return XmlHelper.as_value(parent.GetElement(name)) | [
"def",
"get_child_value",
"(",
"parent",
",",
"name",
",",
"allow_missing",
"=",
"0",
")",
":",
"if",
"not",
"parent",
".",
"HasElement",
"(",
"name",
")",
":",
"if",
"allow_missing",
":",
"return",
"np",
".",
"nan",
"else",
":",
"raise",
"Exception",
"(",
"'failed to find child element %s in parent'",
"%",
"name",
")",
"else",
":",
"return",
"XmlHelper",
".",
"as_value",
"(",
"parent",
".",
"GetElement",
"(",
"name",
")",
")"
] | return the value of the child element with name in the parent Element | [
"return",
"the",
"value",
"of",
"the",
"child",
"element",
"with",
"name",
"in",
"the",
"parent",
"Element"
] | python | train |
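A behaviour sketch for get_child_value using a stand-in element; the real objects come from the Bloomberg COM API and XmlHelper.as_value unwraps them, so the stand-in below is purely illustrative:

import numpy as np

class FakeElement:
    def __init__(self, children):
        self._children = children

    def HasElement(self, name):
        return name in self._children

    def GetElement(self, name):
        return self._children[name]

parent = FakeElement({"PX_LAST": 101.5})
# get_child_value(parent, "PX_LAST")              -> the unwrapped PX_LAST value
# get_child_value(parent, "BID", allow_missing=1) -> np.nan
# get_child_value(parent, "BID")                  -> raises Exception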
neithere/django-navigation | navigation/templatetags/navigation_tags.py | https://github.com/neithere/django-navigation/blob/aff8d671a8431c84dde65cba6236ea8c16a08b4d/navigation/templatetags/navigation_tags.py#L190-L202 | def get_navigation(request):
""" Returns the rendered navigation block. Requires that the
`navigation.html` template exists. Two context variables are passed to it:
* sections (see :func:`get_breadcrumb_sections`)
* trail (see :func:`get_breadcrumb_trail`)
"""
sections = _get_sections(request)
trail = _get_trail(request, exclude_section=True)
return mark_safe(render_to_string('navigation.html',
dict(sections=sections,trail=trail))) | [
"def",
"get_navigation",
"(",
"request",
")",
":",
"sections",
"=",
"_get_sections",
"(",
"request",
")",
"trail",
"=",
"_get_trail",
"(",
"request",
",",
"exclude_section",
"=",
"True",
")",
"return",
"mark_safe",
"(",
"render_to_string",
"(",
"'navigation.html'",
",",
"dict",
"(",
"sections",
"=",
"sections",
",",
"trail",
"=",
"trail",
")",
")",
")"
] | Returns the rendered navigation block. Requires that the
`navigation.html` template exists. Two context variables are passed to it:
* sections (see :func:`get_breadcrumb_sections`)
* trail (see :func:`get_breadcrumb_trail`) | [
"Returns",
"the",
"rendered",
"navigation",
"block",
".",
"Requires",
"that",
"the",
"navigation",
".",
"html",
"template",
"exists",
".",
"Two",
"context",
"variables",
"are",
"passed",
"to",
"it",
":"
] | python | train |
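A usage sketch for get_navigation inside a Django view; this assumes a navigation.html template exists and that the request carries whatever _get_sections and _get_trail need:

from django.http import HttpResponse

def page(request):
    nav_html = get_navigation(request)  # already marked safe for templates
    return HttpResponse("<header>%s</header>" % nav_html)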
googleapis/google-cloud-python | bigquery/google/cloud/bigquery/magics.py | https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/bigquery/google/cloud/bigquery/magics.py#L243-L278 | def _run_query(client, query, job_config=None):
"""Runs a query while printing status updates
Args:
client (google.cloud.bigquery.client.Client):
Client to bundle configuration needed for API requests.
query (str):
SQL query to be executed. Defaults to the standard SQL dialect.
Use the ``job_config`` parameter to change dialects.
job_config (google.cloud.bigquery.job.QueryJobConfig, optional):
Extra configuration options for the job.
Returns:
google.cloud.bigquery.job.QueryJob: the query job created
Example:
>>> client = bigquery.Client()
>>> _run_query(client, "SELECT 17")
Executing query with job ID: bf633912-af2c-4780-b568-5d868058632b
Query executing: 1.66s
Query complete after 2.07s
'bf633912-af2c-4780-b568-5d868058632b'
"""
start_time = time.time()
query_job = client.query(query, job_config=job_config)
print("Executing query with job ID: {}".format(query_job.job_id))
while True:
print("\rQuery executing: {:0.2f}s".format(time.time() - start_time), end="")
try:
query_job.result(timeout=0.5)
break
except futures.TimeoutError:
continue
print("\nQuery complete after {:0.2f}s".format(time.time() - start_time))
return query_job | [
"def",
"_run_query",
"(",
"client",
",",
"query",
",",
"job_config",
"=",
"None",
")",
":",
"start_time",
"=",
"time",
".",
"time",
"(",
")",
"query_job",
"=",
"client",
".",
"query",
"(",
"query",
",",
"job_config",
"=",
"job_config",
")",
"print",
"(",
"\"Executing query with job ID: {}\"",
".",
"format",
"(",
"query_job",
".",
"job_id",
")",
")",
"while",
"True",
":",
"print",
"(",
"\"\\rQuery executing: {:0.2f}s\"",
".",
"format",
"(",
"time",
".",
"time",
"(",
")",
"-",
"start_time",
")",
",",
"end",
"=",
"\"\"",
")",
"try",
":",
"query_job",
".",
"result",
"(",
"timeout",
"=",
"0.5",
")",
"break",
"except",
"futures",
".",
"TimeoutError",
":",
"continue",
"print",
"(",
"\"\\nQuery complete after {:0.2f}s\"",
".",
"format",
"(",
"time",
".",
"time",
"(",
")",
"-",
"start_time",
")",
")",
"return",
"query_job"
] | Runs a query while printing status updates
Args:
client (google.cloud.bigquery.client.Client):
Client to bundle configuration needed for API requests.
query (str):
SQL query to be executed. Defaults to the standard SQL dialect.
Use the ``job_config`` parameter to change dialects.
job_config (google.cloud.bigquery.job.QueryJobConfig, optional):
Extra configuration options for the job.
Returns:
google.cloud.bigquery.job.QueryJob: the query job created
Example:
>>> client = bigquery.Client()
>>> _run_query(client, "SELECT 17")
Executing query with job ID: bf633912-af2c-4780-b568-5d868058632b
Query executing: 1.66s
Query complete after 2.07s
'bf633912-af2c-4780-b568-5d868058632b' | [
"Runs",
"a",
"query",
"while",
"printing",
"status",
"updates"
] | python | train |
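A sketch of _run_query with an explicit job configuration; QueryJobConfig and its maximum_bytes_billed field are standard BigQuery client API, and the rest follows the docstring example:

from google.cloud import bigquery

client = bigquery.Client()
job_config = bigquery.QueryJobConfig()
job_config.maximum_bytes_billed = 10**9  # fail fast on expensive queries
job = _run_query(client, "SELECT 17", job_config=job_config)
rows = list(job.result())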
ska-sa/purr | Purr/Plugins/local_pychart/basecanvas.py | https://github.com/ska-sa/purr/blob/4c848768d0485d0f88b30850d0d5372221b21b66/Purr/Plugins/local_pychart/basecanvas.py#L105-L113 | def close(self):
"""This method closes the canvas and writes
contents to the associated file.
Calling this procedure is optional, because
Pychart calls this procedure for every open canvas on normal exit."""
for i in range(0, len(active_canvases)):
if active_canvases[i] == self:
del active_canvases[i]
return | [
"def",
"close",
"(",
"self",
")",
":",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"active_canvases",
")",
")",
":",
"if",
"active_canvases",
"[",
"i",
"]",
"==",
"self",
":",
"del",
"active_canvases",
"[",
"i",
"]",
"return"
] | This method closes the canvas and writes
contents to the associated file.
Calling this procedure is optional, because
Pychart calls this procedure for every open canvas on normal exit. | [
"This",
"method",
"closes",
"the",
"canvas",
"and",
"writes",
"contents",
"to",
"the",
"associated",
"file",
".",
"Calling",
"this",
"procedure",
"is",
"optional",
"because",
"Pychart",
"calls",
"this",
"procedure",
"for",
"every",
"open",
"canvas",
"on",
"normal",
"exit",
"."
] | python | train |
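A hedged usage sketch: close() only deregisters the canvas from the module-level active_canvases list, and Pychart also calls it for every open canvas at exit, so calling it manually is optional:

# can = canvas.init("chart.pdf")  # backend constructor registers the canvas
# ...draw on `can`...
# can.close()                     # optional; exit handling covers open canvases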
bcbio/bcbio-nextgen | bcbio/srna/sample.py | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/srna/sample.py#L29-L104 | def trim_srna_sample(data):
"""
Remove 3' adapter for smallRNA-seq
Uses cutadapt but with different parameters than for other pipelines.
"""
data = umi_transform(data)
in_file = data["files"][0]
names = data["rgnames"]['sample']
work_dir = os.path.join(dd.get_work_dir(data), "trimmed")
out_dir = os.path.join(work_dir, names)
log_out = os.path.join(out_dir, "%s.log" % names)
utils.safe_makedir(out_dir)
out_file = replace_directory(append_stem(in_file, ".clean"), out_dir)
trim_reads = data["config"]["algorithm"].get("trim_reads", True)
if utils.file_exists(out_file):
data["files"][0] = out_file
data["clean_fastq"] = out_file
data["collapse"] = _collapse(data["clean_fastq"])
data["size_stats"] = _summary(data['collapse'])
data["log_trimming"] = log_out
return [[data]]
adapter = dd.get_adapters(data)
is_4n = any([a == "4N" for a in adapter])
adapter = [a for a in adapter if re.compile("^([NATGC]+)$").match(a)]
if adapter and not trim_reads:
trim_reads = True
logger.info("Adapter is set up in config file, but trim_reads is not true."
"If you want to skip trimming, skip adapter option from config.")
if trim_reads and not adapter and error_dnapi:
raise ValueError(error_dnapi)
if trim_reads:
adapters = adapter if adapter else _dnapi_prediction(in_file, out_dir)
times = "" if not trim_reads or len(adapters) == 1 else "--times %s" % len(adapters)
if trim_reads and adapters:
adapter_cmd = " ".join(map(lambda x: "-a " + x, adapters))
if any([a for a in adapters if re.compile("^N+$").match(a)]):
adapter_cmd = "-N %s" % adapter_cmd
out_noadapter_file = replace_directory(append_stem(in_file, ".fragments"), out_dir)
out_short_file = replace_directory(append_stem(in_file, ".short"), out_dir)
# atropos = _get_atropos()
atropos = config_utils.get_program("atropos", data, default="atropos")
options = " ".join(data.get('resources', {}).get('atropos', {}).get("options", ""))
if options.strip() == "-u 4 -u -4":
options = ""
is_4n = "4N"
cores = ("--threads %s" % dd.get_num_cores(data) if dd.get_num_cores(data) > 1 else "")
if " ".join(data.get('resources', {}).get('cutadapt', {}).get("options", "")):
raise ValueError("Atropos is now used, but cutadapt options found in YAML file."
"See https://atropos.readthedocs.io/en/latest/")
cmd = _cmd_atropos()
if not utils.file_exists(out_file):
with file_transaction(out_file) as tx_out_file:
do.run(cmd.format(**locals()), "remove adapter for %s" % names)
if utils.file_exists(log_out):
content = open(log_out).read().replace(out_short_file, names)
open(log_out, 'w').write(content)
if is_4n:
options = "-u 4 -u -4"
in_file = append_stem(tx_out_file, ".tmp")
utils.move_safe(tx_out_file, in_file)
cmd = "{atropos} {cores} {options} -se {in_file} -o {tx_out_file} -m 17"
do.run(cmd.format(**locals()), "atropos with this parameters %s for %s" %(options, names))
data["log_trimming"] = log_out
else:
if not trim_reads:
logger.debug("Skip trimming for: %s" % names)
elif not adapters:
logger.info("No adapter founds in %s, this is an issue related"
" to no small RNA enrichment in your sample." % names)
symlink_plus(in_file, out_file)
data["files"][0] = out_file
data["clean_fastq"] = out_file
data["collapse"] = _collapse(data["clean_fastq"])
data["size_stats"] = _summary(data['collapse'])
return [[data]] | [
"def",
"trim_srna_sample",
"(",
"data",
")",
":",
"data",
"=",
"umi_transform",
"(",
"data",
")",
"in_file",
"=",
"data",
"[",
"\"files\"",
"]",
"[",
"0",
"]",
"names",
"=",
"data",
"[",
"\"rgnames\"",
"]",
"[",
"'sample'",
"]",
"work_dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"dd",
".",
"get_work_dir",
"(",
"data",
")",
",",
"\"trimmed\"",
")",
"out_dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"work_dir",
",",
"names",
")",
"log_out",
"=",
"os",
".",
"path",
".",
"join",
"(",
"out_dir",
",",
"\"%s.log\"",
"%",
"names",
")",
"utils",
".",
"safe_makedir",
"(",
"out_dir",
")",
"out_file",
"=",
"replace_directory",
"(",
"append_stem",
"(",
"in_file",
",",
"\".clean\"",
")",
",",
"out_dir",
")",
"trim_reads",
"=",
"data",
"[",
"\"config\"",
"]",
"[",
"\"algorithm\"",
"]",
".",
"get",
"(",
"\"trim_reads\"",
",",
"True",
")",
"if",
"utils",
".",
"file_exists",
"(",
"out_file",
")",
":",
"data",
"[",
"\"files\"",
"]",
"[",
"0",
"]",
"=",
"out_file",
"data",
"[",
"\"clean_fastq\"",
"]",
"=",
"out_file",
"data",
"[",
"\"collapse\"",
"]",
"=",
"_collapse",
"(",
"data",
"[",
"\"clean_fastq\"",
"]",
")",
"data",
"[",
"\"size_stats\"",
"]",
"=",
"_summary",
"(",
"data",
"[",
"'collapse'",
"]",
")",
"data",
"[",
"\"log_trimming\"",
"]",
"=",
"log_out",
"return",
"[",
"[",
"data",
"]",
"]",
"adapter",
"=",
"dd",
".",
"get_adapters",
"(",
"data",
")",
"is_4n",
"=",
"any",
"(",
"[",
"a",
"==",
"\"4N\"",
"for",
"a",
"in",
"adapter",
"]",
")",
"adapter",
"=",
"[",
"a",
"for",
"a",
"in",
"adapter",
"if",
"re",
".",
"compile",
"(",
"\"^([NATGC]+)$\"",
")",
".",
"match",
"(",
"a",
")",
"]",
"if",
"adapter",
"and",
"not",
"trim_reads",
":",
"trim_reads",
"=",
"True",
"logger",
".",
"info",
"(",
"\"Adapter is set up in config file, but trim_reads is not true.\"",
"\"If you want to skip trimming, skip adapter option from config.\"",
")",
"if",
"trim_reads",
"and",
"not",
"adapter",
"and",
"error_dnapi",
":",
"raise",
"ValueError",
"(",
"error_dnapi",
")",
"if",
"trim_reads",
":",
"adapters",
"=",
"adapter",
"if",
"adapter",
"else",
"_dnapi_prediction",
"(",
"in_file",
",",
"out_dir",
")",
"times",
"=",
"\"\"",
"if",
"not",
"trim_reads",
"or",
"len",
"(",
"adapters",
")",
"==",
"1",
"else",
"\"--times %s\"",
"%",
"len",
"(",
"adapters",
")",
"if",
"trim_reads",
"and",
"adapters",
":",
"adapter_cmd",
"=",
"\" \"",
".",
"join",
"(",
"map",
"(",
"lambda",
"x",
":",
"\"-a \"",
"+",
"x",
",",
"adapters",
")",
")",
"if",
"any",
"(",
"[",
"a",
"for",
"a",
"in",
"adapters",
"if",
"re",
".",
"compile",
"(",
"\"^N+$\"",
")",
".",
"match",
"(",
"a",
")",
"]",
")",
":",
"adapter_cmd",
"=",
"\"-N %s\"",
"%",
"adapter_cmd",
"out_noadapter_file",
"=",
"replace_directory",
"(",
"append_stem",
"(",
"in_file",
",",
"\".fragments\"",
")",
",",
"out_dir",
")",
"out_short_file",
"=",
"replace_directory",
"(",
"append_stem",
"(",
"in_file",
",",
"\".short\"",
")",
",",
"out_dir",
")",
"# atropos = _get_atropos()",
"atropos",
"=",
"config_utils",
".",
"get_program",
"(",
"\"atropos\"",
",",
"data",
",",
"default",
"=",
"\"atropos\"",
")",
"options",
"=",
"\" \"",
".",
"join",
"(",
"data",
".",
"get",
"(",
"'resources'",
",",
"{",
"}",
")",
".",
"get",
"(",
"'atropos'",
",",
"{",
"}",
")",
".",
"get",
"(",
"\"options\"",
",",
"\"\"",
")",
")",
"if",
"options",
".",
"strip",
"(",
")",
"==",
"\"-u 4 -u -4\"",
":",
"options",
"=",
"\"\"",
"is_4n",
"=",
"\"4N\"",
"cores",
"=",
"(",
"\"--threads %s\"",
"%",
"dd",
".",
"get_num_cores",
"(",
"data",
")",
"if",
"dd",
".",
"get_num_cores",
"(",
"data",
")",
">",
"1",
"else",
"\"\"",
")",
"if",
"\" \"",
".",
"join",
"(",
"data",
".",
"get",
"(",
"'resources'",
",",
"{",
"}",
")",
".",
"get",
"(",
"'cutadapt'",
",",
"{",
"}",
")",
".",
"get",
"(",
"\"options\"",
",",
"\"\"",
")",
")",
":",
"raise",
"ValueError",
"(",
"\"Atropos is now used, but cutadapt options found in YAML file.\"",
"\"See https://atropos.readthedocs.io/en/latest/\"",
")",
"cmd",
"=",
"_cmd_atropos",
"(",
")",
"if",
"not",
"utils",
".",
"file_exists",
"(",
"out_file",
")",
":",
"with",
"file_transaction",
"(",
"out_file",
")",
"as",
"tx_out_file",
":",
"do",
".",
"run",
"(",
"cmd",
".",
"format",
"(",
"*",
"*",
"locals",
"(",
")",
")",
",",
"\"remove adapter for %s\"",
"%",
"names",
")",
"if",
"utils",
".",
"file_exists",
"(",
"log_out",
")",
":",
"content",
"=",
"open",
"(",
"log_out",
")",
".",
"read",
"(",
")",
".",
"replace",
"(",
"out_short_file",
",",
"names",
")",
"open",
"(",
"log_out",
",",
"'w'",
")",
".",
"write",
"(",
"content",
")",
"if",
"is_4n",
":",
"options",
"=",
"\"-u 4 -u -4\"",
"in_file",
"=",
"append_stem",
"(",
"tx_out_file",
",",
"\".tmp\"",
")",
"utils",
".",
"move_safe",
"(",
"tx_out_file",
",",
"in_file",
")",
"cmd",
"=",
"\"{atropos} {cores} {options} -se {in_file} -o {tx_out_file} -m 17\"",
"do",
".",
"run",
"(",
"cmd",
".",
"format",
"(",
"*",
"*",
"locals",
"(",
")",
")",
",",
"\"atropos with this parameters %s for %s\"",
"%",
"(",
"options",
",",
"names",
")",
")",
"data",
"[",
"\"log_trimming\"",
"]",
"=",
"log_out",
"else",
":",
"if",
"not",
"trim_reads",
":",
"logger",
".",
"debug",
"(",
"\"Skip trimming for: %s\"",
"%",
"names",
")",
"elif",
"not",
"adapters",
":",
"logger",
".",
"info",
"(",
"\"No adapter founds in %s, this is an issue related\"",
"\" to no small RNA enrichment in your sample.\"",
"%",
"names",
")",
"symlink_plus",
"(",
"in_file",
",",
"out_file",
")",
"data",
"[",
"\"files\"",
"]",
"[",
"0",
"]",
"=",
"out_file",
"data",
"[",
"\"clean_fastq\"",
"]",
"=",
"out_file",
"data",
"[",
"\"collapse\"",
"]",
"=",
"_collapse",
"(",
"data",
"[",
"\"clean_fastq\"",
"]",
")",
"data",
"[",
"\"size_stats\"",
"]",
"=",
"_summary",
"(",
"data",
"[",
"'collapse'",
"]",
")",
"return",
"[",
"[",
"data",
"]",
"]"
] | Remove 3' adapter for smallRNA-seq
Uses cutadapt but with different parameters than for other pipelines. | [
"Remove",
"3",
"adapter",
"for",
"smallRNA",
"-",
"seq",
"Uses",
"cutadapt",
"but",
"with",
"different",
"parameters",
"than",
"for",
"other",
"pipelines",
"."
] | python | train |
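A hedged sketch of the `data` dict this step consumes; the keys are inferred from the function body, and real pipelines assemble the dict through bcbio's own machinery rather than by hand:

data = {
    "files": ["/path/to/sample.fastq.gz"],
    "rgnames": {"sample": "sample1"},
    "config": {"algorithm": {"trim_reads": True}},
    "resources": {"atropos": {"options": []}},
}
# trim_srna_sample(data) returns [[data]] with "clean_fastq", "collapse",
# "size_stats" and "log_trimming" filled in; adapters come from
# dd.get_adapters(data), or from DNApi prediction when none are configured.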
f3at/feat | src/feat/common/error.py | https://github.com/f3at/feat/blob/15da93fc9d6ec8154f52a9172824e25821195ef8/src/feat/common/error.py#L49-L62 | def print_errors(function):
"""Prints the exceptions raised by the decorated function
without interfering. For debugging purpose."""
def wrapper(*args, **kwargs):
try:
return function(*args, **kwargs)
except BaseException as e:
print ("Exception raise calling %s: %s"
% (reflect.canonical_name(function),
get_exception_message(e)))
raise
return wrapper | [
"def",
"print_errors",
"(",
"function",
")",
":",
"def",
"wrapper",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"try",
":",
"return",
"function",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"except",
"BaseException",
"as",
"e",
":",
"print",
"(",
"\"Exception raise calling %s: %s\"",
"%",
"(",
"reflect",
".",
"canonical_name",
"(",
"function",
")",
",",
"get_exception_message",
"(",
"e",
")",
")",
")",
"raise",
"return",
"wrapper"
] | Prints the exceptions raised by the decorated function
without interfering. For debugging purpose. | [
"Prints",
"the",
"exceptions",
"raised",
"by",
"the",
"decorated",
"function",
"without",
"interfering",
".",
"For",
"debugging",
"purpose",
"."
] | python | train |
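A usage sketch for print_errors; the wrapper prints the failure and then re-raises, so normal exception handling still applies:

@print_errors
def divide(a, b):
    return a / b

try:
    divide(1, 0)
except ZeroDivisionError:
    pass  # the message was already printed with the function's canonical name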
Becksteinlab/GromacsWrapper | gromacs/cbook.py | https://github.com/Becksteinlab/GromacsWrapper/blob/d4f9a8cb6f48292732cf7c7e4ef4a6d2ccbc51b9/gromacs/cbook.py#L1154-L1164 | def parse_groups(output):
"""Parse ``make_ndx`` output and return groups as a list of dicts."""
groups = []
for line in output.split('\n'):
m = NDXGROUP.match(line)
if m:
d = m.groupdict()
groups.append({'name': d['GROUPNAME'],
'nr': int(d['GROUPNUMBER']),
'natoms': int(d['NATOMS'])})
return groups | [
"def",
"parse_groups",
"(",
"output",
")",
":",
"groups",
"=",
"[",
"]",
"for",
"line",
"in",
"output",
".",
"split",
"(",
"'\\n'",
")",
":",
"m",
"=",
"NDXGROUP",
".",
"match",
"(",
"line",
")",
"if",
"m",
":",
"d",
"=",
"m",
".",
"groupdict",
"(",
")",
"groups",
".",
"append",
"(",
"{",
"'name'",
":",
"d",
"[",
"'GROUPNAME'",
"]",
",",
"'nr'",
":",
"int",
"(",
"d",
"[",
"'GROUPNUMBER'",
"]",
")",
",",
"'natoms'",
":",
"int",
"(",
"d",
"[",
"'NATOMS'",
"]",
")",
"}",
")",
"return",
"groups"
] | Parse ``make_ndx`` output and return groups as a list of dicts. | [
"Parse",
"make_ndx",
"output",
"and",
"return",
"groups",
"as",
"a",
"list",
"of",
"dicts",
"."
] | python | valid |
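A sketch with fabricated make_ndx-style output; this assumes the NDXGROUP regex captures GROUPNUMBER, GROUPNAME and NATOMS roughly as below, which is not shown in this record:

output = "  0 System : 3600 atoms\n  1 Protein : 1960 atoms\n"
# parse_groups(output) -> [{'name': 'System', 'nr': 0, 'natoms': 3600},
#                          {'name': 'Protein', 'nr': 1, 'natoms': 1960}]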
saltstack/salt | salt/modules/supervisord.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/supervisord.py#L366-L385 | def _read_config(conf_file=None):
'''
Reads the config file using configparser
'''
if conf_file is None:
paths = ('/etc/supervisor/supervisord.conf', '/etc/supervisord.conf')
for path in paths:
if os.path.exists(path):
conf_file = path
break
if conf_file is None:
raise CommandExecutionError('No suitable config file found')
config = configparser.ConfigParser()
try:
config.read(conf_file)
except (IOError, OSError) as exc:
raise CommandExecutionError(
'Unable to read from {0}: {1}'.format(conf_file, exc)
)
return config | [
"def",
"_read_config",
"(",
"conf_file",
"=",
"None",
")",
":",
"if",
"conf_file",
"is",
"None",
":",
"paths",
"=",
"(",
"'/etc/supervisor/supervisord.conf'",
",",
"'/etc/supervisord.conf'",
")",
"for",
"path",
"in",
"paths",
":",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"path",
")",
":",
"conf_file",
"=",
"path",
"break",
"if",
"conf_file",
"is",
"None",
":",
"raise",
"CommandExecutionError",
"(",
"'No suitable config file found'",
")",
"config",
"=",
"configparser",
".",
"ConfigParser",
"(",
")",
"try",
":",
"config",
".",
"read",
"(",
"conf_file",
")",
"except",
"(",
"IOError",
",",
"OSError",
")",
"as",
"exc",
":",
"raise",
"CommandExecutionError",
"(",
"'Unable to read from {0}: {1}'",
".",
"format",
"(",
"conf_file",
",",
"exc",
")",
")",
"return",
"config"
] | Reads the config file using configparser | [
"Reads",
"the",
"config",
"file",
"using",
"configparser"
] | python | train |
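A usage sketch: read a supervisord config and list the configured programs; with no argument the helper falls back to the two default paths:

config = _read_config()
programs = [s.split(":", 1)[1] for s in config.sections()
            if s.startswith("program:")]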
koszullab/metaTOR | metator/scripts/hicstuff.py | https://github.com/koszullab/metaTOR/blob/0c1203d1dffedfa5ea380c0335b4baa9cfb7e89a/metator/scripts/hicstuff.py#L361-L386 | def trim_sparse(M, n_std=3, s_min=None, s_max=None):
"""Apply the trimming procedure to a sparse matrix.
"""
try:
from scipy.sparse import coo_matrix
except ImportError as e:
print(str(e))
print("I am peforming dense normalization by default.")
return trim_dense(M.todense())
r = M.tocoo()
sparsity = np.array(r.sum(axis=1)).flatten()
mean = np.mean(sparsity)
std = np.std(sparsity)
if s_min is None:
s_min = mean - n_std * std
if s_max is None:
s_max = mean + n_std * std
f = (sparsity > s_min) * (sparsity < s_max)
indices = [u for u in range(len(r.data)) if f[r.row[u]] and f[r.col[u]]]
rows = np.array([r.row[i] for i in indices])
cols = np.array([r.col[j] for j in indices])
data = np.array([r.data[k] for k in indices])
N = coo_matrix((data, (rows, cols)))
return N | [
"def",
"trim_sparse",
"(",
"M",
",",
"n_std",
"=",
"3",
",",
"s_min",
"=",
"None",
",",
"s_max",
"=",
"None",
")",
":",
"try",
":",
"from",
"scipy",
".",
"sparse",
"import",
"coo_matrix",
"except",
"ImportError",
"as",
"e",
":",
"print",
"(",
"str",
"(",
"e",
")",
")",
"print",
"(",
"\"I am peforming dense normalization by default.\"",
")",
"return",
"trim_dense",
"(",
"M",
".",
"todense",
"(",
")",
")",
"r",
"=",
"M",
".",
"tocoo",
"(",
")",
"sparsity",
"=",
"np",
".",
"array",
"(",
"r",
".",
"sum",
"(",
"axis",
"=",
"1",
")",
")",
".",
"flatten",
"(",
")",
"mean",
"=",
"np",
".",
"mean",
"(",
"sparsity",
")",
"std",
"=",
"np",
".",
"std",
"(",
"sparsity",
")",
"if",
"s_min",
"is",
"None",
":",
"s_min",
"=",
"mean",
"-",
"n_std",
"*",
"std",
"if",
"s_max",
"is",
"None",
":",
"s_max",
"=",
"mean",
"+",
"n_std",
"*",
"std",
"f",
"=",
"(",
"sparsity",
">",
"s_min",
")",
"*",
"(",
"sparsity",
"<",
"s_max",
")",
"indices",
"=",
"[",
"u",
"for",
"u",
"in",
"range",
"(",
"len",
"(",
"r",
".",
"data",
")",
")",
"if",
"f",
"[",
"r",
".",
"row",
"[",
"u",
"]",
"]",
"and",
"f",
"[",
"r",
".",
"col",
"[",
"u",
"]",
"]",
"]",
"rows",
"=",
"np",
".",
"array",
"(",
"[",
"r",
".",
"row",
"[",
"i",
"]",
"for",
"i",
"in",
"indices",
"]",
")",
"cols",
"=",
"np",
".",
"array",
"(",
"[",
"r",
".",
"col",
"[",
"j",
"]",
"for",
"j",
"in",
"indices",
"]",
")",
"data",
"=",
"np",
".",
"array",
"(",
"[",
"r",
".",
"data",
"[",
"k",
"]",
"for",
"k",
"in",
"indices",
"]",
")",
"N",
"=",
"coo_matrix",
"(",
"(",
"data",
",",
"(",
"rows",
",",
"cols",
")",
")",
")",
"return",
"N"
] | Apply the trimming procedure to a sparse matrix. | [
"Apply",
"the",
"trimming",
"procedure",
"to",
"a",
"sparse",
"matrix",
"."
] | python | train |
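A minimal sketch of trim_sparse on a toy contact map; rows and columns whose coverage falls outside mean +/- n_std * std are dropped (note the strict inequalities), and only entries whose row and column both pass survive:

import numpy as np
from scipy.sparse import coo_matrix

M = coo_matrix(np.array([[5, 1, 0],
                         [1, 4, 2],
                         [0, 2, 3]]))
N = trim_sparse(M, n_std=1)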
tgbugs/pyontutils | ilxutils/ilxutils/simple_rdflib.py | https://github.com/tgbugs/pyontutils/blob/3d913db29c177db39151592909a4f56170ef8b35/ilxutils/ilxutils/simple_rdflib.py#L129-L163 | def add_annotation(
self,
subj: URIRef,
pred: URIRef,
obj: Union[Literal, URIRef],
a_p: URIRef ,
a_o: Union[Literal, URIRef],
) -> BNode:
""" Adds annotation to rdflib graph.
The annotation axiom will be filled in if this is a new annotation for the triple.
Args:
subj: Entity subject to be annotated
pred: Entities Predicate Anchor to be annotated
obj: Entities Object Anchor to be annotated
a_p: Annotation predicate
a_o: Annotation object
Returns:
A BNode which is an address to the location in the RDF graph that is storing the
annotation information.
"""
bnode: BNode = self.triple2annotation_bnode.get( (subj, pred, obj) )
if not bnode:
a_s: BNode = BNode()
self.triple2annotation_bnode[(subj, pred, obj)]: BNode = a_s
self.g.add((a_s, RDF.type, OWL.Axiom))
self.g.add((a_s, OWL.annotatedSource, self.process_subj_or_pred(subj)))
self.g.add((a_s, OWL.annotatedProperty,self.process_subj_or_pred(pred)))
self.g.add((a_s, OWL.annotatedTarget, self.process_obj(obj)))
else:
a_s: BNode = bnode
self.g.add((a_s, self.process_subj_or_pred(a_p), self.process_obj(a_o)))
return bnode | [
"def",
"add_annotation",
"(",
"self",
",",
"subj",
":",
"URIRef",
",",
"pred",
":",
"URIRef",
",",
"obj",
":",
"Union",
"[",
"Literal",
",",
"URIRef",
"]",
",",
"a_p",
":",
"URIRef",
",",
"a_o",
":",
"Union",
"[",
"Literal",
",",
"URIRef",
"]",
",",
")",
"->",
"BNode",
":",
"bnode",
":",
"BNode",
"=",
"self",
".",
"triple2annotation_bnode",
".",
"get",
"(",
"(",
"subj",
",",
"pred",
",",
"obj",
")",
")",
"if",
"not",
"bnode",
":",
"a_s",
":",
"BNode",
"=",
"BNode",
"(",
")",
"self",
".",
"triple2annotation_bnode",
"[",
"(",
"subj",
",",
"pred",
",",
"obj",
")",
"]",
":",
"BNode",
"=",
"a_s",
"self",
".",
"g",
".",
"add",
"(",
"(",
"a_s",
",",
"RDF",
".",
"type",
",",
"OWL",
".",
"Axiom",
")",
")",
"self",
".",
"g",
".",
"add",
"(",
"(",
"a_s",
",",
"OWL",
".",
"annotatedSource",
",",
"self",
".",
"process_subj_or_pred",
"(",
"subj",
")",
")",
")",
"self",
".",
"g",
".",
"add",
"(",
"(",
"a_s",
",",
"OWL",
".",
"annotatedProperty",
",",
"self",
".",
"process_subj_or_pred",
"(",
"pred",
")",
")",
")",
"self",
".",
"g",
".",
"add",
"(",
"(",
"a_s",
",",
"OWL",
".",
"annotatedTarget",
",",
"self",
".",
"process_obj",
"(",
"obj",
")",
")",
")",
"else",
":",
"a_s",
":",
"BNode",
"=",
"bnode",
"self",
".",
"g",
".",
"add",
"(",
"(",
"a_s",
",",
"self",
".",
"process_subj_or_pred",
"(",
"a_p",
")",
",",
"self",
".",
"process_obj",
"(",
"a_o",
")",
")",
")",
"return",
"bnode"
] | Adds annotation to rdflib graph.
The annotation axiom will be filled in if this is a new annotation for the triple.
Args:
subj: Entity subject to be annotated
pred: Entities Predicate Anchor to be annotated
obj: Entities Object Anchor to be annotated
a_p: Annotation predicate
a_o: Annotation object
Returns:
A BNode which is an address to the location in the RDF graph that is storing the
annotation information. | [
"Adds",
"annotation",
"to",
"rdflib",
"graph",
"."
] | python | train |
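A usage sketch for add_annotation, assuming a graph-wrapper instance (`graph` below) exposing this method; the rdflib names are real, everything else is illustrative:

from rdflib import URIRef, Literal

s = URIRef("http://example.org/term/A")
p = URIRef("http://www.w3.org/2000/01/rdf-schema#subClassOf")
o = URIRef("http://example.org/term/B")
comment = URIRef("http://www.w3.org/2000/01/rdf-schema#comment")

graph.add_annotation(s, p, o, comment, Literal("curated"))   # creates the axiom
graph.add_annotation(s, p, o, comment, Literal("reviewed"))  # reuses its BNode
# Caveat visible in the body above: the method returns the cached lookup, so
# the first call returns None rather than the freshly created BNode.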
wandb/client | wandb/tensorboard/__init__.py | https://github.com/wandb/client/blob/7d08954ed5674fee223cd85ed0d8518fe47266b2/wandb/tensorboard/__init__.py#L20-L74 | def patch(save=True, tensorboardX=tensorboardX_loaded):
"""Monkeypatches tensorboard or tensorboardX so that all events are logged to tfevents files and wandb.
We save the tfevents files and graphs to wandb by default.
Arguments:
save, default: True - Passing False will skip sending events.
tensorboardX, default: True if module can be imported - You can override this when calling patch
"""
global Summary, Event
if tensorboardX:
tensorboard_module = "tensorboardX.writer"
if tensorflow_loaded:
wandb.termlog(
"Found TensorboardX and tensorflow, pass tensorboardX=False to patch regular tensorboard.")
from tensorboardX.proto.summary_pb2 import Summary
from tensorboardX.proto.event_pb2 import Event
else:
tensorboard_module = "tensorflow.python.summary.writer.writer"
from tensorflow.summary import Summary, Event
writers = set()
def _add_event(self, event, step, walltime=None):
event.wall_time = time.time() if walltime is None else walltime
if step is not None:
event.step = int(step)
try:
# TensorboardX uses _file_name
if hasattr(self.event_writer._ev_writer, "_file_name"):
name = self.event_writer._ev_writer._file_name
else:
name = self.event_writer._ev_writer.FileName().decode("utf-8")
writers.add(name)
# This is a little hacky, there is a case where the log_dir changes.
# Because the events files will have the same names in sub directories
# we simply overwrite the previous symlink in wandb.save if the log_dir
# changes.
log_dir = os.path.dirname(os.path.commonprefix(list(writers)))
filename = os.path.basename(name)
# Tensorboard loads all tfevents files in a directory and prepends
# their values with the path. Passing namespace to log allows us
# to nest the values in wandb
namespace = name.replace(filename, "").replace(
log_dir, "").strip(os.sep)
if save:
wandb.save(name, base_path=log_dir)
wandb.save(os.path.join(log_dir, "*.pbtxt"),
base_path=log_dir)
log(event, namespace=namespace, step=event.step)
except Exception as e:
wandb.termerror("Unable to log event %s" % e)
# six.reraise(type(e), e, sys.exc_info()[2])
self.event_writer.add_event(event)
writer = wandb.util.get_module(tensorboard_module)
writer.SummaryToEventTransformer._add_event = _add_event | [
"def",
"patch",
"(",
"save",
"=",
"True",
",",
"tensorboardX",
"=",
"tensorboardX_loaded",
")",
":",
"global",
"Summary",
",",
"Event",
"if",
"tensorboardX",
":",
"tensorboard_module",
"=",
"\"tensorboardX.writer\"",
"if",
"tensorflow_loaded",
":",
"wandb",
".",
"termlog",
"(",
"\"Found TensorboardX and tensorflow, pass tensorboardX=False to patch regular tensorboard.\"",
")",
"from",
"tensorboardX",
".",
"proto",
".",
"summary_pb2",
"import",
"Summary",
"from",
"tensorboardX",
".",
"proto",
".",
"event_pb2",
"import",
"Event",
"else",
":",
"tensorboard_module",
"=",
"\"tensorflow.python.summary.writer.writer\"",
"from",
"tensorflow",
".",
"summary",
"import",
"Summary",
",",
"Event",
"writers",
"=",
"set",
"(",
")",
"def",
"_add_event",
"(",
"self",
",",
"event",
",",
"step",
",",
"walltime",
"=",
"None",
")",
":",
"event",
".",
"wall_time",
"=",
"time",
".",
"time",
"(",
")",
"if",
"walltime",
"is",
"None",
"else",
"walltime",
"if",
"step",
"is",
"not",
"None",
":",
"event",
".",
"step",
"=",
"int",
"(",
"step",
")",
"try",
":",
"# TensorboardX uses _file_name",
"if",
"hasattr",
"(",
"self",
".",
"event_writer",
".",
"_ev_writer",
",",
"\"_file_name\"",
")",
":",
"name",
"=",
"self",
".",
"event_writer",
".",
"_ev_writer",
".",
"_file_name",
"else",
":",
"name",
"=",
"self",
".",
"event_writer",
".",
"_ev_writer",
".",
"FileName",
"(",
")",
".",
"decode",
"(",
"\"utf-8\"",
")",
"writers",
".",
"add",
"(",
"name",
")",
"# This is a little hacky, there is a case where the log_dir changes.",
"# Because the events files will have the same names in sub directories",
"# we simply overwrite the previous symlink in wandb.save if the log_dir",
"# changes.",
"log_dir",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"os",
".",
"path",
".",
"commonprefix",
"(",
"list",
"(",
"writers",
")",
")",
")",
"filename",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"name",
")",
"# Tensorboard loads all tfevents files in a directory and prepends",
"# their values with the path. Passing namespace to log allows us",
"# to nest the values in wandb",
"namespace",
"=",
"name",
".",
"replace",
"(",
"filename",
",",
"\"\"",
")",
".",
"replace",
"(",
"log_dir",
",",
"\"\"",
")",
".",
"strip",
"(",
"os",
".",
"sep",
")",
"if",
"save",
":",
"wandb",
".",
"save",
"(",
"name",
",",
"base_path",
"=",
"log_dir",
")",
"wandb",
".",
"save",
"(",
"os",
".",
"path",
".",
"join",
"(",
"log_dir",
",",
"\"*.pbtxt\"",
")",
",",
"base_path",
"=",
"log_dir",
")",
"log",
"(",
"event",
",",
"namespace",
"=",
"namespace",
",",
"step",
"=",
"event",
".",
"step",
")",
"except",
"Exception",
"as",
"e",
":",
"wandb",
".",
"termerror",
"(",
"\"Unable to log event %s\"",
"%",
"e",
")",
"# six.reraise(type(e), e, sys.exc_info()[2])",
"self",
".",
"event_writer",
".",
"add_event",
"(",
"event",
")",
"writer",
"=",
"wandb",
".",
"util",
".",
"get_module",
"(",
"tensorboard_module",
")",
"writer",
".",
"SummaryToEventTransformer",
".",
"_add_event",
"=",
"_add_event"
] | Monkeypatches tensorboard or tensorboardX so that all events are logged to tfevents files and wandb.
We save the tfevents files and graphs to wandb by default.
Arguments:
save, default: True - Passing False will skip sending events.
tensorboardX, default: True if module can be imported - You can override this when calling patch | [
"Monkeypatches",
"tensorboard",
"or",
"tensorboardX",
"so",
"that",
"all",
"events",
"are",
"logged",
"to",
"tfevents",
"files",
"and",
"wandb",
".",
"We",
"save",
"the",
"tfevents",
"files",
"and",
"graphs",
"to",
"wandb",
"by",
"default",
"."
] | python | train |
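A usage sketch: call patch() before constructing any summary writer so the monkeypatched _add_event is in place (the project name is illustrative):

import wandb

wandb.init(project="demo")
patch(save=True, tensorboardX=False)
# From here on, events written through tf.summary writers are mirrored to
# wandb, and the tfevents files are saved alongside the run.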
cfobel/clutter-webcam-viewer | clutter_webcam_viewer/warp_control.py | https://github.com/cfobel/clutter-webcam-viewer/blob/b227d2ae02d750194e65c13bcf178550755c3afc/clutter_webcam_viewer/warp_control.py#L15-L55 | def create_ui(self):
'''
Create UI elements and connect signals.
'''
box = Gtk.Box()
rotate_left = Gtk.Button('Rotate left')
rotate_right = Gtk.Button('Rotate right')
flip_horizontal = Gtk.Button('Flip horizontal')
flip_vertical = Gtk.Button('Flip vertical')
reset = Gtk.Button('Reset')
load = Gtk.Button('Load...')
save = Gtk.Button('Save...')
rotate_left.connect('clicked', lambda *args: self.rotate_left())
rotate_right.connect('clicked', lambda *args: self.rotate_right())
flip_horizontal.connect('clicked', lambda *args:
self.flip_horizontal())
flip_vertical.connect('clicked', lambda *args: self.flip_vertical())
reset.connect('clicked', lambda *args: self.reset())
load.connect('clicked', lambda *args: GObject.idle_add(self.load))
save.connect('clicked', lambda *args: GObject.idle_add(self.save))
for b in (rotate_left, rotate_right, flip_horizontal, flip_vertical,
reset, load, save):
box.pack_start(b, False, False, 0)
box.show_all()
self.widget.pack_start(box, False, False, 0)
if self.warp_actor.parent_corners is None:
for b in (rotate_left, rotate_right, flip_horizontal,
flip_vertical, reset, load, save):
b.set_sensitive(False)
def check_init():
if self.warp_actor.parent_corners is not None:
for b in (rotate_left, rotate_right, flip_horizontal,
flip_vertical, reset, load, save):
b.set_sensitive(True)
return False
return True
GObject.timeout_add(100, check_init) | [
"def",
"create_ui",
"(",
"self",
")",
":",
"box",
"=",
"Gtk",
".",
"Box",
"(",
")",
"rotate_left",
"=",
"Gtk",
".",
"Button",
"(",
"'Rotate left'",
")",
"rotate_right",
"=",
"Gtk",
".",
"Button",
"(",
"'Rotate right'",
")",
"flip_horizontal",
"=",
"Gtk",
".",
"Button",
"(",
"'Flip horizontal'",
")",
"flip_vertical",
"=",
"Gtk",
".",
"Button",
"(",
"'Flip vertical'",
")",
"reset",
"=",
"Gtk",
".",
"Button",
"(",
"'Reset'",
")",
"load",
"=",
"Gtk",
".",
"Button",
"(",
"'Load...'",
")",
"save",
"=",
"Gtk",
".",
"Button",
"(",
"'Save...'",
")",
"rotate_left",
".",
"connect",
"(",
"'clicked'",
",",
"lambda",
"*",
"args",
":",
"self",
".",
"rotate_left",
"(",
")",
")",
"rotate_right",
".",
"connect",
"(",
"'clicked'",
",",
"lambda",
"*",
"args",
":",
"self",
".",
"rotate_right",
"(",
")",
")",
"flip_horizontal",
".",
"connect",
"(",
"'clicked'",
",",
"lambda",
"*",
"args",
":",
"self",
".",
"flip_horizontal",
"(",
")",
")",
"flip_vertical",
".",
"connect",
"(",
"'clicked'",
",",
"lambda",
"*",
"args",
":",
"self",
".",
"flip_vertical",
"(",
")",
")",
"reset",
".",
"connect",
"(",
"'clicked'",
",",
"lambda",
"*",
"args",
":",
"self",
".",
"reset",
"(",
")",
")",
"load",
".",
"connect",
"(",
"'clicked'",
",",
"lambda",
"*",
"args",
":",
"GObject",
".",
"idle_add",
"(",
"self",
".",
"load",
")",
")",
"save",
".",
"connect",
"(",
"'clicked'",
",",
"lambda",
"*",
"args",
":",
"GObject",
".",
"idle_add",
"(",
"self",
".",
"save",
")",
")",
"for",
"b",
"in",
"(",
"rotate_left",
",",
"rotate_right",
",",
"flip_horizontal",
",",
"flip_vertical",
",",
"reset",
",",
"load",
",",
"save",
")",
":",
"box",
".",
"pack_start",
"(",
"b",
",",
"False",
",",
"False",
",",
"0",
")",
"box",
".",
"show_all",
"(",
")",
"self",
".",
"widget",
".",
"pack_start",
"(",
"box",
",",
"False",
",",
"False",
",",
"0",
")",
"if",
"self",
".",
"warp_actor",
".",
"parent_corners",
"is",
"None",
":",
"for",
"b",
"in",
"(",
"rotate_left",
",",
"rotate_right",
",",
"flip_horizontal",
",",
"flip_vertical",
",",
"reset",
",",
"load",
",",
"save",
")",
":",
"b",
".",
"set_sensitive",
"(",
"False",
")",
"def",
"check_init",
"(",
")",
":",
"if",
"self",
".",
"warp_actor",
".",
"parent_corners",
"is",
"not",
"None",
":",
"for",
"b",
"in",
"(",
"rotate_left",
",",
"rotate_right",
",",
"flip_horizontal",
",",
"flip_vertical",
",",
"reset",
",",
"load",
",",
"save",
")",
":",
"b",
".",
"set_sensitive",
"(",
"True",
")",
"return",
"False",
"return",
"True",
"GObject",
".",
"timeout_add",
"(",
"100",
",",
"check_init",
")"
] | Create UI elements and connect signals. | [
"Create",
"UI",
"elements",
"and",
"connect",
"signals",
"."
] | python | train |
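The deferred-enable idiom from check_init, shown in isolation (a hedged sketch; names are illustrative): poll with GObject.timeout_add and return False once the resource exists, which deregisters the timeout:

from gi.repository import GObject

def enable_when_ready(is_ready, widgets, interval_ms=100):
    def check():
        if is_ready():
            for w in widgets:
                w.set_sensitive(True)
            return False  # returning False stops the polling
        return True       # keep polling
    GObject.timeout_add(interval_ms, check)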
alpha-xone/xone | xone/utils.py | https://github.com/alpha-xone/xone/blob/68534a30f7f1760b220ba58040be3927f7dfbcf4/xone/utils.py#L469-L509 | def inst_repr(instance, fmt='str', public_only=True):
"""
Generate class instance signature from its __dict__
From python 3.6 dict is ordered and order of attributes will be preserved automatically
Args:
instance: class instance
fmt: ['json', 'str']
public_only: if display public members only
Returns:
str: string or json representation of instance
Examples:
>>> inst_repr(1)
''
>>> class SampleClass(object):
... def __init__(self):
... self.b = 3
... self.a = 4
... self._private_ = 'hidden'
>>>
>>> s = SampleClass()
>>> inst_repr(s)
'{b=3, a=4}'
>>> inst_repr(s, public_only=False)
'{b=3, a=4, _private_=hidden}'
>>> json.loads(inst_repr(s, fmt='json'))
{'b': 3, 'a': 4}
>>> inst_repr(s, fmt='unknown')
''
"""
if not hasattr(instance, '__dict__'): return ''
if public_only: inst_dict = {k: v for k, v in instance.__dict__.items() if k[0] != '_'}
else: inst_dict = instance.__dict__
if fmt == 'json': return json.dumps(inst_dict, indent=2)
elif fmt == 'str': return to_str(inst_dict, public_only=public_only)
return '' | [
"def",
"inst_repr",
"(",
"instance",
",",
"fmt",
"=",
"'str'",
",",
"public_only",
"=",
"True",
")",
":",
"if",
"not",
"hasattr",
"(",
"instance",
",",
"'__dict__'",
")",
":",
"return",
"''",
"if",
"public_only",
":",
"inst_dict",
"=",
"{",
"k",
":",
"v",
"for",
"k",
",",
"v",
"in",
"instance",
".",
"__dict__",
".",
"items",
"(",
")",
"if",
"k",
"[",
"0",
"]",
"!=",
"'_'",
"}",
"else",
":",
"inst_dict",
"=",
"instance",
".",
"__dict__",
"if",
"fmt",
"==",
"'json'",
":",
"return",
"json",
".",
"dumps",
"(",
"inst_dict",
",",
"indent",
"=",
"2",
")",
"elif",
"fmt",
"==",
"'str'",
":",
"return",
"to_str",
"(",
"inst_dict",
",",
"public_only",
"=",
"public_only",
")",
"return",
"''"
] | Generate class instance signature from its __dict__
From python 3.6 dict is ordered and order of attributes will be preserved automatically
Args:
instance: class instance
fmt: ['json', 'str']
public_only: if display public members only
Returns:
str: string or json representation of instance
Examples:
>>> inst_repr(1)
''
>>> class SampleClass(object):
... def __init__(self):
... self.b = 3
... self.a = 4
... self._private_ = 'hidden'
>>>
>>> s = SampleClass()
>>> inst_repr(s)
'{b=3, a=4}'
>>> inst_repr(s, public_only=False)
'{b=3, a=4, _private_=hidden}'
>>> json.loads(inst_repr(s, fmt='json'))
{'b': 3, 'a': 4}
>>> inst_repr(s, fmt='unknown')
'' | [
"Generate",
"class",
"instance",
"signature",
"from",
"its",
"__dict__",
"From",
"python",
"3",
".",
"6",
"dict",
"is",
"ordered",
"and",
"order",
"of",
"attributes",
"will",
"be",
"preserved",
"automatically"
] | python | train |
tensorflow/tensorboard | tensorboard/backend/event_processing/event_multiplexer.py | https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/backend/event_processing/event_multiplexer.py#L340-L355 | def CompressedHistograms(self, run, tag):
"""Retrieve the compressed histogram events associated with a run and tag.
Args:
run: A string name of the run for which values are retrieved.
tag: A string name of the tag for which values are retrieved.
Raises:
KeyError: If the run is not found, or the tag is not available for
the given run.
Returns:
An array of `event_accumulator.CompressedHistogramEvents`.
"""
accumulator = self.GetAccumulator(run)
return accumulator.CompressedHistograms(tag) | [
"def",
"CompressedHistograms",
"(",
"self",
",",
"run",
",",
"tag",
")",
":",
"accumulator",
"=",
"self",
".",
"GetAccumulator",
"(",
"run",
")",
"return",
"accumulator",
".",
"CompressedHistograms",
"(",
"tag",
")"
] | Retrieve the compressed histogram events associated with a run and tag.
Args:
run: A string name of the run for which values are retrieved.
tag: A string name of the tag for which values are retrieved.
Raises:
KeyError: If the run is not found, or the tag is not available for
the given run.
Returns:
An array of `event_accumulator.CompressedHistogramEvents`. | [
"Retrieve",
"the",
"compressed",
"histogram",
"events",
"associated",
"with",
"a",
"run",
"and",
"tag",
"."
] | python | train |
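A usage sketch (hedged): with a multiplexer already loaded over a logdir, each returned event exposes wall_time, step and the compressed histogram values:

# mux = event_multiplexer.EventMultiplexer({"run1": "/tmp/logdir/run1"})
# mux.Reload()
# for ev in mux.CompressedHistograms("run1", "layer1/weights"):
#     print(ev.wall_time, ev.step, ev.compressed_histogram_values)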