repo (string, length 7-55) | path (string, length 4-223) | url (string, length 87-315) | code (string, length 75-104k) | code_tokens (list) | docstring (string, length 1-46.9k) | docstring_tokens (list) | language (string, 1 class) | partition (string, 3 classes) | avg_line_len (float64, 7.91-980)
---|---|---|---|---|---|---|---|---|---|
MillionIntegrals/vel | vel/internals/provider.py | https://github.com/MillionIntegrals/vel/blob/e0726e1f63742b728966ccae0c8b825ea0ba491a/vel/internals/provider.py#L54-L57 | def resolve_and_call(self, func, extra_env=None):
""" Resolve function arguments and call them, possibily filling from the environment """
kwargs = self.resolve_parameters(func, extra_env=extra_env)
return func(**kwargs) | [
"def",
"resolve_and_call",
"(",
"self",
",",
"func",
",",
"extra_env",
"=",
"None",
")",
":",
"kwargs",
"=",
"self",
".",
"resolve_parameters",
"(",
"func",
",",
"extra_env",
"=",
"extra_env",
")",
"return",
"func",
"(",
"*",
"*",
"kwargs",
")"
]
| Resolve function arguments and call them, possibly filling from the environment | [
"Resolve",
"function",
"arguments",
"and",
"call",
"them",
"possibily",
"filling",
"from",
"the",
"environment"
]
| python | train | 60.25 |
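The row above shows only the method body; as a rough, self-contained sketch of the dependency-resolution idea it implements (the `env` dict below is a hypothetical stand-in for the provider's environment):

```python
import inspect

def resolve_and_call(func, env, extra_env=None):
    # Keep only the environment entries whose keys match func's parameter
    # names; extra_env entries override the base environment.
    merged = {**env, **(extra_env or {})}
    params = inspect.signature(func).parameters
    kwargs = {name: merged[name] for name in params if name in merged}
    return func(**kwargs)

def build(width, height=10):
    return width * height

print(resolve_and_call(build, {"width": 4, "height": 5, "unused": 1}))  # 20
```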
sunt05/SuPy | src/supy/supy_util.py | https://github.com/sunt05/SuPy/blob/47178bd5aee50a059414e3e504940662fbfae0dc/src/supy/supy_util.py#L140-L174 | def gen_FS_DF(df_output):
"""generate DataFrame of scores.
Parameters
----------
df_output : type
Description of parameter `df_output`.
Returns
-------
type
Description of returned object.
"""
df_day = pd.pivot_table(
df_output,
values=['T2', 'U10', 'Kdown', 'RH2'],
index=['Year', 'Month', 'Day'],
aggfunc=[min, max, np.mean, ])
df_day_all_year = pd.pivot_table(
df_output,
values=['T2', 'U10', 'Kdown', 'RH2'],
index=['Month', 'Day'],
aggfunc=[min, max, np.mean, ])
array_yr_mon = df_day.index.droplevel(
'Day').to_frame().drop_duplicates().values
df_fs = pd.DataFrame(
{(yr, mon):
(df_day.loc[(yr, mon)].apply(gen_score_ser) -
df_day_all_year.loc[mon].apply(gen_score_ser)).abs().mean()
for yr, mon in array_yr_mon})
return df_fs | [
"def",
"gen_FS_DF",
"(",
"df_output",
")",
":",
"df_day",
"=",
"pd",
".",
"pivot_table",
"(",
"df_output",
",",
"values",
"=",
"[",
"'T2'",
",",
"'U10'",
",",
"'Kdown'",
",",
"'RH2'",
"]",
",",
"index",
"=",
"[",
"'Year'",
",",
"'Month'",
",",
"'Day'",
"]",
",",
"aggfunc",
"=",
"[",
"min",
",",
"max",
",",
"np",
".",
"mean",
",",
"]",
")",
"df_day_all_year",
"=",
"pd",
".",
"pivot_table",
"(",
"df_output",
",",
"values",
"=",
"[",
"'T2'",
",",
"'U10'",
",",
"'Kdown'",
",",
"'RH2'",
"]",
",",
"index",
"=",
"[",
"'Month'",
",",
"'Day'",
"]",
",",
"aggfunc",
"=",
"[",
"min",
",",
"max",
",",
"np",
".",
"mean",
",",
"]",
")",
"array_yr_mon",
"=",
"df_day",
".",
"index",
".",
"droplevel",
"(",
"'Day'",
")",
".",
"to_frame",
"(",
")",
".",
"drop_duplicates",
"(",
")",
".",
"values",
"df_fs",
"=",
"pd",
".",
"DataFrame",
"(",
"{",
"(",
"yr",
",",
"mon",
")",
":",
"(",
"df_day",
".",
"loc",
"[",
"(",
"yr",
",",
"mon",
")",
"]",
".",
"apply",
"(",
"gen_score_ser",
")",
"-",
"df_day_all_year",
".",
"loc",
"[",
"mon",
"]",
".",
"apply",
"(",
"gen_score_ser",
")",
")",
".",
"abs",
"(",
")",
".",
"mean",
"(",
")",
"for",
"yr",
",",
"mon",
"in",
"array_yr_mon",
"}",
")",
"return",
"df_fs"
]
| generate DataFrame of scores.
Parameters
----------
df_output : type
Description of parameter `df_output`.
Returns
-------
type
Description of returned object. | [
"generate",
"DataFrame",
"of",
"scores",
"."
]
| python | train | 25.2 |
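To make the daily pivot step above concrete, here is a minimal sketch with a toy DataFrame standing in for real SuPy output (`gen_score_ser` is not defined in the row, so the scoring step is omitted):

```python
import numpy as np
import pandas as pd

# Toy stand-in for SuPy output: one variable over two months, two days each.
df_output = pd.DataFrame({
    "Year":  [2020, 2020, 2020, 2020],
    "Month": [1, 1, 2, 2],
    "Day":   [1, 2, 1, 2],
    "T2":    [10.0, 12.0, 9.0, 11.0],
})

# Daily min/max/mean per (Year, Month, Day), as in gen_FS_DF.
df_day = pd.pivot_table(df_output, values=["T2"],
                        index=["Year", "Month", "Day"],
                        aggfunc=[min, max, np.mean])
print(df_day)
```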
Clinical-Genomics/scout | scout/server/blueprints/cases/views.py | https://github.com/Clinical-Genomics/scout/blob/90a551e2e1653a319e654c2405c2866f93d0ebb9/scout/server/blueprints/cases/views.py#L767-L782 | def vcf2cytosure(institute_id, case_name, individual_id):
"""Download vcf2cytosure file for individual."""
(display_name, vcf2cytosure) = controllers.vcf2cytosure(store,
institute_id, case_name, individual_id)
outdir = os.path.abspath(os.path.dirname(vcf2cytosure))
filename = os.path.basename(vcf2cytosure)
log.debug("Attempt to deliver file {0} from dir {1}".format(filename, outdir))
attachment_filename = display_name + ".vcf2cytosure.cgh"
return send_from_directory(outdir, filename,
attachment_filename=attachment_filename,
as_attachment=True) | [
"def",
"vcf2cytosure",
"(",
"institute_id",
",",
"case_name",
",",
"individual_id",
")",
":",
"(",
"display_name",
",",
"vcf2cytosure",
")",
"=",
"controllers",
".",
"vcf2cytosure",
"(",
"store",
",",
"institute_id",
",",
"case_name",
",",
"individual_id",
")",
"outdir",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"vcf2cytosure",
")",
")",
"filename",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"vcf2cytosure",
")",
"log",
".",
"debug",
"(",
"\"Attempt to deliver file {0} from dir {1}\"",
".",
"format",
"(",
"filename",
",",
"outdir",
")",
")",
"attachment_filename",
"=",
"display_name",
"+",
"\".vcf2cytosure.cgh\"",
"return",
"send_from_directory",
"(",
"outdir",
",",
"filename",
",",
"attachment_filename",
"=",
"attachment_filename",
",",
"as_attachment",
"=",
"True",
")"
]
| Download vcf2cytosure file for individual. | [
"Download",
"vcf2cytosure",
"file",
"for",
"individual",
"."
]
| python | test | 36.9375 |
Azure/azure-cli-extensions | src/aks-preview/azext_aks_preview/_completers.py | https://github.com/Azure/azure-cli-extensions/blob/3d4854205b0f0d882f688cfa12383d14506c2e35/src/aks-preview/azext_aks_preview/_completers.py#L14-L18 | def get_k8s_upgrades_completion_list(cmd, prefix, namespace, **kwargs): # pylint: disable=unused-argument
"""Return Kubernetes versions available for upgrading an existing cluster."""
resource_group = getattr(namespace, 'resource_group_name', None)
name = getattr(namespace, 'name', None)
return get_k8s_upgrades(cmd.cli_ctx, resource_group, name) if resource_group and name else None | [
"def",
"get_k8s_upgrades_completion_list",
"(",
"cmd",
",",
"prefix",
",",
"namespace",
",",
"*",
"*",
"kwargs",
")",
":",
"# pylint: disable=unused-argument",
"resource_group",
"=",
"getattr",
"(",
"namespace",
",",
"'resource_group_name'",
",",
"None",
")",
"name",
"=",
"getattr",
"(",
"namespace",
",",
"'name'",
",",
"None",
")",
"return",
"get_k8s_upgrades",
"(",
"cmd",
".",
"cli_ctx",
",",
"resource_group",
",",
"name",
")",
"if",
"resource_group",
"and",
"name",
"else",
"None"
]
| Return Kubernetes versions available for upgrading an existing cluster. | [
"Return",
"Kubernetes",
"versions",
"available",
"for",
"upgrading",
"an",
"existing",
"cluster",
"."
]
| python | train | 79.4 |
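For context, a sketch of the Django settings this middleware reads (module paths are illustrative):

```python
# settings.py (illustrative urlconf module paths)
SUBDOMAIN_URLCONFS = {
    None: "myproject.urls",       # bare domain, e.g. example.com
    "www": "myproject.urls",      # www.example.com
    "api": "myproject.urls_api",  # api.example.com
}
```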
tkaemming/django-subdomains | subdomains/middleware.py | https://github.com/tkaemming/django-subdomains/blob/be6cc1c556a2007287ef4e647ea1784cf7690a44/subdomains/middleware.py#L51-L66 | def process_request(self, request):
"""
Sets the current request's ``urlconf`` attribute to the urlconf
associated with the subdomain, if it is listed in
``settings.SUBDOMAIN_URLCONFS``.
"""
super(SubdomainURLRoutingMiddleware, self).process_request(request)
subdomain = getattr(request, 'subdomain', UNSET)
if subdomain is not UNSET:
urlconf = settings.SUBDOMAIN_URLCONFS.get(subdomain)
if urlconf is not None:
logger.debug("Using urlconf %s for subdomain: %s",
repr(urlconf), repr(subdomain))
request.urlconf = urlconf | [
"def",
"process_request",
"(",
"self",
",",
"request",
")",
":",
"super",
"(",
"SubdomainURLRoutingMiddleware",
",",
"self",
")",
".",
"process_request",
"(",
"request",
")",
"subdomain",
"=",
"getattr",
"(",
"request",
",",
"'subdomain'",
",",
"UNSET",
")",
"if",
"subdomain",
"is",
"not",
"UNSET",
":",
"urlconf",
"=",
"settings",
".",
"SUBDOMAIN_URLCONFS",
".",
"get",
"(",
"subdomain",
")",
"if",
"urlconf",
"is",
"not",
"None",
":",
"logger",
".",
"debug",
"(",
"\"Using urlconf %s for subdomain: %s\"",
",",
"repr",
"(",
"urlconf",
")",
",",
"repr",
"(",
"subdomain",
")",
")",
"request",
".",
"urlconf",
"=",
"urlconf"
]
| Sets the current request's ``urlconf`` attribute to the urlconf
associated with the subdomain, if it is listed in
``settings.SUBDOMAIN_URLCONFS``. | [
"Sets",
"the",
"current",
"request",
"s",
"urlconf",
"attribute",
"to",
"the",
"urlconf",
"associated",
"with",
"the",
"subdomain",
"if",
"it",
"is",
"listed",
"in",
"settings",
".",
"SUBDOMAIN_URLCONFS",
"."
]
| python | train | 40.4375 |
lark-parser/lark | examples/standalone/json_parser.py | https://github.com/lark-parser/lark/blob/a798dec77907e74520dd7e90c7b6a4acc680633a/examples/standalone/json_parser.py#L1793-L1808 | def open(cls, grammar_filename, rel_to=None, **options):
"""Create an instance of Lark with the grammar given by its filename
If rel_to is provided, the function will find the grammar filename in relation to it.
Example:
>>> Lark.open("grammar_file.lark", rel_to=__file__, parser="lalr")
Lark(...)
"""
if rel_to:
basepath = os.path.dirname(rel_to)
grammar_filename = os.path.join(basepath, grammar_filename)
with open(grammar_filename, encoding='utf8') as f:
return cls(f, **options) | [
"def",
"open",
"(",
"cls",
",",
"grammar_filename",
",",
"rel_to",
"=",
"None",
",",
"*",
"*",
"options",
")",
":",
"if",
"rel_to",
":",
"basepath",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"rel_to",
")",
"grammar_filename",
"=",
"os",
".",
"path",
".",
"join",
"(",
"basepath",
",",
"grammar_filename",
")",
"with",
"open",
"(",
"grammar_filename",
",",
"encoding",
"=",
"'utf8'",
")",
"as",
"f",
":",
"return",
"cls",
"(",
"f",
",",
"*",
"*",
"options",
")"
]
| Create an instance of Lark with the grammar given by its filename
If rel_to is provided, the function will find the grammar filename in relation to it.
Example:
>>> Lark.open("grammar_file.lark", rel_to=__file__, parser="lalr")
Lark(...) | [
"Create",
"an",
"instance",
"of",
"Lark",
"with",
"the",
"grammar",
"given",
"by",
"its",
"filename"
]
| python | train | 36.25 |
20tab/twentytab-tree | tree/menu.py | https://github.com/20tab/twentytab-tree/blob/f2c1ced33e6c211bb52a25a7d48155e39fbdc088/tree/menu.py#L138-L142 | def as_p(self, show_leaf=True, current_linkable=False, class_current="active_link"):
"""
It returns the breadcrumb as p
"""
return self.__do_menu("as_p", show_leaf, current_linkable, class_current) | [
"def",
"as_p",
"(",
"self",
",",
"show_leaf",
"=",
"True",
",",
"current_linkable",
"=",
"False",
",",
"class_current",
"=",
"\"active_link\"",
")",
":",
"return",
"self",
".",
"__do_menu",
"(",
"\"as_p\"",
",",
"show_leaf",
",",
"current_linkable",
",",
"class_current",
")"
]
| It returns the breadcrumb as p | [
"It",
"returns",
"breadcrumb",
"as",
"p"
]
| python | train | 44.2 |
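A self-contained sketch of the same monadic bind on plain Python lists, mirroring the Haskell `concat (map f xs)` cited above:

```python
from itertools import chain

def bind(xs, fn):
    # Haskell: xs >>= f = concat (map f xs)
    return list(chain.from_iterable(map(fn, xs)))

print(bind([1, 2, 3], lambda x: [x, x * 10]))  # [1, 10, 2, 20, 3, 30]
```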
dbrattli/OSlash | oslash/list.py | https://github.com/dbrattli/OSlash/blob/ffdc714c5d454f7519f740254de89f70850929eb/oslash/list.py#L80-L85 | def bind(self, fn: Callable[[Any], 'List']) -> 'List':
"""Flatten and map the List.
Haskell: xs >>= f = concat (map f xs)
"""
return List.concat(self.map(fn)) | [
"def",
"bind",
"(",
"self",
",",
"fn",
":",
"Callable",
"[",
"[",
"Any",
"]",
",",
"'List'",
"]",
")",
"->",
"'List'",
":",
"return",
"List",
".",
"concat",
"(",
"self",
".",
"map",
"(",
"fn",
")",
")"
]
| Flatten and map the List.
Haskell: xs >>= f = concat (map f xs) | [
"Flatten",
"and",
"map",
"the",
"List",
"."
]
| python | train | 31 |
saltstack/salt | salt/modules/vagrant.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/vagrant.py#L94-L109 | def get_vm_info(name):
'''
get the information for a VM.
:param name: salt_id name
:return: dictionary of {'machine': x, 'cwd': y, ...}.
'''
try:
vm_ = __utils__['sdb.sdb_get'](_build_sdb_uri(name), __opts__)
except KeyError:
raise SaltInvocationError(
'Probable sdb driver not found. Check your configuration.')
if vm_ is None or 'machine' not in vm_:
raise SaltInvocationError(
'No Vagrant machine defined for Salt_id {}'.format(name))
return vm_ | [
"def",
"get_vm_info",
"(",
"name",
")",
":",
"try",
":",
"vm_",
"=",
"__utils__",
"[",
"'sdb.sdb_get'",
"]",
"(",
"_build_sdb_uri",
"(",
"name",
")",
",",
"__opts__",
")",
"except",
"KeyError",
":",
"raise",
"SaltInvocationError",
"(",
"'Probable sdb driver not found. Check your configuration.'",
")",
"if",
"vm_",
"is",
"None",
"or",
"'machine'",
"not",
"in",
"vm_",
":",
"raise",
"SaltInvocationError",
"(",
"'No Vagrant machine defined for Salt_id {}'",
".",
"format",
"(",
"name",
")",
")",
"return",
"vm_"
]
| get the information for a VM.
:param name: salt_id name
:return: dictionary of {'machine': x, 'cwd': y, ...}. | [
"get",
"the",
"information",
"for",
"a",
"VM",
"."
]
| python | train | 32.375 |
gem/oq-engine | openquake/calculators/base.py | https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/calculators/base.py#L368-L374 | def rtree_filter(self):
"""
:returns: an RtreeFilter
"""
return RtreeFilter(self.src_filter.sitecol,
self.oqparam.maximum_distance,
self.src_filter.filename) | [
"def",
"rtree_filter",
"(",
"self",
")",
":",
"return",
"RtreeFilter",
"(",
"self",
".",
"src_filter",
".",
"sitecol",
",",
"self",
".",
"oqparam",
".",
"maximum_distance",
",",
"self",
".",
"src_filter",
".",
"filename",
")"
]
| :returns: an RtreeFilter | [
":",
"returns",
":",
"an",
"RtreeFilter"
]
| python | train | 33.857143 |
josiah-wolf-oberholtzer/uqbar | uqbar/objects.py | https://github.com/josiah-wolf-oberholtzer/uqbar/blob/eca7fefebbbee1e2ae13bf5d6baa838be66b1db6/uqbar/objects.py#L87-L173 | def get_repr(expr, multiline=False):
"""
Build a repr string for ``expr`` from its vars and signature.
::
>>> class MyObject:
... def __init__(self, arg1, arg2, *var_args, foo=None, bar=None, **kwargs):
... self.arg1 = arg1
... self.arg2 = arg2
... self.var_args = var_args
... self.foo = foo
... self.bar = bar
... self.kwargs = kwargs
...
>>> my_object = MyObject('a', 'b', 'c', 'd', foo='x', quux=['y', 'z'])
::
>>> import uqbar
>>> print(uqbar.objects.get_repr(my_object))
MyObject(
'a',
'b',
'c',
'd',
foo='x',
quux=['y', 'z'],
)
"""
signature = _get_object_signature(expr)
if signature is None:
return "{}()".format(type(expr).__name__)
defaults = {}
for name, parameter in signature.parameters.items():
if parameter.default is not inspect._empty:
defaults[name] = parameter.default
args, var_args, kwargs = get_vars(expr)
args_parts = collections.OrderedDict()
var_args_parts = []
kwargs_parts = {}
has_lines = multiline
parts = []
# Format keyword-optional arguments.
# print(type(expr), args)
for i, (key, value) in enumerate(args.items()):
arg_repr = _dispatch_formatting(value)
if "\n" in arg_repr:
has_lines = True
args_parts[key] = arg_repr
# Format *args
for arg in var_args:
arg_repr = _dispatch_formatting(arg)
if "\n" in arg_repr:
has_lines = True
var_args_parts.append(arg_repr)
# Format **kwargs
for key, value in sorted(kwargs.items()):
if key in defaults and value == defaults[key]:
continue
value = _dispatch_formatting(value)
arg_repr = "{}={}".format(key, value)
has_lines = True
kwargs_parts[key] = arg_repr
for _, part in args_parts.items():
parts.append(part)
parts.extend(var_args_parts)
for _, part in sorted(kwargs_parts.items()):
parts.append(part)
# If we should format on multiple lines, add the appropriate formatting.
if has_lines and parts:
for i, part in enumerate(parts):
parts[i] = "\n".join(" " + line for line in part.split("\n"))
parts.append(" )")
parts = ",\n".join(parts)
return "{}(\n{}".format(type(expr).__name__, parts)
parts = ", ".join(parts)
return "{}({})".format(type(expr).__name__, parts) | [
"def",
"get_repr",
"(",
"expr",
",",
"multiline",
"=",
"False",
")",
":",
"signature",
"=",
"_get_object_signature",
"(",
"expr",
")",
"if",
"signature",
"is",
"None",
":",
"return",
"\"{}()\"",
".",
"format",
"(",
"type",
"(",
"expr",
")",
".",
"__name__",
")",
"defaults",
"=",
"{",
"}",
"for",
"name",
",",
"parameter",
"in",
"signature",
".",
"parameters",
".",
"items",
"(",
")",
":",
"if",
"parameter",
".",
"default",
"is",
"not",
"inspect",
".",
"_empty",
":",
"defaults",
"[",
"name",
"]",
"=",
"parameter",
".",
"default",
"args",
",",
"var_args",
",",
"kwargs",
"=",
"get_vars",
"(",
"expr",
")",
"args_parts",
"=",
"collections",
".",
"OrderedDict",
"(",
")",
"var_args_parts",
"=",
"[",
"]",
"kwargs_parts",
"=",
"{",
"}",
"has_lines",
"=",
"multiline",
"parts",
"=",
"[",
"]",
"# Format keyword-optional arguments.",
"# print(type(expr), args)",
"for",
"i",
",",
"(",
"key",
",",
"value",
")",
"in",
"enumerate",
"(",
"args",
".",
"items",
"(",
")",
")",
":",
"arg_repr",
"=",
"_dispatch_formatting",
"(",
"value",
")",
"if",
"\"\\n\"",
"in",
"arg_repr",
":",
"has_lines",
"=",
"True",
"args_parts",
"[",
"key",
"]",
"=",
"arg_repr",
"# Format *args",
"for",
"arg",
"in",
"var_args",
":",
"arg_repr",
"=",
"_dispatch_formatting",
"(",
"arg",
")",
"if",
"\"\\n\"",
"in",
"arg_repr",
":",
"has_lines",
"=",
"True",
"var_args_parts",
".",
"append",
"(",
"arg_repr",
")",
"# Format **kwargs",
"for",
"key",
",",
"value",
"in",
"sorted",
"(",
"kwargs",
".",
"items",
"(",
")",
")",
":",
"if",
"key",
"in",
"defaults",
"and",
"value",
"==",
"defaults",
"[",
"key",
"]",
":",
"continue",
"value",
"=",
"_dispatch_formatting",
"(",
"value",
")",
"arg_repr",
"=",
"\"{}={}\"",
".",
"format",
"(",
"key",
",",
"value",
")",
"has_lines",
"=",
"True",
"kwargs_parts",
"[",
"key",
"]",
"=",
"arg_repr",
"for",
"_",
",",
"part",
"in",
"args_parts",
".",
"items",
"(",
")",
":",
"parts",
".",
"append",
"(",
"part",
")",
"parts",
".",
"extend",
"(",
"var_args_parts",
")",
"for",
"_",
",",
"part",
"in",
"sorted",
"(",
"kwargs_parts",
".",
"items",
"(",
")",
")",
":",
"parts",
".",
"append",
"(",
"part",
")",
"# If we should format on multiple lines, add the appropriate formatting.",
"if",
"has_lines",
"and",
"parts",
":",
"for",
"i",
",",
"part",
"in",
"enumerate",
"(",
"parts",
")",
":",
"parts",
"[",
"i",
"]",
"=",
"\"\\n\"",
".",
"join",
"(",
"\" \"",
"+",
"line",
"for",
"line",
"in",
"part",
".",
"split",
"(",
"\"\\n\"",
")",
")",
"parts",
".",
"append",
"(",
"\" )\"",
")",
"parts",
"=",
"\",\\n\"",
".",
"join",
"(",
"parts",
")",
"return",
"\"{}(\\n{}\"",
".",
"format",
"(",
"type",
"(",
"expr",
")",
".",
"__name__",
",",
"parts",
")",
"parts",
"=",
"\", \"",
".",
"join",
"(",
"parts",
")",
"return",
"\"{}({})\"",
".",
"format",
"(",
"type",
"(",
"expr",
")",
".",
"__name__",
",",
"parts",
")"
]
| Build a repr string for ``expr`` from its vars and signature.
::
>>> class MyObject:
... def __init__(self, arg1, arg2, *var_args, foo=None, bar=None, **kwargs):
... self.arg1 = arg1
... self.arg2 = arg2
... self.var_args = var_args
... self.foo = foo
... self.bar = bar
... self.kwargs = kwargs
...
>>> my_object = MyObject('a', 'b', 'c', 'd', foo='x', quux=['y', 'z'])
::
>>> import uqbar
>>> print(uqbar.objects.get_repr(my_object))
MyObject(
'a',
'b',
'c',
'd',
foo='x',
quux=['y', 'z'],
) | [
"Build",
"a",
"repr",
"string",
"for",
"expr",
"from",
"its",
"vars",
"and",
"signature",
"."
]
| python | train | 29.252874 |
BD2KGenomics/toil-lib | src/toil_lib/files.py | https://github.com/BD2KGenomics/toil-lib/blob/022a615fc3dc98fc1aaa7bfd232409962ca44fbd/src/toil_lib/files.py#L40-L51 | def copy_file_job(job, name, file_id, output_dir):
"""
Job version of move_files for one file
:param JobFunctionWrappingJob job: passed automatically by Toil
:param str name: Name of output file (including extension)
:param str file_id: FileStoreID of file
:param str output_dir: Location to place output file
"""
work_dir = job.fileStore.getLocalTempDir()
fpath = job.fileStore.readGlobalFile(file_id, os.path.join(work_dir, name))
copy_files([fpath], output_dir) | [
"def",
"copy_file_job",
"(",
"job",
",",
"name",
",",
"file_id",
",",
"output_dir",
")",
":",
"work_dir",
"=",
"job",
".",
"fileStore",
".",
"getLocalTempDir",
"(",
")",
"fpath",
"=",
"job",
".",
"fileStore",
".",
"readGlobalFile",
"(",
"file_id",
",",
"os",
".",
"path",
".",
"join",
"(",
"work_dir",
",",
"name",
")",
")",
"copy_files",
"(",
"[",
"fpath",
"]",
",",
"output_dir",
")"
]
| Job version of move_files for one file
:param JobFunctionWrappingJob job: passed automatically by Toil
:param str name: Name of output file (including extension)
:param str file_id: FileStoreID of file
:param str output_dir: Location to place output file | [
"Job",
"version",
"of",
"move_files",
"for",
"one",
"file"
]
| python | test | 41.166667 |
saltstack/salt | salt/modules/virt.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/virt.py#L284-L322 | def _parse_qemu_img_info(info):
'''
Parse qemu-img info JSON output into disk infos dictionary
'''
raw_infos = salt.utils.json.loads(info)
disks = []
for disk_infos in raw_infos:
disk = {
'file': disk_infos['filename'],
'file format': disk_infos['format'],
'disk size': disk_infos['actual-size'],
'virtual size': disk_infos['virtual-size'],
'cluster size': disk_infos['cluster-size'] if 'cluster-size' in disk_infos else None,
}
if 'full-backing-filename' in disk_infos.keys():
disk['backing file'] = format(disk_infos['full-backing-filename'])
if 'snapshots' in disk_infos.keys():
disk['snapshots'] = [
{
'id': snapshot['id'],
'tag': snapshot['name'],
'vmsize': snapshot['vm-state-size'],
'date': datetime.datetime.fromtimestamp(
float('{}.{}'.format(snapshot['date-sec'], snapshot['date-nsec']))).isoformat(),
'vmclock': datetime.datetime.utcfromtimestamp(
float('{}.{}'.format(snapshot['vm-clock-sec'],
snapshot['vm-clock-nsec']))).time().isoformat()
} for snapshot in disk_infos['snapshots']]
disks.append(disk)
for disk in disks:
if 'backing file' in disk.keys():
candidates = [info for info in disks if 'file' in info.keys() and info['file'] == disk['backing file']]
if candidates:
disk['backing file'] = candidates[0]
return disks[0] | [
"def",
"_parse_qemu_img_info",
"(",
"info",
")",
":",
"raw_infos",
"=",
"salt",
".",
"utils",
".",
"json",
".",
"loads",
"(",
"info",
")",
"disks",
"=",
"[",
"]",
"for",
"disk_infos",
"in",
"raw_infos",
":",
"disk",
"=",
"{",
"'file'",
":",
"disk_infos",
"[",
"'filename'",
"]",
",",
"'file format'",
":",
"disk_infos",
"[",
"'format'",
"]",
",",
"'disk size'",
":",
"disk_infos",
"[",
"'actual-size'",
"]",
",",
"'virtual size'",
":",
"disk_infos",
"[",
"'virtual-size'",
"]",
",",
"'cluster size'",
":",
"disk_infos",
"[",
"'cluster-size'",
"]",
"if",
"'cluster-size'",
"in",
"disk_infos",
"else",
"None",
",",
"}",
"if",
"'full-backing-filename'",
"in",
"disk_infos",
".",
"keys",
"(",
")",
":",
"disk",
"[",
"'backing file'",
"]",
"=",
"format",
"(",
"disk_infos",
"[",
"'full-backing-filename'",
"]",
")",
"if",
"'snapshots'",
"in",
"disk_infos",
".",
"keys",
"(",
")",
":",
"disk",
"[",
"'snapshots'",
"]",
"=",
"[",
"{",
"'id'",
":",
"snapshot",
"[",
"'id'",
"]",
",",
"'tag'",
":",
"snapshot",
"[",
"'name'",
"]",
",",
"'vmsize'",
":",
"snapshot",
"[",
"'vm-state-size'",
"]",
",",
"'date'",
":",
"datetime",
".",
"datetime",
".",
"fromtimestamp",
"(",
"float",
"(",
"'{}.{}'",
".",
"format",
"(",
"snapshot",
"[",
"'date-sec'",
"]",
",",
"snapshot",
"[",
"'date-nsec'",
"]",
")",
")",
")",
".",
"isoformat",
"(",
")",
",",
"'vmclock'",
":",
"datetime",
".",
"datetime",
".",
"utcfromtimestamp",
"(",
"float",
"(",
"'{}.{}'",
".",
"format",
"(",
"snapshot",
"[",
"'vm-clock-sec'",
"]",
",",
"snapshot",
"[",
"'vm-clock-nsec'",
"]",
")",
")",
")",
".",
"time",
"(",
")",
".",
"isoformat",
"(",
")",
"}",
"for",
"snapshot",
"in",
"disk_infos",
"[",
"'snapshots'",
"]",
"]",
"disks",
".",
"append",
"(",
"disk",
")",
"for",
"disk",
"in",
"disks",
":",
"if",
"'backing file'",
"in",
"disk",
".",
"keys",
"(",
")",
":",
"candidates",
"=",
"[",
"info",
"for",
"info",
"in",
"disks",
"if",
"'file'",
"in",
"info",
".",
"keys",
"(",
")",
"and",
"info",
"[",
"'file'",
"]",
"==",
"disk",
"[",
"'backing file'",
"]",
"]",
"if",
"candidates",
":",
"disk",
"[",
"'backing file'",
"]",
"=",
"candidates",
"[",
"0",
"]",
"return",
"disks",
"[",
"0",
"]"
]
| Parse qemu-img info JSON output into disk infos dictionary | [
"Parse",
"qemu",
"-",
"img",
"info",
"JSON",
"output",
"into",
"disk",
"infos",
"dictionary"
]
| python | train | 44.025641 |
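For context, the JSON this parser consumes comes from `qemu-img`; a minimal sketch of producing and loading it (assumes `qemu-img` is installed and `disk.qcow2` exists):

```python
import json
import subprocess

# --backing-chain makes qemu-img emit a JSON array with one entry per image
# in the chain, which matches the list this parser iterates over.
out = subprocess.check_output(
    ["qemu-img", "info", "--output=json", "--backing-chain", "disk.qcow2"])
infos = json.loads(out)
print(infos[0]["virtual-size"])
```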
cydrobolt/pifx | pifx/core.py | https://github.com/cydrobolt/pifx/blob/c9de9c2695c3e6e72de4aa0de47b78fc13c457c3/pifx/core.py#L75-L122 | def state_delta(self, selector='all',
power=None, duration=1.0, infrared=None, hue=None,
saturation=None, brightness=None, kelvin=None):
"""Given a state delta, apply the modifications to lights' state
over a given period of time.
selector: required String
The selector to limit which lights are controlled.
power: String
The power state you want to set on the selector. on or off
duration: Double
How long in seconds you want the power action to take.
Range: 0.0 – 3155760000.0 (100 years)
infrared: Double
The maximum brightness of the infrared channel.
hue: Double
Rotate the hue by this angle in degrees.
saturation: Double
Change the saturation by this additive amount; the resulting
saturation is clipped to [0, 1].
brightness: Double
Change the brightness by this additive amount; the resulting
brightness is clipped to [0, 1].
kelvin: Double
Change the kelvin by this additive amount; the resulting kelvin is
clipped to [2500, 9000].
"""
argument_tuples = [
("power", power),
("duration", duration),
("infrared", infrared),
("hue", hue),
("saturation", saturation),
("brightness", brightness),
("kelvin", kelvin)
]
return self.client.perform_request(
method='post', endpoint='lights/{}/state/delta',
endpoint_args=[selector], argument_tuples=argument_tuples) | [
"def",
"state_delta",
"(",
"self",
",",
"selector",
"=",
"'all'",
",",
"power",
"=",
"None",
",",
"duration",
"=",
"1.0",
",",
"infrared",
"=",
"None",
",",
"hue",
"=",
"None",
",",
"saturation",
"=",
"None",
",",
"brightness",
"=",
"None",
",",
"kelvin",
"=",
"None",
")",
":",
"argument_tuples",
"=",
"[",
"(",
"\"power\"",
",",
"power",
")",
",",
"(",
"\"duration\"",
",",
"duration",
")",
",",
"(",
"\"infrared\"",
",",
"infrared",
")",
",",
"(",
"\"hue\"",
",",
"hue",
")",
",",
"(",
"\"saturation\"",
",",
"saturation",
")",
",",
"(",
"\"brightness\"",
",",
"brightness",
")",
",",
"(",
"\"kelvin\"",
",",
"kelvin",
")",
"]",
"return",
"self",
".",
"client",
".",
"perform_request",
"(",
"method",
"=",
"'post'",
",",
"endpoint",
"=",
"'lights/{}/state/delta'",
",",
"endpoint_args",
"=",
"[",
"selector",
"]",
",",
"argument_tuples",
"=",
"argument_tuples",
")"
]
| Given a state delta, apply the modifications to lights' state
over a given period of time.
selector: required String
The selector to limit which lights are controlled.
power: String
The power state you want to set on the selector. on or off
duration: Double
How long in seconds you want the power action to take.
Range: 0.0 – 3155760000.0 (100 years)
infrared: Double
The maximum brightness of the infrared channel.
hue: Double
Rotate the hue by this angle in degrees.
saturation: Double
Change the saturation by this additive amount; the resulting
saturation is clipped to [0, 1].
brightness: Double
Change the brightness by this additive amount; the resulting
brightness is clipped to [0, 1].
kelvin: Double
Change the kelvin by this additive amount; the resulting kelvin is
clipped to [2500, 9000]. | [
"Given",
"a",
"state",
"delta",
"apply",
"the",
"modifications",
"to",
"lights",
"state",
"over",
"a",
"given",
"period",
"of",
"time",
"."
]
| python | train | 33.520833 |
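A hedged usage sketch (assumes a pifx client built with a real LIFX API token; the token below is a placeholder):

```python
from pifx import PIFX

p = PIFX(api_key="YOUR-LIFX-TOKEN")  # placeholder token, not a real credential
# Dim every light by 10% over two seconds and warm the white point slightly
# (lower kelvin means warmer).
p.state_delta(selector="all", duration=2.0, brightness=-0.1, kelvin=-200)
```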
allenai/allennlp | scripts/reformat_text2sql_data.py | https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/scripts/reformat_text2sql_data.py#L38-L91 | def main(output_directory: str, data: str) -> None:
"""
Processes the text2sql data into the following directory structure:
``dataset/{query_split, question_split}/{train,dev,test}.json``
for datasets which have train, dev and test splits, or:
``dataset/{query_split, question_split}/{split_{split_id}}.json``
for datasets which use cross validation.
The JSON format is identical to the original datasets, apart from they
are split into separate files with respect to the split_type. This means that
for the question split, all of the sql data is duplicated for each sentence
which is bucketed together as having the same semantics.
As an example, the following blob would be put "as-is" into the query split
dataset, and split into two datasets with identical blobs for the question split,
differing only in the "sentence" key, where blob1 would end up in the train split
and blob2 would be in the dev split, with the rest of the json duplicated in each.
{
"comments": [],
"old-name": "",
"query-split": "train",
"sentences": [{blob1, "question-split": "train"}, {blob2, "question-split": "dev"}],
"sql": [],
"variables": []
},
Parameters
----------
output_directory : str, required.
The output directory.
data: str, default = None
The path to the data directory of https://github.com/jkkummerfeld/text2sql-data.
"""
json_files = glob.glob(os.path.join(data, "*.json"))
for dataset in json_files:
dataset_name = os.path.basename(dataset)[:-5]
print(f"Processing dataset: {dataset} into query and question "
f"splits at output path: {output_directory + '/' + dataset_name}")
full_dataset = json.load(open(dataset))
if not isinstance(full_dataset, list):
full_dataset = [full_dataset]
for split_type in ["query_split", "question_split"]:
dataset_out = os.path.join(output_directory, dataset_name, split_type)
for split, split_dataset in process_dataset(full_dataset, split_type):
dataset_out = os.path.join(output_directory, dataset_name, split_type)
os.makedirs(dataset_out, exist_ok=True)
json.dump(split_dataset, open(os.path.join(dataset_out, split), "w"), indent=4) | [
"def",
"main",
"(",
"output_directory",
":",
"int",
",",
"data",
":",
"str",
")",
"->",
"None",
":",
"json_files",
"=",
"glob",
".",
"glob",
"(",
"os",
".",
"path",
".",
"join",
"(",
"data",
",",
"\"*.json\"",
")",
")",
"for",
"dataset",
"in",
"json_files",
":",
"dataset_name",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"dataset",
")",
"[",
":",
"-",
"5",
"]",
"print",
"(",
"f\"Processing dataset: {dataset} into query and question \"",
"f\"splits at output path: {output_directory + '/' + dataset_name}\"",
")",
"full_dataset",
"=",
"json",
".",
"load",
"(",
"open",
"(",
"dataset",
")",
")",
"if",
"not",
"isinstance",
"(",
"full_dataset",
",",
"list",
")",
":",
"full_dataset",
"=",
"[",
"full_dataset",
"]",
"for",
"split_type",
"in",
"[",
"\"query_split\"",
",",
"\"question_split\"",
"]",
":",
"dataset_out",
"=",
"os",
".",
"path",
".",
"join",
"(",
"output_directory",
",",
"dataset_name",
",",
"split_type",
")",
"for",
"split",
",",
"split_dataset",
"in",
"process_dataset",
"(",
"full_dataset",
",",
"split_type",
")",
":",
"dataset_out",
"=",
"os",
".",
"path",
".",
"join",
"(",
"output_directory",
",",
"dataset_name",
",",
"split_type",
")",
"os",
".",
"makedirs",
"(",
"dataset_out",
",",
"exist_ok",
"=",
"True",
")",
"json",
".",
"dump",
"(",
"split_dataset",
",",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"dataset_out",
",",
"split",
")",
",",
"\"w\"",
")",
",",
"indent",
"=",
"4",
")"
]
| Processes the text2sql data into the following directory structure:
``dataset/{query_split, question_split}/{train,dev,test}.json``
for datasets which have train, dev and test splits, or:
``dataset/{query_split, question_split}/{split_{split_id}}.json``
for datasets which use cross validation.
The JSON format is identical to the original datasets, except that they
are split into separate files with respect to the split_type. This means that
for the question split, all of the sql data is duplicated for each sentence
which is bucketed together as having the same semantics.
As an example, the following blob would be put "as-is" into the query split
dataset, and split into two datasets with identical blobs for the question split,
differing only in the "sentence" key, where blob1 would end up in the train split
and blob2 would be in the dev split, with the rest of the json duplicated in each.
{
"comments": [],
"old-name": "",
"query-split": "train",
"sentences": [{blob1, "question-split": "train"}, {blob2, "question-split": "dev"}],
"sql": [],
"variables": []
},
Parameters
----------
output_directory : str, required.
The output directory.
data: str, default = None
The path to the data directory of https://github.com/jkkummerfeld/text2sql-data.
"Processes",
"the",
"text2sql",
"data",
"into",
"the",
"following",
"directory",
"structure",
":"
]
| python | train | 42.944444 |
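A hedged invocation sketch (paths are illustrative; `data` should point at the `data/` directory of a jkkummerfeld/text2sql-data checkout):

```python
# Writes out/<dataset_name>/{query_split,question_split}/<split>.json
# for every *.json file found under text2sql-data/data.
main(output_directory="out", data="text2sql-data/data")
```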
cloud9ers/gurumate | environment/lib/python2.7/site-packages/IPython/parallel/client/magics.py | https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/parallel/client/magics.py#L190-L215 | def result(self, line=''):
"""Print the result of the last asynchronous %px command.
This lets you recall the results of %px computations after
asynchronous submission (block=False).
Examples
--------
::
In [23]: %px os.getpid()
Async parallel execution on engine(s): all
In [24]: %pxresult
Out[8:10]: 60920
Out[9:10]: 60921
Out[10:10]: 60922
Out[11:10]: 60923
"""
args = magic_arguments.parse_argstring(self.result, line)
if self.last_result is None:
raise UsageError(NO_LAST_RESULT)
self.last_result.get()
self.last_result.display_outputs(groupby=args.groupby) | [
"def",
"result",
"(",
"self",
",",
"line",
"=",
"''",
")",
":",
"args",
"=",
"magic_arguments",
".",
"parse_argstring",
"(",
"self",
".",
"result",
",",
"line",
")",
"if",
"self",
".",
"last_result",
"is",
"None",
":",
"raise",
"UsageError",
"(",
"NO_LAST_RESULT",
")",
"self",
".",
"last_result",
".",
"get",
"(",
")",
"self",
".",
"last_result",
".",
"display_outputs",
"(",
"groupby",
"=",
"args",
".",
"groupby",
")"
]
| Print the result of the last asynchronous %px command.
This lets you recall the results of %px computations after
asynchronous submission (block=False).
Examples
--------
::
In [23]: %px os.getpid()
Async parallel execution on engine(s): all
In [24]: %pxresult
Out[8:10]: 60920
Out[9:10]: 60921
Out[10:10]: 60922
Out[11:10]: 60923 | [
"Print",
"the",
"result",
"of",
"the",
"last",
"asynchronous",
"%px",
"command",
".",
"This",
"lets",
"you",
"recall",
"the",
"results",
"of",
"%px",
"computations",
"after",
"asynchronous",
"submission",
"(",
"block",
"=",
"False",
")",
"."
]
| python | test | 28.884615 |
saltstack/salt | salt/returners/postgres_local_cache.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/returners/postgres_local_cache.py#L200-L221 | def prep_jid(nocache=False, passed_jid=None):
'''
Return a job id and prepare the job id directory
This is the function responsible for making sure jids don't collide
(unless it's passed a jid). So do what you have to do to make sure that
stays the case
'''
conn = _get_conn()
if conn is None:
return None
cur = conn.cursor()
if passed_jid is None:
jid = _gen_jid(cur)
else:
jid = passed_jid
while not jid:
log.info("jid clash, generating a new one")
jid = _gen_jid(cur)
cur.close()
conn.close()
return jid | [
"def",
"prep_jid",
"(",
"nocache",
"=",
"False",
",",
"passed_jid",
"=",
"None",
")",
":",
"conn",
"=",
"_get_conn",
"(",
")",
"if",
"conn",
"is",
"None",
":",
"return",
"None",
"cur",
"=",
"conn",
".",
"cursor",
"(",
")",
"if",
"passed_jid",
"is",
"None",
":",
"jid",
"=",
"_gen_jid",
"(",
"cur",
")",
"else",
":",
"jid",
"=",
"passed_jid",
"while",
"not",
"jid",
":",
"log",
".",
"info",
"(",
"\"jid clash, generating a new one\"",
")",
"jid",
"=",
"_gen_jid",
"(",
"cur",
")",
"cur",
".",
"close",
"(",
")",
"conn",
".",
"close",
"(",
")",
"return",
"jid"
]
| Return a job id and prepare the job id directory
This is the function responsible for making sure jids don't collide
(unless its passed a jid). So do what you have to do to make sure that
stays the case | [
"Return",
"a",
"job",
"id",
"and",
"prepare",
"the",
"job",
"id",
"directory",
"This",
"is",
"the",
"function",
"responsible",
"for",
"making",
"sure",
"jids",
"don",
"t",
"collide",
"(",
"unless",
"its",
"passed",
"a",
"jid",
")",
".",
"So",
"do",
"what",
"you",
"have",
"to",
"do",
"to",
"make",
"sure",
"that",
"stays",
"the",
"case"
]
| python | train | 26.590909 |
python-bugzilla/python-bugzilla | bugzilla/base.py | https://github.com/python-bugzilla/python-bugzilla/blob/7de8b225104f24a1eee3e837bf1e02d60aefe69f/bugzilla/base.py#L658-L682 | def logged_in(self):
"""
This is True if this instance is logged in else False.
We test if this session is authenticated by calling the User.get()
XMLRPC method with ids set. Logged-out users cannot pass the 'ids'
parameter and will result in a 505 error. If we tried to login with a
token, but the token was incorrect or expired, the server returns a
32000 error.
For Bugzilla 5 and later, a new method, User.valid_login is available
to test the validity of the token. However, this will require that the
username be cached along with the token in order to work effectively in
all scenarios and is not currently used. For more information, refer to
the following url.
http://bugzilla.readthedocs.org/en/latest/api/core/v1/user.html#valid-login
"""
try:
self._proxy.User.get({'ids': []})
return True
except Fault as e:
if e.faultCode == 505 or e.faultCode == 32000:
return False
raise e | [
"def",
"logged_in",
"(",
"self",
")",
":",
"try",
":",
"self",
".",
"_proxy",
".",
"User",
".",
"get",
"(",
"{",
"'ids'",
":",
"[",
"]",
"}",
")",
"return",
"True",
"except",
"Fault",
"as",
"e",
":",
"if",
"e",
".",
"faultCode",
"==",
"505",
"or",
"e",
".",
"faultCode",
"==",
"32000",
":",
"return",
"False",
"raise",
"e"
]
| This is True if this instance is logged in else False.
We test if this session is authenticated by calling the User.get()
XMLRPC method with ids set. Logged-out users cannot pass the 'ids'
parameter; doing so results in a 505 error. If we try to log in with a
token that is incorrect or expired, the server returns a
32000 error.
For Bugzilla 5 and later, a new method, User.valid_login is available
to test the validity of the token. However, this will require that the
username be cached along with the token in order to work effectively in
all scenarios and is not currently used. For more information, refer to
the following url.
http://bugzilla.readthedocs.org/en/latest/api/core/v1/user.html#valid-login | [
"This",
"is",
"True",
"if",
"this",
"instance",
"is",
"logged",
"in",
"else",
"False",
"."
]
| python | train | 42.28 |
mdiener/grace | grace/py27/slimit/parser.py | https://github.com/mdiener/grace/blob/2dab13a2cf636da5da989904c5885166fc94d36d/grace/py27/slimit/parser.py#L1153-L1155 | def p_catch(self, p):
"""catch : CATCH LPAREN identifier RPAREN block"""
p[0] = ast.Catch(identifier=p[3], elements=p[5]) | [
"def",
"p_catch",
"(",
"self",
",",
"p",
")",
":",
"p",
"[",
"0",
"]",
"=",
"ast",
".",
"Catch",
"(",
"identifier",
"=",
"p",
"[",
"3",
"]",
",",
"elements",
"=",
"p",
"[",
"5",
"]",
")"
]
| catch : CATCH LPAREN identifier RPAREN block | [
"catch",
":",
"CATCH",
"LPAREN",
"identifier",
"RPAREN",
"block"
]
| python | train | 45 |
ionelmc/python-cogen | cogen/core/proactors/base.py | https://github.com/ionelmc/python-cogen/blob/83b0edb88425eba6e5bfda9f1dcd34642517e2a8/cogen/core/proactors/base.py#L285-L295 | def yield_event(self, act):
"""
Handle completion for a request and return an (op, coro) to be
passed to the scheduler on the last completion loop of a proactor.
"""
if act in self.tokens:
coro = act.coro
op = self.try_run_act(act, self.tokens[act])
if op:
del self.tokens[act]
return op, coro | [
"def",
"yield_event",
"(",
"self",
",",
"act",
")",
":",
"if",
"act",
"in",
"self",
".",
"tokens",
":",
"coro",
"=",
"act",
".",
"coro",
"op",
"=",
"self",
".",
"try_run_act",
"(",
"act",
",",
"self",
".",
"tokens",
"[",
"act",
"]",
")",
"if",
"op",
":",
"del",
"self",
".",
"tokens",
"[",
"act",
"]",
"return",
"op",
",",
"coro"
]
| Handle completion for a request and return an (op, coro) to be
passed to the scheduler on the last completion loop of a proactor. | [
"Hande",
"completion",
"for",
"a",
"request",
"and",
"return",
"an",
"(",
"op",
"coro",
")",
"to",
"be",
"passed",
"to",
"the",
"scheduler",
"on",
"the",
"last",
"completion",
"loop",
"of",
"a",
"proactor",
"."
]
| python | train | 36.363636 |
ehansis/ozelot | examples/superheroes/superheroes/pipeline.py | https://github.com/ehansis/ozelot/blob/948675e02eb6fca940450f5cb814f53e97159e5b/examples/superheroes/superheroes/pipeline.py#L123-L157 | def get_attribute_data(doc):
"""Helper function: parse attribute data from a wiki html doc
Args:
doc (document parsed with lxml.html): parsed wiki page
Returns:
dict: attributes values and listed links, format ``{<key>: {'value': <value>, 'link': <link>}}``;
only the first hyperlink listed in each attribute value is included
"""
attributes = dict()
for attribute_node in doc.xpath("//div[contains(@class, 'pi-data ')]"):
# label node
node = attribute_node.xpath(".//*[contains(@class, 'pi-data-label')]")[0]
label = " ".join(node.itertext()).strip()
# value node
node = attribute_node.xpath(".//*[contains(@class, 'pi-data-value')]")[0]
# get value, first link, and the link text
value = " ".join(node.itertext()).strip()
link_node = node.find('a')
if link_node is not None:
link = link_node.get('href')
link_text = link_node.text
else:
link = None
link_text = None
# store result
attributes[label] = dict(value=value,
link=link,
link_text=link_text)
return attributes | [
"def",
"get_attribute_data",
"(",
"doc",
")",
":",
"attributes",
"=",
"dict",
"(",
")",
"for",
"attribute_node",
"in",
"doc",
".",
"xpath",
"(",
"\"//div[contains(@class, 'pi-data ')]\"",
")",
":",
"# label node",
"node",
"=",
"attribute_node",
".",
"xpath",
"(",
"\".//*[contains(@class, 'pi-data-label')]\"",
")",
"[",
"0",
"]",
"label",
"=",
"\" \"",
".",
"join",
"(",
"node",
".",
"itertext",
"(",
")",
")",
".",
"strip",
"(",
")",
"# value node",
"node",
"=",
"attribute_node",
".",
"xpath",
"(",
"\".//*[contains(@class, 'pi-data-value')]\"",
")",
"[",
"0",
"]",
"# get value, first link, and the link text",
"value",
"=",
"\" \"",
".",
"join",
"(",
"node",
".",
"itertext",
"(",
")",
")",
".",
"strip",
"(",
")",
"link_node",
"=",
"node",
".",
"find",
"(",
"'a'",
")",
"if",
"link_node",
"is",
"not",
"None",
":",
"link",
"=",
"link_node",
".",
"get",
"(",
"'href'",
")",
"link_text",
"=",
"link_node",
".",
"text",
"else",
":",
"link",
"=",
"None",
"link_text",
"=",
"None",
"# store result",
"attributes",
"[",
"label",
"]",
"=",
"dict",
"(",
"value",
"=",
"value",
",",
"link",
"=",
"link",
",",
"link_text",
"=",
"link_text",
")",
"return",
"attributes"
]
| Helper function: parse attribute data from a wiki html doc
Args:
doc (document parsed with lxml.html): parsed wiki page
Returns:
dict: attributes values and listed links, format ``{<key>: {'value': <value>, 'link': <link>}}``;
only the first hyperlink listed in each attribute value is included | [
"Helper",
"function",
":",
"parse",
"attribute",
"data",
"from",
"a",
"wiki",
"html",
"doc"
]
| python | train | 34.571429 |
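A minimal, runnable sketch of feeding this parser (uses `get_attribute_data` from the row above; the HTML snippet mimics the wiki infobox markup the XPaths expect):

```python
import lxml.html

html = """
<div class="pi-data ">
  <h3 class="pi-data-label">Alignment</h3>
  <div class="pi-data-value"><a href="/wiki/Good">Good</a></div>
</div>
"""
doc = lxml.html.fromstring(html)
print(get_attribute_data(doc))
# {'Alignment': {'value': 'Good', 'link': '/wiki/Good', 'link_text': 'Good'}}
```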
cuenca-mx/clabe-python | clabe/__init__.py | https://github.com/cuenca-mx/clabe-python/blob/6a8fe4d306cb3f4f29d30efedabe5d82b442da99/clabe/__init__.py#L35-L46 | def get_bank_name(clabe: str) -> str:
"""
Returns the bank name based on the first 3 digits
https://es.wikipedia.org/wiki/CLABE#D.C3.ADgito_control
"""
code = clabe[:3]
try:
bank_name = BANK_NAMES[BANKS[code]]
except KeyError:
raise ValueError(f"Ningún banco tiene código '{code}'")
else:
return bank_name | [
"def",
"get_bank_name",
"(",
"clabe",
":",
"str",
")",
"->",
"str",
":",
"code",
"=",
"clabe",
"[",
":",
"3",
"]",
"try",
":",
"bank_name",
"=",
"BANK_NAMES",
"[",
"BANKS",
"[",
"code",
"]",
"]",
"except",
"KeyError",
":",
"raise",
"ValueError",
"(",
"f\"Ningún banco tiene código '{code}'\")",
"",
"else",
":",
"return",
"bank_name"
]
| Returns the bank name based on the first 3 digits
https://es.wikipedia.org/wiki/CLABE#D.C3.ADgito_control | [
"Regresa",
"el",
"nombre",
"del",
"banco",
"basado",
"en",
"los",
"primeros",
"3",
"digitos",
"https",
":",
"//",
"es",
".",
"wikipedia",
".",
"org",
"/",
"wiki",
"/",
"CLABE#D",
".",
"C3",
".",
"ADgito_control"
]
| python | train | 30.083333 |
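A hedged usage sketch (the 18-digit CLABE below is made up; 012 is the code for BBVA Bancomer, and the exact string returned depends on the package's BANK_NAMES table):

```python
print(get_bank_name("012180001234567897"))  # e.g. 'BBVA Bancomer'

try:
    get_bank_name("999000000000000000")
except ValueError as exc:
    print(exc)  # no bank has code '999'
```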
StackStorm/pybind | pybind/slxos/v17s_1_02/resource_monitor/cpu/__init__.py | https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/slxos/v17s_1_02/resource_monitor/cpu/__init__.py#L295-L316 | def _set_offset_cpu(self, v, load=False):
"""
Setter method for offset_cpu, mapped from YANG variable /resource_monitor/cpu/offset_cpu (uint32)
If this variable is read-only (config: false) in the
source YANG file, then _set_offset_cpu is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_offset_cpu() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'0 .. 70']}), is_leaf=True, yang_name="offset-cpu", rest_name="thresh-offset", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Offset to CPU threshold for testing', u'hidden': u'debug', u'alt-name': u'thresh-offset', u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-resource-monitor', defining_module='brocade-resource-monitor', yang_type='uint32', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """offset_cpu must be of a type compatible with uint32""",
'defined-type': "uint32",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'0 .. 70']}), is_leaf=True, yang_name="offset-cpu", rest_name="thresh-offset", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Offset to CPU threshold for testing', u'hidden': u'debug', u'alt-name': u'thresh-offset', u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-resource-monitor', defining_module='brocade-resource-monitor', yang_type='uint32', is_config=True)""",
})
self.__offset_cpu = t
if hasattr(self, '_set'):
self._set() | [
"def",
"_set_offset_cpu",
"(",
"self",
",",
"v",
",",
"load",
"=",
"False",
")",
":",
"if",
"hasattr",
"(",
"v",
",",
"\"_utype\"",
")",
":",
"v",
"=",
"v",
".",
"_utype",
"(",
"v",
")",
"try",
":",
"t",
"=",
"YANGDynClass",
"(",
"v",
",",
"base",
"=",
"RestrictedClassType",
"(",
"base_type",
"=",
"RestrictedClassType",
"(",
"base_type",
"=",
"long",
",",
"restriction_dict",
"=",
"{",
"'range'",
":",
"[",
"'0..4294967295'",
"]",
"}",
",",
"int_size",
"=",
"32",
")",
",",
"restriction_dict",
"=",
"{",
"'range'",
":",
"[",
"u'0 .. 70'",
"]",
"}",
")",
",",
"is_leaf",
"=",
"True",
",",
"yang_name",
"=",
"\"offset-cpu\"",
",",
"rest_name",
"=",
"\"thresh-offset\"",
",",
"parent",
"=",
"self",
",",
"path_helper",
"=",
"self",
".",
"_path_helper",
",",
"extmethods",
"=",
"self",
".",
"_extmethods",
",",
"register_paths",
"=",
"True",
",",
"extensions",
"=",
"{",
"u'tailf-common'",
":",
"{",
"u'info'",
":",
"u'Offset to CPU threshold for testing'",
",",
"u'hidden'",
":",
"u'debug'",
",",
"u'alt-name'",
":",
"u'thresh-offset'",
",",
"u'cli-suppress-no'",
":",
"None",
"}",
"}",
",",
"namespace",
"=",
"'urn:brocade.com:mgmt:brocade-resource-monitor'",
",",
"defining_module",
"=",
"'brocade-resource-monitor'",
",",
"yang_type",
"=",
"'uint32'",
",",
"is_config",
"=",
"True",
")",
"except",
"(",
"TypeError",
",",
"ValueError",
")",
":",
"raise",
"ValueError",
"(",
"{",
"'error-string'",
":",
"\"\"\"offset_cpu must be of a type compatible with uint32\"\"\"",
",",
"'defined-type'",
":",
"\"uint32\"",
",",
"'generated-type'",
":",
"\"\"\"YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'0 .. 70']}), is_leaf=True, yang_name=\"offset-cpu\", rest_name=\"thresh-offset\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Offset to CPU threshold for testing', u'hidden': u'debug', u'alt-name': u'thresh-offset', u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-resource-monitor', defining_module='brocade-resource-monitor', yang_type='uint32', is_config=True)\"\"\"",
",",
"}",
")",
"self",
".",
"__offset_cpu",
"=",
"t",
"if",
"hasattr",
"(",
"self",
",",
"'_set'",
")",
":",
"self",
".",
"_set",
"(",
")"
]
| Setter method for offset_cpu, mapped from YANG variable /resource_monitor/cpu/offset_cpu (uint32)
If this variable is read-only (config: false) in the
source YANG file, then _set_offset_cpu is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_offset_cpu() directly. | [
"Setter",
"method",
"for",
"offset_cpu",
"mapped",
"from",
"YANG",
"variable",
"/",
"resource_monitor",
"/",
"cpu",
"/",
"offset_cpu",
"(",
"uint32",
")",
"If",
"this",
"variable",
"is",
"read",
"-",
"only",
"(",
"config",
":",
"false",
")",
"in",
"the",
"source",
"YANG",
"file",
"then",
"_set_offset_cpu",
"is",
"considered",
"as",
"a",
"private",
"method",
".",
"Backends",
"looking",
"to",
"populate",
"this",
"variable",
"should",
"do",
"so",
"via",
"calling",
"thisObj",
".",
"_set_offset_cpu",
"()",
"directly",
"."
]
| python | train | 93.181818 |
gmr/helper | helper/unix.py | https://github.com/gmr/helper/blob/fe8e45fc8eabf619429b2940c682c252ee33c082/helper/unix.py#L204-L217 | def _get_exception_log_path():
"""Return the normalized path for the connection log, raising an
exception if it can not written to.
:return: str
"""
app = sys.argv[0].split('/')[-1]
for exception_log in ['/var/log/%s.errors' % app,
'/var/tmp/%s.errors' % app,
'/tmp/%s.errors' % app]:
if os.access(path.dirname(exception_log), os.W_OK):
return exception_log
return None | [
"def",
"_get_exception_log_path",
"(",
")",
":",
"app",
"=",
"sys",
".",
"argv",
"[",
"0",
"]",
".",
"split",
"(",
"'/'",
")",
"[",
"-",
"1",
"]",
"for",
"exception_log",
"in",
"[",
"'/var/log/%s.errors'",
"%",
"app",
",",
"'/var/tmp/%s.errors'",
"%",
"app",
",",
"'/tmp/%s.errors'",
"%",
"app",
"]",
":",
"if",
"os",
".",
"access",
"(",
"path",
".",
"dirname",
"(",
"exception_log",
")",
",",
"os",
".",
"W_OK",
")",
":",
"return",
"exception_log",
"return",
"None"
]
| Return the normalized path for the exception log, raising an
exception if it cannot be written to.
:return: str | [
"Return",
"the",
"normalized",
"path",
"for",
"the",
"connection",
"log",
"raising",
"an",
"exception",
"if",
"it",
"can",
"not",
"written",
"to",
"."
]
| python | train | 35.857143 |
RudolfCardinal/pythonlib | cardinal_pythonlib/psychiatry/drugs.py | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/psychiatry/drugs.py#L1439-L1454 | def drug_names_match_criteria(drug_names: List[str],
names_are_generic: bool = False,
include_categories: bool = False,
**criteria: Dict[str, bool]) -> List[bool]:
"""
Establish whether multiple drugs, passed as a list of drug names, each
matches the specified criteria. See :func:`drug_matches_criteria`.
"""
return [
drug_name_matches_criteria(
dn,
name_is_generic=names_are_generic,
include_categories=include_categories,
**criteria)
for dn in drug_names
] | [
"def",
"drug_names_match_criteria",
"(",
"drug_names",
":",
"List",
"[",
"str",
"]",
",",
"names_are_generic",
":",
"bool",
"=",
"False",
",",
"include_categories",
":",
"bool",
"=",
"False",
",",
"*",
"*",
"criteria",
":",
"Dict",
"[",
"str",
",",
"bool",
"]",
")",
"->",
"List",
"[",
"bool",
"]",
":",
"return",
"[",
"drug_name_matches_criteria",
"(",
"dn",
",",
"name_is_generic",
"=",
"names_are_generic",
",",
"include_categories",
"=",
"include_categories",
",",
"*",
"*",
"criteria",
")",
"for",
"dn",
"in",
"drug_names",
"]"
]
| Establish whether multiple drugs, passed as a list of drug names, each
matches the specified criteria. See :func:`drug_matches_criteria`. | [
"Establish",
"whether",
"multiple",
"drugs",
"passed",
"as",
"a",
"list",
"of",
"drug",
"names",
"each",
"matches",
"the",
"specified",
"criteria",
".",
"See",
":",
"func",
":",
"drug_matches_criteria",
"."
]
| python | train | 38.875 |
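A hedged usage sketch (the `antidepressant` keyword is an illustrative criterion flag; the real set of flags comes from the package's Drug definitions):

```python
flags = drug_names_match_criteria(
    ["citalopram", "olanzapine"],
    names_are_generic=True,
    antidepressant=True,  # illustrative **criteria flag
)
print(flags)  # e.g. [True, False]
```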
HiPERCAM/hcam_widgets | hcam_widgets/widgets.py | https://github.com/HiPERCAM/hcam_widgets/blob/7219f0d96dd3a8ebe3139c7f542a72c02d02fce8/hcam_widgets/widgets.py#L2751-L2766 | def dumpJSON(self):
"""
Return dictionary of data for FITS headers.
"""
g = get_root(self).globals
return dict(
RA=self.ra['text'],
DEC=self.dec['text'],
tel=g.cpars['telins_name'],
alt=self._getVal(self.alt),
az=self._getVal(self.az),
secz=self._getVal(self.airmass),
pa=self._getVal(self.pa),
foc=self._getVal(self.focus),
mdist=self._getVal(self.mdist)
) | [
"def",
"dumpJSON",
"(",
"self",
")",
":",
"g",
"=",
"get_root",
"(",
"self",
")",
".",
"globals",
"return",
"dict",
"(",
"RA",
"=",
"self",
".",
"ra",
"[",
"'text'",
"]",
",",
"DEC",
"=",
"self",
".",
"dec",
"[",
"'text'",
"]",
",",
"tel",
"=",
"g",
".",
"cpars",
"[",
"'telins_name'",
"]",
",",
"alt",
"=",
"self",
".",
"_getVal",
"(",
"self",
".",
"alt",
")",
",",
"az",
"=",
"self",
".",
"_getVal",
"(",
"self",
".",
"az",
")",
",",
"secz",
"=",
"self",
".",
"_getVal",
"(",
"self",
".",
"airmass",
")",
",",
"pa",
"=",
"self",
".",
"_getVal",
"(",
"self",
".",
"pa",
")",
",",
"foc",
"=",
"self",
".",
"_getVal",
"(",
"self",
".",
"focus",
")",
",",
"mdist",
"=",
"self",
".",
"_getVal",
"(",
"self",
".",
"mdist",
")",
")"
]
| Return dictionary of data for FITS headers. | [
"Return",
"dictionary",
"of",
"data",
"for",
"FITS",
"headers",
"."
]
| python | train | 31.125 |
pybel/pybel-tools | src/pybel_tools/analysis/heat.py | https://github.com/pybel/pybel-tools/blob/3491adea0ac4ee60f57275ef72f9b73da6dbfe0c/src/pybel_tools/analysis/heat.py#L239-L266 | def workflow(
graph: BELGraph,
node: BaseEntity,
key: Optional[str] = None,
tag: Optional[str] = None,
default_score: Optional[float] = None,
runs: Optional[int] = None,
minimum_nodes: int = 1,
) -> List['Runner']:
"""Generate candidate mechanisms and run the heat diffusion workflow.
:param graph: A BEL graph
:param node: The BEL node that is the focus of this analysis
:param key: The key in the node data dictionary representing the experimental data. Defaults to
:data:`pybel_tools.constants.WEIGHT`.
:param tag: The key for the nodes' data dictionaries where the scores will be put. Defaults to 'score'
:param default_score: The initial score for all nodes. This number can go up or down.
:param runs: The number of times to run the heat diffusion workflow. Defaults to 100.
:param minimum_nodes: The minimum number of nodes a sub-graph needs to try running heat diffusion
:return: A list of runners
"""
subgraph = generate_mechanism(graph, node, key=key)
if subgraph.number_of_nodes() <= minimum_nodes:
return []
runners = multirun(subgraph, node, key=key, tag=tag, default_score=default_score, runs=runs)
return list(runners) | [
"def",
"workflow",
"(",
"graph",
":",
"BELGraph",
",",
"node",
":",
"BaseEntity",
",",
"key",
":",
"Optional",
"[",
"str",
"]",
"=",
"None",
",",
"tag",
":",
"Optional",
"[",
"str",
"]",
"=",
"None",
",",
"default_score",
":",
"Optional",
"[",
"float",
"]",
"=",
"None",
",",
"runs",
":",
"Optional",
"[",
"int",
"]",
"=",
"None",
",",
"minimum_nodes",
":",
"int",
"=",
"1",
",",
")",
"->",
"List",
"[",
"'Runner'",
"]",
":",
"subgraph",
"=",
"generate_mechanism",
"(",
"graph",
",",
"node",
",",
"key",
"=",
"key",
")",
"if",
"subgraph",
".",
"number_of_nodes",
"(",
")",
"<=",
"minimum_nodes",
":",
"return",
"[",
"]",
"runners",
"=",
"multirun",
"(",
"subgraph",
",",
"node",
",",
"key",
"=",
"key",
",",
"tag",
"=",
"tag",
",",
"default_score",
"=",
"default_score",
",",
"runs",
"=",
"runs",
")",
"return",
"list",
"(",
"runners",
")"
]
| Generate candidate mechanisms and run the heat diffusion workflow.
:param graph: A BEL graph
:param node: The BEL node that is the focus of this analysis
:param key: The key in the node data dictionary representing the experimental data. Defaults to
:data:`pybel_tools.constants.WEIGHT`.
:param tag: The key for the nodes' data dictionaries where the scores will be put. Defaults to 'score'
:param default_score: The initial score for all nodes. This number can go up or down.
:param runs: The number of times to run the heat diffusion workflow. Defaults to 100.
:param minimum_nodes: The minimum number of nodes a sub-graph needs to try running heat diffusion
:return: A list of runners | [
"Generate",
"candidate",
"mechanisms",
"and",
"run",
"the",
"heat",
"diffusion",
"workflow",
"."
]
| python | valid | 44.071429 |
sys-git/certifiable | certifiable/operators.py | https://github.com/sys-git/certifiable/blob/a3c33c0d4f3ac2c53be9eded3fae633fa5f697f8/certifiable/operators.py#L34-L55 | def AND(*args, **kwargs):
"""
ALL args must not raise an exception when called incrementally.
If an exception is specified, raise it, otherwise raise the callable's exception.
    :param iterable[Certifier] args:
The certifiers to call
:param callable kwargs['exc']:
        Callable that accepts the unexpectedly raised exception as argument and returns an
exception to raise.
:raises CertifierError:
The first certifier error if at least one raises a certifier error.
"""
for arg in args:
try:
arg()
except CertifierError as e:
exc = kwargs.get('exc', None)
if exc is not None:
raise exc(e)
raise | [
"def",
"AND",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"for",
"arg",
"in",
"args",
":",
"try",
":",
"arg",
"(",
")",
"except",
"CertifierError",
"as",
"e",
":",
"exc",
"=",
"kwargs",
".",
"get",
"(",
"'exc'",
",",
"None",
")",
"if",
"exc",
"is",
"not",
"None",
":",
"raise",
"exc",
"(",
"e",
")",
"raise"
]
| ALL args must not raise an exception when called incrementally.
If an exception is specified, raise it, otherwise raise the callable's exception.
:param iterable[Certifier] args:
The certifiers to call
:param callable kwargs['exc']:
Callable that accepts the unexpectedly raised exception as argument and returns an
exception to raise.
:raises CertifierError:
The first certifier error if at least one raises a certifier error. | [
"ALL",
"args",
"must",
"not",
"raise",
"an",
"exception",
"when",
"called",
"incrementally",
".",
"If",
"an",
"exception",
"is",
"specified",
"raise",
"it",
"otherwise",
"raise",
"the",
"callable",
"s",
"exception",
"."
]
| python | train | 32.318182 |
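A minimal usage sketch of the AND combinator above. `CertifierError`, the two certifier callables, and the sample values are local stand-ins defined here for illustration, not the library's real classes:

```python
# Minimal sketch of the AND combinator's contract, with local stand-ins.
class CertifierError(Exception):
    """Stand-in for certifiable's base error type."""

def AND(*args, **kwargs):
    # Same logic as the source record: run each certifier in turn and
    # translate the first CertifierError via the optional `exc` factory.
    for arg in args:
        try:
            arg()
        except CertifierError as e:
            exc = kwargs.get('exc', None)
            if exc is not None:
                raise exc(e)
            raise

def certify_positive(x):
    if x <= 0:
        raise CertifierError('%r is not positive' % x)

def certify_even(x):
    if x % 2:
        raise CertifierError('%r is not even' % x)

# Both certifiers must pass; the first failure wins.
AND(lambda: certify_positive(4), lambda: certify_even(4))  # passes silently
try:
    AND(lambda: certify_positive(3), lambda: certify_even(3),
        exc=lambda e: ValueError(str(e)))
except ValueError as e:
    print('wrapped:', e)  # wrapped: 3 is not even
```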
smarie/python-valid8 | valid8/entry_points_annotations.py | https://github.com/smarie/python-valid8/blob/5e15d1de11602933c5114eb9f73277ad91d97800/valid8/entry_points_annotations.py#L945-L963 | def _assert_input_is_valid(input_value, # type: Any
validators, # type: List[InputValidator]
validated_func, # type: Callable
input_name # type: str
):
"""
Called by the `validating_wrapper` in the first step (a) `apply_on_each_func_args` for each function input before
executing the function. It simply delegates to the validator. The signature of this function is hardcoded to
correspond to `apply_on_each_func_args`'s behaviour and should therefore not be changed.
:param input_value: the value to validate
:param validator: the Validator object that will be applied on input_value_to_validate
:param validated_func: the function for which this validation is performed. This is not used since the Validator
knows it already, but we should not change the signature here.
:param input_name: the name of the function input that is being validated
:return: Nothing
"""
for validator in validators:
validator.assert_valid(input_name, input_value) | [
"def",
"_assert_input_is_valid",
"(",
"input_value",
",",
"# type: Any",
"validators",
",",
"# type: List[InputValidator]",
"validated_func",
",",
"# type: Callable",
"input_name",
"# type: str",
")",
":",
"for",
"validator",
"in",
"validators",
":",
"validator",
".",
"assert_valid",
"(",
"input_name",
",",
"input_value",
")"
]
| Called by the `validating_wrapper` in the first step (a) `apply_on_each_func_args` for each function input before
executing the function. It simply delegates to the validator. The signature of this function is hardcoded to
correspond to `apply_on_each_func_args`'s behaviour and should therefore not be changed.
:param input_value: the value to validate
:param validator: the Validator object that will be applied on input_value_to_validate
:param validated_func: the function for which this validation is performed. This is not used since the Validator
knows it already, but we should not change the signature here.
:param input_name: the name of the function input that is being validated
:return: Nothing | [
"Called",
"by",
"the",
"validating_wrapper",
"in",
"the",
"first",
"step",
"(",
"a",
")",
"apply_on_each_func_args",
"for",
"each",
"function",
"input",
"before",
"executing",
"the",
"function",
".",
"It",
"simply",
"delegates",
"to",
"the",
"validator",
".",
"The",
"signature",
"of",
"this",
"function",
"is",
"hardcoded",
"to",
"correspond",
"to",
"apply_on_each_func_args",
"s",
"behaviour",
"and",
"should",
"therefore",
"not",
"be",
"changed",
"."
]
| python | train | 58.473684 |
UCL-INGI/INGInious | inginious/frontend/cookieless_app.py | https://github.com/UCL-INGI/INGInious/blob/cbda9a9c7f2b8e8eb1e6d7d51f0d18092086300c/inginious/frontend/cookieless_app.py#L263-L267 | def expired(self):
"""Called when an expired session is atime"""
self._data["_killed"] = True
self.save()
raise SessionExpired(self._config.expired_message) | [
"def",
"expired",
"(",
"self",
")",
":",
"self",
".",
"_data",
"[",
"\"_killed\"",
"]",
"=",
"True",
"self",
".",
"save",
"(",
")",
"raise",
"SessionExpired",
"(",
"self",
".",
"_config",
".",
"expired_message",
")"
]
| Called when an expired session is accessed | [
"Called",
"when",
"an",
"expired",
"session",
"is",
"atime"
]
| python | train | 36.8 |
OCHA-DAP/hdx-python-utilities | src/hdx/utilities/dictandlist.py | https://github.com/OCHA-DAP/hdx-python-utilities/blob/9c89e0aa5afac2c002b02a2d8f0e5b91eeb3d2a3/src/hdx/utilities/dictandlist.py#L84-L103 | def dict_diff(d1, d2, no_key='<KEYNOTFOUND>'):
# type: (DictUpperBound, DictUpperBound, str) -> Dict
"""Compares two dictionaries
Args:
d1 (DictUpperBound): First dictionary to compare
d2 (DictUpperBound): Second dictionary to compare
        no_key (str): What value to use if key is not found. Defaults to '<KEYNOTFOUND>'.
Returns:
Dict: Comparison dictionary
"""
d1keys = set(d1.keys())
d2keys = set(d2.keys())
both = d1keys & d2keys
diff = {k: (d1[k], d2[k]) for k in both if d1[k] != d2[k]}
diff.update({k: (d1[k], no_key) for k in d1keys - both})
diff.update({k: (no_key, d2[k]) for k in d2keys - both})
return diff | [
"def",
"dict_diff",
"(",
"d1",
",",
"d2",
",",
"no_key",
"=",
"'<KEYNOTFOUND>'",
")",
":",
"# type: (DictUpperBound, DictUpperBound, str) -> Dict",
"d1keys",
"=",
"set",
"(",
"d1",
".",
"keys",
"(",
")",
")",
"d2keys",
"=",
"set",
"(",
"d2",
".",
"keys",
"(",
")",
")",
"both",
"=",
"d1keys",
"&",
"d2keys",
"diff",
"=",
"{",
"k",
":",
"(",
"d1",
"[",
"k",
"]",
",",
"d2",
"[",
"k",
"]",
")",
"for",
"k",
"in",
"both",
"if",
"d1",
"[",
"k",
"]",
"!=",
"d2",
"[",
"k",
"]",
"}",
"diff",
".",
"update",
"(",
"{",
"k",
":",
"(",
"d1",
"[",
"k",
"]",
",",
"no_key",
")",
"for",
"k",
"in",
"d1keys",
"-",
"both",
"}",
")",
"diff",
".",
"update",
"(",
"{",
"k",
":",
"(",
"no_key",
",",
"d2",
"[",
"k",
"]",
")",
"for",
"k",
"in",
"d2keys",
"-",
"both",
"}",
")",
"return",
"diff"
]
| Compares two dictionaries
Args:
d1 (DictUpperBound): First dictionary to compare
d2 (DictUpperBound): Second dictionary to compare
no_key (str): What value to use if key is not found Defaults to '<KEYNOTFOUND>'.
Returns:
Dict: Comparison dictionary | [
"Compares",
"two",
"dictionaries"
]
| python | train | 33.8 |
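A quick usage sketch of `dict_diff` above; the function body is copied verbatim from the record so the snippet runs standalone, and the sample dicts are invented:

```python
def dict_diff(d1, d2, no_key='<KEYNOTFOUND>'):
    # Copied from the source above: pairs (d1-value, d2-value) for
    # differing keys, with `no_key` filling in one-sided entries.
    d1keys = set(d1.keys())
    d2keys = set(d2.keys())
    both = d1keys & d2keys
    diff = {k: (d1[k], d2[k]) for k in both if d1[k] != d2[k]}
    diff.update({k: (d1[k], no_key) for k in d1keys - both})
    diff.update({k: (no_key, d2[k]) for k in d2keys - both})
    return diff

old = {'a': 1, 'b': 2, 'c': 3}
new = {'b': 2, 'c': 30, 'd': 4}
print(dict_diff(old, new))
# {'c': (3, 30), 'a': (1, '<KEYNOTFOUND>'), 'd': ('<KEYNOTFOUND>', 4)}
```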
gamechanger/mongothon | mongothon/events.py | https://github.com/gamechanger/mongothon/blob/5305bdae8e38d09bfe7881f1edc99ac0a2e6b96b/mongothon/events.py#L15-L24 | def register(self, event, fn):
"""
Registers the given function as a handler to be applied
        in response to the given event.
"""
# TODO: Can we check the method signature?
self._handler_dict.setdefault(event, [])
if fn not in self._handler_dict[event]:
self._handler_dict[event].append(fn) | [
"def",
"register",
"(",
"self",
",",
"event",
",",
"fn",
")",
":",
"# TODO: Can we check the method signature?",
"self",
".",
"_handler_dict",
".",
"setdefault",
"(",
"event",
",",
"[",
"]",
")",
"if",
"fn",
"not",
"in",
"self",
".",
"_handler_dict",
"[",
"event",
"]",
":",
"self",
".",
"_handler_dict",
"[",
"event",
"]",
".",
"append",
"(",
"fn",
")"
]
| Registers the given function as a handler to be applied
in response to the given event. | [
"Registers",
"the",
"given",
"function",
"as",
"a",
"handler",
"to",
"be",
"applied",
"in",
"response",
"to",
"the",
"the",
"given",
"event",
"."
]
| python | train | 35.1 |
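The `register` method above is idempotent per event. A standalone sketch of that contract, with a throwaway registry class and handler names invented for illustration:

```python
class HandlerRegistry(object):
    """Toy registry mirroring the register() semantics above."""
    def __init__(self):
        self._handler_dict = {}

    def register(self, event, fn):
        # Create the event bucket lazily; never add the same handler twice.
        self._handler_dict.setdefault(event, [])
        if fn not in self._handler_dict[event]:
            self._handler_dict[event].append(fn)

def on_save(doc):
    print('saved', doc)

registry = HandlerRegistry()
registry.register('did_save', on_save)
registry.register('did_save', on_save)  # duplicate is ignored
assert registry._handler_dict['did_save'] == [on_save]
```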
SMTG-UCL/sumo | sumo/electronic_structure/effective_mass.py | https://github.com/SMTG-UCL/sumo/blob/47aec6bbfa033a624435a65bd4edabd18bfb437f/sumo/electronic_structure/effective_mass.py#L116-L152 | def fit_effective_mass(distances, energies, parabolic=True):
"""Fit the effective masses using either a parabolic or nonparabolic fit.
Args:
distances (:obj:`numpy.ndarray`): The x-distances between k-points in
reciprocal Angstroms, normalised to the band extrema.
energies (:obj:`numpy.ndarray`): The band eigenvalues normalised to the
eigenvalue of the band extrema.
parabolic (:obj:`bool`, optional): Use a parabolic fit of the band
edges. If ``False`` then nonparabolic fitting will be attempted.
Defaults to ``True``.
Returns:
float: The effective mass in units of electron rest mass, :math:`m_0`.
"""
if parabolic:
fit = np.polyfit(distances, energies, 2)
c = 2 * fit[0] # curvature therefore 2 * the exponent on the ^2 term
else:
# Use non parabolic description of the bands
def f(x, alpha, d):
top = np.sqrt(4 * alpha * d * x**2 + 1) - 1
bot = 2 * alpha
return top / bot
# set boundaries for curve fitting: alpha > 1e-8
# as alpha = 0 causes an error
bounds = ((1e-8, -np.inf), (np.inf, np.inf))
popt, _ = curve_fit(f, distances, energies, p0=[1., 1.],
bounds=bounds)
c = 2 * popt[1]
# coefficient is currently in eV/Angstrom^2/h_bar^2
# want it in atomic units so Hartree/bohr^2/h_bar^2
eff_mass = (angstrom_to_bohr**2 / eV_to_hartree) / c
return eff_mass | [
"def",
"fit_effective_mass",
"(",
"distances",
",",
"energies",
",",
"parabolic",
"=",
"True",
")",
":",
"if",
"parabolic",
":",
"fit",
"=",
"np",
".",
"polyfit",
"(",
"distances",
",",
"energies",
",",
"2",
")",
"c",
"=",
"2",
"*",
"fit",
"[",
"0",
"]",
"# curvature therefore 2 * the exponent on the ^2 term",
"else",
":",
"# Use non parabolic description of the bands",
"def",
"f",
"(",
"x",
",",
"alpha",
",",
"d",
")",
":",
"top",
"=",
"np",
".",
"sqrt",
"(",
"4",
"*",
"alpha",
"*",
"d",
"*",
"x",
"**",
"2",
"+",
"1",
")",
"-",
"1",
"bot",
"=",
"2",
"*",
"alpha",
"return",
"top",
"/",
"bot",
"# set boundaries for curve fitting: alpha > 1e-8",
"# as alpha = 0 causes an error",
"bounds",
"=",
"(",
"(",
"1e-8",
",",
"-",
"np",
".",
"inf",
")",
",",
"(",
"np",
".",
"inf",
",",
"np",
".",
"inf",
")",
")",
"popt",
",",
"_",
"=",
"curve_fit",
"(",
"f",
",",
"distances",
",",
"energies",
",",
"p0",
"=",
"[",
"1.",
",",
"1.",
"]",
",",
"bounds",
"=",
"bounds",
")",
"c",
"=",
"2",
"*",
"popt",
"[",
"1",
"]",
"# coefficient is currently in eV/Angstrom^2/h_bar^2",
"# want it in atomic units so Hartree/bohr^2/h_bar^2",
"eff_mass",
"=",
"(",
"angstrom_to_bohr",
"**",
"2",
"/",
"eV_to_hartree",
")",
"/",
"c",
"return",
"eff_mass"
]
| Fit the effective masses using either a parabolic or nonparabolic fit.
Args:
distances (:obj:`numpy.ndarray`): The x-distances between k-points in
reciprocal Angstroms, normalised to the band extrema.
energies (:obj:`numpy.ndarray`): The band eigenvalues normalised to the
eigenvalue of the band extrema.
parabolic (:obj:`bool`, optional): Use a parabolic fit of the band
edges. If ``False`` then nonparabolic fitting will be attempted.
Defaults to ``True``.
Returns:
float: The effective mass in units of electron rest mass, :math:`m_0`. | [
"Fit",
"the",
"effective",
"masses",
"using",
"either",
"a",
"parabolic",
"or",
"nonparabolic",
"fit",
"."
]
| python | train | 40.27027 |
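A worked check of the parabolic branch of `fit_effective_mass` above on a synthetic free-electron band E(k) ~ 3.81 k^2 eV (k in reciprocal Angstroms), which should recover an effective mass of about 1 electron mass. The two unit constants are assumed values for the module-level `angstrom_to_bohr` and `eV_to_hartree` the source relies on:

```python
import numpy as np

# Assumed values for the source's module-level unit constants.
angstrom_to_bohr = 0.52917721067   # bohr radius expressed in angstrom
eV_to_hartree = 0.0367493          # 1 eV expressed in hartree

# Free-electron band: E = (hbar^2 / 2 m_e) k^2 ~ 3.80998 eV A^2 * k^2
distances = np.linspace(-0.3, 0.3, 21)   # reciprocal angstroms
energies = 3.80998 * distances ** 2      # eV

fit = np.polyfit(distances, energies, 2)
c = 2 * fit[0]                           # band curvature in eV / A^2
eff_mass = (angstrom_to_bohr ** 2 / eV_to_hartree) / c
print(round(eff_mass, 3))                # ~1.0 electron rest masses
```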
spry-group/python-vultr | vultr/v1_regions.py | https://github.com/spry-group/python-vultr/blob/bad1448f1df7b5dba70fd3d11434f32580f0b850/vultr/v1_regions.py#L10-L21 | def availability(self, dcid, params=None):
''' /v1/regions/availability
GET - public
Retrieve a list of the VPSPLANIDs currently available
in this location. If your account has special plans available,
        you will need to pass your api_key in order to see them.
For all other accounts, the API key is not optional.
Link: https://www.vultr.com/api/#regions_region_available
'''
params = update_params(params, {'DCID': dcid})
return self.request('/v1/regions/availability', params, 'GET') | [
"def",
"availability",
"(",
"self",
",",
"dcid",
",",
"params",
"=",
"None",
")",
":",
"params",
"=",
"update_params",
"(",
"params",
",",
"{",
"'DCID'",
":",
"dcid",
"}",
")",
"return",
"self",
".",
"request",
"(",
"'/v1/regions/availability'",
",",
"params",
",",
"'GET'",
")"
]
| /v1/regions/availability
GET - public
Retrieve a list of the VPSPLANIDs currently available
in this location. If your account has special plans available,
you will need to pass your api_key in order to see them.
For all other accounts, the API key is not optional.
Link: https://www.vultr.com/api/#regions_region_available | [
"/",
"v1",
"/",
"regions",
"/",
"availability",
"GET",
"-",
"public",
"Retrieve",
"a",
"list",
"of",
"the",
"VPSPLANIDs",
"currently",
"available",
"in",
"this",
"location",
".",
"If",
"your",
"account",
"has",
"special",
"plans",
"available",
"you",
"will",
"need",
"to",
"pass",
"your",
"api_key",
"in",
"in",
"order",
"to",
"see",
"them",
".",
"For",
"all",
"other",
"accounts",
"the",
"API",
"key",
"is",
"not",
"optional",
"."
]
| python | train | 46.333333 |
PMEAL/OpenPNM | openpnm/algorithms/MixedInvasionPercolation.py | https://github.com/PMEAL/OpenPNM/blob/0547b5724ffedc0a593aae48639d36fe10e0baed/openpnm/algorithms/MixedInvasionPercolation.py#L733-L819 | def set_residual(self, pores=[], overwrite=False):
r"""
Method to start invasion in a network w. residual saturation.
Called after inlets are set.
Parameters
----------
pores : array_like
The pores locations that are to be filled with invader at the
beginning of the simulation.
overwrite : boolean
If ``True`` then all existing inlet locations will be removed and
then the supplied locations will be added. If ``False``, then
supplied locations are added to any already existing locations.
Notes
-----
Currently works for pores only and treats inner throats, i.e.
those that connect two pores in the cluster as invaded and outer ones
as uninvaded. Uninvaded throats are added to a new residual cluster
queue but do not start invading independently if not connected to an
inlet.
Step 1. Identify clusters in the phase occupancy.
Step 2. Look for clusters that are connected or contain an inlet
        Step 3. For those that are, merge into the inlet cluster. May be connected
        to more than one - run should sort this out
        Step 4. For those that are isolated, set the queue to not invading.
        Step 5. (in run) When an isolated cluster is met by an invading cluster it
merges in and starts invading
"""
Ps = self._parse_indices(pores)
if overwrite:
self['pore.residual'] = False
self['pore.residual'][Ps] = True
residual = self['pore.residual']
net = self.project.network
conns = net['throat.conns']
rclusters = site_percolation(conns, residual).sites
rcluster_ids = np.unique(rclusters[rclusters > -1])
initial_num = len(self.queue)-1
for rcluster_id in rcluster_ids:
rPs = rclusters == rcluster_id
existing = np.unique(self['pore.cluster'][rPs])
existing = existing[existing > -1]
if len(existing) > 0:
# There was at least one inlet cluster connected to this
# residual cluster, pick the first one.
cluster_num = existing[0]
else:
# Make a new cluster queue
cluster_num = len(self.queue)
self.queue.append([])
queue = self.queue[cluster_num]
# Set the residual pores and inner throats as part of cluster
self['pore.cluster'][rPs] = cluster_num
Ts = net.find_neighbor_throats(pores=rPs,
flatten=True,
mode='xnor')
self['throat.cluster'][Ts] = cluster_num
self['pore.invasion_sequence'][rPs] = 0
self['throat.invasion_sequence'][Ts] = 0
self['pore.invasion_pressure'][rPs] = -np.inf
self['throat.invasion_pressure'][Ts] = -np.inf
# Add all the outer throats to the queue
Ts = net.find_neighbor_throats(pores=rPs,
flatten=True,
mode='exclusive_or')
for T in Ts:
data = []
# Pc
data.append(self['throat.entry_pressure'][T])
# Element Index
data.append(T)
# Element Type (Pore of Throat)
data.append('throat')
hq.heappush(queue, data)
self.invasion_running = [True]*len(self.queue)
# we have added new clusters that are currently isolated and we
# need to stop them invading until they merge into an invading
# cluster
for c_num in range(len(self.queue)):
if c_num > initial_num:
self.invasion_running[c_num] = False | [
"def",
"set_residual",
"(",
"self",
",",
"pores",
"=",
"[",
"]",
",",
"overwrite",
"=",
"False",
")",
":",
"Ps",
"=",
"self",
".",
"_parse_indices",
"(",
"pores",
")",
"if",
"overwrite",
":",
"self",
"[",
"'pore.residual'",
"]",
"=",
"False",
"self",
"[",
"'pore.residual'",
"]",
"[",
"Ps",
"]",
"=",
"True",
"residual",
"=",
"self",
"[",
"'pore.residual'",
"]",
"net",
"=",
"self",
".",
"project",
".",
"network",
"conns",
"=",
"net",
"[",
"'throat.conns'",
"]",
"rclusters",
"=",
"site_percolation",
"(",
"conns",
",",
"residual",
")",
".",
"sites",
"rcluster_ids",
"=",
"np",
".",
"unique",
"(",
"rclusters",
"[",
"rclusters",
">",
"-",
"1",
"]",
")",
"initial_num",
"=",
"len",
"(",
"self",
".",
"queue",
")",
"-",
"1",
"for",
"rcluster_id",
"in",
"rcluster_ids",
":",
"rPs",
"=",
"rclusters",
"==",
"rcluster_id",
"existing",
"=",
"np",
".",
"unique",
"(",
"self",
"[",
"'pore.cluster'",
"]",
"[",
"rPs",
"]",
")",
"existing",
"=",
"existing",
"[",
"existing",
">",
"-",
"1",
"]",
"if",
"len",
"(",
"existing",
")",
">",
"0",
":",
"# There was at least one inlet cluster connected to this",
"# residual cluster, pick the first one.",
"cluster_num",
"=",
"existing",
"[",
"0",
"]",
"else",
":",
"# Make a new cluster queue",
"cluster_num",
"=",
"len",
"(",
"self",
".",
"queue",
")",
"self",
".",
"queue",
".",
"append",
"(",
"[",
"]",
")",
"queue",
"=",
"self",
".",
"queue",
"[",
"cluster_num",
"]",
"# Set the residual pores and inner throats as part of cluster",
"self",
"[",
"'pore.cluster'",
"]",
"[",
"rPs",
"]",
"=",
"cluster_num",
"Ts",
"=",
"net",
".",
"find_neighbor_throats",
"(",
"pores",
"=",
"rPs",
",",
"flatten",
"=",
"True",
",",
"mode",
"=",
"'xnor'",
")",
"self",
"[",
"'throat.cluster'",
"]",
"[",
"Ts",
"]",
"=",
"cluster_num",
"self",
"[",
"'pore.invasion_sequence'",
"]",
"[",
"rPs",
"]",
"=",
"0",
"self",
"[",
"'throat.invasion_sequence'",
"]",
"[",
"Ts",
"]",
"=",
"0",
"self",
"[",
"'pore.invasion_pressure'",
"]",
"[",
"rPs",
"]",
"=",
"-",
"np",
".",
"inf",
"self",
"[",
"'throat.invasion_pressure'",
"]",
"[",
"Ts",
"]",
"=",
"-",
"np",
".",
"inf",
"# Add all the outer throats to the queue",
"Ts",
"=",
"net",
".",
"find_neighbor_throats",
"(",
"pores",
"=",
"rPs",
",",
"flatten",
"=",
"True",
",",
"mode",
"=",
"'exclusive_or'",
")",
"for",
"T",
"in",
"Ts",
":",
"data",
"=",
"[",
"]",
"# Pc",
"data",
".",
"append",
"(",
"self",
"[",
"'throat.entry_pressure'",
"]",
"[",
"T",
"]",
")",
"# Element Index",
"data",
".",
"append",
"(",
"T",
")",
"# Element Type (Pore of Throat)",
"data",
".",
"append",
"(",
"'throat'",
")",
"hq",
".",
"heappush",
"(",
"queue",
",",
"data",
")",
"self",
".",
"invasion_running",
"=",
"[",
"True",
"]",
"*",
"len",
"(",
"self",
".",
"queue",
")",
"# we have added new clusters that are currently isolated and we",
"# need to stop them invading until they merge into an invading",
"# cluster",
"for",
"c_num",
"in",
"range",
"(",
"len",
"(",
"self",
".",
"queue",
")",
")",
":",
"if",
"c_num",
">",
"initial_num",
":",
"self",
".",
"invasion_running",
"[",
"c_num",
"]",
"=",
"False"
]
| r"""
Method to start invasion in a network w. residual saturation.
Called after inlets are set.
Parameters
----------
pores : array_like
The pores locations that are to be filled with invader at the
beginning of the simulation.
overwrite : boolean
If ``True`` then all existing inlet locations will be removed and
then the supplied locations will be added. If ``False``, then
supplied locations are added to any already existing locations.
Notes
-----
Currently works for pores only and treats inner throats, i.e.
those that connect two pores in the cluster as invaded and outer ones
as uninvaded. Uninvaded throats are added to a new residual cluster
queue but do not start invading independently if not connected to an
inlet.
Step 1. Identify clusters in the phase occupancy.
Step 2. Look for clusters that are connected or contain an inlet
Step 3. For those that are, merge into the inlet cluster. May be connected
to more than one - run should sort this out
Step 4. For those that are isolated, set the queue to not invading.
Step 5. (in run) When an isolated cluster is met by an invading cluster it
merges in and starts invading | [
"r",
"Method",
"to",
"start",
"invasion",
"in",
"a",
"network",
"w",
".",
"residual",
"saturation",
".",
"Called",
"after",
"inlets",
"are",
"set",
"."
]
| python | train | 43.747126 |
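Step 1 of the `set_residual` docstring above (identify clusters in the phase occupancy) can be illustrated standalone with SciPy's connected components over the residual sub-network. This is a sketch of the idea on a toy network, not OpenPNM's actual `site_percolation` implementation:

```python
import numpy as np
from scipy.sparse import coo_matrix
from scipy.sparse.csgraph import connected_components

# Toy pore network: throat connections are pairs of pore indices.
conns = np.array([[0, 1], [1, 2], [2, 3], [4, 5]])
residual = np.array([True, True, False, True, True, True])  # per pore

# Keep only "inner" throats whose two pores are both residual-filled.
keep = residual[conns[:, 0]] & residual[conns[:, 1]]
rows, cols = conns[keep, 0], conns[keep, 1]
n = residual.size
adj = coo_matrix((np.ones(len(rows)), (rows, cols)), shape=(n, n))

n_clusters, labels = connected_components(adj, directed=False)
labels = np.where(residual, labels, -1)  # mark non-residual pores
print(labels)
# [ 0  0 -1  2  3  3]: pores 0-1 form one cluster, pore 3 is an
# isolated residual pore, and pores 4-5 form another cluster.
```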
belbio/bel | bel/db/arangodb.py | https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/db/arangodb.py#L297-L340 | def batch_load_docs(db, doc_iterator, on_duplicate="replace"):
"""Batch load documents
Args:
db: ArangoDB client database handle
        doc_iterator: function that yields (collection_name, doc)
on_duplicate: defaults to replace, but can be error, update, replace or ignore
https://python-driver-for-arangodb.readthedocs.io/en/master/specs.html?highlight=import_bulk#arango.collection.StandardCollection.import_bulk
"""
batch_size = 100
counter = 0
collections = {}
docs = {}
if on_duplicate not in ["error", "update", "replace", "ignore"]:
log.error(f"Bad parameter for on_duplicate: {on_duplicate}")
return
for (collection_name, doc) in doc_iterator:
if collection_name not in collections:
collections[collection_name] = db.collection(collection_name)
docs[collection_name] = []
counter += 1
docs[collection_name].append(doc)
if counter % batch_size == 0:
log.info(f"Bulk import arangodb: {counter}")
for cname in docs:
collections[cname].import_bulk(
docs[cname], on_duplicate=on_duplicate, halt_on_error=False
)
docs[cname] = []
log.info(f"Bulk import arangodb: {counter}")
for cname in docs:
collections[cname].import_bulk(
docs[cname], on_duplicate=on_duplicate, halt_on_error=False
)
docs[cname] = [] | [
"def",
"batch_load_docs",
"(",
"db",
",",
"doc_iterator",
",",
"on_duplicate",
"=",
"\"replace\"",
")",
":",
"batch_size",
"=",
"100",
"counter",
"=",
"0",
"collections",
"=",
"{",
"}",
"docs",
"=",
"{",
"}",
"if",
"on_duplicate",
"not",
"in",
"[",
"\"error\"",
",",
"\"update\"",
",",
"\"replace\"",
",",
"\"ignore\"",
"]",
":",
"log",
".",
"error",
"(",
"f\"Bad parameter for on_duplicate: {on_duplicate}\"",
")",
"return",
"for",
"(",
"collection_name",
",",
"doc",
")",
"in",
"doc_iterator",
":",
"if",
"collection_name",
"not",
"in",
"collections",
":",
"collections",
"[",
"collection_name",
"]",
"=",
"db",
".",
"collection",
"(",
"collection_name",
")",
"docs",
"[",
"collection_name",
"]",
"=",
"[",
"]",
"counter",
"+=",
"1",
"docs",
"[",
"collection_name",
"]",
".",
"append",
"(",
"doc",
")",
"if",
"counter",
"%",
"batch_size",
"==",
"0",
":",
"log",
".",
"info",
"(",
"f\"Bulk import arangodb: {counter}\"",
")",
"for",
"cname",
"in",
"docs",
":",
"collections",
"[",
"cname",
"]",
".",
"import_bulk",
"(",
"docs",
"[",
"cname",
"]",
",",
"on_duplicate",
"=",
"on_duplicate",
",",
"halt_on_error",
"=",
"False",
")",
"docs",
"[",
"cname",
"]",
"=",
"[",
"]",
"log",
".",
"info",
"(",
"f\"Bulk import arangodb: {counter}\"",
")",
"for",
"cname",
"in",
"docs",
":",
"collections",
"[",
"cname",
"]",
".",
"import_bulk",
"(",
"docs",
"[",
"cname",
"]",
",",
"on_duplicate",
"=",
"on_duplicate",
",",
"halt_on_error",
"=",
"False",
")",
"docs",
"[",
"cname",
"]",
"=",
"[",
"]"
]
| Batch load documents
Args:
db: ArangoDB client database handle
doc_iterator: function that yields (collection_name, doc)
on_duplicate: defaults to replace, but can be error, update, replace or ignore
https://python-driver-for-arangodb.readthedocs.io/en/master/specs.html?highlight=import_bulk#arango.collection.StandardCollection.import_bulk | [
"Batch",
"load",
"documents"
]
| python | train | 33 |
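The `doc_iterator` argument of `batch_load_docs` above is any iterable of `(collection_name, doc)` pairs. A sketch of one such generator (the collection name and record fields are made up); the commented-out call shows how it would be combined with a python-arango database handle:

```python
def doc_iterator(records):
    """Yield (collection_name, doc) pairs for batch_load_docs.
    The 'nodes' collection and the field names are invented examples."""
    for rec in records:
        yield ('nodes', {'_key': rec['id'], 'label': rec['label']})

records = [{'id': 'n1', 'label': 'kinase'}, {'id': 'n2', 'label': 'ligand'}]
for collection_name, doc in doc_iterator(records):
    print(collection_name, doc)

# With a real ArangoDB handle (hypothetical connection details):
#   batch_load_docs(db, doc_iterator(records), on_duplicate='replace')
```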
iotile/coretools | iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Tool/MSCommon/vc.py | https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Tool/MSCommon/vc.py#L229-L254 | def find_vc_pdir_vswhere(msvc_version):
"""
    Find the MSVC product directory using vswhere.exe.
Run it asking for specified version and get MSVS install location
:param msvc_version:
:return: MSVC install dir
"""
vswhere_path = os.path.join(
'C:\\',
'Program Files (x86)',
'Microsoft Visual Studio',
'Installer',
'vswhere.exe'
)
vswhere_cmd = [vswhere_path, '-version', msvc_version, '-property', 'installationPath']
if os.path.exists(vswhere_path):
sp = subprocess.Popen(vswhere_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
vsdir, err = sp.communicate()
vsdir = vsdir.decode("mbcs")
vsdir = vsdir.rstrip()
vc_pdir = os.path.join(vsdir, 'VC')
return vc_pdir
else:
# No vswhere on system, no install info available
return None | [
"def",
"find_vc_pdir_vswhere",
"(",
"msvc_version",
")",
":",
"vswhere_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"'C:\\\\'",
",",
"'Program Files (x86)'",
",",
"'Microsoft Visual Studio'",
",",
"'Installer'",
",",
"'vswhere.exe'",
")",
"vswhere_cmd",
"=",
"[",
"vswhere_path",
",",
"'-version'",
",",
"msvc_version",
",",
"'-property'",
",",
"'installationPath'",
"]",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"vswhere_path",
")",
":",
"sp",
"=",
"subprocess",
".",
"Popen",
"(",
"vswhere_cmd",
",",
"stdout",
"=",
"subprocess",
".",
"PIPE",
",",
"stderr",
"=",
"subprocess",
".",
"PIPE",
")",
"vsdir",
",",
"err",
"=",
"sp",
".",
"communicate",
"(",
")",
"vsdir",
"=",
"vsdir",
".",
"decode",
"(",
"\"mbcs\"",
")",
"vsdir",
"=",
"vsdir",
".",
"rstrip",
"(",
")",
"vc_pdir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"vsdir",
",",
"'VC'",
")",
"return",
"vc_pdir",
"else",
":",
"# No vswhere on system, no install info available",
"return",
"None"
]
| Find the MSVC product directory using vswhere.exe.
Run it asking for specified version and get MSVS install location
:param msvc_version:
:return: MSVC install dir | [
"Find",
"the",
"MSVC",
"product",
"directory",
"using",
"vswhere",
".",
"exe",
".",
"Run",
"it",
"asking",
"for",
"specified",
"version",
"and",
"get",
"MSVS",
"install",
"location",
":",
"param",
"msvc_version",
":",
":",
"return",
":",
"MSVC",
"install",
"dir"
]
| python | train | 33 |
materialsproject/pymatgen | pymatgen/analysis/adsorption.py | https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/analysis/adsorption.py#L189-L200 | def assign_site_properties(self, slab, height=0.9):
"""
Assigns site properties.
"""
if 'surface_properties' in slab.site_properties.keys():
return slab
else:
surf_sites = self.find_surface_sites_by_height(slab, height)
surf_props = ['surface' if site in surf_sites
else 'subsurface' for site in slab.sites]
return slab.copy(
site_properties={'surface_properties': surf_props}) | [
"def",
"assign_site_properties",
"(",
"self",
",",
"slab",
",",
"height",
"=",
"0.9",
")",
":",
"if",
"'surface_properties'",
"in",
"slab",
".",
"site_properties",
".",
"keys",
"(",
")",
":",
"return",
"slab",
"else",
":",
"surf_sites",
"=",
"self",
".",
"find_surface_sites_by_height",
"(",
"slab",
",",
"height",
")",
"surf_props",
"=",
"[",
"'surface'",
"if",
"site",
"in",
"surf_sites",
"else",
"'subsurface'",
"for",
"site",
"in",
"slab",
".",
"sites",
"]",
"return",
"slab",
".",
"copy",
"(",
"site_properties",
"=",
"{",
"'surface_properties'",
":",
"surf_props",
"}",
")"
]
| Assigns site properties. | [
"Assigns",
"site",
"properties",
"."
]
| python | train | 40 |
spotify/snakebite | snakebite/minicluster.py | https://github.com/spotify/snakebite/blob/6a456e6100b0c1be66cc1f7f9d7f50494f369da3/snakebite/minicluster.py#L130-L132 | def df(self, src):
'''Perform ``df`` on a path'''
return self._getStdOutCmd([self._hadoop_cmd, 'fs', '-df', self._full_hdfs_path(src)], True) | [
"def",
"df",
"(",
"self",
",",
"src",
")",
":",
"return",
"self",
".",
"_getStdOutCmd",
"(",
"[",
"self",
".",
"_hadoop_cmd",
",",
"'fs'",
",",
"'-df'",
",",
"self",
".",
"_full_hdfs_path",
"(",
"src",
")",
"]",
",",
"True",
")"
]
| Perform ``df`` on a path | [
"Perform",
"df",
"on",
"a",
"path"
]
| python | train | 51.666667 |
obriencj/python-javatools | javatools/__init__.py | https://github.com/obriencj/python-javatools/blob/9e2332b452ddc508bed0615937dddcb2cf051557/javatools/__init__.py#L1497-L1517 | def get_identifier(self):
"""
For methods this is the return type, the name and the (non-pretty)
argument descriptor. For fields it is simply the name.
The return-type of methods is attached to the identifier when
it is a bridge method, which can technically allow two methods
with the same name and argument type list, but with different
return type.
"""
ident = self.get_name()
if self.is_method:
args = ",".join(self.get_arg_type_descriptors())
if self.is_bridge():
ident = "%s(%s):%s" % (ident, args, self.get_descriptor())
else:
ident = "%s(%s)" % (ident, args)
return ident | [
"def",
"get_identifier",
"(",
"self",
")",
":",
"ident",
"=",
"self",
".",
"get_name",
"(",
")",
"if",
"self",
".",
"is_method",
":",
"args",
"=",
"\",\"",
".",
"join",
"(",
"self",
".",
"get_arg_type_descriptors",
"(",
")",
")",
"if",
"self",
".",
"is_bridge",
"(",
")",
":",
"ident",
"=",
"\"%s(%s):%s\"",
"%",
"(",
"ident",
",",
"args",
",",
"self",
".",
"get_descriptor",
"(",
")",
")",
"else",
":",
"ident",
"=",
"\"%s(%s)\"",
"%",
"(",
"ident",
",",
"args",
")",
"return",
"ident"
]
| For methods this is the return type, the name and the (non-pretty)
argument descriptor. For fields it is simply the name.
The return-type of methods is attached to the identifier when
it is a bridge method, which can technically allow two methods
with the same name and argument type list, but with different
return type. | [
"For",
"methods",
"this",
"is",
"the",
"return",
"type",
"the",
"name",
"and",
"the",
"(",
"non",
"-",
"pretty",
")",
"argument",
"descriptor",
".",
"For",
"fields",
"it",
"is",
"simply",
"the",
"name",
"."
]
| python | train | 34.238095 |
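Purely illustrative shapes of the identifiers `get_identifier` above produces; the member names and JVM type descriptors are made up, and the exact descriptor appended for bridge methods is an assumption based on the `get_descriptor()` call in the source:

```python
# Stand-in illustration of the identifier formats described above.
field_ident = 'MAX_SIZE'  # fields: just the name

name = 'compareTo'
arg_descriptors = ['Ljava/lang/Object;']
method_ident = '%s(%s)' % (name, ','.join(arg_descriptors))
print(method_ident)   # compareTo(Ljava/lang/Object;)

# Bridge methods additionally append the descriptor, so two same-named,
# same-argument methods with different return types stay distinct.
descriptor = '(Ljava/lang/Object;)I'  # assumed full JVM descriptor
bridge_ident = '%s(%s):%s' % (name, ','.join(arg_descriptors), descriptor)
print(bridge_ident)   # compareTo(Ljava/lang/Object;):(Ljava/lang/Object;)I
```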
google/grr | grr/server/grr_response_server/databases/mem_paths.py | https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/databases/mem_paths.py#L329-L354 | def MultiWritePathHistory(self, client_path_histories):
"""Writes a collection of hash and stat entries observed for given paths."""
for client_path, client_path_history in iteritems(client_path_histories):
if client_path.client_id not in self.metadatas:
raise db.UnknownClientError(client_path.client_id)
path_info = rdf_objects.PathInfo(
path_type=client_path.path_type, components=client_path.components)
for timestamp, stat_entry in iteritems(client_path_history.stat_entries):
path_record = self._GetPathRecord(
client_path.client_id, path_info, set_default=False)
if path_record is None:
# TODO(hanuszczak): Provide more details about paths that caused that.
raise db.AtLeastOneUnknownPathError([])
path_record.AddStatEntry(stat_entry, timestamp)
for timestamp, hash_entry in iteritems(client_path_history.hash_entries):
path_record = self._GetPathRecord(
client_path.client_id, path_info, set_default=False)
if path_record is None:
# TODO(hanuszczak): Provide more details about paths that caused that.
raise db.AtLeastOneUnknownPathError([])
path_record.AddHashEntry(hash_entry, timestamp) | [
"def",
"MultiWritePathHistory",
"(",
"self",
",",
"client_path_histories",
")",
":",
"for",
"client_path",
",",
"client_path_history",
"in",
"iteritems",
"(",
"client_path_histories",
")",
":",
"if",
"client_path",
".",
"client_id",
"not",
"in",
"self",
".",
"metadatas",
":",
"raise",
"db",
".",
"UnknownClientError",
"(",
"client_path",
".",
"client_id",
")",
"path_info",
"=",
"rdf_objects",
".",
"PathInfo",
"(",
"path_type",
"=",
"client_path",
".",
"path_type",
",",
"components",
"=",
"client_path",
".",
"components",
")",
"for",
"timestamp",
",",
"stat_entry",
"in",
"iteritems",
"(",
"client_path_history",
".",
"stat_entries",
")",
":",
"path_record",
"=",
"self",
".",
"_GetPathRecord",
"(",
"client_path",
".",
"client_id",
",",
"path_info",
",",
"set_default",
"=",
"False",
")",
"if",
"path_record",
"is",
"None",
":",
"# TODO(hanuszczak): Provide more details about paths that caused that.",
"raise",
"db",
".",
"AtLeastOneUnknownPathError",
"(",
"[",
"]",
")",
"path_record",
".",
"AddStatEntry",
"(",
"stat_entry",
",",
"timestamp",
")",
"for",
"timestamp",
",",
"hash_entry",
"in",
"iteritems",
"(",
"client_path_history",
".",
"hash_entries",
")",
":",
"path_record",
"=",
"self",
".",
"_GetPathRecord",
"(",
"client_path",
".",
"client_id",
",",
"path_info",
",",
"set_default",
"=",
"False",
")",
"if",
"path_record",
"is",
"None",
":",
"# TODO(hanuszczak): Provide more details about paths that caused that.",
"raise",
"db",
".",
"AtLeastOneUnknownPathError",
"(",
"[",
"]",
")",
"path_record",
".",
"AddHashEntry",
"(",
"hash_entry",
",",
"timestamp",
")"
]
| Writes a collection of hash and stat entries observed for given paths. | [
"Writes",
"a",
"collection",
"of",
"hash",
"and",
"stat",
"entries",
"observed",
"for",
"given",
"paths",
"."
]
| python | train | 47.653846 |
wandb/client | wandb/vendor/prompt_toolkit/renderer.py | https://github.com/wandb/client/blob/7d08954ed5674fee223cd85ed0d8518fe47266b2/wandb/vendor/prompt_toolkit/renderer.py#L492-L506 | def clear(self):
"""
Clear screen and go to 0,0
"""
# Erase current output first.
self.erase()
# Send "Erase Screen" command and go to (0, 0).
output = self.output
output.erase_screen()
output.cursor_goto(0, 0)
output.flush()
self.request_absolute_cursor_position() | [
"def",
"clear",
"(",
"self",
")",
":",
"# Erase current output first.",
"self",
".",
"erase",
"(",
")",
"# Send \"Erase Screen\" command and go to (0, 0).",
"output",
"=",
"self",
".",
"output",
"output",
".",
"erase_screen",
"(",
")",
"output",
".",
"cursor_goto",
"(",
"0",
",",
"0",
")",
"output",
".",
"flush",
"(",
")",
"self",
".",
"request_absolute_cursor_position",
"(",
")"
]
| Clear screen and go to 0,0 | [
"Clear",
"screen",
"and",
"go",
"to",
"0",
"0"
]
| python | train | 22.8 |
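On a VT100-style terminal, the `erase_screen()` and `cursor_goto(0, 0)` calls above boil down to standard ANSI escape sequences. A minimal standalone equivalent, assuming an ANSI-capable terminal rather than prompt_toolkit's actual `Output` class:

```python
import sys

# ANSI CSI sequences: erase the entire screen, then cursor to row 1, col 1.
ERASE_SCREEN = '\x1b[2J'
CURSOR_HOME = '\x1b[H'

def clear_terminal(stream=sys.stdout):
    stream.write(ERASE_SCREEN + CURSOR_HOME)
    stream.flush()

clear_terminal()
```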
pycontribs/pyrax | pyrax/cloudloadbalancers.py | https://github.com/pycontribs/pyrax/blob/9ddfd5064b3a292d7337906f3b2d5dce95b50b99/pyrax/cloudloadbalancers.py#L557-L561 | def add_virtualip(self, lb, vip):
"""Adds the VirtualIP to the specified load balancer."""
resp, body = self.api.method_post("/loadbalancers/%s/virtualips" % lb.id,
body=vip.to_dict())
return resp, body | [
"def",
"add_virtualip",
"(",
"self",
",",
"lb",
",",
"vip",
")",
":",
"resp",
",",
"body",
"=",
"self",
".",
"api",
".",
"method_post",
"(",
"\"/loadbalancers/%s/virtualips\"",
"%",
"lb",
".",
"id",
",",
"body",
"=",
"vip",
".",
"to_dict",
"(",
")",
")",
"return",
"resp",
",",
"body"
]
| Adds the VirtualIP to the specified load balancer. | [
"Adds",
"the",
"VirtualIP",
"to",
"the",
"specified",
"load",
"balancer",
"."
]
| python | train | 47.6 |
apache/incubator-heron | heron/tools/tracker/src/python/handlers/basehandler.py | https://github.com/apache/incubator-heron/blob/ad10325a0febe89ad337e561ebcbe37ec5d9a5ac/heron/tools/tracker/src/python/handlers/basehandler.py#L188-L198 | def get_argument_starttime(self):
"""
Helper function to get starttime argument.
Raises exception if argument is missing.
Returns the starttime argument.
"""
try:
starttime = self.get_argument(constants.PARAM_STARTTIME)
return starttime
except tornado.web.MissingArgumentError as e:
raise Exception(e.log_message) | [
"def",
"get_argument_starttime",
"(",
"self",
")",
":",
"try",
":",
"starttime",
"=",
"self",
".",
"get_argument",
"(",
"constants",
".",
"PARAM_STARTTIME",
")",
"return",
"starttime",
"except",
"tornado",
".",
"web",
".",
"MissingArgumentError",
"as",
"e",
":",
"raise",
"Exception",
"(",
"e",
".",
"log_message",
")"
]
| Helper function to get starttime argument.
Raises exception if argument is missing.
Returns the starttime argument. | [
"Helper",
"function",
"to",
"get",
"starttime",
"argument",
".",
"Raises",
"exception",
"if",
"argument",
"is",
"missing",
".",
"Returns",
"the",
"starttime",
"argument",
"."
]
| python | valid | 31.727273 |
frejanordsiek/hdf5storage | hdf5storage/utilities.py | https://github.com/frejanordsiek/hdf5storage/blob/539275141dd3a4efbbbfd9bdb978f3ed59e3f05d/hdf5storage/utilities.py#L1537-L1599 | def set_attributes_all(target, attributes, discard_others=True):
""" Set Attributes in bulk and optionally discard others.
Sets each Attribute in turn (modifying it in place if possible if it
is already present) and optionally discarding all other Attributes
not explicitly set. This function yields much greater performance
than the required individual calls to ``set_attribute``,
``set_attribute_string``, ``set_attribute_string_array`` and
``del_attribute`` put together.
.. versionadded:: 0.2
Parameters
----------
target : Dataset or Group
Dataset or Group to set the Attributes of.
attributes : dict
The Attributes to set. The keys (``str``) are the names. The
        values are a ``tuple`` of the Attribute kind and the value to
set. Valid kinds are ``'string_array'``, ``'string'``, and
``'value'``. The values must correspond to what
``set_attribute_string_array``, ``set_attribute_string`` and
``set_attribute`` would take respectively.
discard_others : bool, optional
Whether to discard all other Attributes not explicitly set
(default) or not.
See Also
--------
set_attribute
set_attribute_string
set_attribute_string_array
"""
attrs = target.attrs
existing = dict(attrs.items())
# Generate special dtype for string arrays.
if sys.hexversion >= 0x03000000:
str_arr_dtype = h5py.special_dtype(vlen=str)
else:
        str_arr_dtype = h5py.special_dtype(vlen=unicode)
# Go through each attribute. If it is already present, modify it if
# possible and create it otherwise (deletes old value.)
for k, (kind, value) in attributes.items():
if kind == 'string_array':
attrs.create(k, [convert_to_str(s) for s in value],
dtype=str_arr_dtype)
else:
if kind == 'string':
value = np.bytes_(value)
if k not in existing:
attrs.create(k, value)
else:
try:
if value.dtype == existing[k].dtype \
and value.shape == existing[k].shape:
attrs.modify(k, value)
except:
attrs.create(k, value)
# Discard all other attributes.
if discard_others:
for k in set(existing) - set(attributes):
del attrs[k] | [
"def",
"set_attributes_all",
"(",
"target",
",",
"attributes",
",",
"discard_others",
"=",
"True",
")",
":",
"attrs",
"=",
"target",
".",
"attrs",
"existing",
"=",
"dict",
"(",
"attrs",
".",
"items",
"(",
")",
")",
"# Generate special dtype for string arrays.",
"if",
"sys",
".",
"hexversion",
">=",
"0x03000000",
":",
"str_arr_dtype",
"=",
"h5py",
".",
"special_dtype",
"(",
"vlen",
"=",
"str",
")",
"else",
":",
"str_arr_dtype",
"=",
"dtype",
"=",
"h5py",
".",
"special_dtype",
"(",
"vlen",
"=",
"unicode",
")",
"# Go through each attribute. If it is already present, modify it if",
"# possible and create it otherwise (deletes old value.)",
"for",
"k",
",",
"(",
"kind",
",",
"value",
")",
"in",
"attributes",
".",
"items",
"(",
")",
":",
"if",
"kind",
"==",
"'string_array'",
":",
"attrs",
".",
"create",
"(",
"k",
",",
"[",
"convert_to_str",
"(",
"s",
")",
"for",
"s",
"in",
"value",
"]",
",",
"dtype",
"=",
"str_arr_dtype",
")",
"else",
":",
"if",
"kind",
"==",
"'string'",
":",
"value",
"=",
"np",
".",
"bytes_",
"(",
"value",
")",
"if",
"k",
"not",
"in",
"existing",
":",
"attrs",
".",
"create",
"(",
"k",
",",
"value",
")",
"else",
":",
"try",
":",
"if",
"value",
".",
"dtype",
"==",
"existing",
"[",
"k",
"]",
".",
"dtype",
"and",
"value",
".",
"shape",
"==",
"existing",
"[",
"k",
"]",
".",
"shape",
":",
"attrs",
".",
"modify",
"(",
"k",
",",
"value",
")",
"except",
":",
"attrs",
".",
"create",
"(",
"k",
",",
"value",
")",
"# Discard all other attributes.",
"if",
"discard_others",
":",
"for",
"k",
"in",
"set",
"(",
"existing",
")",
"-",
"set",
"(",
"attributes",
")",
":",
"del",
"attrs",
"[",
"k",
"]"
]
| Set Attributes in bulk and optionally discard others.
Sets each Attribute in turn (modifying it in place if possible if it
is already present) and optionally discarding all other Attributes
not explicitly set. This function yields much greater performance
than the required individual calls to ``set_attribute``,
``set_attribute_string``, ``set_attribute_string_array`` and
``del_attribute`` put together.
.. versionadded:: 0.2
Parameters
----------
target : Dataset or Group
Dataset or Group to set the Attributes of.
attributes : dict
The Attributes to set. The keys (``str``) are the names. The
values are ``tuple`` of the Attribute kind and the value to
set. Valid kinds are ``'string_array'``, ``'string'``, and
``'value'``. The values must correspond to what
``set_attribute_string_array``, ``set_attribute_string`` and
``set_attribute`` would take respectively.
discard_others : bool, optional
Whether to discard all other Attributes not explicitly set
(default) or not.
See Also
--------
set_attribute
set_attribute_string
set_attribute_string_array | [
"Set",
"Attributes",
"in",
"bulk",
"and",
"optionally",
"discard",
"others",
"."
]
| python | train | 37.920635 |
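A usage sketch of the bulk setter above against an in-memory HDF5 file, showing the `{name: (kind, value)}` format the docstring describes. The attribute names and values are made up; `set_attributes_all` is imported from the module this record documents:

```python
import h5py
import numpy as np
from hdf5storage.utilities import set_attributes_all

# In-memory HDF5 file so the sketch leaves nothing on disk.
f = h5py.File('scratch.h5', 'w', driver='core', backing_store=False)
dset = f.create_dataset('data', data=np.arange(4))
dset.attrs['stale'] = 1  # will be discarded below

attributes = {
    'MATLAB_class': ('string', 'double'),           # stored via np.bytes_
    'dims': ('value', np.array([2, 2], np.uint64)),
    'labels': ('string_array', ['row', 'col']),     # vlen string array
}
set_attributes_all(dset, attributes, discard_others=True)
print(dict(dset.attrs))  # 'stale' is gone; the three above remain
```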
pymupdf/PyMuPDF | examples/PDFLinkMaint.py | https://github.com/pymupdf/PyMuPDF/blob/917f2d83482510e26ba0ff01fd2392c26f3a8e90/examples/PDFLinkMaint.py#L560-L629 | def on_linkType_changed(self, evt):
"""User changed link kind, so prepare available fields."""
if self.current_idx < 0:
evt.Skip()
return
n = self.linkType.GetSelection()
lt_str = self.linkType.GetString(n)
lt = self.link_code[lt_str]
self.prep_link_details(lt)
lnk = self.page_links[self.current_idx]
lnk["update"] = True
lnk["kind"] = lt
self.enable_update()
if lt == fitz.LINK_GOTO:
if not self.toPage.Value.isdecimal():
self.toPage.ChangeValue("1")
self.toPage.Enable()
if not self.toLeft.Value.isdecimal():
self.toLeft.ChangeValue("0")
self.toLeft.Enable()
if not self.toHeight.Value.isdecimal():
self.toHeight.ChangeValue("0")
self.toHeight.Enable()
lnk["page"] = int(self.toPage.Value) - 1
lnk["to"] = fitz.Point(int(self.toLeft.Value),
int(self.toHeight.Value))
elif lt == fitz.LINK_GOTOR:
if not self.toFile.Value:
self.toFile.SetValue(self.text_in_rect())
self.toFile.MarkDirty()
if not self.toPage.Value.isdecimal():
self.toPage.ChangeValue("1")
if not self.toLeft.Value.isdecimal():
self.toLeft.ChangeValue("0")
if not self.toHeight.Value.isdecimal():
self.toHeight.ChangeValue("0")
self.toLeft.Enable()
self.toPage.Enable()
self.toFile.Enable()
self.toHeight.Enable()
lnk["file"] = self.toFile.Value
lnk["page"] = int(self.toPage.Value) - 1
lnk["to"] = fitz.Point(int(self.toLeft.Value),
int(self.toHeight.Value))
elif lt == fitz.LINK_URI:
if not self.toURI.Value:
self.toURI.SetValue(self.text_in_rect())
self.toURI.MarkDirty()
lnk["uri"] = self.toURI.Value
self.toURI.Enable()
elif lt == fitz.LINK_LAUNCH:
if not self.toFile.Value:
self.toFile.SetValue(self.text_in_rect())
self.toFile.MarkDirty()
lnk["file"] = self.toFile.Value
self.toFile.Enable()
elif lt == fitz.LINK_NAMED:
self.toName.SetSelection(0)
self.toName.Enable()
self.page_links[self.current_idx] = lnk
evt.Skip()
return | [
"def",
"on_linkType_changed",
"(",
"self",
",",
"evt",
")",
":",
"if",
"self",
".",
"current_idx",
"<",
"0",
":",
"evt",
".",
"Skip",
"(",
")",
"return",
"n",
"=",
"self",
".",
"linkType",
".",
"GetSelection",
"(",
")",
"lt_str",
"=",
"self",
".",
"linkType",
".",
"GetString",
"(",
"n",
")",
"lt",
"=",
"self",
".",
"link_code",
"[",
"lt_str",
"]",
"self",
".",
"prep_link_details",
"(",
"lt",
")",
"lnk",
"=",
"self",
".",
"page_links",
"[",
"self",
".",
"current_idx",
"]",
"lnk",
"[",
"\"update\"",
"]",
"=",
"True",
"lnk",
"[",
"\"kind\"",
"]",
"=",
"lt",
"self",
".",
"enable_update",
"(",
")",
"if",
"lt",
"==",
"fitz",
".",
"LINK_GOTO",
":",
"if",
"not",
"self",
".",
"toPage",
".",
"Value",
".",
"isdecimal",
"(",
")",
":",
"self",
".",
"toPage",
".",
"ChangeValue",
"(",
"\"1\"",
")",
"self",
".",
"toPage",
".",
"Enable",
"(",
")",
"if",
"not",
"self",
".",
"toLeft",
".",
"Value",
".",
"isdecimal",
"(",
")",
":",
"self",
".",
"toLeft",
".",
"ChangeValue",
"(",
"\"0\"",
")",
"self",
".",
"toLeft",
".",
"Enable",
"(",
")",
"if",
"not",
"self",
".",
"toHeight",
".",
"Value",
".",
"isdecimal",
"(",
")",
":",
"self",
".",
"toHeight",
".",
"ChangeValue",
"(",
"\"0\"",
")",
"self",
".",
"toHeight",
".",
"Enable",
"(",
")",
"lnk",
"[",
"\"page\"",
"]",
"=",
"int",
"(",
"self",
".",
"toPage",
".",
"Value",
")",
"-",
"1",
"lnk",
"[",
"\"to\"",
"]",
"=",
"fitz",
".",
"Point",
"(",
"int",
"(",
"self",
".",
"toLeft",
".",
"Value",
")",
",",
"int",
"(",
"self",
".",
"toHeight",
".",
"Value",
")",
")",
"elif",
"lt",
"==",
"fitz",
".",
"LINK_GOTOR",
":",
"if",
"not",
"self",
".",
"toFile",
".",
"Value",
":",
"self",
".",
"toFile",
".",
"SetValue",
"(",
"self",
".",
"text_in_rect",
"(",
")",
")",
"self",
".",
"toFile",
".",
"MarkDirty",
"(",
")",
"if",
"not",
"self",
".",
"toPage",
".",
"Value",
".",
"isdecimal",
"(",
")",
":",
"self",
".",
"toPage",
".",
"ChangeValue",
"(",
"\"1\"",
")",
"if",
"not",
"self",
".",
"toLeft",
".",
"Value",
".",
"isdecimal",
"(",
")",
":",
"self",
".",
"toLeft",
".",
"ChangeValue",
"(",
"\"0\"",
")",
"if",
"not",
"self",
".",
"toHeight",
".",
"Value",
".",
"isdecimal",
"(",
")",
":",
"self",
".",
"toHeight",
".",
"ChangeValue",
"(",
"\"0\"",
")",
"self",
".",
"toLeft",
".",
"Enable",
"(",
")",
"self",
".",
"toPage",
".",
"Enable",
"(",
")",
"self",
".",
"toFile",
".",
"Enable",
"(",
")",
"self",
".",
"toHeight",
".",
"Enable",
"(",
")",
"lnk",
"[",
"\"file\"",
"]",
"=",
"self",
".",
"toFile",
".",
"Value",
"lnk",
"[",
"\"page\"",
"]",
"=",
"int",
"(",
"self",
".",
"toPage",
".",
"Value",
")",
"-",
"1",
"lnk",
"[",
"\"to\"",
"]",
"=",
"fitz",
".",
"Point",
"(",
"int",
"(",
"self",
".",
"toLeft",
".",
"Value",
")",
",",
"int",
"(",
"self",
".",
"toHeight",
".",
"Value",
")",
")",
"elif",
"lt",
"==",
"fitz",
".",
"LINK_URI",
":",
"if",
"not",
"self",
".",
"toURI",
".",
"Value",
":",
"self",
".",
"toURI",
".",
"SetValue",
"(",
"self",
".",
"text_in_rect",
"(",
")",
")",
"self",
".",
"toURI",
".",
"MarkDirty",
"(",
")",
"lnk",
"[",
"\"uri\"",
"]",
"=",
"self",
".",
"toURI",
".",
"Value",
"self",
".",
"toURI",
".",
"Enable",
"(",
")",
"elif",
"lt",
"==",
"fitz",
".",
"LINK_LAUNCH",
":",
"if",
"not",
"self",
".",
"toFile",
".",
"Value",
":",
"self",
".",
"toFile",
".",
"SetValue",
"(",
"self",
".",
"text_in_rect",
"(",
")",
")",
"self",
".",
"toFile",
".",
"MarkDirty",
"(",
")",
"lnk",
"[",
"\"file\"",
"]",
"=",
"self",
".",
"toFile",
".",
"Value",
"self",
".",
"toFile",
".",
"Enable",
"(",
")",
"elif",
"lt",
"==",
"fitz",
".",
"LINK_NAMED",
":",
"self",
".",
"toName",
".",
"SetSelection",
"(",
"0",
")",
"self",
".",
"toName",
".",
"Enable",
"(",
")",
"self",
".",
"page_links",
"[",
"self",
".",
"current_idx",
"]",
"=",
"lnk",
"evt",
".",
"Skip",
"(",
")",
"return"
]
| User changed link kind, so prepare available fields. | [
"User",
"changed",
"link",
"kind",
"so",
"prepare",
"available",
"fields",
"."
]
| python | train | 36.642857 |
Iotic-Labs/py-IoticAgent | src/IoticAgent/IOT/Client.py | https://github.com/Iotic-Labs/py-IoticAgent/blob/893e8582ad1dacfe32dfc0ee89452bbd6f57d28d/src/IoticAgent/IOT/Client.py#L629-L651 | def _parsed_callback_wrapper(self, callback_parsed, callback_plain, foc, data):
"""Used to by register_catchall_*data() and Thing class (follow, create_point) to present point data as an
object."""
# used by PointDataObjectHandler as reference
if foc == R_FEED:
point_ref = data['pid']
else: # R_CONTROL
point_ref = Control(self, data[P_ENTITY_LID], data[P_LID], '0' * 32)
try:
data['parsed'] = self._get_point_data_handler_for(point_ref).get_template(data=data[P_DATA])
except RefreshException:
# No metadata available, do not produce warning
if callback_plain:
callback_plain(data)
except:
logger.warning('Failed to parse %s data for %s%s', foc_to_str(foc), point_ref,
'' if callback_plain else ', ignoring',
exc_info=DEBUG_ENABLED)
if callback_plain:
callback_plain(data)
else:
callback_parsed(data) | [
"def",
"_parsed_callback_wrapper",
"(",
"self",
",",
"callback_parsed",
",",
"callback_plain",
",",
"foc",
",",
"data",
")",
":",
"# used by PointDataObjectHandler as reference",
"if",
"foc",
"==",
"R_FEED",
":",
"point_ref",
"=",
"data",
"[",
"'pid'",
"]",
"else",
":",
"# R_CONTROL",
"point_ref",
"=",
"Control",
"(",
"self",
",",
"data",
"[",
"P_ENTITY_LID",
"]",
",",
"data",
"[",
"P_LID",
"]",
",",
"'0'",
"*",
"32",
")",
"try",
":",
"data",
"[",
"'parsed'",
"]",
"=",
"self",
".",
"_get_point_data_handler_for",
"(",
"point_ref",
")",
".",
"get_template",
"(",
"data",
"=",
"data",
"[",
"P_DATA",
"]",
")",
"except",
"RefreshException",
":",
"# No metadata available, do not produce warning",
"if",
"callback_plain",
":",
"callback_plain",
"(",
"data",
")",
"except",
":",
"logger",
".",
"warning",
"(",
"'Failed to parse %s data for %s%s'",
",",
"foc_to_str",
"(",
"foc",
")",
",",
"point_ref",
",",
"''",
"if",
"callback_plain",
"else",
"', ignoring'",
",",
"exc_info",
"=",
"DEBUG_ENABLED",
")",
"if",
"callback_plain",
":",
"callback_plain",
"(",
"data",
")",
"else",
":",
"callback_parsed",
"(",
"data",
")"
]
| Used by register_catchall_*data() and Thing class (follow, create_point) to present point data as an
object. | [
"Used",
"to",
"by",
"register_catchall_",
"*",
"data",
"()",
"and",
"Thing",
"class",
"(",
"follow",
"create_point",
")",
"to",
"present",
"point",
"data",
"as",
"an",
"object",
"."
]
| python | train | 45.043478 |
nabla-c0d3/sslyze | sslyze/synchronous_scanner.py | https://github.com/nabla-c0d3/sslyze/blob/0fb3ae668453d7ecf616d0755f237ca7be9f62fa/sslyze/synchronous_scanner.py#L32-L50 | def run_scan_command(
self,
server_info: ServerConnectivityInfo,
scan_command: PluginScanCommand
) -> PluginScanResult:
"""Run a single scan command against a server; will block until the scan command has been completed.
Args:
server_info: The server's connectivity information. The test_connectivity_to_server() method must have been
called first to ensure that the server is online and accessible.
scan_command: The scan command to run against this server.
Returns:
The result of the scan command, which will be an instance of the scan command's
corresponding PluginScanResult subclass.
"""
plugin_class = self._plugins_repository.get_plugin_class_for_command(scan_command)
plugin = plugin_class()
return plugin.process_task(server_info, scan_command) | [
"def",
"run_scan_command",
"(",
"self",
",",
"server_info",
":",
"ServerConnectivityInfo",
",",
"scan_command",
":",
"PluginScanCommand",
")",
"->",
"PluginScanResult",
":",
"plugin_class",
"=",
"self",
".",
"_plugins_repository",
".",
"get_plugin_class_for_command",
"(",
"scan_command",
")",
"plugin",
"=",
"plugin_class",
"(",
")",
"return",
"plugin",
".",
"process_task",
"(",
"server_info",
",",
"scan_command",
")"
]
| Run a single scan command against a server; will block until the scan command has been completed.
Args:
server_info: The server's connectivity information. The test_connectivity_to_server() method must have been
called first to ensure that the server is online and accessible.
scan_command: The scan command to run against this server.
Returns:
The result of the scan command, which will be an instance of the scan command's
corresponding PluginScanResult subclass. | [
"Run",
"a",
"single",
"scan",
"command",
"against",
"a",
"server",
";",
"will",
"block",
"until",
"the",
"scan",
"command",
"has",
"been",
"completed",
"."
]
| python | train | 47.210526 |
brainiak/brainiak | brainiak/hyperparamopt/hpo.py | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/hyperparamopt/hpo.py#L126-L158 | def get_gmm_pdf(self, x):
"""Calculate the GMM likelihood for a single point.
.. math::
y = \\sum_{i=1}^{N} w_i
\\times \\text{normpdf}(x, x_i, \\sigma_i)/\\sum_{i=1}^{N} w_i
:label: gmm-likelihood
Arguments
---------
x : float
Point at which likelihood needs to be computed
Returns
-------
float
Likelihood value at x
"""
def my_norm_pdf(xt, mu, sigma):
z = (xt - mu) / sigma
return (math.exp(-0.5 * z * z)
/ (math.sqrt(2. * np.pi) * sigma))
y = 0
if (x < self.min_limit):
return 0
if (x > self.max_limit):
return 0
for _x in range(self.points.size):
y += (my_norm_pdf(x, self.points[_x], self.sigma[_x])
* self.weights[_x]) / self.W_sum
return y | [
"def",
"get_gmm_pdf",
"(",
"self",
",",
"x",
")",
":",
"def",
"my_norm_pdf",
"(",
"xt",
",",
"mu",
",",
"sigma",
")",
":",
"z",
"=",
"(",
"xt",
"-",
"mu",
")",
"/",
"sigma",
"return",
"(",
"math",
".",
"exp",
"(",
"-",
"0.5",
"*",
"z",
"*",
"z",
")",
"/",
"(",
"math",
".",
"sqrt",
"(",
"2.",
"*",
"np",
".",
"pi",
")",
"*",
"sigma",
")",
")",
"y",
"=",
"0",
"if",
"(",
"x",
"<",
"self",
".",
"min_limit",
")",
":",
"return",
"0",
"if",
"(",
"x",
">",
"self",
".",
"max_limit",
")",
":",
"return",
"0",
"for",
"_x",
"in",
"range",
"(",
"self",
".",
"points",
".",
"size",
")",
":",
"y",
"+=",
"(",
"my_norm_pdf",
"(",
"x",
",",
"self",
".",
"points",
"[",
"_x",
"]",
",",
"self",
".",
"sigma",
"[",
"_x",
"]",
")",
"*",
"self",
".",
"weights",
"[",
"_x",
"]",
")",
"/",
"self",
".",
"W_sum",
"return",
"y"
]
| Calculate the GMM likelihood for a single point.
.. math::
y = \\sum_{i=1}^{N} w_i
\\times \\text{normpdf}(x, x_i, \\sigma_i)/\\sum_{i=1}^{N} w_i
:label: gmm-likelihood
Arguments
---------
x : float
Point at which likelihood needs to be computed
Returns
-------
float
Likelihood value at x | [
"Calculate",
"the",
"GMM",
"likelihood",
"for",
"a",
"single",
"point",
"."
]
| python | train | 27.212121 |
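The likelihood formula in the docstring above, written out standalone with NumPy for a tiny three-component mixture (the points, widths, weights, and limits are invented):

```python
import math
import numpy as np

def gmm_pdf(x, points, sigma, weights, min_limit=0.0, max_limit=1.0):
    """Weighted sum of normal pdfs, normalised by the total weight and
    clipped to zero outside [min_limit, max_limit], following the same
    rule as get_gmm_pdf above."""
    if x < min_limit or x > max_limit:
        return 0.0
    w_sum = weights.sum()
    z = (x - points) / sigma
    norm = np.exp(-0.5 * z * z) / (math.sqrt(2.0 * math.pi) * sigma)
    return float((norm * weights).sum() / w_sum)

points = np.array([0.2, 0.5, 0.8])
sigma = np.array([0.05, 0.10, 0.05])
weights = np.array([1.0, 2.0, 1.0])
print(gmm_pdf(0.5, points, sigma, weights))  # peak of the middle component
```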
saltstack/salt | salt/states/selinux.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/selinux.py#L292-L315 | def module_remove(name):
'''
Removes SELinux module
name
The name of the module to remove
.. versionadded:: 2016.11.6
'''
ret = {'name': name,
'result': True,
'comment': '',
'changes': {}}
modules = __salt__['selinux.list_semod']()
if name not in modules:
ret['comment'] = 'Module {0} is not available'.format(name)
ret['result'] = False
return ret
if __salt__['selinux.remove_semod'](name):
ret['comment'] = 'Module {0} has been removed'.format(name)
return ret
ret['result'] = False
ret['comment'] = 'Failed to remove module {0}'.format(name)
return ret | [
"def",
"module_remove",
"(",
"name",
")",
":",
"ret",
"=",
"{",
"'name'",
":",
"name",
",",
"'result'",
":",
"True",
",",
"'comment'",
":",
"''",
",",
"'changes'",
":",
"{",
"}",
"}",
"modules",
"=",
"__salt__",
"[",
"'selinux.list_semod'",
"]",
"(",
")",
"if",
"name",
"not",
"in",
"modules",
":",
"ret",
"[",
"'comment'",
"]",
"=",
"'Module {0} is not available'",
".",
"format",
"(",
"name",
")",
"ret",
"[",
"'result'",
"]",
"=",
"False",
"return",
"ret",
"if",
"__salt__",
"[",
"'selinux.remove_semod'",
"]",
"(",
"name",
")",
":",
"ret",
"[",
"'comment'",
"]",
"=",
"'Module {0} has been removed'",
".",
"format",
"(",
"name",
")",
"return",
"ret",
"ret",
"[",
"'result'",
"]",
"=",
"False",
"ret",
"[",
"'comment'",
"]",
"=",
"'Failed to remove module {0}'",
".",
"format",
"(",
"name",
")",
"return",
"ret"
]
| Removes SELinux module
name
The name of the module to remove
.. versionadded:: 2016.11.6 | [
"Removes",
"SELinux",
"module"
]
| python | train | 27.625 |
Knio/pynmea2 | pynmea2/nmea_file.py | https://github.com/Knio/pynmea2/blob/c4fc66c6a13dd85ad862b15c516245af6e571456/pynmea2/nmea_file.py#L23-L28 | def open(self, fp, mode='r'):
"""
Open the NMEAFile.
"""
self._file = open(fp, mode=mode)
return self._file | [
"def",
"open",
"(",
"self",
",",
"fp",
",",
"mode",
"=",
"'r'",
")",
":",
"self",
".",
"_file",
"=",
"open",
"(",
"fp",
",",
"mode",
"=",
"mode",
")",
"return",
"self",
".",
"_file"
]
| Open the NMEAFile. | [
"Open",
"the",
"NMEAFile",
"."
]
| python | train | 23.666667 |
edx/edx-enterprise | enterprise/api_client/enterprise.py | https://github.com/edx/edx-enterprise/blob/aea91379ab0a87cd3bc798961fce28b60ee49a80/enterprise/api_client/enterprise.py#L73-L119 | def _load_data(
self,
resource,
detail_resource=None,
resource_id=None,
querystring=None,
traverse_pagination=False,
default=DEFAULT_VALUE_SAFEGUARD,
):
"""
Loads a response from a call to one of the Enterprise endpoints.
:param resource: The endpoint resource name.
:param detail_resource: The sub-resource to append to the path.
:param resource_id: The resource ID for the specific detail to get from the endpoint.
:param querystring: Optional query string parameters.
:param traverse_pagination: Whether to traverse pagination or return paginated response.
:param default: The default value to return in case of no response content.
:return: Data returned by the API.
"""
default_val = default if default != self.DEFAULT_VALUE_SAFEGUARD else {}
querystring = querystring if querystring else {}
cache_key = utils.get_cache_key(
resource=resource,
querystring=querystring,
traverse_pagination=traverse_pagination,
resource_id=resource_id
)
response = cache.get(cache_key)
if not response:
# Response is not cached, so make a call.
endpoint = getattr(self.client, resource)(resource_id)
endpoint = getattr(endpoint, detail_resource) if detail_resource else endpoint
response = endpoint.get(**querystring)
if traverse_pagination:
results = utils.traverse_pagination(response, endpoint)
response = {
'count': len(results),
'next': 'None',
'previous': 'None',
'results': results,
}
if response:
# Now that we've got a response, cache it.
cache.set(cache_key, response, settings.ENTERPRISE_API_CACHE_TIMEOUT)
return response or default_val | [
"def",
"_load_data",
"(",
"self",
",",
"resource",
",",
"detail_resource",
"=",
"None",
",",
"resource_id",
"=",
"None",
",",
"querystring",
"=",
"None",
",",
"traverse_pagination",
"=",
"False",
",",
"default",
"=",
"DEFAULT_VALUE_SAFEGUARD",
",",
")",
":",
"default_val",
"=",
"default",
"if",
"default",
"!=",
"self",
".",
"DEFAULT_VALUE_SAFEGUARD",
"else",
"{",
"}",
"querystring",
"=",
"querystring",
"if",
"querystring",
"else",
"{",
"}",
"cache_key",
"=",
"utils",
".",
"get_cache_key",
"(",
"resource",
"=",
"resource",
",",
"querystring",
"=",
"querystring",
",",
"traverse_pagination",
"=",
"traverse_pagination",
",",
"resource_id",
"=",
"resource_id",
")",
"response",
"=",
"cache",
".",
"get",
"(",
"cache_key",
")",
"if",
"not",
"response",
":",
"# Response is not cached, so make a call.",
"endpoint",
"=",
"getattr",
"(",
"self",
".",
"client",
",",
"resource",
")",
"(",
"resource_id",
")",
"endpoint",
"=",
"getattr",
"(",
"endpoint",
",",
"detail_resource",
")",
"if",
"detail_resource",
"else",
"endpoint",
"response",
"=",
"endpoint",
".",
"get",
"(",
"*",
"*",
"querystring",
")",
"if",
"traverse_pagination",
":",
"results",
"=",
"utils",
".",
"traverse_pagination",
"(",
"response",
",",
"endpoint",
")",
"response",
"=",
"{",
"'count'",
":",
"len",
"(",
"results",
")",
",",
"'next'",
":",
"'None'",
",",
"'previous'",
":",
"'None'",
",",
"'results'",
":",
"results",
",",
"}",
"if",
"response",
":",
"# Now that we've got a response, cache it.",
"cache",
".",
"set",
"(",
"cache_key",
",",
"response",
",",
"settings",
".",
"ENTERPRISE_API_CACHE_TIMEOUT",
")",
"return",
"response",
"or",
"default_val"
]
| Loads a response from a call to one of the Enterprise endpoints.
:param resource: The endpoint resource name.
:param detail_resource: The sub-resource to append to the path.
:param resource_id: The resource ID for the specific detail to get from the endpoint.
:param querystring: Optional query string parameters.
:param traverse_pagination: Whether to traverse pagination or return paginated response.
:param default: The default value to return in case of no response content.
:return: Data returned by the API. | [
"Loads",
"a",
"response",
"from",
"a",
"call",
"to",
"one",
"of",
"the",
"Enterprise",
"endpoints",
"."
]
| python | valid | 42.446809 |
devassistant/devassistant | devassistant/dapi/dapicli.py | https://github.com/devassistant/devassistant/blob/2dbfeaa666a64127263664d18969c55d19ecc83e/devassistant/dapi/dapicli.py#L337-L346 | def format_search(q, **kwargs):
'''Formats the results of a search'''
m = search(q, **kwargs)
count = m['count']
if not count:
raise DapiCommError('Could not find any DAP packages for your query.')
for mdap in m['results']:
mdap = mdap['content_object']
return _format_dap_with_description(mdap) | [
"def",
"format_search",
"(",
"q",
",",
"*",
"*",
"kwargs",
")",
":",
"m",
"=",
"search",
"(",
"q",
",",
"*",
"*",
"kwargs",
")",
"count",
"=",
"m",
"[",
"'count'",
"]",
"if",
"not",
"count",
":",
"raise",
"DapiCommError",
"(",
"'Could not find any DAP packages for your query.'",
")",
"return",
"for",
"mdap",
"in",
"m",
"[",
"'results'",
"]",
":",
"mdap",
"=",
"mdap",
"[",
"'content_object'",
"]",
"return",
"_format_dap_with_description",
"(",
"mdap",
")"
]
| Formats the results of a search | [
"Formats",
"the",
"results",
"of",
"a",
"search"
]
| python | train | 34.5 |
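A hedged call pattern for the helper above; the query string is a placeholder, and `DapiCommError` is raised when nothing matches:

try:
    print(format_search('vim'))   # prints "name - description" for the first match
except DapiCommError as exc:
    print(exc)
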
mitsei/dlkit | dlkit/handcar/repository/managers.py | https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/handcar/repository/managers.py#L1926-L1949 | def get_asset_lookup_session(self, proxy, *args, **kwargs):
"""Gets the OsidSession associated with the asset lookup service.
arg proxy (osid.proxy.Proxy): a proxy
return: (osid.repository.AssetLookupSession) - the new
AssetLookupSession
raise: OperationFailed - unable to complete request
raise: Unimplemented - supports_asset_lookup() is false
compliance: optional - This method must be implemented if
supports_asset_lookup() is true.
"""
if not self.supports_asset_lookup():
raise Unimplemented()
try:
from . import sessions
except ImportError:
raise # OperationFailed()
proxy = self._convert_proxy(proxy)
try:
session = sessions.AssetLookupSession(proxy=proxy, runtime=self._runtime, **kwargs)
except AttributeError:
raise # OperationFailed()
return session | [
"def",
"get_asset_lookup_session",
"(",
"self",
",",
"proxy",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"not",
"self",
".",
"supports_asset_lookup",
"(",
")",
":",
"raise",
"Unimplemented",
"(",
")",
"try",
":",
"from",
".",
"import",
"sessions",
"except",
"ImportError",
":",
"raise",
"# OperationFailed()",
"proxy",
"=",
"self",
".",
"_convert_proxy",
"(",
"proxy",
")",
"try",
":",
"session",
"=",
"sessions",
".",
"AssetLookupSession",
"(",
"proxy",
"=",
"proxy",
",",
"runtime",
"=",
"self",
".",
"_runtime",
",",
"*",
"*",
"kwargs",
")",
"except",
"AttributeError",
":",
"raise",
"# OperationFailed()",
"return",
"session"
]
| Gets the OsidSession associated with the asset lookup service.
arg proxy (osid.proxy.Proxy): a proxy
return: (osid.repository.AssetLookupSession) - the new
AssetLookupSession
raise: OperationFailed - unable to complete request
raise: Unimplemented - supports_asset_lookup() is false
compliance: optional - This method must be implemented if
supports_asset_lookup() is true. | [
"Gets",
"the",
"OsidSession",
"associated",
"with",
"the",
"asset",
"lookup",
"service",
"."
]
| python | train | 39.833333 |
knagra/farnsworth | events/views.py | https://github.com/knagra/farnsworth/blob/1b6589f0d9fea154f0a1e2231ed906764ed26d26/events/views.py#L42-L86 | def list_events_view(request):
''' A list view of upcoming events. '''
page_name = "Upcoming Events"
profile = UserProfile.objects.get(user=request.user)
event_form = EventForm(
request.POST if 'post_event' in request.POST else None,
profile=profile,
)
if event_form.is_valid():
event_form.save()
return HttpResponseRedirect(reverse('events:list'))
# a pseudo-dictionary, actually a list with items of form (event, ongoing,
# rsvpd, rsvp_form), where ongoing is a boolean of whether the event is
# currently ongoing, rsvpd is a boolean of whether the user has rsvp'd to
# the event
events_dict = list()
for event in Event.objects.filter(end_time__gte=now()):
rsvp_form = RsvpForm(
request.POST if "rsvp-{0}".format(event.pk) in request.POST else None,
instance=event,
profile=profile,
)
if rsvp_form.is_valid():
rsvpd = rsvp_form.save()
if rsvpd:
message = MESSAGES['RSVP_ADD'].format(event=event.title)
else:
message = MESSAGES['RSVP_REMOVE'].format(event=event.title)
messages.add_message(request, messages.SUCCESS, message)
return HttpResponseRedirect(reverse('events:list'))
ongoing = ((event.start_time <= now()) and (event.end_time >= now()))
rsvpd = (profile in event.rsvps.all())
events_dict.append((event, ongoing, rsvpd, rsvp_form))
if request.method == "POST":
messages.add_message(request, messages.ERROR, MESSAGES["EVENT_ERROR"])
return render_to_response('list_events.html', {
'page_name': page_name,
'events_dict': events_dict,
'now': now(),
'event_form': event_form,
}, context_instance=RequestContext(request)) | [
"def",
"list_events_view",
"(",
"request",
")",
":",
"page_name",
"=",
"\"Upcoming Events\"",
"profile",
"=",
"UserProfile",
".",
"objects",
".",
"get",
"(",
"user",
"=",
"request",
".",
"user",
")",
"event_form",
"=",
"EventForm",
"(",
"request",
".",
"POST",
"if",
"'post_event'",
"in",
"request",
".",
"POST",
"else",
"None",
",",
"profile",
"=",
"profile",
",",
")",
"if",
"event_form",
".",
"is_valid",
"(",
")",
":",
"event_form",
".",
"save",
"(",
")",
"return",
"HttpResponseRedirect",
"(",
"reverse",
"(",
"'events:list'",
")",
")",
"# a pseudo-dictionary, actually a list with items of form (event, ongoing,",
"# rsvpd, rsvp_form), where ongoing is a boolean of whether the event is",
"# currently ongoing, rsvpd is a boolean of whether the user has rsvp'd to",
"# the event",
"events_dict",
"=",
"list",
"(",
")",
"for",
"event",
"in",
"Event",
".",
"objects",
".",
"filter",
"(",
"end_time__gte",
"=",
"now",
"(",
")",
")",
":",
"rsvp_form",
"=",
"RsvpForm",
"(",
"request",
".",
"POST",
"if",
"\"rsvp-{0}\"",
".",
"format",
"(",
"event",
".",
"pk",
")",
"in",
"request",
".",
"POST",
"else",
"None",
",",
"instance",
"=",
"event",
",",
"profile",
"=",
"profile",
",",
")",
"if",
"rsvp_form",
".",
"is_valid",
"(",
")",
":",
"rsvpd",
"=",
"rsvp_form",
".",
"save",
"(",
")",
"if",
"rsvpd",
":",
"message",
"=",
"MESSAGES",
"[",
"'RSVP_ADD'",
"]",
".",
"format",
"(",
"event",
"=",
"event",
".",
"title",
")",
"else",
":",
"message",
"=",
"MESSAGES",
"[",
"'RSVP_REMOVE'",
"]",
".",
"format",
"(",
"event",
"=",
"event",
".",
"title",
")",
"messages",
".",
"add_message",
"(",
"request",
",",
"messages",
".",
"SUCCESS",
",",
"message",
")",
"return",
"HttpResponseRedirect",
"(",
"reverse",
"(",
"'events:list'",
")",
")",
"ongoing",
"=",
"(",
"(",
"event",
".",
"start_time",
"<=",
"now",
"(",
")",
")",
"and",
"(",
"event",
".",
"end_time",
">=",
"now",
"(",
")",
")",
")",
"rsvpd",
"=",
"(",
"profile",
"in",
"event",
".",
"rsvps",
".",
"all",
"(",
")",
")",
"events_dict",
".",
"append",
"(",
"(",
"event",
",",
"ongoing",
",",
"rsvpd",
",",
"rsvp_form",
")",
")",
"if",
"request",
".",
"method",
"==",
"\"POST\"",
":",
"messages",
".",
"add_message",
"(",
"request",
",",
"messages",
".",
"ERROR",
",",
"MESSAGES",
"[",
"\"EVENT_ERROR\"",
"]",
")",
"return",
"render_to_response",
"(",
"'list_events.html'",
",",
"{",
"'page_name'",
":",
"page_name",
",",
"'events_dict'",
":",
"events_dict",
",",
"'now'",
":",
"now",
"(",
")",
",",
"'event_form'",
":",
"event_form",
",",
"}",
",",
"context_instance",
"=",
"RequestContext",
"(",
"request",
")",
")"
]
| A list view of upcoming events. | [
"A",
"list",
"view",
"of",
"upcoming",
"events",
"."
]
| python | train | 39.888889 |
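The view reverses 'events:list', so it is presumably mounted under an 'events' namespace; a hypothetical URLconf sketch (Django 1.x style, names assumed from the `reverse()` calls above):

# events/urls.py -- route name assumed from reverse('events:list')
from django.conf.urls import url
from events import views

app_name = 'events'
urlpatterns = [
    url(r'^$', views.list_events_view, name='list'),
]
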
justquick/django-native-tags | native_tags/nodes.py | https://github.com/justquick/django-native-tags/blob/d40b976ee1cb13faeb04f0dedf02933d4274abf2/native_tags/nodes.py#L47-L53 | def get_cache_key(bucket, name, args, kwargs):
"""
Gets a unique SHA1 cache key for any call to a native tag.
Use args and kwargs in hash so that the same arguments use the same key
"""
u = ''.join(map(str, (bucket, name, args, kwargs)))
return 'native_tags.%s' % sha_constructor(u).hexdigest() | [
"def",
"get_cache_key",
"(",
"bucket",
",",
"name",
",",
"args",
",",
"kwargs",
")",
":",
"u",
"=",
"''",
".",
"join",
"(",
"map",
"(",
"str",
",",
"(",
"bucket",
",",
"name",
",",
"args",
",",
"kwargs",
")",
")",
")",
"return",
"'native_tags.%s'",
"%",
"sha_constructor",
"(",
"u",
")",
".",
"hexdigest",
"(",
")"
]
| Gets a unique SHA1 cache key for any call to a native tag.
Use args and kwargs in hash so that the same arguments use the same key | [
"Gets",
"a",
"unique",
"SHA1",
"cache",
"key",
"for",
"any",
"call",
"to",
"a",
"native",
"tag",
".",
"Use",
"args",
"and",
"kwargs",
"in",
"hash",
"so",
"that",
"the",
"same",
"arguments",
"use",
"the",
"same",
"key"
]
| python | train | 44.571429 |
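Because the key is just a SHA1 over the stringified call signature, identical arguments always map to the same key; a self-contained sketch with hashlib standing in for `sha_constructor`:

import hashlib

def get_cache_key(bucket, name, args, kwargs):
    u = ''.join(map(str, (bucket, name, args, kwargs)))
    return 'native_tags.%s' % hashlib.sha1(u.encode()).hexdigest()

assert get_cache_key('filters', 'mul', (2, 3), {}) == \
       get_cache_key('filters', 'mul', (2, 3), {})   # same args, same key
assert get_cache_key('filters', 'mul', (2, 3), {}) != \
       get_cache_key('filters', 'mul', (2, 4), {})   # different args, different key
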
theduke/django-baseline | django_baseline/templatetags/helpers.py | https://github.com/theduke/django-baseline/blob/7be8b956e53c70b35f34e1783a8fe8f716955afb/django_baseline/templatetags/helpers.py#L127-L135 | def mul(value, arg):
"""Multiply the arg with the value."""
try:
return valid_numeric(value) * valid_numeric(arg)
except (ValueError, TypeError):
try:
return value * arg
except Exception:
return '' | [
"def",
"mul",
"(",
"value",
",",
"arg",
")",
":",
"try",
":",
"return",
"valid_numeric",
"(",
"value",
")",
"*",
"valid_numeric",
"(",
"arg",
")",
"except",
"(",
"ValueError",
",",
"TypeError",
")",
":",
"try",
":",
"return",
"value",
"*",
"arg",
"except",
"Exception",
":",
"return",
"''"
]
| Multiply the arg with the value. | [
"Multiply",
"the",
"arg",
"with",
"the",
"value",
"."
]
| python | test | 27.666667 |
project-ncl/pnc-cli | pnc_cli/buildconfigurations.py | https://github.com/project-ncl/pnc-cli/blob/3dc149bf84928f60a8044ac50b58bbaddd451902/pnc_cli/buildconfigurations.py#L278-L284 | def list_build_configurations_for_product(id=None, name=None, page_size=200, page_index=0, sort="", q=""):
"""
List all BuildConfigurations associated with the given Product.
"""
data = list_build_configurations_for_product_raw(id, name, page_size, page_index, sort, q)
if data:
return utils.format_json_list(data) | [
"def",
"list_build_configurations_for_product",
"(",
"id",
"=",
"None",
",",
"name",
"=",
"None",
",",
"page_size",
"=",
"200",
",",
"page_index",
"=",
"0",
",",
"sort",
"=",
"\"\"",
",",
"q",
"=",
"\"\"",
")",
":",
"data",
"=",
"list_build_configurations_for_product_raw",
"(",
"id",
",",
"name",
",",
"page_size",
",",
"page_index",
",",
"sort",
",",
"q",
")",
"if",
"data",
":",
"return",
"utils",
".",
"format_json_list",
"(",
"data",
")"
]
| List all BuildConfigurations associated with the given Product. | [
"List",
"all",
"BuildConfigurations",
"associated",
"with",
"the",
"given",
"Product",
"."
]
| python | train | 48 |
leonjza/filesmudge | filesmudge/smudge.py | https://github.com/leonjza/filesmudge/blob/ba519aa4df85751458faf68220964d3e2480b9fc/filesmudge/smudge.py#L107-L118 | def smudge(newtype, target):
"""
Smudge magic bytes with a known type
"""
db = smudge_db.get()
magic_bytes = db[newtype]['magic']
magic_offset = db[newtype]['offset']
_backup_bytes(target, magic_offset, len(magic_bytes))
_smudge_bytes(target, magic_offset, magic_bytes) | [
"def",
"smudge",
"(",
"newtype",
",",
"target",
")",
":",
"db",
"=",
"smudge_db",
".",
"get",
"(",
")",
"magic_bytes",
"=",
"db",
"[",
"newtype",
"]",
"[",
"'magic'",
"]",
"magic_offset",
"=",
"db",
"[",
"newtype",
"]",
"[",
"'offset'",
"]",
"_backup_bytes",
"(",
"target",
",",
"magic_offset",
",",
"len",
"(",
"magic_bytes",
")",
")",
"_smudge_bytes",
"(",
"target",
",",
"magic_offset",
",",
"magic_bytes",
")"
]
| Smudge magic bytes with a known type | [
"Smudge",
"magic",
"bytes",
"with",
"a",
"known",
"type"
]
| python | train | 24.75 |
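A hedged one-liner for the function above; 'png' is assumed to be a key present in `smudge_db`, and the target path is a placeholder:

smudge('png', '/tmp/target.bin')   # back up, then overwrite the magic bytes in place
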
AndrewAnnex/SpiceyPy | spiceypy/spiceypy.py | https://github.com/AndrewAnnex/SpiceyPy/blob/fc20a9b9de68b58eed5b332f0c051fb343a6e335/spiceypy/spiceypy.py#L13895-L13917 | def tipbod(ref, body, et):
"""
Return a 3x3 matrix that transforms positions in inertial
coordinates to positions in body-equator-and-prime-meridian
coordinates.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/tipbod_c.html
:param ref: ID of inertial reference frame to transform from.
:type ref: str
:param body: ID code of body.
:type body: int
:param et: Epoch of transformation.
:type et: float
:return: Transformation (position), inertial to prime meridian.
:rtype: 3x3-Element Array of floats
"""
ref = stypes.stringToCharP(ref)
body = ctypes.c_int(body)
et = ctypes.c_double(et)
retmatrix = stypes.emptyDoubleMatrix()
libspice.tipbod_c(ref, body, et, retmatrix)
return stypes.cMatrixToNumpy(retmatrix) | [
"def",
"tipbod",
"(",
"ref",
",",
"body",
",",
"et",
")",
":",
"ref",
"=",
"stypes",
".",
"stringToCharP",
"(",
"ref",
")",
"body",
"=",
"ctypes",
".",
"c_int",
"(",
"body",
")",
"et",
"=",
"ctypes",
".",
"c_double",
"(",
"et",
")",
"retmatrix",
"=",
"stypes",
".",
"emptyDoubleMatrix",
"(",
")",
"libspice",
".",
"tipbod_c",
"(",
"ref",
",",
"body",
",",
"et",
",",
"retmatrix",
")",
"return",
"stypes",
".",
"cMatrixToNumpy",
"(",
"retmatrix",
")"
]
| Return a 3x3 matrix that transforms positions in inertial
coordinates to positions in body-equator-and-prime-meridian
coordinates.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/tipbod_c.html
:param ref: ID of inertial reference frame to transform from.
:type ref: str
:param body: ID code of body.
:type body: int
:param et: Epoch of transformation.
:type et: float
:return: Transformation (position), inertial to prime meridian.
:rtype: 3x3-Element Array of floats | [
"Return",
"a",
"3x3",
"matrix",
"that",
"transforms",
"positions",
"in",
"inertial",
"coordinates",
"to",
"positions",
"in",
"body",
"-",
"equator",
"-",
"and",
"-",
"prime",
"-",
"meridian",
"coordinates",
"."
]
| python | train | 33.73913 |
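A typical call sketch; the kernel file names are placeholders, and a leapseconds kernel plus a text PCK must be loaded first for `str2et` and `tipbod` to work:

import spiceypy as spice

spice.furnsh('naif0012.tls')        # leapseconds kernel (path assumed)
spice.furnsh('pck00010.tpc')        # planetary constants kernel (path assumed)
et = spice.str2et('2020-01-01T00:00:00')
m = spice.tipbod('J2000', 399, et)  # 3x3 rotation: J2000 -> Earth body-fixed
spice.kclear()
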
keenlabs/KeenClient-Python | keen/__init__.py | https://github.com/keenlabs/KeenClient-Python/blob/266387c3376d1e000d117e17c45045ae3439d43f/keen/__init__.py#L385-L406 | def extraction(event_collection, timeframe=None, timezone=None, filters=None, latest=None, email=None,
property_names=None):
""" Performs a data extraction
Returns either a JSON object of events or a response
indicating an email will be sent to you with data.
:param event_collection: string, the name of the collection to query
:param timeframe: string or dict, the timeframe in which the events
happened example: "previous_7_days"
:param timezone: int, the timezone you'd like to use for the timeframe
and interval in seconds
:param filters: array of dict, contains the filters you'd like to apply to the data
example: [{"property_name":"device", "operator":"eq", "property_value":"iPhone"}]
:param latest: int, the number of most recent records you'd like to return
:param email: string, optional string containing an email address to email results to
:param property_names: string or list of strings, used to limit the properties returned
"""
_initialize_client_from_environment()
return _client.extraction(event_collection=event_collection, timeframe=timeframe, timezone=timezone,
filters=filters, latest=latest, email=email, property_names=property_names) | [
"def",
"extraction",
"(",
"event_collection",
",",
"timeframe",
"=",
"None",
",",
"timezone",
"=",
"None",
",",
"filters",
"=",
"None",
",",
"latest",
"=",
"None",
",",
"email",
"=",
"None",
",",
"property_names",
"=",
"None",
")",
":",
"_initialize_client_from_environment",
"(",
")",
"return",
"_client",
".",
"extraction",
"(",
"event_collection",
"=",
"event_collection",
",",
"timeframe",
"=",
"timeframe",
",",
"timezone",
"=",
"timezone",
",",
"filters",
"=",
"filters",
",",
"latest",
"=",
"latest",
",",
"email",
"=",
"email",
",",
"property_names",
"=",
"property_names",
")"
]
| Performs a data extraction
Returns either a JSON object of events or a response
indicating an email will be sent to you with data.
:param event_collection: string, the name of the collection to query
:param timeframe: string or dict, the timeframe in which the events
happened example: "previous_7_days"
:param timezone: int, the timezone you'd like to use for the timeframe
and interval in seconds
:param filters: array of dict, contains the filters you'd like to apply to the data
example: [{"property_name":"device", "operator":"eq", "property_value":"iPhone"}]
:param latest: int, the number of most recent records you'd like to return
:param email: string, optional string containing an email address to email results to
:param property_names: string or list of strings, used to limit the properties returned | [
"Performs",
"a",
"data",
"extraction"
]
| python | train | 56.954545 |
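A minimal call pattern; the module-level helper initializes the client from KEEN_* environment variables, which are assumed to be set, and the collection/property names are placeholders:

import keen

events = keen.extraction(
    'purchases',                       # collection name (placeholder)
    timeframe='previous_7_days',
    latest=100,
    property_names=['item.name', 'price'],
)
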
agile-geoscience/striplog | striplog/striplog.py | https://github.com/agile-geoscience/striplog/blob/8033b673a151f96c29802b43763e863519a3124c/striplog/striplog.py#L2124-L2145 | def invert(self, copy=False):
"""
Inverts the striplog, changing its order and the order of its contents.
Operates in place by default.
Args:
copy (bool): Whether to operate in place or make a copy.
Returns:
None if operating in-place, or an inverted copy of the striplog
if not.
"""
if copy:
return Striplog([i.invert(copy=True) for i in self])
else:
for i in self:
i.invert()
self.__sort()
o = self.order
self.order = {'depth': 'elevation', 'elevation': 'depth'}[o]
return | [
"def",
"invert",
"(",
"self",
",",
"copy",
"=",
"False",
")",
":",
"if",
"copy",
":",
"return",
"Striplog",
"(",
"[",
"i",
".",
"invert",
"(",
"copy",
"=",
"True",
")",
"for",
"i",
"in",
"self",
"]",
")",
"else",
":",
"for",
"i",
"in",
"self",
":",
"i",
".",
"invert",
"(",
")",
"self",
".",
"__sort",
"(",
")",
"o",
"=",
"self",
".",
"order",
"self",
".",
"order",
"=",
"{",
"'depth'",
":",
"'elevation'",
",",
"'elevation'",
":",
"'depth'",
"}",
"[",
"o",
"]",
"return"
]
| Inverts the striplog, changing its order and the order of its contents.
Operates in place by default.
Args:
copy (bool): Whether to operate in place or make a copy.
Returns:
None if operating in-place, or an inverted copy of the striplog
if not. | [
"Inverts",
"the",
"striplog",
"changing",
"its",
"order",
"and",
"the",
"order",
"of",
"its",
"contents",
"."
]
| python | test | 29.454545 |
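A short sketch of the two calling modes, assuming `strip` is an existing Striplog instance:

flipped = strip.invert(copy=True)   # returns a new, inverted Striplog
strip.invert()                      # or flip in place; order toggles depth/elevation
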
e-dard/postcodes | postcodes.py | https://github.com/e-dard/postcodes/blob/d63c47b4ecd765bc2e4e6ba34bc0b8a796f44005/postcodes.py#L123-L127 | def _check_point(self, lat, lng):
""" Checks if latitude and longitude correct """
if abs(lat) > 90 or abs(lng) > 180:
msg = "Illegal lat and/or lng, (%s, %s) provided." % (lat, lng)
raise IllegalPointException(msg) | [
"def",
"_check_point",
"(",
"self",
",",
"lat",
",",
"lng",
")",
":",
"if",
"abs",
"(",
"lat",
")",
">",
"90",
"or",
"abs",
"(",
"lng",
")",
">",
"180",
":",
"msg",
"=",
"\"Illegal lat and/or lng, (%s, %s) provided.\"",
"%",
"(",
"lat",
",",
"lng",
")",
"raise",
"IllegalPointException",
"(",
"msg",
")"
]
| Checks if latitude and longitude are correct | [
"Checks",
"if",
"latitude",
"and",
"longitude",
"correct"
]
| python | train | 50.2 |
kolypto/py-smsframework | smsframework/Gateway.py | https://github.com/kolypto/py-smsframework/blob/4f3d812711f5e2e037dc80c4014c815fe2d68a0b/smsframework/Gateway.py#L139-L177 | def send(self, message):
""" Send a message object
:type message: data.OutgoingMessage
:param message: The message to send
:rtype: data.OutgoingMessage
:returns: The sent message with populated fields
:raises AssertionError: wrong provider name encountered (returned by the router, or provided to OutgoingMessage)
:raises MessageSendError: generic errors
:raises AuthError: provider authentication failed
:raises LimitsError: sending limits exceeded
:raises CreditError: not enough money on the account
"""
# Which provider to use?
provider_name = self._default_provider # default
if message.provider is not None:
assert message.provider in self._providers, \
                'Unknown provider specified in OutgoingMessage.provider: {}'.format(provider_name)
provider = self.get_provider(message.provider)
else:
# Apply routing
if message.routing_values is not None: # Use the default provider when no routing values are given
# Routing values are present
provider_name = self.router(message, *message.routing_values) or self._default_provider
assert provider_name in self._providers, \
'Routing function returned an unknown provider name: {}'.format(provider_name)
provider = self.get_provider(provider_name)
# Set message provider name
message.provider = provider.name
# Send the message using the provider
message = provider.send(message)
# Emit the send event
self.onSend(message)
# Finish
return message | [
"def",
"send",
"(",
"self",
",",
"message",
")",
":",
"# Which provider to use?",
"provider_name",
"=",
"self",
".",
"_default_provider",
"# default",
"if",
"message",
".",
"provider",
"is",
"not",
"None",
":",
"assert",
"message",
".",
"provider",
"in",
"self",
".",
"_providers",
",",
"'Unknown provider specified in OutgoingMessage.provideer: {}'",
".",
"format",
"(",
"provider_name",
")",
"provider",
"=",
"self",
".",
"get_provider",
"(",
"message",
".",
"provider",
")",
"else",
":",
"# Apply routing",
"if",
"message",
".",
"routing_values",
"is",
"not",
"None",
":",
"# Use the default provider when no routing values are given",
"# Routing values are present",
"provider_name",
"=",
"self",
".",
"router",
"(",
"message",
",",
"*",
"message",
".",
"routing_values",
")",
"or",
"self",
".",
"_default_provider",
"assert",
"provider_name",
"in",
"self",
".",
"_providers",
",",
"'Routing function returned an unknown provider name: {}'",
".",
"format",
"(",
"provider_name",
")",
"provider",
"=",
"self",
".",
"get_provider",
"(",
"provider_name",
")",
"# Set message provider name",
"message",
".",
"provider",
"=",
"provider",
".",
"name",
"# Send the message using the provider",
"message",
"=",
"provider",
".",
"send",
"(",
"message",
")",
"# Emit the send event",
"self",
".",
"onSend",
"(",
"message",
")",
"# Finish",
"return",
"message"
]
| Send a message object
:type message: data.OutgoingMessage
:param message: The message to send
:rtype: data.OutgoingMessage
:returns: The sent message with populated fields
:raises AssertionError: wrong provider name encountered (returned by the router, or provided to OutgoingMessage)
:raises MessageSendError: generic errors
:raises AuthError: provider authentication failed
:raises LimitsError: sending limits exceeded
:raises CreditError: not enough money on the account | [
"Send",
"a",
"message",
"object"
]
| python | test | 44.230769 |
partofthething/ace | ace/smoother.py | https://github.com/partofthething/ace/blob/1593a49f3c2e845514323e9c36ee253fe77bac3c/ace/smoother.py#L188-L201 | def _update_mean_in_window(self):
"""
        Compute mean in window the slow way. Useful for first step.
Considers all values in window
See Also
--------
_add_observation_to_means : fast update of mean for single observation addition
_remove_observation_from_means : fast update of mean for single observation removal
"""
self._mean_x_in_window = numpy.mean(self._x_in_window)
self._mean_y_in_window = numpy.mean(self._y_in_window) | [
"def",
"_update_mean_in_window",
"(",
"self",
")",
":",
"self",
".",
"_mean_x_in_window",
"=",
"numpy",
".",
"mean",
"(",
"self",
".",
"_x_in_window",
")",
"self",
".",
"_mean_y_in_window",
"=",
"numpy",
".",
"mean",
"(",
"self",
".",
"_y_in_window",
")"
]
| Compute mean in window the slow way. Useful for first step.
Considers all values in window
See Also
--------
_add_observation_to_means : fast update of mean for single observation addition
_remove_observation_from_means : fast update of mean for single observation removal | [
"Compute",
"mean",
"in",
"window",
"the",
"slow",
"way",
".",
"useful",
"for",
"first",
"step",
"."
]
| python | train | 35.285714 |
cds-astro/ipyaladin | ipyaladin/aladin_widget.py | https://github.com/cds-astro/ipyaladin/blob/a8fa9d3cca31b55dbd066a54d5007ceada8bae60/ipyaladin/aladin_widget.py#L173-L188 | def add_listener(self, listener_type, callback):
""" add a listener to the widget
Args:
            listener_type: string that can either be 'objectHovered' or 'objectClicked'
callback: python function"""
self.listener_type= listener_type
if listener_type == 'objectHovered':
self.listener_callback_source_hover= callback
elif listener_type == 'objectClicked':
self.listener_callback_source_click= callback
elif listener_type == 'click':
self.listener_callback_click= callback
elif listener_type == 'select':
self.listener_callback_select= callback
self.listener_flag= not self.listener_flag | [
"def",
"add_listener",
"(",
"self",
",",
"listener_type",
",",
"callback",
")",
":",
"self",
".",
"listener_type",
"=",
"listener_type",
"if",
"listener_type",
"==",
"'objectHovered'",
":",
"self",
".",
"listener_callback_source_hover",
"=",
"callback",
"elif",
"listener_type",
"==",
"'objectClicked'",
":",
"self",
".",
"listener_callback_source_click",
"=",
"callback",
"elif",
"listener_type",
"==",
"'click'",
":",
"self",
".",
"listener_callback_click",
"=",
"callback",
"elif",
"listener_type",
"==",
"'select'",
":",
"self",
".",
"listener_callback_select",
"=",
"callback",
"self",
".",
"listener_flag",
"=",
"not",
"self",
".",
"listener_flag"
]
| add a listener to the widget
Args:
        listener_type: string that can either be 'objectHovered' or 'objectClicked'
callback: python function | [
"add",
"a",
"listener",
"to",
"the",
"widget",
"Args",
":",
"listener_type",
":",
"string",
"that",
"can",
"either",
"be",
"objectHovered",
"or",
"objClicked",
"callback",
":",
"python",
"function"
]
| python | train | 44.4375 |
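A notebook usage sketch, assuming `aladin` is an existing Aladin widget instance; the callback receives the data sent by the JavaScript side:

def on_object_clicked(data):
    print(data)                      # dict describing the clicked source

aladin.add_listener('objectClicked', on_object_clicked)
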
OSSOS/MOP | src/ossos/core/ossos/storage.py | https://github.com/OSSOS/MOP/blob/94f91d32ad5ec081d5a1ebd67604a838003465af/src/ossos/core/ossos/storage.py#L1465-L1512 | def get_mopheader(expnum, ccd, version='p', prefix=None):
"""
Retrieve the mopheader, either from cache or from vospace
@param expnum:
@param ccd:
@param version:
@param prefix:
@return: Header
"""
prefix = prefix is None and "" or prefix
mopheader_uri = dbimages_uri(expnum=expnum,
ccd=ccd,
version=version,
prefix=prefix,
ext='.mopheader')
if mopheader_uri in mopheaders:
return mopheaders[mopheader_uri]
filename = os.path.basename(mopheader_uri)
if os.access(filename, os.F_OK):
logger.debug("File already on disk: {}".format(filename))
mopheader_fpt = StringIO(open(filename, 'r').read())
else:
mopheader_fpt = StringIO(open_vos_or_local(mopheader_uri).read())
with warnings.catch_warnings():
warnings.simplefilter('ignore', AstropyUserWarning)
mopheader = fits.open(mopheader_fpt)
# add some values to the mopheader so it can be an astrom header too.
header = mopheader[0].header
try:
header['FWHM'] = get_fwhm(expnum, ccd)
except IOError:
header['FWHM'] = 10
header['SCALE'] = mopheader[0].header['PIXSCALE']
header['NAX1'] = header['NAXIS1']
header['NAX2'] = header['NAXIS2']
header['MOPversion'] = header['MOP_VER']
header['MJD_OBS_CENTER'] = str(Time(header['MJD-OBSC'],
format='mjd',
scale='utc', precision=5).replicate(format='mpc'))
header['MAXCOUNT'] = MAXCOUNT
mopheaders[mopheader_uri] = header
mopheader.close()
return mopheaders[mopheader_uri] | [
"def",
"get_mopheader",
"(",
"expnum",
",",
"ccd",
",",
"version",
"=",
"'p'",
",",
"prefix",
"=",
"None",
")",
":",
"prefix",
"=",
"prefix",
"is",
"None",
"and",
"\"\"",
"or",
"prefix",
"mopheader_uri",
"=",
"dbimages_uri",
"(",
"expnum",
"=",
"expnum",
",",
"ccd",
"=",
"ccd",
",",
"version",
"=",
"version",
",",
"prefix",
"=",
"prefix",
",",
"ext",
"=",
"'.mopheader'",
")",
"if",
"mopheader_uri",
"in",
"mopheaders",
":",
"return",
"mopheaders",
"[",
"mopheader_uri",
"]",
"filename",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"mopheader_uri",
")",
"if",
"os",
".",
"access",
"(",
"filename",
",",
"os",
".",
"F_OK",
")",
":",
"logger",
".",
"debug",
"(",
"\"File already on disk: {}\"",
".",
"format",
"(",
"filename",
")",
")",
"mopheader_fpt",
"=",
"StringIO",
"(",
"open",
"(",
"filename",
",",
"'r'",
")",
".",
"read",
"(",
")",
")",
"else",
":",
"mopheader_fpt",
"=",
"StringIO",
"(",
"open_vos_or_local",
"(",
"mopheader_uri",
")",
".",
"read",
"(",
")",
")",
"with",
"warnings",
".",
"catch_warnings",
"(",
")",
":",
"warnings",
".",
"simplefilter",
"(",
"'ignore'",
",",
"AstropyUserWarning",
")",
"mopheader",
"=",
"fits",
".",
"open",
"(",
"mopheader_fpt",
")",
"# add some values to the mopheader so it can be an astrom header too.",
"header",
"=",
"mopheader",
"[",
"0",
"]",
".",
"header",
"try",
":",
"header",
"[",
"'FWHM'",
"]",
"=",
"get_fwhm",
"(",
"expnum",
",",
"ccd",
")",
"except",
"IOError",
":",
"header",
"[",
"'FWHM'",
"]",
"=",
"10",
"header",
"[",
"'SCALE'",
"]",
"=",
"mopheader",
"[",
"0",
"]",
".",
"header",
"[",
"'PIXSCALE'",
"]",
"header",
"[",
"'NAX1'",
"]",
"=",
"header",
"[",
"'NAXIS1'",
"]",
"header",
"[",
"'NAX2'",
"]",
"=",
"header",
"[",
"'NAXIS2'",
"]",
"header",
"[",
"'MOPversion'",
"]",
"=",
"header",
"[",
"'MOP_VER'",
"]",
"header",
"[",
"'MJD_OBS_CENTER'",
"]",
"=",
"str",
"(",
"Time",
"(",
"header",
"[",
"'MJD-OBSC'",
"]",
",",
"format",
"=",
"'mjd'",
",",
"scale",
"=",
"'utc'",
",",
"precision",
"=",
"5",
")",
".",
"replicate",
"(",
"format",
"=",
"'mpc'",
")",
")",
"header",
"[",
"'MAXCOUNT'",
"]",
"=",
"MAXCOUNT",
"mopheaders",
"[",
"mopheader_uri",
"]",
"=",
"header",
"mopheader",
".",
"close",
"(",
")",
"return",
"mopheaders",
"[",
"mopheader_uri",
"]"
]
| Retrieve the mopheader, either from cache or from vospace
@param expnum:
@param ccd:
@param version:
@param prefix:
@return: Header | [
"Retrieve",
"the",
"mopheader",
"either",
"from",
"cache",
"or",
"from",
"vospace"
]
| python | train | 36.895833 |
anlutro/diay.py | diay/__init__.py | https://github.com/anlutro/diay.py/blob/78cfd2b53c8dca3dbac468d620eaa0bb7af08275/diay/__init__.py#L123-L134 | def register_lazy_provider_method(self, cls, method):
"""
Register a class method lazily as a provider.
"""
if 'provides' not in getattr(method, '__di__', {}):
raise DiayException('method %r is not a provider' % method)
@functools.wraps(method)
def wrapper(*args, **kwargs):
return getattr(self.get(cls), method.__name__)(*args, **kwargs)
self.factories[method.__di__['provides']] = wrapper | [
"def",
"register_lazy_provider_method",
"(",
"self",
",",
"cls",
",",
"method",
")",
":",
"if",
"'provides'",
"not",
"in",
"getattr",
"(",
"method",
",",
"'__di__'",
",",
"{",
"}",
")",
":",
"raise",
"DiayException",
"(",
"'method %r is not a provider'",
"%",
"method",
")",
"@",
"functools",
".",
"wraps",
"(",
"method",
")",
"def",
"wrapper",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"getattr",
"(",
"self",
".",
"get",
"(",
"cls",
")",
",",
"method",
".",
"__name__",
")",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"self",
".",
"factories",
"[",
"method",
".",
"__di__",
"[",
"'provides'",
"]",
"]",
"=",
"wrapper"
]
| Register a class method lazily as a provider. | [
"Register",
"a",
"class",
"method",
"lazily",
"as",
"a",
"provider",
"."
]
| python | train | 38.416667 |
quantopian/empyrical | empyrical/stats.py | https://github.com/quantopian/empyrical/blob/badbdca75f5b293f28b5e947974894de041d6868/empyrical/stats.py#L1049-L1074 | def roll_alpha_beta(returns, factor_returns, window=10, **kwargs):
"""
Computes alpha and beta over a rolling window.
Parameters
----------
    returns : array-like
        The first array to pass to the rolling alpha-beta.
    factor_returns : array-like
        The second array to pass to the rolling alpha-beta.
window : int
Size of the rolling window in terms of the periodicity of the data.
out : array-like, optional
Array to use as output buffer.
If not passed, a new array will be created.
**kwargs
Forwarded to :func:`~empyrical.alpha_beta`.
"""
returns, factor_returns = _aligned_series(returns, factor_returns)
return roll_alpha_beta_aligned(
returns,
factor_returns,
window=window,
**kwargs
) | [
"def",
"roll_alpha_beta",
"(",
"returns",
",",
"factor_returns",
",",
"window",
"=",
"10",
",",
"*",
"*",
"kwargs",
")",
":",
"returns",
",",
"factor_returns",
"=",
"_aligned_series",
"(",
"returns",
",",
"factor_returns",
")",
"return",
"roll_alpha_beta_aligned",
"(",
"returns",
",",
"factor_returns",
",",
"window",
"=",
"window",
",",
"*",
"*",
"kwargs",
")"
]
| Computes alpha and beta over a rolling window.
Parameters
----------
returns : array-like
    The first array to pass to the rolling alpha-beta.
factor_returns : array-like
    The second array to pass to the rolling alpha-beta.
window : int
Size of the rolling window in terms of the periodicity of the data.
out : array-like, optional
Array to use as output buffer.
If not passed, a new array will be created.
**kwargs
Forwarded to :func:`~empyrical.alpha_beta`. | [
"Computes",
"alpha",
"and",
"beta",
"over",
"a",
"rolling",
"window",
"."
]
| python | train | 29.807692 |
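A runnable sketch with synthetic daily returns; the 10-observation window mirrors the default:

import numpy as np
import pandas as pd
from empyrical import roll_alpha_beta

idx = pd.date_range('2020-01-01', periods=60, freq='D')
bench = pd.Series(np.random.normal(0.0, 0.01, 60), index=idx)
strat = 0.5 * bench + np.random.normal(0.0, 0.005, 60)

rolling = roll_alpha_beta(strat, bench, window=10)  # one (alpha, beta) pair per window
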
cmbruns/pyopenvr | src/openvr/__init__.py | https://github.com/cmbruns/pyopenvr/blob/68395d26bb3df6ab1f0f059c38d441f962938be6/src/openvr/__init__.py#L3446-L3454 | def getApplicationKeyByProcessId(self, unProcessId, pchAppKeyBuffer, unAppKeyBufferLen):
"""
Returns the key of the application for the specified Process Id. The buffer should be at least
k_unMaxApplicationKeyLength in order to fit the key.
"""
fn = self.function_table.getApplicationKeyByProcessId
result = fn(unProcessId, pchAppKeyBuffer, unAppKeyBufferLen)
return result | [
"def",
"getApplicationKeyByProcessId",
"(",
"self",
",",
"unProcessId",
",",
"pchAppKeyBuffer",
",",
"unAppKeyBufferLen",
")",
":",
"fn",
"=",
"self",
".",
"function_table",
".",
"getApplicationKeyByProcessId",
"result",
"=",
"fn",
"(",
"unProcessId",
",",
"pchAppKeyBuffer",
",",
"unAppKeyBufferLen",
")",
"return",
"result"
]
| Returns the key of the application for the specified Process Id. The buffer should be at least
k_unMaxApplicationKeyLength in order to fit the key. | [
"Returns",
"the",
"key",
"of",
"the",
"application",
"for",
"the",
"specified",
"Process",
"Id",
".",
"The",
"buffer",
"should",
"be",
"at",
"least",
"k_unMaxApplicationKeyLength",
"in",
"order",
"to",
"fit",
"the",
"key",
"."
]
| python | train | 47 |
mordred-descriptor/mordred | mordred/_base/__init__.py | https://github.com/mordred-descriptor/mordred/blob/2848b088fd7b6735590242b5e22573babc724f10/mordred/_base/__init__.py#L68-L89 | def _Descriptor_from_json(self, obj):
"""Create Descriptor instance from json dict.
Parameters:
obj(dict): descriptor dict
Returns:
Descriptor: descriptor
"""
descs = getattr(self, "_all_descriptors", None)
if descs is None:
from mordred import descriptors
descs = {
cls.__name__: cls
for cls in get_descriptors_in_module(descriptors)
}
descs[ConstDescriptor.__name__] = ConstDescriptor
self._all_descriptors = descs
return _from_json(obj, descs) | [
"def",
"_Descriptor_from_json",
"(",
"self",
",",
"obj",
")",
":",
"descs",
"=",
"getattr",
"(",
"self",
",",
"\"_all_descriptors\"",
",",
"None",
")",
"if",
"descs",
"is",
"None",
":",
"from",
"mordred",
"import",
"descriptors",
"descs",
"=",
"{",
"cls",
".",
"__name__",
":",
"cls",
"for",
"cls",
"in",
"get_descriptors_in_module",
"(",
"descriptors",
")",
"}",
"descs",
"[",
"ConstDescriptor",
".",
"__name__",
"]",
"=",
"ConstDescriptor",
"self",
".",
"_all_descriptors",
"=",
"descs",
"return",
"_from_json",
"(",
"obj",
",",
"descs",
")"
]
| Create Descriptor instance from json dict.
Parameters:
obj(dict): descriptor dict
Returns:
Descriptor: descriptor | [
"Create",
"Descriptor",
"instance",
"from",
"json",
"dict",
"."
]
| python | test | 24.454545 |
rkargon/pixelsorter | pixelsorter/images2gif.py | https://github.com/rkargon/pixelsorter/blob/0775d1e487fbcb023e411e1818ba3290b0e8665e/pixelsorter/images2gif.py#L817-L822 | def altersingle(self, alpha, i, b, g, r):
"""Move neuron i towards biased (b,g,r) by factor alpha"""
n = self.network[i] # Alter hit neuron
n[0] -= (alpha * (n[0] - b))
n[1] -= (alpha * (n[1] - g))
n[2] -= (alpha * (n[2] - r)) | [
"def",
"altersingle",
"(",
"self",
",",
"alpha",
",",
"i",
",",
"b",
",",
"g",
",",
"r",
")",
":",
"n",
"=",
"self",
".",
"network",
"[",
"i",
"]",
"# Alter hit neuron",
"n",
"[",
"0",
"]",
"-=",
"(",
"alpha",
"*",
"(",
"n",
"[",
"0",
"]",
"-",
"b",
")",
")",
"n",
"[",
"1",
"]",
"-=",
"(",
"alpha",
"*",
"(",
"n",
"[",
"1",
"]",
"-",
"g",
")",
")",
"n",
"[",
"2",
"]",
"-=",
"(",
"alpha",
"*",
"(",
"n",
"[",
"2",
"]",
"-",
"r",
")",
")"
]
| Move neuron i towards biased (b,g,r) by factor alpha | [
"Move",
"neuron",
"i",
"towards",
"biased",
"(",
"b",
"g",
"r",
")",
"by",
"factor",
"alpha"
]
| python | train | 43.666667 |
abingham/spor | src/spor/cli.py | https://github.com/abingham/spor/blob/673c8c36c99a4b9ea882f002bfb529f1eca89126/src/spor/cli.py#L155-L169 | def _launch_editor(starting_text=''):
"Launch editor, let user write text, then return that text."
# TODO: What is a reasonable default for windows? Does this approach even
# make sense on windows?
editor = os.environ.get('EDITOR', 'vim')
with tempfile.TemporaryDirectory() as dirname:
filename = pathlib.Path(dirname) / 'metadata.yml'
with filename.open(mode='wt') as handle:
handle.write(starting_text)
subprocess.call([editor, filename])
with filename.open(mode='rt') as handle:
text = handle.read()
return text | [
"def",
"_launch_editor",
"(",
"starting_text",
"=",
"''",
")",
":",
"# TODO: What is a reasonable default for windows? Does this approach even",
"# make sense on windows?",
"editor",
"=",
"os",
".",
"environ",
".",
"get",
"(",
"'EDITOR'",
",",
"'vim'",
")",
"with",
"tempfile",
".",
"TemporaryDirectory",
"(",
")",
"as",
"dirname",
":",
"filename",
"=",
"pathlib",
".",
"Path",
"(",
"dirname",
")",
"/",
"'metadata.yml'",
"with",
"filename",
".",
"open",
"(",
"mode",
"=",
"'wt'",
")",
"as",
"handle",
":",
"handle",
".",
"write",
"(",
"starting_text",
")",
"subprocess",
".",
"call",
"(",
"[",
"editor",
",",
"filename",
"]",
")",
"with",
"filename",
".",
"open",
"(",
"mode",
"=",
"'rt'",
")",
"as",
"handle",
":",
"text",
"=",
"handle",
".",
"read",
"(",
")",
"return",
"text"
]
| Launch editor, let user write text, then return that text. | [
"Launch",
"editor",
"let",
"user",
"write",
"text",
"then",
"return",
"that",
"text",
"."
]
| python | train | 38.8 |
proteanhq/protean | src/protean/core/provider/__init__.py | https://github.com/proteanhq/protean/blob/0e29873f4aa634aa93cc08ed675dd749c7ed4b0f/src/protean/core/provider/__init__.py#L37-L44 | def get_provider(self, provider_name='default'):
"""Fetch provider with the name specified in Configuration file"""
try:
if self._providers is None:
self._providers = self._initialize_providers()
return self._providers[provider_name]
except KeyError:
raise AssertionError(f'No Provider registered with name {provider_name}') | [
"def",
"get_provider",
"(",
"self",
",",
"provider_name",
"=",
"'default'",
")",
":",
"try",
":",
"if",
"self",
".",
"_providers",
"is",
"None",
":",
"self",
".",
"_providers",
"=",
"self",
".",
"_initialize_providers",
"(",
")",
"return",
"self",
".",
"_providers",
"[",
"provider_name",
"]",
"except",
"KeyError",
":",
"raise",
"AssertionError",
"(",
"f'No Provider registered with name {provider_name}'",
")"
]
| Fetch provider with the name specified in Configuration file | [
"Fetch",
"provider",
"with",
"the",
"name",
"specified",
"in",
"Configuration",
"file"
]
| python | train | 49.125 |
log2timeline/plaso | plaso/formatters/mediator.py | https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/formatters/mediator.py#L36-L54 | def _GetWinevtRcDatabaseReader(self):
"""Opens the Windows Event Log resource database reader.
Returns:
WinevtResourcesSqlite3DatabaseReader: Windows Event Log resource
database reader or None.
"""
if not self._winevt_database_reader and self._data_location:
database_path = os.path.join(
self._data_location, self._WINEVT_RC_DATABASE)
if not os.path.isfile(database_path):
return None
self._winevt_database_reader = (
winevt_rc.WinevtResourcesSqlite3DatabaseReader())
if not self._winevt_database_reader.Open(database_path):
self._winevt_database_reader = None
return self._winevt_database_reader | [
"def",
"_GetWinevtRcDatabaseReader",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"_winevt_database_reader",
"and",
"self",
".",
"_data_location",
":",
"database_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"_data_location",
",",
"self",
".",
"_WINEVT_RC_DATABASE",
")",
"if",
"not",
"os",
".",
"path",
".",
"isfile",
"(",
"database_path",
")",
":",
"return",
"None",
"self",
".",
"_winevt_database_reader",
"=",
"(",
"winevt_rc",
".",
"WinevtResourcesSqlite3DatabaseReader",
"(",
")",
")",
"if",
"not",
"self",
".",
"_winevt_database_reader",
".",
"Open",
"(",
"database_path",
")",
":",
"self",
".",
"_winevt_database_reader",
"=",
"None",
"return",
"self",
".",
"_winevt_database_reader"
]
| Opens the Windows Event Log resource database reader.
Returns:
WinevtResourcesSqlite3DatabaseReader: Windows Event Log resource
database reader or None. | [
"Opens",
"the",
"Windows",
"Event",
"Log",
"resource",
"database",
"reader",
"."
]
| python | train | 35.684211 |
cokelaer/spectrum | src/spectrum/criteria.py | https://github.com/cokelaer/spectrum/blob/bad6c32e3f10e185098748f67bb421b378b06afe/src/spectrum/criteria.py#L232-L242 | def MDL(N, rho, k):
r"""Minimum Description Length
    .. math:: MDL(k) = N \log \rho_k + k \log N
:validation: results
"""
from numpy import log
#p = arange(1, len(rho)+1)
mdl = N* log(rho) + k * log(N)
return mdl | [
"def",
"MDL",
"(",
"N",
",",
"rho",
",",
"k",
")",
":",
"from",
"numpy",
"import",
"log",
"#p = arange(1, len(rho)+1)",
"mdl",
"=",
"N",
"*",
"log",
"(",
"rho",
")",
"+",
"k",
"*",
"log",
"(",
"N",
")",
"return",
"mdl"
]
| r"""Minimum Description Length
.. math:: MDL(k) = N \log \rho_k + k \log N
:validation: results | [
"r",
"Minimum",
"Description",
"Length"
]
| python | valid | 21.181818 |
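The criterion is evaluated across candidate model orders and the minimum picked; a self-contained sketch with hypothetical residual variances `rho`:

import numpy as np

N = 1024                                       # number of samples
rho = np.array([1.0, 0.5, 0.3, 0.28, 0.27])    # hypothetical error variance per order
k = np.arange(1, len(rho) + 1)
mdl = N * np.log(rho) + k * np.log(N)
best_order = int(k[np.argmin(mdl)])            # order minimizing description length
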
genicam/harvesters | src/harvesters/core.py | https://github.com/genicam/harvesters/blob/c3314a7f9d320bbf943e599aabac02521cd8e80b/src/harvesters/core.py#L679-L690 | def data_format_value(self):
"""
:return: The data type of the data component as integer value.
"""
try:
if self._part:
value = self._part.data_format
else:
value = self._buffer.pixel_format
except InvalidParameterException:
value = self._node_map.PixelFormat.value
return value | [
"def",
"data_format_value",
"(",
"self",
")",
":",
"try",
":",
"if",
"self",
".",
"_part",
":",
"value",
"=",
"self",
".",
"_part",
".",
"data_format",
"else",
":",
"value",
"=",
"self",
".",
"_buffer",
".",
"pixel_format",
"except",
"InvalidParameterException",
":",
"value",
"=",
"self",
".",
"_node_map",
".",
"PixelFormat",
".",
"value",
"return",
"value"
]
| :return: The data type of the data component as integer value. | [
":",
"return",
":",
"The",
"data",
"type",
"of",
"the",
"data",
"component",
"as",
"integer",
"value",
"."
]
| python | valid | 31.916667 |
Maplecroft/Winston | winston/stats.py | https://github.com/Maplecroft/Winston/blob/d70394c60d5b56d8b374b4db2240394dfd45cfa8/winston/stats.py#L24-L117 | def summary(raster, geometry=None, all_touched=False, mean_only=False,
bounds=None, exclude_nodata_value=True):
"""Return ``ST_SummaryStats`` style stats for the given raster.
If ``geometry`` is provided, we mask the raster with the given geometry and
return the stats for the intersection. The parameter can be a GeoJSON-like
object, a WKT string, or a Shapely geometry.
If ``all_touched`` is set, we include every pixel that is touched by the
given geometry. If set to ``False``, we only include pixels that are
"mostly" inside the given geometry (the calculation is done by Rasterio).
If ``mean_only`` is ``True`` we only return the mean value of the pixels,
not the full set of stats.
If ``bounds`` is passed, it should be a two-tuple of (min, max) to use for
filtering raster pixels. If not provided, we exclude anything equal to the
raster no data value.
If ``mean_only`` is ``False``, we return a ``namedtuple`` representing the
stats. All other attributes should be obvious and are consistent with
PostGIS (``min``, ``max``, ``std``, etc).
If ``mean_only`` is ``True``, we simply return a ``float`` or ``None``
representing the mean value of the matching pixels.
The ``exclude_nodata_value`` is consistent with ``ST_SummaryStats`` in that
if it's ``True`` (default) we only count non-nodata pixels (or those pixels
within ``bounds`` if defined). If it's ``False`` we return the count of all
pixels.
"""
def no_result(mean_only):
if mean_only:
return None
else:
return Summary(None, None, None, None, None, None)
try:
if geometry:
# If it's a string, assume WKT
if isinstance(geometry, six.string_types):
geometry = wkt.loads(geometry)
# If not already GeoJSON, assume it's a Shapely shape
if not isinstance(geometry, dict):
geojson = mapping(geometry)
else:
geojson = geometry
geometry = shape(geometry)
result, _ = mask(
raster, [geojson], crop=True, all_touched=all_touched,
)
pixels = result.data.flatten()
else:
pixels = raster.read(1).flatten()
except ValueError:
return no_result(mean_only)
raster_shape = raster_to_shape(raster)
    if geometry and not raster_shape.contains(geometry):
log.warning(
'Geometry {} is not fully contained by the source raster'.format(
geometry,
)
)
if bounds:
score_mask = numpy.logical_and(
numpy.greater_equal(pixels, bounds[0]),
numpy.less_equal(pixels, bounds[1]),
)
else:
        score_mask = numpy.not_equal(pixels, raster.nodata)
scored_pixels = numpy.extract(score_mask, pixels)
if len(scored_pixels):
if mean_only:
return scored_pixels.mean()
else:
if exclude_nodata_value:
count = len(scored_pixels)
else:
count = len(pixels)
return Summary(
count,
scored_pixels.sum(),
scored_pixels.mean(),
scored_pixels.min(),
scored_pixels.max(),
scored_pixels.std(),
)
else:
return no_result(mean_only) | [
"def",
"summary",
"(",
"raster",
",",
"geometry",
"=",
"None",
",",
"all_touched",
"=",
"False",
",",
"mean_only",
"=",
"False",
",",
"bounds",
"=",
"None",
",",
"exclude_nodata_value",
"=",
"True",
")",
":",
"def",
"no_result",
"(",
"mean_only",
")",
":",
"if",
"mean_only",
":",
"return",
"None",
"else",
":",
"return",
"Summary",
"(",
"None",
",",
"None",
",",
"None",
",",
"None",
",",
"None",
",",
"None",
")",
"try",
":",
"if",
"geometry",
":",
"# If it's a string, assume WKT",
"if",
"isinstance",
"(",
"geometry",
",",
"six",
".",
"string_types",
")",
":",
"geometry",
"=",
"wkt",
".",
"loads",
"(",
"geometry",
")",
"# If not already GeoJSON, assume it's a Shapely shape",
"if",
"not",
"isinstance",
"(",
"geometry",
",",
"dict",
")",
":",
"geojson",
"=",
"mapping",
"(",
"geometry",
")",
"else",
":",
"geojson",
"=",
"geometry",
"geometry",
"=",
"shape",
"(",
"geometry",
")",
"result",
",",
"_",
"=",
"mask",
"(",
"raster",
",",
"[",
"geojson",
"]",
",",
"crop",
"=",
"True",
",",
"all_touched",
"=",
"all_touched",
",",
")",
"pixels",
"=",
"result",
".",
"data",
".",
"flatten",
"(",
")",
"else",
":",
"pixels",
"=",
"raster",
".",
"read",
"(",
"1",
")",
".",
"flatten",
"(",
")",
"except",
"ValueError",
":",
"return",
"no_result",
"(",
"mean_only",
")",
"raster_shape",
"=",
"raster_to_shape",
"(",
"raster",
")",
"if",
"not",
"raster_shape",
".",
"contains",
"(",
"geometry",
")",
":",
"log",
".",
"warning",
"(",
"'Geometry {} is not fully contained by the source raster'",
".",
"format",
"(",
"geometry",
",",
")",
")",
"if",
"bounds",
":",
"score_mask",
"=",
"numpy",
".",
"logical_and",
"(",
"numpy",
".",
"greater_equal",
"(",
"pixels",
",",
"bounds",
"[",
"0",
"]",
")",
",",
"numpy",
".",
"less_equal",
"(",
"pixels",
",",
"bounds",
"[",
"1",
"]",
")",
",",
")",
"else",
":",
"score_mask",
"=",
"numpy",
".",
"not_equal",
"(",
"pixels",
",",
"raster",
".",
"nodata",
")",
",",
"scored_pixels",
"=",
"numpy",
".",
"extract",
"(",
"score_mask",
",",
"pixels",
")",
"if",
"len",
"(",
"scored_pixels",
")",
":",
"if",
"mean_only",
":",
"return",
"scored_pixels",
".",
"mean",
"(",
")",
"else",
":",
"if",
"exclude_nodata_value",
":",
"count",
"=",
"len",
"(",
"scored_pixels",
")",
"else",
":",
"count",
"=",
"len",
"(",
"pixels",
")",
"return",
"Summary",
"(",
"count",
",",
"scored_pixels",
".",
"sum",
"(",
")",
",",
"scored_pixels",
".",
"mean",
"(",
")",
",",
"scored_pixels",
".",
"min",
"(",
")",
",",
"scored_pixels",
".",
"max",
"(",
")",
",",
"scored_pixels",
".",
"std",
"(",
")",
",",
")",
"else",
":",
"return",
"no_result",
"(",
"mean_only",
")"
]
| Return ``ST_SummaryStats`` style stats for the given raster.
If ``geometry`` is provided, we mask the raster with the given geometry and
return the stats for the intersection. The parameter can be a GeoJSON-like
object, a WKT string, or a Shapely geometry.
If ``all_touched`` is set, we include every pixel that is touched by the
given geometry. If set to ``False``, we only include pixels that are
"mostly" inside the given geometry (the calculation is done by Rasterio).
If ``mean_only`` is ``True`` we only return the mean value of the pixels,
not the full set of stats.
If ``bounds`` is passed, it should be a two-tuple of (min, max) to use for
filtering raster pixels. If not provided, we exclude anything equal to the
raster no data value.
If ``mean_only`` is ``False``, we return a ``namedtuple`` representing the
stats. All other attributes should be obvious and are consistent with
PostGIS (``min``, ``max``, ``std``, etc).
If ``mean_only`` is ``True``, we simply return a ``float`` or ``None``
representing the mean value of the matching pixels.
The ``exclude_nodata_value`` is consistent with ``ST_SummaryStats`` in that
if it's ``True`` (default) we only count non-nodata pixels (or those pixels
within ``bounds`` if defined). If it's ``False`` we return the count of all
pixels. | [
"Return",
"ST_SummaryStats",
"style",
"stats",
"for",
"the",
"given",
"raster",
"."
]
| python | train | 35.62766 |
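A hedged call sketch for the function above; the raster path and WKT polygon are placeholders:

import rasterio

wkt_poly = 'POLYGON ((0 0, 1 0, 1 1, 0 1, 0 0))'    # placeholder geometry
with rasterio.open('scores.tif') as src:            # placeholder raster path
    stats = summary(src, geometry=wkt_poly)         # full Summary namedtuple
    mean = summary(src, geometry=wkt_poly, mean_only=True)
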
PmagPy/PmagPy | programs/demag_gui.py | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/programs/demag_gui.py#L7763-L7800 | def on_equalarea_specimen_select(self, event):
"""
        Get mouse position on double click, find the nearest interpretation
        to the mouse position, then select that interpretation
Parameters
----------
event : the wx Mouseevent for that click
Alters
------
current_fit
"""
if not self.specimen_EA_xdata or not self.specimen_EA_ydata:
return
pos = event.GetPosition()
width, height = self.canvas2.get_width_height()
pos[1] = height - pos[1]
xpick_data, ypick_data = pos
xdata_org = self.specimen_EA_xdata
ydata_org = self.specimen_EA_ydata
data_corrected = self.specimen_eqarea.transData.transform(
vstack([xdata_org, ydata_org]).T)
xdata, ydata = data_corrected.T
xdata = list(map(float, xdata))
ydata = list(map(float, ydata))
e = 4e0
index = None
for i, (x, y) in enumerate(zip(xdata, ydata)):
if 0 < sqrt((x-xpick_data)**2. + (y-ypick_data)**2.) < e:
index = i
break
if index != None:
self.fit_box.SetSelection(index)
self.draw_figure(self.s, True)
self.on_select_fit(event) | [
"def",
"on_equalarea_specimen_select",
"(",
"self",
",",
"event",
")",
":",
"if",
"not",
"self",
".",
"specimen_EA_xdata",
"or",
"not",
"self",
".",
"specimen_EA_ydata",
":",
"return",
"pos",
"=",
"event",
".",
"GetPosition",
"(",
")",
"width",
",",
"height",
"=",
"self",
".",
"canvas2",
".",
"get_width_height",
"(",
")",
"pos",
"[",
"1",
"]",
"=",
"height",
"-",
"pos",
"[",
"1",
"]",
"xpick_data",
",",
"ypick_data",
"=",
"pos",
"xdata_org",
"=",
"self",
".",
"specimen_EA_xdata",
"ydata_org",
"=",
"self",
".",
"specimen_EA_ydata",
"data_corrected",
"=",
"self",
".",
"specimen_eqarea",
".",
"transData",
".",
"transform",
"(",
"vstack",
"(",
"[",
"xdata_org",
",",
"ydata_org",
"]",
")",
".",
"T",
")",
"xdata",
",",
"ydata",
"=",
"data_corrected",
".",
"T",
"xdata",
"=",
"list",
"(",
"map",
"(",
"float",
",",
"xdata",
")",
")",
"ydata",
"=",
"list",
"(",
"map",
"(",
"float",
",",
"ydata",
")",
")",
"e",
"=",
"4e0",
"index",
"=",
"None",
"for",
"i",
",",
"(",
"x",
",",
"y",
")",
"in",
"enumerate",
"(",
"zip",
"(",
"xdata",
",",
"ydata",
")",
")",
":",
"if",
"0",
"<",
"sqrt",
"(",
"(",
"x",
"-",
"xpick_data",
")",
"**",
"2.",
"+",
"(",
"y",
"-",
"ypick_data",
")",
"**",
"2.",
")",
"<",
"e",
":",
"index",
"=",
"i",
"break",
"if",
"index",
"!=",
"None",
":",
"self",
".",
"fit_box",
".",
"SetSelection",
"(",
"index",
")",
"self",
".",
"draw_figure",
"(",
"self",
".",
"s",
",",
"True",
")",
"self",
".",
"on_select_fit",
"(",
"event",
")"
]
| Get mouse position on double click, find the nearest interpretation
to the mouse position, then select that interpretation
Parameters
----------
event : the wx Mouseevent for that click
Alters
------
current_fit | [
"Get",
"mouse",
"position",
"on",
"double",
"click",
"find",
"the",
"nearest",
"interpretation",
"to",
"the",
"mouse",
"position",
"then",
"select",
"that",
"interpretation"
]
| python | train | 32.815789 |
AndrewAnnex/SpiceyPy | spiceypy/spiceypy.py | https://github.com/AndrewAnnex/SpiceyPy/blob/fc20a9b9de68b58eed5b332f0c051fb343a6e335/spiceypy/spiceypy.py#L9069-L9088 | def ordc(item, inset):
"""
The function returns the ordinal position of any given item in a
character set. If the item does not appear in the set, the function
returns -1.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/ordc_c.html
:param item: An item to locate within a set.
:type item: str
:param inset: A set to search for a given item.
:type inset: SpiceCharCell
:return: the ordinal position of item within the set
:rtype: int
"""
assert isinstance(inset, stypes.SpiceCell)
assert inset.is_char()
assert isinstance(item, str)
item = stypes.stringToCharP(item)
return libspice.ordc_c(item, ctypes.byref(inset)) | [
"def",
"ordc",
"(",
"item",
",",
"inset",
")",
":",
"assert",
"isinstance",
"(",
"inset",
",",
"stypes",
".",
"SpiceCell",
")",
"assert",
"inset",
".",
"is_char",
"(",
")",
"assert",
"isinstance",
"(",
"item",
",",
"str",
")",
"item",
"=",
"stypes",
".",
"stringToCharP",
"(",
"item",
")",
"return",
"libspice",
".",
"ordc_c",
"(",
"item",
",",
"ctypes",
".",
"byref",
"(",
"inset",
")",
")"
]
| The function returns the ordinal position of any given item in a
character set. If the item does not appear in the set, the function
returns -1.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/ordc_c.html
:param item: An item to locate within a set.
:type item: str
:param inset: A set to search for a given item.
:type inset: SpiceCharCell
:return: the ordinal position of item within the set
:rtype: int | [
"The",
"function",
"returns",
"the",
"ordinal",
"position",
"of",
"any",
"given",
"item",
"in",
"a",
"character",
"set",
".",
"If",
"the",
"item",
"does",
"not",
"appear",
"in",
"the",
"set",
"the",
"function",
"returns",
"-",
"1",
"."
]
| python | train | 33.75 |
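A hedged usage sketch: it assumes the companion helpers SPICECHAR_CELL and insrtc from the same toolkit behave as in SpiceyPy's own tests, and the exact cell constructor may differ between versions:

import spiceypy as spice
import spiceypy.utils.support_types as stypes

cell = stypes.SPICECHAR_CELL(10, 10)   # room for 10 strings of length 10 (assumed signature)
for body in ("EARTH", "MARS", "VENUS"):
    spice.insrtc(body, cell)           # insrtc keeps the set sorted
print(spice.ordc("MARS", cell))        # -> 1 (EARTH=0, MARS=1, VENUS=2)
print(spice.ordc("PLUTO", cell))       # -> -1 (not in the set)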
asottile/reorder_python_imports | reorder_python_imports.py | https://github.com/asottile/reorder_python_imports/blob/bc7b5b2f0fde191c9d0121588ef9bbb79f8e5e21/reorder_python_imports.py#L148-L164 | def separate_comma_imports(partitions):
"""Turns `import a, b` into `import a` and `import b`"""
def _inner():
for partition in partitions:
if partition.code_type is CodeType.IMPORT:
import_obj = import_obj_from_str(partition.src)
if import_obj.has_multiple_imports:
for new_import_obj in import_obj.split_imports():
yield CodePartition(
CodeType.IMPORT, new_import_obj.to_text(),
)
else:
yield partition
else:
yield partition
return list(_inner()) | [
"def",
"separate_comma_imports",
"(",
"partitions",
")",
":",
"def",
"_inner",
"(",
")",
":",
"for",
"partition",
"in",
"partitions",
":",
"if",
"partition",
".",
"code_type",
"is",
"CodeType",
".",
"IMPORT",
":",
"import_obj",
"=",
"import_obj_from_str",
"(",
"partition",
".",
"src",
")",
"if",
"import_obj",
".",
"has_multiple_imports",
":",
"for",
"new_import_obj",
"in",
"import_obj",
".",
"split_imports",
"(",
")",
":",
"yield",
"CodePartition",
"(",
"CodeType",
".",
"IMPORT",
",",
"new_import_obj",
".",
"to_text",
"(",
")",
",",
")",
"else",
":",
"yield",
"partition",
"else",
":",
"yield",
"partition",
"return",
"list",
"(",
"_inner",
"(",
")",
")"
]
| Turns `import a, b` into `import a` and `import b` | [
"Turns",
"import",
"a",
"b",
"into",
"import",
"a",
"and",
"import",
"b"
]
| python | train | 38.647059 |
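The same splitting idea sketched with only the standard library. Here ast stands in for the module's import_obj_from_str/CodePartition machinery, and plain imports without "as" aliases are assumed:

import ast

def split_comma_import(src):
    node = ast.parse(src).body[0]
    if isinstance(node, ast.Import) and len(node.names) > 1:
        return ["import {}\n".format(alias.name) for alias in node.names]
    return [src]

print(split_comma_import("import a, b\n"))   # -> ['import a\n', 'import b\n']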
phoebe-project/phoebe2 | phoebe/backend/etvs.py | https://github.com/phoebe-project/phoebe2/blob/e64b8be683977064e2d55dd1b3ac400f64c3e379/phoebe/backend/etvs.py#L15-L53 | def crossing(b, component, time, dynamics_method='keplerian', ltte=True, tol=1e-4, maxiter=1000):
"""
tol in days
"""
def projected_separation_sq(time, b, dynamics_method, cind1, cind2, ltte=True):
"""
"""
#print "*** projected_separation_sq", time, dynamics_method, cind1, cind2, ltte
times = np.array([time])
if dynamics_method in ['nbody', 'rebound']:
# TODO: make sure that this takes systemic velocity and corrects positions and velocities (including ltte effects if enabled)
ts, xs, ys, zs, vxs, vys, vzs = dynamics.nbody.dynamics_from_bundle(b, times, compute=None, ltte=ltte)
elif dynamics_method=='bs':
ts, xs, ys, zs, vxs, vys, vzs = dynamics.nbody.dynamics_from_bundle_bs(b, times, compute=None, ltte=ltte)
elif dynamics_method=='keplerian':
# TODO: make sure that this takes systemic velocity and corrects positions and velocities (including ltte effects if enabled)
ts, xs, ys, zs, vxs, vys, vzs = dynamics.keplerian.dynamics_from_bundle(b, times, compute=None, ltte=ltte, return_euler=False)
else:
raise NotImplementedError
return (xs[cind2][0]-xs[cind1][0])**2 + (ys[cind2][0]-ys[cind1][0])**2
# TODO: optimize this by allowing to pass cind1 and cind2 directly (and fallback to this if they aren't)
starrefs = b.hierarchy.get_stars()
cind1 = starrefs.index(component)
cind2 = starrefs.index(b.hierarchy.get_sibling_of(component))
# TODO: provide options for tol and maxiter (in the frontend computeoptionsp)?
return newton(projected_separation_sq, x0=time, args=(b, dynamics_method, cind1, cind2, ltte), tol=tol, maxiter=maxiter) | [
"def",
"crossing",
"(",
"b",
",",
"component",
",",
"time",
",",
"dynamics_method",
"=",
"'keplerian'",
",",
"ltte",
"=",
"True",
",",
"tol",
"=",
"1e-4",
",",
"maxiter",
"=",
"1000",
")",
":",
"def",
"projected_separation_sq",
"(",
"time",
",",
"b",
",",
"dynamics_method",
",",
"cind1",
",",
"cind2",
",",
"ltte",
"=",
"True",
")",
":",
"\"\"\"\n \"\"\"",
"#print \"*** projected_separation_sq\", time, dynamics_method, cind1, cind2, ltte",
"times",
"=",
"np",
".",
"array",
"(",
"[",
"time",
"]",
")",
"if",
"dynamics_method",
"in",
"[",
"'nbody'",
",",
"'rebound'",
"]",
":",
"# TODO: make sure that this takes systemic velocity and corrects positions and velocities (including ltte effects if enabled)",
"ts",
",",
"xs",
",",
"ys",
",",
"zs",
",",
"vxs",
",",
"vys",
",",
"vzs",
"=",
"dynamics",
".",
"nbody",
".",
"dynamics_from_bundle",
"(",
"b",
",",
"times",
",",
"compute",
"=",
"None",
",",
"ltte",
"=",
"ltte",
")",
"elif",
"dynamics_method",
"==",
"'bs'",
":",
"ts",
",",
"xs",
",",
"ys",
",",
"zs",
",",
"vxs",
",",
"vys",
",",
"vzs",
"=",
"dynamics",
".",
"nbody",
".",
"dynamics_from_bundle_bs",
"(",
"b",
",",
"times",
",",
"compute",
",",
"ltte",
"=",
"ltte",
")",
"elif",
"dynamics_method",
"==",
"'keplerian'",
":",
"# TODO: make sure that this takes systemic velocity and corrects positions and velocities (including ltte effects if enabled)",
"ts",
",",
"xs",
",",
"ys",
",",
"zs",
",",
"vxs",
",",
"vys",
",",
"vzs",
"=",
"dynamics",
".",
"keplerian",
".",
"dynamics_from_bundle",
"(",
"b",
",",
"times",
",",
"compute",
"=",
"None",
",",
"ltte",
"=",
"ltte",
",",
"return_euler",
"=",
"False",
")",
"else",
":",
"raise",
"NotImplementedError",
"return",
"(",
"xs",
"[",
"cind2",
"]",
"[",
"0",
"]",
"-",
"xs",
"[",
"cind1",
"]",
"[",
"0",
"]",
")",
"**",
"2",
"+",
"(",
"ys",
"[",
"cind2",
"]",
"[",
"0",
"]",
"-",
"ys",
"[",
"cind1",
"]",
"[",
"0",
"]",
")",
"**",
"2",
"# TODO: optimize this by allowing to pass cind1 and cind2 directly (and fallback to this if they aren't)",
"starrefs",
"=",
"b",
".",
"hierarchy",
".",
"get_stars",
"(",
")",
"cind1",
"=",
"starrefs",
".",
"index",
"(",
"component",
")",
"cind2",
"=",
"starrefs",
".",
"index",
"(",
"b",
".",
"hierarchy",
".",
"get_sibling_of",
"(",
"component",
")",
")",
"# TODO: provide options for tol and maxiter (in the frontend computeoptionsp)?",
"return",
"newton",
"(",
"projected_separation_sq",
",",
"x0",
"=",
"time",
",",
"args",
"=",
"(",
"b",
",",
"dynamics_method",
",",
"cind1",
",",
"cind2",
",",
"ltte",
")",
",",
"tol",
"=",
"tol",
",",
"maxiter",
"=",
"maxiter",
")"
]
| tol in days | [
"tol",
"in",
"days"
]
| python | train | 43.538462 |
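The root-finding pattern in isolation: scipy.optimize.newton driven by a projected-separation function. A toy circular orbit stands in for the bundle dynamics here (the original feeds newton the squared separation; a signed separation is used in this sketch so the root is simple):

import numpy as np
from scipy.optimize import newton

def projected_x_separation(t, period=1.0):
    # Toy circular orbit: x-separation changes sign at conjunction (t = 0.25, 0.75, ...)
    return np.cos(2 * np.pi * t / period)

t_cross = newton(projected_x_separation, x0=0.2, tol=1e-8, maxiter=1000)
print(round(t_cross, 6))   # -> 0.25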
funilrys/PyFunceble | PyFunceble/prints.py | https://github.com/funilrys/PyFunceble/blob/cdf69cbde120199171f7158e1c33635753e6e2f5/PyFunceble/prints.py#L484-L506 | def _size_from_header(cls, header):
"""
Get the size of each column from the header.
:param header:
The header template we have to get the size from.
:type header: dict
:return: The maximal size of the each data to print.
:rtype: list
"""
# We initiate the result we are going to return.
result = []
for data in header:
# We loop through the header.
# And we append the size to our result.
result.append(header[data])
# We return the result.
return result | [
"def",
"_size_from_header",
"(",
"cls",
",",
"header",
")",
":",
"# We initiate the result we are going to return.",
"result",
"=",
"[",
"]",
"for",
"data",
"in",
"header",
":",
"# We lopp through the header.",
"# And we append the size to our result.",
"result",
".",
"append",
"(",
"header",
"[",
"data",
"]",
")",
"# We return the result.",
"return",
"result"
]
| Get the size of each column from the header.
:param header:
The header template we have to get the size from.
:type header: dict
:return: The maximal size of each data column to print.
:rtype: list | [
"Get",
"the",
"size",
"of",
"each",
"columns",
"from",
"the",
"header",
"."
]
| python | test | 25.391304 |
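Minimal usage sketch with a hypothetical header template; the loop above is equivalent to a single comprehension over the dict:

header = {"Domain": 100, "Status": 11, "Expiration Date": 17}   # hypothetical template
sizes = [header[field] for field in header]   # what _size_from_header computes
print(sizes)   # -> [100, 11, 17]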
awslabs/aws-sam-cli | samcli/local/lambda_service/lambda_error_responses.py | https://github.com/awslabs/aws-sam-cli/blob/c05af5e7378c6f05f7d82ad3f0bca17204177db6/samcli/local/lambda_service/lambda_error_responses.py#L158-L179 | def generic_path_not_found(*args):
"""
Creates a Lambda Service Generic PathNotFound Response
Parameters
----------
args : list
List of arguments Flask passes to the method
Returns
-------
Flask.Response
A response object representing the GenericPathNotFound Error
"""
exception_tuple = LambdaErrorResponses.PathNotFoundException
return BaseLocalService.service_response(
LambdaErrorResponses._construct_error_response_body(
LambdaErrorResponses.LOCAL_SERVICE_ERROR, "PathNotFoundException"),
LambdaErrorResponses._construct_headers(exception_tuple[0]),
exception_tuple[1]
) | [
"def",
"generic_path_not_found",
"(",
"*",
"args",
")",
":",
"exception_tuple",
"=",
"LambdaErrorResponses",
".",
"PathNotFoundException",
"return",
"BaseLocalService",
".",
"service_response",
"(",
"LambdaErrorResponses",
".",
"_construct_error_response_body",
"(",
"LambdaErrorResponses",
".",
"LOCAL_SERVICE_ERROR",
",",
"\"PathNotFoundException\"",
")",
",",
"LambdaErrorResponses",
".",
"_construct_headers",
"(",
"exception_tuple",
"[",
"0",
"]",
")",
",",
"exception_tuple",
"[",
"1",
"]",
")"
]
| Creates a Lambda Service Generic PathNotFound Response
Parameters
----------
args : list
List of arguments Flask passes to the method
Returns
-------
Flask.Response
A response object representing the GenericPathNotFound Error | [
"Creates",
"a",
"Lambda",
"Service",
"Generic",
"PathNotFound",
"Response"
]
| python | train | 33 |
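A stand-alone sketch of the same response pattern with plain Flask (assumed installed); the service's BaseLocalService helper and exception table are replaced by hypothetical literals, and the header/body field names are assumptions:

import json
from flask import Response

def error_response(error_type, message, status_code=404):
    body = json.dumps({"Type": error_type, "Message": message})   # assumed body shape
    headers = {"x-amzn-errortype": error_type,                    # assumed header name
               "Content-Type": "application/json"}
    return Response(body, status=status_code, headers=headers)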
LuminosoInsight/luminoso-api-client-python | luminoso_api/v5_client.py | https://github.com/LuminosoInsight/luminoso-api-client-python/blob/3bedf2a454aee39214c11fbf556ead3eecc27881/luminoso_api/v5_client.py#L310-L338 | def wait_for_build(self, interval=5, path=None):
"""
A convenience method designed to inform you when a project build has
completed. It polls the API every `interval` seconds until there is
not a build running. At that point, it returns the "last_build_info"
field of the project record if the build succeeded, and raises a
LuminosoError with the field as its message if the build failed.
If a `path` is not specified, this method will assume that its URL is
the URL for the project. Otherwise, it will use the specified path
(which should be "/projects/<project_id>/").
"""
path = path or ''
start = time.time()
next_log = 0
while True:
response = self.get(path)['last_build_info']
if not response:
raise ValueError('This project is not building!')
if response['stop_time']:
if response['success']:
return response
else:
raise LuminosoError(response)
elapsed = time.time() - start
if elapsed > next_log:
logger.info('Still waiting (%d seconds elapsed).', next_log)
next_log += 120
time.sleep(interval) | [
"def",
"wait_for_build",
"(",
"self",
",",
"interval",
"=",
"5",
",",
"path",
"=",
"None",
")",
":",
"path",
"=",
"path",
"or",
"''",
"start",
"=",
"time",
".",
"time",
"(",
")",
"next_log",
"=",
"0",
"while",
"True",
":",
"response",
"=",
"self",
".",
"get",
"(",
"path",
")",
"[",
"'last_build_info'",
"]",
"if",
"not",
"response",
":",
"raise",
"ValueError",
"(",
"'This project is not building!'",
")",
"if",
"response",
"[",
"'stop_time'",
"]",
":",
"if",
"response",
"[",
"'success'",
"]",
":",
"return",
"response",
"else",
":",
"raise",
"LuminosoError",
"(",
"response",
")",
"elapsed",
"=",
"time",
".",
"time",
"(",
")",
"-",
"start",
"if",
"elapsed",
">",
"next_log",
":",
"logger",
".",
"info",
"(",
"'Still waiting (%d seconds elapsed).'",
",",
"next_log",
")",
"next_log",
"+=",
"120",
"time",
".",
"sleep",
"(",
"interval",
")"
]
| A convenience method designed to inform you when a project build has
completed. It polls the API every `interval` seconds until there is
not a build running. At that point, it returns the "last_build_info"
field of the project record if the build succeeded, and raises a
LuminosoError with the field as its message if the build failed.
If a `path` is not specified, this method will assume that its URL is
the URL for the project. Otherwise, it will use the specified path
(which should be "/projects/<project_id>/"). | [
"A",
"convenience",
"method",
"designed",
"to",
"inform",
"you",
"when",
"a",
"project",
"build",
"has",
"completed",
".",
"It",
"polls",
"the",
"API",
"every",
"interval",
"seconds",
"until",
"there",
"is",
"not",
"a",
"build",
"running",
".",
"At",
"that",
"point",
"it",
"returns",
"the",
"last_build_info",
"field",
"of",
"the",
"project",
"record",
"if",
"the",
"build",
"succeeded",
"and",
"raises",
"a",
"LuminosoError",
"with",
"the",
"field",
"as",
"its",
"message",
"if",
"the",
"build",
"failed",
"."
]
| python | test | 44.206897 |
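The poll-until-done shape extracted into a generic helper (a sketch; check is any callable returning the finished result, or a falsy value while work is still running):

import logging
import time

logger = logging.getLogger(__name__)

def wait_until(check, interval=5, log_every=120):
    start, next_log = time.time(), 0
    while True:
        result = check()
        if result:
            return result
        if time.time() - start > next_log:
            logger.info('Still waiting (%d seconds elapsed).', next_log)
            next_log += log_every
        time.sleep(interval)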
bioidiap/gridtk | gridtk/script/jman.py | https://github.com/bioidiap/gridtk/blob/9e3291b8b50388682908927231b2730db1da147d/gridtk/script/jman.py#L226-L233 | def delete(args):
"""Deletes the jobs from the job manager. If the jobs are still running in the grid, they are stopped."""
jm = setup(args)
# first, stop the jobs if they are running in the grid
if not args.local and 'executing' in args.status:
stop(args)
# then, delete them from the database
jm.delete(job_ids=get_ids(args.job_ids), array_ids=get_ids(args.array_ids), delete_logs=not args.keep_logs, delete_log_dir=not args.keep_log_dir, status=args.status) | [
"def",
"delete",
"(",
"args",
")",
":",
"jm",
"=",
"setup",
"(",
"args",
")",
"# first, stop the jobs if they are running in the grid",
"if",
"not",
"args",
".",
"local",
"and",
"'executing'",
"in",
"args",
".",
"status",
":",
"stop",
"(",
"args",
")",
"# then, delete them from the database",
"jm",
".",
"delete",
"(",
"job_ids",
"=",
"get_ids",
"(",
"args",
".",
"job_ids",
")",
",",
"array_ids",
"=",
"get_ids",
"(",
"args",
".",
"array_ids",
")",
",",
"delete_logs",
"=",
"not",
"args",
".",
"keep_logs",
",",
"delete_log_dir",
"=",
"not",
"args",
".",
"keep_log_dir",
",",
"status",
"=",
"args",
".",
"status",
")"
]
| Deletes the jobs from the job manager. If the jobs are still running in the grid, they are stopped. | [
"Deletes",
"the",
"jobs",
"from",
"the",
"job",
"manager",
".",
"If",
"the",
"jobs",
"are",
"still",
"running",
"in",
"the",
"grid",
"they",
"are",
"stopped",
"."
]
| python | train | 58.625 |
DLR-RM/RAFCON | source/rafcon/gui/controllers/state_machines_editor.py | https://github.com/DLR-RM/RAFCON/blob/24942ef1a904531f49ab8830a1dbb604441be498/source/rafcon/gui/controllers/state_machines_editor.py#L306-L327 | def update_state_machine_tab_label(self, state_machine_m):
""" Updates tab label if needed because system path, root state name or marked_dirty flag changed
:param StateMachineModel state_machine_m: State machine model that has changed
:return:
"""
sm_id = state_machine_m.state_machine.state_machine_id
if sm_id in self.tabs:
sm = state_machine_m.state_machine
# create new tab label if tab label properties are not up to date
if not self.tabs[sm_id]['marked_dirty'] == sm.marked_dirty or \
not self.tabs[sm_id]['file_system_path'] == sm.file_system_path or \
not self.tabs[sm_id]['root_state_name'] == sm.root_state.name:
label = self.view["notebook"].get_tab_label(self.tabs[sm_id]["page"]).get_child().get_children()[0]
set_tab_label_texts(label, state_machine_m, unsaved_changes=sm.marked_dirty)
self.tabs[sm_id]['file_system_path'] = sm.file_system_path
self.tabs[sm_id]['marked_dirty'] = sm.marked_dirty
self.tabs[sm_id]['root_state_name'] = sm.root_state.name
else:
logger.warning("State machine '{0}' tab label can not be updated there is no tab.".format(sm_id)) | [
"def",
"update_state_machine_tab_label",
"(",
"self",
",",
"state_machine_m",
")",
":",
"sm_id",
"=",
"state_machine_m",
".",
"state_machine",
".",
"state_machine_id",
"if",
"sm_id",
"in",
"self",
".",
"tabs",
":",
"sm",
"=",
"state_machine_m",
".",
"state_machine",
"# create new tab label if tab label properties are not up to date",
"if",
"not",
"self",
".",
"tabs",
"[",
"sm_id",
"]",
"[",
"'marked_dirty'",
"]",
"==",
"sm",
".",
"marked_dirty",
"or",
"not",
"self",
".",
"tabs",
"[",
"sm_id",
"]",
"[",
"'file_system_path'",
"]",
"==",
"sm",
".",
"file_system_path",
"or",
"not",
"self",
".",
"tabs",
"[",
"sm_id",
"]",
"[",
"'root_state_name'",
"]",
"==",
"sm",
".",
"root_state",
".",
"name",
":",
"label",
"=",
"self",
".",
"view",
"[",
"\"notebook\"",
"]",
".",
"get_tab_label",
"(",
"self",
".",
"tabs",
"[",
"sm_id",
"]",
"[",
"\"page\"",
"]",
")",
".",
"get_child",
"(",
")",
".",
"get_children",
"(",
")",
"[",
"0",
"]",
"set_tab_label_texts",
"(",
"label",
",",
"state_machine_m",
",",
"unsaved_changes",
"=",
"sm",
".",
"marked_dirty",
")",
"self",
".",
"tabs",
"[",
"sm_id",
"]",
"[",
"'file_system_path'",
"]",
"=",
"sm",
".",
"file_system_path",
"self",
".",
"tabs",
"[",
"sm_id",
"]",
"[",
"'marked_dirty'",
"]",
"=",
"sm",
".",
"marked_dirty",
"self",
".",
"tabs",
"[",
"sm_id",
"]",
"[",
"'root_state_name'",
"]",
"=",
"sm",
".",
"root_state",
".",
"name",
"else",
":",
"logger",
".",
"warning",
"(",
"\"State machine '{0}' tab label can not be updated there is no tab.\"",
".",
"format",
"(",
"sm_id",
")",
")"
]
| Updates tab label if needed because system path, root state name or marked_dirty flag changed
:param StateMachineModel state_machine_m: State machine model that has changed
:return: | [
"Updates",
"tab",
"label",
"if",
"needed",
"because",
"system",
"path",
"root",
"state",
"name",
"or",
"marked_dirty",
"flag",
"changed"
]
| python | train | 58.045455 |
EventTeam/beliefs | src/beliefs/beliefstate.py | https://github.com/EventTeam/beliefs/blob/c07d22b61bebeede74a72800030dde770bf64208/src/beliefs/beliefstate.py#L238-L255 | def get_paths_for_attribute_set(self, keys):
"""
Given a list/set of keys (or one key), returns the parts that have
all of the keys in the list.
Because on_targets=True, this DOES NOT WORK WITH TOP LEVEL PROPERTIES,
only those of targets.
These paths are not pointers to the objects themselves, but tuples of
attribute names that allow us to (attempt to) look up that object in any
belief state.
"""
if not isinstance(keys, (list, set)):
keys = [keys]
has_all_keys = lambda name, structure: \
all(map(lambda k: k in structure, keys))
return self.find_path(has_all_keys, on_targets=True) | [
"def",
"get_paths_for_attribute_set",
"(",
"self",
",",
"keys",
")",
":",
"if",
"not",
"isinstance",
"(",
"keys",
",",
"(",
"list",
",",
"set",
")",
")",
":",
"keys",
"=",
"[",
"keys",
"]",
"has_all_keys",
"=",
"lambda",
"name",
",",
"structure",
":",
"all",
"(",
"map",
"(",
"lambda",
"k",
":",
"k",
"in",
"structure",
",",
"keys",
")",
")",
"return",
"self",
".",
"find_path",
"(",
"has_all_keys",
",",
"on_targets",
"=",
"True",
")"
]
| Given a list/set of keys (or one key), returns the parts that have
all of the keys in the list.
Because on_targets=True, this DOES NOT WORK WITH TOP LEVEL PROPERTIES,
only those of targets.
These paths are not pointers to the objects themselves, but tuples of
attribute names that allow us to (attempt to) look up that object in any
belief state. | [
"Given",
"a",
"list",
"/",
"set",
"of",
"keys",
"(",
"or",
"one",
"key",
")",
"returns",
"the",
"parts",
"that",
"have",
"all",
"of",
"the",
"keys",
"in",
"the",
"list",
"."
]
| python | train | 38.666667 |
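The key predicate in isolation: has_all_keys keeps only structures that contain every requested key.

keys = ["color", "size"]
has_all_keys = lambda name, structure: all(map(lambda k: k in structure, keys))

print(has_all_keys("part1", {"color": "red", "size": 3}))   # -> True
print(has_all_keys("part2", {"color": "red"}))              # -> False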
datajoint/datajoint-python | datajoint/connection.py | https://github.com/datajoint/datajoint-python/blob/4f29bb154a7ed2b8b64b4d3a9c8be4c16b39621c/datajoint/connection.py#L178-L183 | def in_transaction(self):
"""
:return: True if there is an open transaction.
"""
self._in_transaction = self._in_transaction and self.is_connected
return self._in_transaction | [
"def",
"in_transaction",
"(",
"self",
")",
":",
"self",
".",
"_in_transaction",
"=",
"self",
".",
"_in_transaction",
"and",
"self",
".",
"is_connected",
"return",
"self",
".",
"_in_transaction"
]
| :return: True if there is an open transaction. | [
":",
"return",
":",
"True",
"if",
"there",
"is",
"an",
"open",
"transaction",
"."
]
| python | train | 34.833333 |
ihgazni2/elist | elist/elist.py | https://github.com/ihgazni2/elist/blob/8c07b5029bda34ead60ce10335ceb145f209263c/elist/elist.py#L2008-L2021 | def indexes_all(ol,value):
'''
from elist.elist import *
ol = [1,'a',3,'a',4,'a',5]
indexes_all(ol,'a')
'''
length = ol.__len__()
indexes =[]
for i in range(0,length):
if(value == ol[i]):
indexes.append(i)
else:
pass
return(indexes) | [
"def",
"indexes_all",
"(",
"ol",
",",
"value",
")",
":",
"length",
"=",
"ol",
".",
"__len__",
"(",
")",
"indexes",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"length",
")",
":",
"if",
"(",
"value",
"==",
"ol",
"[",
"i",
"]",
")",
":",
"indexes",
".",
"append",
"(",
"i",
")",
"else",
":",
"pass",
"return",
"(",
"indexes",
")"
]
| from elist.elist import *
ol = [1,'a',3,'a',4,'a',5]
indexes_all(ol,'a') | [
"from",
"elist",
".",
"elist",
"import",
"*",
"ol",
"=",
"[",
"1",
"a",
"3",
"a",
"4",
"a",
"5",
"]",
"indexes_all",
"(",
"ol",
"a",
")"
]
| python | valid | 21.928571 |
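An equivalent one-liner for the docstring's example, with enumerate replacing the manual index loop:

ol = [1, 'a', 3, 'a', 4, 'a', 5]
print([i for i, x in enumerate(ol) if x == 'a'])   # -> [1, 3, 5]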
ProjetPP/PPP-Core | ppp_core/__init__.py | https://github.com/ProjetPP/PPP-Core/blob/49ee5b16325aa7134e2e423cf75e7b2609df96a0/ppp_core/__init__.py#L6-L9 | def app(environ, start_response):
"""Function called by the WSGI server."""
r = HttpRequestHandler(environ, start_response, Router).dispatch()
return r | [
"def",
"app",
"(",
"environ",
",",
"start_response",
")",
":",
"r",
"=",
"HttpRequestHandler",
"(",
"environ",
",",
"start_response",
",",
"Router",
")",
".",
"dispatch",
"(",
")",
"return",
"r"
]
| Function called by the WSGI server. | [
"Function",
"called",
"by",
"the",
"WSGI",
"server",
"."
]
| python | train | 40 |
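Any callable with this (environ, start_response) signature can be served with the standard library; a dummy app is used in this sketch so it stays self-contained:

from wsgiref.simple_server import make_server

def app(environ, start_response):
    start_response('200 OK', [('Content-Type', 'text/plain')])
    return [b'ok']

server = make_server('localhost', 0, app)   # 0 = pick any free port
# server.serve_forever()   # blocks; left commented so the sketch terminates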
brosner/django-timezones | timezones/fields.py | https://github.com/brosner/django-timezones/blob/43b437c39533e1832562a2c69247b89ae1af169e/timezones/fields.py#L98-L107 | def get_db_prep_lookup(self, lookup_type, value, connection=None, prepared=None):
"""
Returns field's value prepared for database lookup.
"""
## convert to settings.TIME_ZONE
if value.tzinfo is None:
value = default_tz.localize(value)
else:
value = value.astimezone(default_tz)
return super(LocalizedDateTimeField, self).get_db_prep_lookup(lookup_type, value, connection=connection, prepared=prepared) | [
"def",
"get_db_prep_lookup",
"(",
"self",
",",
"lookup_type",
",",
"value",
",",
"connection",
"=",
"None",
",",
"prepared",
"=",
"None",
")",
":",
"## convert to settings.TIME_ZONE",
"if",
"value",
".",
"tzinfo",
"is",
"None",
":",
"value",
"=",
"default_tz",
".",
"localize",
"(",
"value",
")",
"else",
":",
"value",
"=",
"value",
".",
"astimezone",
"(",
"default_tz",
")",
"return",
"super",
"(",
"LocalizedDateTimeField",
",",
"self",
")",
".",
"get_db_prep_lookup",
"(",
"lookup_type",
",",
"value",
",",
"connection",
"=",
"connection",
",",
"prepared",
"=",
"prepared",
")"
]
| Returns field's value prepared for database lookup. | [
"Returns",
"field",
"s",
"value",
"prepared",
"for",
"database",
"lookup",
"."
]
| python | train | 47.2 |
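Why the branch matters, shown with pytz (assumed to back default_tz): naive values get the zone attached, aware values get converted into it.

from datetime import datetime
import pytz

default_tz = pytz.timezone("Europe/Berlin")

naive = datetime(2024, 1, 1, 12, 0)
aware = pytz.utc.localize(datetime(2024, 1, 1, 12, 0))

print(default_tz.localize(naive))     # 2024-01-01 12:00:00+01:00 (zone attached)
print(aware.astimezone(default_tz))   # 2024-01-01 13:00:00+01:00 (converted)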
PMEAL/OpenPNM | openpnm/models/phases/surface_tension.py | https://github.com/PMEAL/OpenPNM/blob/0547b5724ffedc0a593aae48639d36fe10e0baed/openpnm/models/phases/surface_tension.py#L94-L128 | def guggenheim_katayama(target, K2, n, temperature='pore.temperature',
critical_temperature='pore.critical_temperature',
critical_pressure='pore.critical_pressure'):
r"""
Missing description
Parameters
----------
target : OpenPNM Object
The object for which these values are being calculated. This
controls the length of the calculated array, and also provides
access to other necessary thermofluid properties.
K2 : scalar
Fluid specific constant
n : scalar
Fluid specific constant
temperature : string
The dictionary key containing the temperature values (K)
critical_temperature : string
The dictionary key containing the critical temperature values (K)
critical_pressure : string
The dictionary key containing the critical pressure values (Pa)
"""
T = target[temperature]
Pc = target[critical_pressure]
Tc = target[critical_temperature]
sigma_o = K2*Tc**(1/3)*Pc**(2/3)
value = sigma_o*(1-T/Tc)**n
return value | [
"def",
"guggenheim_katayama",
"(",
"target",
",",
"K2",
",",
"n",
",",
"temperature",
"=",
"'pore.temperature'",
",",
"critical_temperature",
"=",
"'pore.critical_temperature'",
",",
"critical_pressure",
"=",
"'pore.critical_pressure'",
")",
":",
"T",
"=",
"target",
"[",
"temperature",
"]",
"Pc",
"=",
"target",
"[",
"critical_pressure",
"]",
"Tc",
"=",
"target",
"[",
"critical_temperature",
"]",
"sigma_o",
"=",
"K2",
"*",
"Tc",
"**",
"(",
"1",
"/",
"3",
")",
"*",
"Pc",
"**",
"(",
"2",
"/",
"3",
")",
"value",
"=",
"sigma_o",
"*",
"(",
"1",
"-",
"T",
"/",
"Tc",
")",
"**",
"n",
"return",
"value"
]
| r"""
Missing description
Parameters
----------
target : OpenPNM Object
The object for which these values are being calculated. This
controls the length of the calculated array, and also provides
access to other necessary thermofluid properties.
K2 : scalar
Fluid specific constant
n : scalar
Fluid specific constant
temperature : string
The dictionary key containing the temperature values (K)
critical_temperature : string
The dictionary key containing the critical temperature values (K)
critical_pressure : string
The dictionary key containing the critical pressure values (Pa) | [
"r",
"Missing",
"description"
]
| python | train | 30.457143 |
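A numeric sketch of the correlation sigma = K2 * Tc**(1/3) * Pc**(2/3) * (1 - T/Tc)**n. The fluid constants below are illustrative stand-ins, not recommended values; n = 11/9 is the exponent commonly quoted for Guggenheim-Katayama:

Tc = 647.1        # K, critical temperature (water-like magnitude)
Pc = 2.2064e7     # Pa, critical pressure
K2 = 4.4e-7       # illustrative fluid-specific constant
n = 11.0 / 9.0    # commonly used Guggenheim exponent

T = 300.0
sigma_o = K2 * Tc ** (1 / 3) * Pc ** (2 / 3)
sigma = sigma_o * (1 - T / Tc) ** n
print(sigma)      # surface tension in N/m for these made-up constants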
pyinvoke/invocations | invocations/docs.py | https://github.com/pyinvoke/invocations/blob/bbf1b319bd1536817d5301ceb9eeb2f31830e5dc/invocations/docs.py#L173-L215 | def watch_docs(c):
"""
Watch both doc trees & rebuild them if files change.
This includes e.g. rebuilding the API docs if the source code changes;
rebuilding the WWW docs if the README changes; etc.
Reuses the configuration values ``packaging.package`` or ``tests.package``
(the former winning over the latter if both defined) when determining which
source directory to scan for API doc updates.
"""
# TODO: break back down into generic single-site version, then create split
# tasks as with docs/www above. Probably wants invoke#63.
# NOTE: 'www'/'docs' refer to the module level sub-collections. meh.
# Readme & WWW triggers WWW
www_c = Context(config=c.config.clone())
www_c.update(**www.configuration())
www_handler = make_handler(
ctx=www_c,
task_=www["build"],
regexes=[r"\./README.rst", r"\./sites/www"],
ignore_regexes=[r".*/\..*\.swp", r"\./sites/www/_build"],
)
# Code and docs trigger API
docs_c = Context(config=c.config.clone())
docs_c.update(**docs.configuration())
regexes = [r"\./sites/docs"]
package = c.get("packaging", {}).get("package", None)
if package is None:
package = c.get("tests", {}).get("package", None)
if package:
regexes.append(r"\./{}/".format(package))
api_handler = make_handler(
ctx=docs_c,
task_=docs["build"],
regexes=regexes,
ignore_regexes=[r".*/\..*\.swp", r"\./sites/docs/_build"],
)
observe(www_handler, api_handler) | [
"def",
"watch_docs",
"(",
"c",
")",
":",
"# TODO: break back down into generic single-site version, then create split",
"# tasks as with docs/www above. Probably wants invoke#63.",
"# NOTE: 'www'/'docs' refer to the module level sub-collections. meh.",
"# Readme & WWW triggers WWW",
"www_c",
"=",
"Context",
"(",
"config",
"=",
"c",
".",
"config",
".",
"clone",
"(",
")",
")",
"www_c",
".",
"update",
"(",
"*",
"*",
"www",
".",
"configuration",
"(",
")",
")",
"www_handler",
"=",
"make_handler",
"(",
"ctx",
"=",
"www_c",
",",
"task_",
"=",
"www",
"[",
"\"build\"",
"]",
",",
"regexes",
"=",
"[",
"r\"\\./README.rst\"",
",",
"r\"\\./sites/www\"",
"]",
",",
"ignore_regexes",
"=",
"[",
"r\".*/\\..*\\.swp\"",
",",
"r\"\\./sites/www/_build\"",
"]",
",",
")",
"# Code and docs trigger API",
"docs_c",
"=",
"Context",
"(",
"config",
"=",
"c",
".",
"config",
".",
"clone",
"(",
")",
")",
"docs_c",
".",
"update",
"(",
"*",
"*",
"docs",
".",
"configuration",
"(",
")",
")",
"regexes",
"=",
"[",
"r\"\\./sites/docs\"",
"]",
"package",
"=",
"c",
".",
"get",
"(",
"\"packaging\"",
",",
"{",
"}",
")",
".",
"get",
"(",
"\"package\"",
",",
"None",
")",
"if",
"package",
"is",
"None",
":",
"package",
"=",
"c",
".",
"get",
"(",
"\"tests\"",
",",
"{",
"}",
")",
".",
"get",
"(",
"\"package\"",
",",
"None",
")",
"if",
"package",
":",
"regexes",
".",
"append",
"(",
"r\"\\./{}/\"",
".",
"format",
"(",
"package",
")",
")",
"api_handler",
"=",
"make_handler",
"(",
"ctx",
"=",
"docs_c",
",",
"task_",
"=",
"docs",
"[",
"\"build\"",
"]",
",",
"regexes",
"=",
"regexes",
",",
"ignore_regexes",
"=",
"[",
"r\".*/\\..*\\.swp\"",
",",
"r\"\\./sites/docs/_build\"",
"]",
",",
")",
"observe",
"(",
"www_handler",
",",
"api_handler",
")"
]
| Watch both doc trees & rebuild them if files change.
This includes e.g. rebuilding the API docs if the source code changes;
rebuilding the WWW docs if the README changes; etc.
Reuses the configuration values ``packaging.package`` or ``tests.package``
(the former winning over the latter if both defined) when determining which
source directory to scan for API doc updates. | [
"Watch",
"both",
"doc",
"trees",
"&",
"rebuild",
"them",
"if",
"files",
"change",
"."
]
| python | train | 35.046512 |
django-fluent/django-fluent-blogs | fluent_blogs/appsettings.py | https://github.com/django-fluent/django-fluent-blogs/blob/86b148549a010eaca9a2ea987fe43be250e06c50/fluent_blogs/appsettings.py#L62-L73 | def get_language_settings(language_code, site_id=None):
"""
Return the language settings for the current site
"""
if site_id is None:
site_id = getattr(settings, 'SITE_ID', None)
for lang_dict in FLUENT_BLOGS_LANGUAGES.get(site_id, ()):
if lang_dict['code'] == language_code:
return lang_dict
return FLUENT_BLOGS_LANGUAGES['default'] | [
"def",
"get_language_settings",
"(",
"language_code",
",",
"site_id",
"=",
"None",
")",
":",
"if",
"site_id",
"is",
"None",
":",
"site_id",
"=",
"getattr",
"(",
"settings",
",",
"'SITE_ID'",
",",
"None",
")",
"for",
"lang_dict",
"in",
"FLUENT_BLOGS_LANGUAGES",
".",
"get",
"(",
"site_id",
",",
"(",
")",
")",
":",
"if",
"lang_dict",
"[",
"'code'",
"]",
"==",
"language_code",
":",
"return",
"lang_dict",
"return",
"FLUENT_BLOGS_LANGUAGES",
"[",
"'default'",
"]"
]
| Return the language settings for the current site | [
"Return",
"the",
"language",
"settings",
"for",
"the",
"current",
"site"
]
| python | train | 31.333333 |
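The setting's shape, inferred from the lookup above (values hypothetical): a dict keyed by SITE_ID whose values are sequences of language dicts, plus a 'default' entry.

FLUENT_BLOGS_LANGUAGES = {
    1: ({"code": "en"}, {"code": "nl"}),   # per-site language entries
    "default": {"code": "en"},
}

def lookup(language_code, site_id=1):
    for lang_dict in FLUENT_BLOGS_LANGUAGES.get(site_id, ()):
        if lang_dict["code"] == language_code:
            return lang_dict
    return FLUENT_BLOGS_LANGUAGES["default"]

print(lookup("nl"))   # -> {'code': 'nl'}
print(lookup("fr"))   # -> {'code': 'en'} (falls through to the default)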
log2timeline/plaso | plaso/formatters/fseventsd.py | https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/formatters/fseventsd.py#L54-L68 | def _GetFlagValues(self, flags):
"""Determines which events are indicated by a set of fsevents flags.
Args:
flags (int): fsevents record flags.
Returns:
str: a comma separated string containing descriptions of the flag values
stored in an fsevents record.
"""
event_types = []
for event_flag, description in self._FLAG_VALUES.items():
if event_flag & flags:
event_types.append(description)
return ', '.join(event_types) | [
"def",
"_GetFlagValues",
"(",
"self",
",",
"flags",
")",
":",
"event_types",
"=",
"[",
"]",
"for",
"event_flag",
",",
"description",
"in",
"self",
".",
"_FLAG_VALUES",
".",
"items",
"(",
")",
":",
"if",
"event_flag",
"&",
"flags",
":",
"event_types",
".",
"append",
"(",
"description",
")",
"return",
"', '",
".",
"join",
"(",
"event_types",
")"
]
| Determines which events are indicated by a set of fsevents flags.
Args:
flags (int): fsevents record flags.
Returns:
str: a comma separated string containing descriptions of the flag values
stored in an fsevents record. | [
"Determines",
"which",
"events",
"are",
"indicated",
"by",
"a",
"set",
"of",
"fsevents",
"flags",
"."
]
| python | train | 31.4 |
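The bitmask decoding in isolation, with hypothetical flag values (the real _FLAG_VALUES table maps actual fsevents constants):

_FLAG_VALUES = {0x01: 'FolderEvent', 0x02: 'Mount', 0x04: 'Unmount'}   # made-up values

def flag_descriptions(flags):
    return ', '.join(desc for flag, desc in _FLAG_VALUES.items() if flag & flags)

print(flag_descriptions(0x05))   # -> 'FolderEvent, Unmount'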
robotools/fontParts | Lib/fontParts/base/font.py | https://github.com/robotools/fontParts/blob/d2ff106fe95f9d566161d936a645157626568712/Lib/fontParts/base/font.py#L863-L884 | def insertLayer(self, layer, name=None):
"""
Insert **layer** into the font. ::
>>> layer = font.insertLayer(otherLayer, name="layer 2")
This will not insert the layer directly.
Rather, a new layer will be created and the data from
**layer** will be copied to the new layer. **name**
indicates the name that should be assigned to the layer
after insertion. If **name** is not given, the layer's
original name must be used. If the layer does not have
a name, an error must be raised. The data that will be
inserted from **layer** is the same data as documented
in :meth:`BaseLayer.copy`.
"""
if name is None:
name = layer.name
name = normalizers.normalizeLayerName(name)
if name in self:
self.removeLayer(name)
return self._insertLayer(layer, name=name) | [
"def",
"insertLayer",
"(",
"self",
",",
"layer",
",",
"name",
"=",
"None",
")",
":",
"if",
"name",
"is",
"None",
":",
"name",
"=",
"layer",
".",
"name",
"name",
"=",
"normalizers",
".",
"normalizeLayerName",
"(",
"name",
")",
"if",
"name",
"in",
"self",
":",
"self",
".",
"removeLayer",
"(",
"name",
")",
"return",
"self",
".",
"_insertLayer",
"(",
"layer",
",",
"name",
"=",
"name",
")"
]
| Insert **layer** into the font. ::
>>> layer = font.insertLayer(otherLayer, name="layer 2")
This will not insert the layer directly.
Rather, a new layer will be created and the data from
**layer** will be copied to the new layer. **name**
indicates the name that should be assigned to the layer
after insertion. If **name** is not given, the layer's
original name must be used. If the layer does not have
a name, an error must be raised. The data that will be
inserted from **layer** is the same data as documented
in :meth:`BaseLayer.copy`. | [
"Insert",
"**",
"layer",
"**",
"into",
"the",
"font",
".",
"::"
]
| python | train | 40.909091 |
python-bugzilla/python-bugzilla | bugzilla/base.py | https://github.com/python-bugzilla/python-bugzilla/blob/7de8b225104f24a1eee3e837bf1e02d60aefe69f/bugzilla/base.py#L942-L951 | def editcomponent(self, data):
"""
A method to edit a component in Bugzilla. Takes a dict, with
mandatory elements of product, component, and initialowner.
All other elements are optional and use the same names as the
addcomponent() method.
"""
data = data.copy()
self._component_data_convert(data, update=True)
return self._proxy.Component.update(data) | [
"def",
"editcomponent",
"(",
"self",
",",
"data",
")",
":",
"data",
"=",
"data",
".",
"copy",
"(",
")",
"self",
".",
"_component_data_convert",
"(",
"data",
",",
"update",
"=",
"True",
")",
"return",
"self",
".",
"_proxy",
".",
"Component",
".",
"update",
"(",
"data",
")"
]
| A method to edit a component in Bugzilla. Takes a dict, with
mandatory elements of product, component, and initialowner.
All other elements are optional and use the same names as the
addcomponent() method. | [
"A",
"method",
"to",
"edit",
"a",
"component",
"in",
"Bugzilla",
".",
"Takes",
"a",
"dict",
"with",
"mandatory",
"elements",
"of",
"product",
".",
"component",
"and",
"initialowner",
".",
"All",
"other",
"elements",
"are",
"optional",
"and",
"use",
"the",
"same",
"names",
"as",
"the",
"addcomponent",
"()",
"method",
"."
]
| python | train | 41.6 |
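A plausible payload built from the mandatory fields the docstring names; any field beyond those three follows addcomponent() and is an assumption here:

data = {
    "product": "Fedora",                    # mandatory
    "component": "python-bugzilla",         # mandatory
    "initialowner": "owner@example.com",    # mandatory
}
# bzapi.editcomponent(data)   # bzapi: an authenticated Bugzilla instance (assumed)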
erijo/tellcore-py | tellcore/telldus.py | https://github.com/erijo/tellcore-py/blob/7a1eb53e12ef039a2350933e502633df7560f6a8/tellcore/telldus.py#L244-L251 | def add_group(self, name, devices):
"""Add a new device group.
:return: a :class:`DeviceGroup` instance.
"""
device = self.add_device(name, "group")
device.add_to_group(devices)
return device | [
"def",
"add_group",
"(",
"self",
",",
"name",
",",
"devices",
")",
":",
"device",
"=",
"self",
".",
"add_device",
"(",
"name",
",",
"\"group\"",
")",
"device",
".",
"add_to_group",
"(",
"devices",
")",
"return",
"device"
]
| Add a new device group.
:return: a :class:`DeviceGroup` instance. | [
"Add",
"a",
"new",
"device",
"group",
"."
]
| python | train | 29.125 |
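Hedged usage sketch; it assumes a running telldusd daemon and the TelldusCore/devices() entry points from the same package:

from tellcore.telldus import TelldusCore

core = TelldusCore()
members = core.devices()[:2]               # any two existing devices (assumed present)
group = core.add_group("living room", members)
group.turn_on()                            # a DeviceGroup acts like a single device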
hubo1016/namedstruct | namedstruct/namedstruct.py | https://github.com/hubo1016/namedstruct/blob/5039026e0df4ce23003d212358918dbe1a6e1d76/namedstruct/namedstruct.py#L579-L595 | def packvalue(value, *properties):
'''
Store a specified value to specified property path. Often used in nstruct "init" parameter.
:param value: a fixed value
:param properties: specified field name, same as sizefromlen.
:returns: a function which takes a NamedStruct as parameter, and stores the value to the property path.
'''
def func(namedstruct):
v = namedstruct._target
for p in properties[:-1]:
v = getattr(v, p)
setattr(v, properties[-1], value)
return func | [
"def",
"packvalue",
"(",
"value",
",",
"*",
"properties",
")",
":",
"def",
"func",
"(",
"namedstruct",
")",
":",
"v",
"=",
"namedstruct",
".",
"_target",
"for",
"p",
"in",
"properties",
"[",
":",
"-",
"1",
"]",
":",
"v",
"=",
"getattr",
"(",
"v",
",",
"p",
")",
"setattr",
"(",
"v",
",",
"properties",
"[",
"-",
"1",
"]",
",",
"value",
")",
"return",
"func"
]
| Store a specified value to specified property path. Often used in nstruct "init" parameter.
:param value: a fixed value
:param properties: specified field name, same as sizefromlen.
:returns: a function which takes a NamedStruct as parameter, and stores the value to the property path. | [
"Store",
"a",
"specified",
"value",
"to",
"specified",
"property",
"path",
".",
"Often",
"used",
"in",
"nstruct",
"init",
"parameter",
".",
":",
"param",
"value",
":",
"a",
"fixed",
"value",
":",
"param",
"properties",
":",
"specified",
"field",
"name",
"same",
"as",
"sizefromlen",
".",
":",
"returns",
":",
"a",
"function",
"which",
"takes",
"a",
"NamedStruct",
"as",
"parameter",
"and",
"store",
"the",
"value",
"to",
"property",
"path",
"."
]
| python | train | 31.294118 |
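The returned closure demonstrated on a stand-in for a NamedStruct (assumes packvalue from the record above is in scope; the wrapper below only mimics the _target attribute):

from types import SimpleNamespace

class FakeStruct:
    def __init__(self, target):
        self._target = target   # NamedStruct stores its parsing target here

setter = packvalue(0x0800, 'header', 'ethertype')
s = FakeStruct(SimpleNamespace(header=SimpleNamespace(ethertype=0)))
setter(s)
print(hex(s._target.header.ethertype))   # -> 0x800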