repo (string, 7-54) | path (string, 4-192) | url (string, 87-284) | code (string, 78-104k) | code_tokens (sequence) | docstring (string, 1-46.9k) | docstring_tokens (sequence) | language (stringclasses, 1 value) | partition (stringclasses, 3 values) |
---|---|---|---|---|---|---|---|---|
mongodb/mongo-python-driver | bson/codec_options.py | https://github.com/mongodb/mongo-python-driver/blob/c29c21449e3aae74154207058cf85fd94018d4cd/bson/codec_options.py#L294-L314 | def with_options(self, **kwargs):
"""Make a copy of this CodecOptions, overriding some options::
>>> from bson.codec_options import DEFAULT_CODEC_OPTIONS
>>> DEFAULT_CODEC_OPTIONS.tz_aware
False
>>> options = DEFAULT_CODEC_OPTIONS.with_options(tz_aware=True)
>>> options.tz_aware
True
.. versionadded:: 3.5
"""
return CodecOptions(
kwargs.get('document_class', self.document_class),
kwargs.get('tz_aware', self.tz_aware),
kwargs.get('uuid_representation', self.uuid_representation),
kwargs.get('unicode_decode_error_handler',
self.unicode_decode_error_handler),
kwargs.get('tzinfo', self.tzinfo),
kwargs.get('type_registry', self.type_registry)
) | [
"def",
"with_options",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"CodecOptions",
"(",
"kwargs",
".",
"get",
"(",
"'document_class'",
",",
"self",
".",
"document_class",
")",
",",
"kwargs",
".",
"get",
"(",
"'tz_aware'",
",",
"self",
".",
"tz_aware",
")",
",",
"kwargs",
".",
"get",
"(",
"'uuid_representation'",
",",
"self",
".",
"uuid_representation",
")",
",",
"kwargs",
".",
"get",
"(",
"'unicode_decode_error_handler'",
",",
"self",
".",
"unicode_decode_error_handler",
")",
",",
"kwargs",
".",
"get",
"(",
"'tzinfo'",
",",
"self",
".",
"tzinfo",
")",
",",
"kwargs",
".",
"get",
"(",
"'type_registry'",
",",
"self",
".",
"type_registry",
")",
")"
] | Make a copy of this CodecOptions, overriding some options::
>>> from bson.codec_options import DEFAULT_CODEC_OPTIONS
>>> DEFAULT_CODEC_OPTIONS.tz_aware
False
>>> options = DEFAULT_CODEC_OPTIONS.with_options(tz_aware=True)
>>> options.tz_aware
True
.. versionadded:: 3.5 | [
"Make",
"a",
"copy",
"of",
"this",
"CodecOptions",
"overriding",
"some",
"options",
"::"
] | python | train |
juju/charm-helpers | charmhelpers/contrib/database/mysql.py | https://github.com/juju/charm-helpers/blob/aa785c40c3b7a8c69dbfbc7921d6b9f30142e171/charmhelpers/contrib/database/mysql.py#L160-L166 | def execute(self, sql):
"""Execute arbitary SQL against the database."""
cursor = self.connection.cursor()
try:
cursor.execute(sql)
finally:
cursor.close() | [
"def",
"execute",
"(",
"self",
",",
"sql",
")",
":",
"cursor",
"=",
"self",
".",
"connection",
".",
"cursor",
"(",
")",
"try",
":",
"cursor",
".",
"execute",
"(",
"sql",
")",
"finally",
":",
"cursor",
".",
"close",
"(",
")"
] | Execute arbitary SQL against the database. | [
"Execute",
"arbitary",
"SQL",
"against",
"the",
"database",
"."
] | python | train |
nutechsoftware/alarmdecoder | examples/rf_device.py | https://github.com/nutechsoftware/alarmdecoder/blob/b0c014089e24455228cb4402cf30ba98157578cd/examples/rf_device.py#L10-L33 | def main():
"""
Example application that watches for an event from a specific RF device.
This feature allows you to watch for events from RF devices if you have
an RF receiver. This is useful in the case of internal sensors, which
don't emit a FAULT if the sensor is tripped and the panel is armed STAY.
It also will monitor sensors that aren't configured.
NOTE: You must have an RF receiver installed and enabled in your panel
for RFX messages to be seen.
"""
try:
# Retrieve the first USB device
device = AlarmDecoder(SerialDevice(interface=SERIAL_DEVICE))
# Set up an event handler and open the device
device.on_rfx_message += handle_rfx
with device.open(baudrate=BAUDRATE):
while True:
time.sleep(1)
except Exception as ex:
print('Exception:', ex) | [
"def",
"main",
"(",
")",
":",
"try",
":",
"# Retrieve the first USB device",
"device",
"=",
"AlarmDecoder",
"(",
"SerialDevice",
"(",
"interface",
"=",
"SERIAL_DEVICE",
")",
")",
"# Set up an event handler and open the device",
"device",
".",
"on_rfx_message",
"+=",
"handle_rfx",
"with",
"device",
".",
"open",
"(",
"baudrate",
"=",
"BAUDRATE",
")",
":",
"while",
"True",
":",
"time",
".",
"sleep",
"(",
"1",
")",
"except",
"Exception",
"as",
"ex",
":",
"print",
"(",
"'Exception:'",
",",
"ex",
")"
] | Example application that watches for an event from a specific RF device.
This feature allows you to watch for events from RF devices if you have
an RF receiver. This is useful in the case of internal sensors, which
don't emit a FAULT if the sensor is tripped and the panel is armed STAY.
It also will monitor sensors that aren't configured.
NOTE: You must have an RF receiver installed and enabled in your panel
for RFX messages to be seen. | [
"Example",
"application",
"that",
"watches",
"for",
"an",
"event",
"from",
"a",
"specific",
"RF",
"device",
"."
] | python | train |
BlueBrain/NeuroM | examples/synthesis_json.py | https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/examples/synthesis_json.py#L92-L100 | def transform_header(mtype_name):
'''Add header to json output to wrap around distribution data.
'''
head_dict = OrderedDict()
head_dict["m-type"] = mtype_name
head_dict["components"] = defaultdict(OrderedDict)
return head_dict | [
"def",
"transform_header",
"(",
"mtype_name",
")",
":",
"head_dict",
"=",
"OrderedDict",
"(",
")",
"head_dict",
"[",
"\"m-type\"",
"]",
"=",
"mtype_name",
"head_dict",
"[",
"\"components\"",
"]",
"=",
"defaultdict",
"(",
"OrderedDict",
")",
"return",
"head_dict"
] | Add header to json output to wrap around distribution data. | [
"Add",
"header",
"to",
"json",
"output",
"to",
"wrap",
"around",
"distribution",
"data",
"."
] | python | train |
mixcloud/django-experiments | experiments/utils.py | https://github.com/mixcloud/django-experiments/blob/1f45e9f8a108b51e44918daa647269b2b8d43f1d/experiments/utils.py#L192-L200 | def is_enrolled(self, experiment_name, alternative):
"""Enroll this user in the experiment if they are not already part of it. Returns the selected alternative"""
"""Test if the user is enrolled in the supplied alternative for the given experiment.
The supplied alternative will be added to the list of possible alternatives for the
experiment if it is not already there. If the user is not yet enrolled in the supplied
experiment they will be enrolled, and an alternative chosen at random."""
chosen_alternative = self.enroll(experiment_name, [alternative])
return alternative == chosen_alternative | [
"def",
"is_enrolled",
"(",
"self",
",",
"experiment_name",
",",
"alternative",
")",
":",
"\"\"\"Test if the user is enrolled in the supplied alternative for the given experiment.\n\n The supplied alternative will be added to the list of possible alternatives for the\n experiment if it is not already there. If the user is not yet enrolled in the supplied\n experiment they will be enrolled, and an alternative chosen at random.\"\"\"",
"chosen_alternative",
"=",
"self",
".",
"enroll",
"(",
"experiment_name",
",",
"[",
"alternative",
"]",
")",
"return",
"alternative",
"==",
"chosen_alternative"
] | Enroll this user in the experiment if they are not already part of it. Returns the selected alternative | [
"Enroll",
"this",
"user",
"in",
"the",
"experiment",
"if",
"they",
"are",
"not",
"already",
"part",
"of",
"it",
".",
"Returns",
"the",
"selected",
"alternative"
] | python | train |
dcwatson/django-pgcrypto | pgcrypto/base.py | https://github.com/dcwatson/django-pgcrypto/blob/02108795ec97f80af92ff6800a1c55eb958c3496/pgcrypto/base.py#L33-L46 | def armor(data, versioned=True):
"""
Returns a string in ASCII Armor format, for the given binary data. The
output of this is compatiple with pgcrypto's armor/dearmor functions.
"""
template = '-----BEGIN PGP MESSAGE-----\n%(headers)s%(body)s\n=%(crc)s\n-----END PGP MESSAGE-----'
body = base64.b64encode(data)
# The 24-bit CRC should be in big-endian, strip off the first byte (it's already masked in crc24).
crc = base64.b64encode(struct.pack('>L', crc24(data))[1:])
return template % {
'headers': 'Version: django-pgcrypto %s\n\n' % __version__ if versioned else '\n',
'body': body.decode('ascii'),
'crc': crc.decode('ascii'),
} | [
"def",
"armor",
"(",
"data",
",",
"versioned",
"=",
"True",
")",
":",
"template",
"=",
"'-----BEGIN PGP MESSAGE-----\\n%(headers)s%(body)s\\n=%(crc)s\\n-----END PGP MESSAGE-----'",
"body",
"=",
"base64",
".",
"b64encode",
"(",
"data",
")",
"# The 24-bit CRC should be in big-endian, strip off the first byte (it's already masked in crc24).",
"crc",
"=",
"base64",
".",
"b64encode",
"(",
"struct",
".",
"pack",
"(",
"'>L'",
",",
"crc24",
"(",
"data",
")",
")",
"[",
"1",
":",
"]",
")",
"return",
"template",
"%",
"{",
"'headers'",
":",
"'Version: django-pgcrypto %s\\n\\n'",
"%",
"__version__",
"if",
"versioned",
"else",
"'\\n'",
",",
"'body'",
":",
"body",
".",
"decode",
"(",
"'ascii'",
")",
",",
"'crc'",
":",
"crc",
".",
"decode",
"(",
"'ascii'",
")",
",",
"}"
] | Returns a string in ASCII Armor format, for the given binary data. The
output of this is compatiple with pgcrypto's armor/dearmor functions. | [
"Returns",
"a",
"string",
"in",
"ASCII",
"Armor",
"format",
"for",
"the",
"given",
"binary",
"data",
".",
"The",
"output",
"of",
"this",
"is",
"compatiple",
"with",
"pgcrypto",
"s",
"armor",
"/",
"dearmor",
"functions",
"."
] | python | train |
evhub/coconut | coconut/command/util.py | https://github.com/evhub/coconut/blob/ff97177344e7604e89a0a98a977a87ed2a56fc6d/coconut/command/util.py#L263-L295 | def run_cmd(cmd, show_output=True, raise_errs=True, **kwargs):
"""Run a console command.
When show_output=True, prints output and returns exit code, otherwise returns output.
When raise_errs=True, raises a subprocess.CalledProcessError if the command fails.
"""
internal_assert(cmd and isinstance(cmd, list), "console commands must be passed as non-empty lists")
try:
from shutil import which
except ImportError:
pass
else:
cmd[0] = which(cmd[0]) or cmd[0]
logger.log_cmd(cmd)
try:
if show_output and raise_errs:
return subprocess.check_call(cmd, **kwargs)
elif show_output:
return subprocess.call(cmd, **kwargs)
else:
stdout, stderr, retcode = call_output(cmd, **kwargs)
output = "".join(stdout + stderr)
if retcode and raise_errs:
raise subprocess.CalledProcessError(retcode, cmd, output=output)
return output
except OSError:
logger.log_exc()
if raise_errs:
raise subprocess.CalledProcessError(oserror_retcode, cmd)
elif show_output:
return oserror_retcode
else:
return "" | [
"def",
"run_cmd",
"(",
"cmd",
",",
"show_output",
"=",
"True",
",",
"raise_errs",
"=",
"True",
",",
"*",
"*",
"kwargs",
")",
":",
"internal_assert",
"(",
"cmd",
"and",
"isinstance",
"(",
"cmd",
",",
"list",
")",
",",
"\"console commands must be passed as non-empty lists\"",
")",
"try",
":",
"from",
"shutil",
"import",
"which",
"except",
"ImportError",
":",
"pass",
"else",
":",
"cmd",
"[",
"0",
"]",
"=",
"which",
"(",
"cmd",
"[",
"0",
"]",
")",
"or",
"cmd",
"[",
"0",
"]",
"logger",
".",
"log_cmd",
"(",
"cmd",
")",
"try",
":",
"if",
"show_output",
"and",
"raise_errs",
":",
"return",
"subprocess",
".",
"check_call",
"(",
"cmd",
",",
"*",
"*",
"kwargs",
")",
"elif",
"show_output",
":",
"return",
"subprocess",
".",
"call",
"(",
"cmd",
",",
"*",
"*",
"kwargs",
")",
"else",
":",
"stdout",
",",
"stderr",
",",
"retcode",
"=",
"call_output",
"(",
"cmd",
",",
"*",
"*",
"kwargs",
")",
"output",
"=",
"\"\"",
".",
"join",
"(",
"stdout",
"+",
"stderr",
")",
"if",
"retcode",
"and",
"raise_errs",
":",
"raise",
"subprocess",
".",
"CalledProcessError",
"(",
"retcode",
",",
"cmd",
",",
"output",
"=",
"output",
")",
"return",
"output",
"except",
"OSError",
":",
"logger",
".",
"log_exc",
"(",
")",
"if",
"raise_errs",
":",
"raise",
"subprocess",
".",
"CalledProcessError",
"(",
"oserror_retcode",
",",
"cmd",
")",
"elif",
"show_output",
":",
"return",
"oserror_retcode",
"else",
":",
"return",
"\"\""
] | Run a console command.
When show_output=True, prints output and returns exit code, otherwise returns output.
When raise_errs=True, raises a subprocess.CalledProcessError if the command fails. | [
"Run",
"a",
"console",
"command",
"."
] | python | train |
rikrd/inspire | inspirespeech/__init__.py | https://github.com/rikrd/inspire/blob/e281c0266a9a9633f34ab70f9c3ad58036c19b59/inspirespeech/__init__.py#L481-L494 | def load(fileobj):
"""Load the submission from a file-like object
:param fileobj: File-like object
:return: the loaded submission
"""
with gzip.GzipFile(fileobj=fileobj, mode='r') as z:
submission = Submission(metadata=json.loads(z.readline()))
for line in z:
token_id, token = json.loads(line)
submission['tokens'][token_id] = token
return submission | [
"def",
"load",
"(",
"fileobj",
")",
":",
"with",
"gzip",
".",
"GzipFile",
"(",
"fileobj",
"=",
"fileobj",
",",
"mode",
"=",
"'r'",
")",
"as",
"z",
":",
"submission",
"=",
"Submission",
"(",
"metadata",
"=",
"json",
".",
"loads",
"(",
"z",
".",
"readline",
"(",
")",
")",
")",
"for",
"line",
"in",
"z",
":",
"token_id",
",",
"token",
"=",
"json",
".",
"loads",
"(",
"line",
")",
"submission",
"[",
"'tokens'",
"]",
"[",
"token_id",
"]",
"=",
"token",
"return",
"submission"
] | Load the submission from a file-like object
:param fileobj: File-like object
:return: the loaded submission | [
"Load",
"the",
"submission",
"from",
"a",
"file",
"-",
"like",
"object"
] | python | train |
toumorokoshi/sprinter | sprinter/environment.py | https://github.com/toumorokoshi/sprinter/blob/846697a7a087e69c61d075232e754d6975a64152/sprinter/environment.py#L573-L578 | def _copy_source_to_target(self):
""" copy source user configuration to target """
if self.source and self.target:
for k, v in self.source.items('config'):
# always have source override target.
self.target.set_input(k, v) | [
"def",
"_copy_source_to_target",
"(",
"self",
")",
":",
"if",
"self",
".",
"source",
"and",
"self",
".",
"target",
":",
"for",
"k",
",",
"v",
"in",
"self",
".",
"source",
".",
"items",
"(",
"'config'",
")",
":",
"# always have source override target.",
"self",
".",
"target",
".",
"set_input",
"(",
"k",
",",
"v",
")"
] | copy source user configuration to target | [
"copy",
"source",
"user",
"configuration",
"to",
"target"
] | python | train |
SheffieldML/GPy | GPy/models/warped_gp.py | https://github.com/SheffieldML/GPy/blob/54c32d79d289d622fb18b898aee65a2a431d90cf/GPy/models/warped_gp.py#L118-L132 | def predict_quantiles(self, X, quantiles=(2.5, 97.5), Y_metadata=None, likelihood=None, kern=None):
"""
Get the predictive quantiles around the prediction at X
:param X: The points at which to make a prediction
:type X: np.ndarray (Xnew x self.input_dim)
:param quantiles: tuple of quantiles, default is (2.5, 97.5) which is the 95% interval
:type quantiles: tuple
:returns: list of quantiles for each X and predictive quantiles for interval combination
:rtype: [np.ndarray (Xnew x self.input_dim), np.ndarray (Xnew x self.input_dim)]
"""
qs = super(WarpedGP, self).predict_quantiles(X, quantiles, Y_metadata=Y_metadata, likelihood=likelihood, kern=kern)
if self.predict_in_warped_space:
return [self.warping_function.f_inv(q) for q in qs]
return qs | [
"def",
"predict_quantiles",
"(",
"self",
",",
"X",
",",
"quantiles",
"=",
"(",
"2.5",
",",
"97.5",
")",
",",
"Y_metadata",
"=",
"None",
",",
"likelihood",
"=",
"None",
",",
"kern",
"=",
"None",
")",
":",
"qs",
"=",
"super",
"(",
"WarpedGP",
",",
"self",
")",
".",
"predict_quantiles",
"(",
"X",
",",
"quantiles",
",",
"Y_metadata",
"=",
"Y_metadata",
",",
"likelihood",
"=",
"likelihood",
",",
"kern",
"=",
"kern",
")",
"if",
"self",
".",
"predict_in_warped_space",
":",
"return",
"[",
"self",
".",
"warping_function",
".",
"f_inv",
"(",
"q",
")",
"for",
"q",
"in",
"qs",
"]",
"return",
"qs"
] | Get the predictive quantiles around the prediction at X
:param X: The points at which to make a prediction
:type X: np.ndarray (Xnew x self.input_dim)
:param quantiles: tuple of quantiles, default is (2.5, 97.5) which is the 95% interval
:type quantiles: tuple
:returns: list of quantiles for each X and predictive quantiles for interval combination
:rtype: [np.ndarray (Xnew x self.input_dim), np.ndarray (Xnew x self.input_dim)] | [
"Get",
"the",
"predictive",
"quantiles",
"around",
"the",
"prediction",
"at",
"X"
] | python | train |
atlassian-api/atlassian-python-api | atlassian/bitbucket.py | https://github.com/atlassian-api/atlassian-python-api/blob/540d269905c3e7547b666fe30c647b2d512cf358/atlassian/bitbucket.py#L20-L27 | def project(self, key):
"""
Provide project info
:param key:
:return:
"""
url = 'rest/api/1.0/projects/{0}'.format(key)
return (self.get(url) or {}).get('values') | [
"def",
"project",
"(",
"self",
",",
"key",
")",
":",
"url",
"=",
"'rest/api/1.0/projects/{0}'",
".",
"format",
"(",
"key",
")",
"return",
"(",
"self",
".",
"get",
"(",
"url",
")",
"or",
"{",
"}",
")",
".",
"get",
"(",
"'values'",
")"
] | Provide project info
:param key:
:return: | [
"Provide",
"project",
"info",
":",
"param",
"key",
":",
":",
"return",
":"
] | python | train |
cisco-sas/kitty | kitty/fuzzers/base.py | https://github.com/cisco-sas/kitty/blob/cb0760989dcdfe079e43ac574d872d0b18953a32/kitty/fuzzers/base.py#L246-L257 | def set_model(self, model):
'''
Set the model to fuzz
:type model: :class:`~kitty.model.high_level.base.BaseModel` or a subclass
:param model: Model object to fuzz
'''
self.model = model
if self.model:
self.model.set_notification_handler(self)
self.handle_stage_changed(model)
return self | [
"def",
"set_model",
"(",
"self",
",",
"model",
")",
":",
"self",
".",
"model",
"=",
"model",
"if",
"self",
".",
"model",
":",
"self",
".",
"model",
".",
"set_notification_handler",
"(",
"self",
")",
"self",
".",
"handle_stage_changed",
"(",
"model",
")",
"return",
"self"
] | Set the model to fuzz
:type model: :class:`~kitty.model.high_level.base.BaseModel` or a subclass
:param model: Model object to fuzz | [
"Set",
"the",
"model",
"to",
"fuzz"
] | python | train |
glomex/gcdt | gcdt/kumo_core.py | https://github.com/glomex/gcdt/blob/cd67cf416371337b83cb9ca3f696277125703339/gcdt/kumo_core.py#L608-L629 | def describe_change_set(awsclient, change_set_name, stack_name):
"""Print out the change_set to console.
This needs to run create_change_set first.
:param awsclient:
:param change_set_name:
:param stack_name:
"""
client = awsclient.get_client('cloudformation')
status = None
while status not in ['CREATE_COMPLETE', 'FAILED']:
response = client.describe_change_set(
ChangeSetName=change_set_name,
StackName=stack_name)
status = response['Status']
# print('##### %s' % status)
if status == 'FAILED':
print(response['StatusReason'])
elif status == 'CREATE_COMPLETE':
for change in response['Changes']:
print(json2table(change['ResourceChange'])) | [
"def",
"describe_change_set",
"(",
"awsclient",
",",
"change_set_name",
",",
"stack_name",
")",
":",
"client",
"=",
"awsclient",
".",
"get_client",
"(",
"'cloudformation'",
")",
"status",
"=",
"None",
"while",
"status",
"not",
"in",
"[",
"'CREATE_COMPLETE'",
",",
"'FAILED'",
"]",
":",
"response",
"=",
"client",
".",
"describe_change_set",
"(",
"ChangeSetName",
"=",
"change_set_name",
",",
"StackName",
"=",
"stack_name",
")",
"status",
"=",
"response",
"[",
"'Status'",
"]",
"# print('##### %s' % status)",
"if",
"status",
"==",
"'FAILED'",
":",
"print",
"(",
"response",
"[",
"'StatusReason'",
"]",
")",
"elif",
"status",
"==",
"'CREATE_COMPLETE'",
":",
"for",
"change",
"in",
"response",
"[",
"'Changes'",
"]",
":",
"print",
"(",
"json2table",
"(",
"change",
"[",
"'ResourceChange'",
"]",
")",
")"
] | Print out the change_set to console.
This needs to run create_change_set first.
:param awsclient:
:param change_set_name:
:param stack_name: | [
"Print",
"out",
"the",
"change_set",
"to",
"console",
".",
"This",
"needs",
"to",
"run",
"create_change_set",
"first",
"."
] | python | train |
kennknowles/python-rightarrow | rightarrow/parser.py | https://github.com/kennknowles/python-rightarrow/blob/86c83bde9d2fba6d54744eac9abedd1c248b7e73/rightarrow/parser.py#L169-L174 | def p_obj_fields(self, p):
"""
obj_fields : obj_fields ',' obj_field
| obj_field
"""
p[0] = dict([p[1]] if len(p) == 2 else p[1] + [p[3]]) | [
"def",
"p_obj_fields",
"(",
"self",
",",
"p",
")",
":",
"p",
"[",
"0",
"]",
"=",
"dict",
"(",
"[",
"p",
"[",
"1",
"]",
"]",
"if",
"len",
"(",
"p",
")",
"==",
"2",
"else",
"p",
"[",
"1",
"]",
"+",
"[",
"p",
"[",
"3",
"]",
"]",
")"
] | obj_fields : obj_fields ',' obj_field
| obj_field | [
"obj_fields",
":",
"obj_fields",
"obj_field",
"|",
"obj_field"
] | python | train |
portfors-lab/sparkle | sparkle/gui/stim/stimulusview.py | https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/gui/stim/stimulusview.py#L205-L208 | def rowsAboutToBeRemoved(self, parent, start, end):
"""Marks view for repaint. :qtdoc:`Re-implemented<QAbstractItemView.rowsAboutToBeRemoved>`"""
self._viewIsDirty = True
super(StimulusView, self).rowsAboutToBeRemoved(parent, start, end) | [
"def",
"rowsAboutToBeRemoved",
"(",
"self",
",",
"parent",
",",
"start",
",",
"end",
")",
":",
"self",
".",
"_viewIsDirty",
"=",
"True",
"super",
"(",
"StimulusView",
",",
"self",
")",
".",
"rowsAboutToBeRemoved",
"(",
"parent",
",",
"start",
",",
"end",
")"
] | Marks view for repaint. :qtdoc:`Re-implemented<QAbstractItemView.rowsAboutToBeRemoved>` | [
"Marks",
"view",
"for",
"repaint",
".",
":",
"qtdoc",
":",
"Re",
"-",
"implemented<QAbstractItemView",
".",
"rowsAboutToBeRemoved",
">"
] | python | train |
bcbio/bcbio-nextgen | bcbio/structural/cnvkit.py | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/cnvkit.py#L125-L139 | def _match_batches(tumor, normal):
"""Fix batch names for shared tumor/normals to ensure matching
"""
def _get_batch(x):
b = dd.get_batch(x)
return [b] if not isinstance(b, (list, tuple)) else b
if normal:
tumor = copy.deepcopy(tumor)
normal = copy.deepcopy(normal)
cur_batch = list(set(_get_batch(tumor)) & set(_get_batch(normal)))
assert len(cur_batch) == 1, "No batch overlap: %s and %s" % (_get_batch(tumor), _get_batch(normal))
cur_batch = cur_batch[0]
tumor["metadata"]["batch"] = cur_batch
normal["metadata"]["batch"] = cur_batch
return tumor, normal | [
"def",
"_match_batches",
"(",
"tumor",
",",
"normal",
")",
":",
"def",
"_get_batch",
"(",
"x",
")",
":",
"b",
"=",
"dd",
".",
"get_batch",
"(",
"x",
")",
"return",
"[",
"b",
"]",
"if",
"not",
"isinstance",
"(",
"b",
",",
"(",
"list",
",",
"tuple",
")",
")",
"else",
"b",
"if",
"normal",
":",
"tumor",
"=",
"copy",
".",
"deepcopy",
"(",
"tumor",
")",
"normal",
"=",
"copy",
".",
"deepcopy",
"(",
"normal",
")",
"cur_batch",
"=",
"list",
"(",
"set",
"(",
"_get_batch",
"(",
"tumor",
")",
")",
"&",
"set",
"(",
"_get_batch",
"(",
"normal",
")",
")",
")",
"assert",
"len",
"(",
"cur_batch",
")",
"==",
"1",
",",
"\"No batch overlap: %s and %s\"",
"%",
"(",
"_get_batch",
"(",
"tumor",
")",
",",
"_get_batch",
"(",
"normal",
")",
")",
"cur_batch",
"=",
"cur_batch",
"[",
"0",
"]",
"tumor",
"[",
"\"metadata\"",
"]",
"[",
"\"batch\"",
"]",
"=",
"cur_batch",
"normal",
"[",
"\"metadata\"",
"]",
"[",
"\"batch\"",
"]",
"=",
"cur_batch",
"return",
"tumor",
",",
"normal"
] | Fix batch names for shared tumor/normals to ensure matching | [
"Fix",
"batch",
"names",
"for",
"shared",
"tumor",
"/",
"normals",
"to",
"ensure",
"matching"
] | python | train |
tcpcloud/python-aptly | aptly/publisher/__init__.py | https://github.com/tcpcloud/python-aptly/blob/7eb4ce1c508666bad0e6a0d4c5c561b1485ed558/aptly/publisher/__init__.py#L594-L601 | def add(self, snapshot, component='main'):
"""
Add snapshot of component to publish
"""
try:
self.components[component].append(snapshot)
except KeyError:
self.components[component] = [snapshot] | [
"def",
"add",
"(",
"self",
",",
"snapshot",
",",
"component",
"=",
"'main'",
")",
":",
"try",
":",
"self",
".",
"components",
"[",
"component",
"]",
".",
"append",
"(",
"snapshot",
")",
"except",
"KeyError",
":",
"self",
".",
"components",
"[",
"component",
"]",
"=",
"[",
"snapshot",
"]"
] | Add snapshot of component to publish | [
"Add",
"snapshot",
"of",
"component",
"to",
"publish"
] | python | train |
saltstack/salt | salt/modules/netscaler.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/netscaler.py#L484-L510 | def service_disable(s_name, s_delay=None, **connection_args):
'''
Disable a service
CLI Example:
.. code-block:: bash
salt '*' netscaler.service_disable 'serviceName'
salt '*' netscaler.service_disable 'serviceName' 'delayInSeconds'
'''
ret = True
service = _service_get(s_name, **connection_args)
if service is None:
return False
if s_delay is not None:
service.set_delay(s_delay)
nitro = _connect(**connection_args)
if nitro is None:
return False
try:
NSService.disable(nitro, service)
except NSNitroError as error:
log.debug('netscaler module error - NSService.enable() failed: %s', error)
ret = False
_disconnect(nitro)
return ret | [
"def",
"service_disable",
"(",
"s_name",
",",
"s_delay",
"=",
"None",
",",
"*",
"*",
"connection_args",
")",
":",
"ret",
"=",
"True",
"service",
"=",
"_service_get",
"(",
"s_name",
",",
"*",
"*",
"connection_args",
")",
"if",
"service",
"is",
"None",
":",
"return",
"False",
"if",
"s_delay",
"is",
"not",
"None",
":",
"service",
".",
"set_delay",
"(",
"s_delay",
")",
"nitro",
"=",
"_connect",
"(",
"*",
"*",
"connection_args",
")",
"if",
"nitro",
"is",
"None",
":",
"return",
"False",
"try",
":",
"NSService",
".",
"disable",
"(",
"nitro",
",",
"service",
")",
"except",
"NSNitroError",
"as",
"error",
":",
"log",
".",
"debug",
"(",
"'netscaler module error - NSService.enable() failed: %s'",
",",
"error",
")",
"ret",
"=",
"False",
"_disconnect",
"(",
"nitro",
")",
"return",
"ret"
] | Disable a service
CLI Example:
.. code-block:: bash
salt '*' netscaler.service_disable 'serviceName'
salt '*' netscaler.service_disable 'serviceName' 'delayInSeconds' | [
"Disable",
"a",
"service"
] | python | train |
nephila/djangocms-blog | djangocms_blog/admin.py | https://github.com/nephila/djangocms-blog/blob/3fdfbd4ba48947df0ee4c6d42e3a1c812b6dd95d/djangocms_blog/admin.py#L302-L342 | def get_fieldsets(self, request, obj=None):
"""
Customize the fieldsets according to the app settings
:param request: request
:param obj: post
:return: fieldsets configuration
"""
app_config_default = self._app_config_select(request, obj)
if app_config_default is None and request.method == 'GET':
return super(PostAdmin, self).get_fieldsets(request, obj)
if not obj:
config = app_config_default
else:
config = obj.app_config
fsets = deepcopy(self._fieldsets)
if config:
abstract = bool(config.use_abstract)
placeholder = bool(config.use_placeholder)
related = bool(config.use_related)
else:
abstract = get_setting('USE_ABSTRACT')
placeholder = get_setting('USE_PLACEHOLDER')
related = get_setting('USE_RELATED')
if abstract:
fsets[0][1]['fields'].append('abstract')
if not placeholder:
fsets[0][1]['fields'].append('post_text')
if get_setting('MULTISITE') and not self.has_restricted_sites(request):
fsets[1][1]['fields'][0].append('sites')
if request.user.is_superuser:
fsets[1][1]['fields'][0].append('author')
if apps.is_installed('djangocms_blog.liveblog'):
fsets[2][1]['fields'][2].append('enable_liveblog')
filter_function = get_setting('ADMIN_POST_FIELDSET_FILTER')
if related and Post.objects.namespace(config.namespace).active_translations().exists():
fsets[1][1]['fields'][0].append('related')
if callable(filter_function):
fsets = filter_function(fsets, request, obj=obj)
return fsets | [
"def",
"get_fieldsets",
"(",
"self",
",",
"request",
",",
"obj",
"=",
"None",
")",
":",
"app_config_default",
"=",
"self",
".",
"_app_config_select",
"(",
"request",
",",
"obj",
")",
"if",
"app_config_default",
"is",
"None",
"and",
"request",
".",
"method",
"==",
"'GET'",
":",
"return",
"super",
"(",
"PostAdmin",
",",
"self",
")",
".",
"get_fieldsets",
"(",
"request",
",",
"obj",
")",
"if",
"not",
"obj",
":",
"config",
"=",
"app_config_default",
"else",
":",
"config",
"=",
"obj",
".",
"app_config",
"fsets",
"=",
"deepcopy",
"(",
"self",
".",
"_fieldsets",
")",
"if",
"config",
":",
"abstract",
"=",
"bool",
"(",
"config",
".",
"use_abstract",
")",
"placeholder",
"=",
"bool",
"(",
"config",
".",
"use_placeholder",
")",
"related",
"=",
"bool",
"(",
"config",
".",
"use_related",
")",
"else",
":",
"abstract",
"=",
"get_setting",
"(",
"'USE_ABSTRACT'",
")",
"placeholder",
"=",
"get_setting",
"(",
"'USE_PLACEHOLDER'",
")",
"related",
"=",
"get_setting",
"(",
"'USE_RELATED'",
")",
"if",
"abstract",
":",
"fsets",
"[",
"0",
"]",
"[",
"1",
"]",
"[",
"'fields'",
"]",
".",
"append",
"(",
"'abstract'",
")",
"if",
"not",
"placeholder",
":",
"fsets",
"[",
"0",
"]",
"[",
"1",
"]",
"[",
"'fields'",
"]",
".",
"append",
"(",
"'post_text'",
")",
"if",
"get_setting",
"(",
"'MULTISITE'",
")",
"and",
"not",
"self",
".",
"has_restricted_sites",
"(",
"request",
")",
":",
"fsets",
"[",
"1",
"]",
"[",
"1",
"]",
"[",
"'fields'",
"]",
"[",
"0",
"]",
".",
"append",
"(",
"'sites'",
")",
"if",
"request",
".",
"user",
".",
"is_superuser",
":",
"fsets",
"[",
"1",
"]",
"[",
"1",
"]",
"[",
"'fields'",
"]",
"[",
"0",
"]",
".",
"append",
"(",
"'author'",
")",
"if",
"apps",
".",
"is_installed",
"(",
"'djangocms_blog.liveblog'",
")",
":",
"fsets",
"[",
"2",
"]",
"[",
"1",
"]",
"[",
"'fields'",
"]",
"[",
"2",
"]",
".",
"append",
"(",
"'enable_liveblog'",
")",
"filter_function",
"=",
"get_setting",
"(",
"'ADMIN_POST_FIELDSET_FILTER'",
")",
"if",
"related",
"and",
"Post",
".",
"objects",
".",
"namespace",
"(",
"config",
".",
"namespace",
")",
".",
"active_translations",
"(",
")",
".",
"exists",
"(",
")",
":",
"fsets",
"[",
"1",
"]",
"[",
"1",
"]",
"[",
"'fields'",
"]",
"[",
"0",
"]",
".",
"append",
"(",
"'related'",
")",
"if",
"callable",
"(",
"filter_function",
")",
":",
"fsets",
"=",
"filter_function",
"(",
"fsets",
",",
"request",
",",
"obj",
"=",
"obj",
")",
"return",
"fsets"
] | Customize the fieldsets according to the app settings
:param request: request
:param obj: post
:return: fieldsets configuration | [
"Customize",
"the",
"fieldsets",
"according",
"to",
"the",
"app",
"settings"
] | python | train |
skyfielders/python-skyfield | skyfield/nutationlib.py | https://github.com/skyfielders/python-skyfield/blob/51d9e042e06457f6b1f2415296d50a38cb3a300f/skyfield/nutationlib.py#L19-L44 | def compute_nutation(t):
"""Generate the nutation rotations for Time `t`.
If the Julian date is scalar, a simple ``(3, 3)`` matrix is
returned; if the date is an array of length ``n``, then an array of
matrices is returned with dimensions ``(3, 3, n)``.
"""
oblm, oblt, eqeq, psi, eps = t._earth_tilt
cobm = cos(oblm * DEG2RAD)
sobm = sin(oblm * DEG2RAD)
cobt = cos(oblt * DEG2RAD)
sobt = sin(oblt * DEG2RAD)
cpsi = cos(psi * ASEC2RAD)
spsi = sin(psi * ASEC2RAD)
return array(((cpsi,
-spsi * cobm,
-spsi * sobm),
(spsi * cobt,
cpsi * cobm * cobt + sobm * sobt,
cpsi * sobm * cobt - cobm * sobt),
(spsi * sobt,
cpsi * cobm * sobt - sobm * cobt,
cpsi * sobm * sobt + cobm * cobt))) | [
"def",
"compute_nutation",
"(",
"t",
")",
":",
"oblm",
",",
"oblt",
",",
"eqeq",
",",
"psi",
",",
"eps",
"=",
"t",
".",
"_earth_tilt",
"cobm",
"=",
"cos",
"(",
"oblm",
"*",
"DEG2RAD",
")",
"sobm",
"=",
"sin",
"(",
"oblm",
"*",
"DEG2RAD",
")",
"cobt",
"=",
"cos",
"(",
"oblt",
"*",
"DEG2RAD",
")",
"sobt",
"=",
"sin",
"(",
"oblt",
"*",
"DEG2RAD",
")",
"cpsi",
"=",
"cos",
"(",
"psi",
"*",
"ASEC2RAD",
")",
"spsi",
"=",
"sin",
"(",
"psi",
"*",
"ASEC2RAD",
")",
"return",
"array",
"(",
"(",
"(",
"cpsi",
",",
"-",
"spsi",
"*",
"cobm",
",",
"-",
"spsi",
"*",
"sobm",
")",
",",
"(",
"spsi",
"*",
"cobt",
",",
"cpsi",
"*",
"cobm",
"*",
"cobt",
"+",
"sobm",
"*",
"sobt",
",",
"cpsi",
"*",
"sobm",
"*",
"cobt",
"-",
"cobm",
"*",
"sobt",
")",
",",
"(",
"spsi",
"*",
"sobt",
",",
"cpsi",
"*",
"cobm",
"*",
"sobt",
"-",
"sobm",
"*",
"cobt",
",",
"cpsi",
"*",
"sobm",
"*",
"sobt",
"+",
"cobm",
"*",
"cobt",
")",
")",
")"
] | Generate the nutation rotations for Time `t`.
If the Julian date is scalar, a simple ``(3, 3)`` matrix is
returned; if the date is an array of length ``n``, then an array of
matrices is returned with dimensions ``(3, 3, n)``. | [
"Generate",
"the",
"nutation",
"rotations",
"for",
"Time",
"t",
"."
] | python | train |
mmcauliffe/Conch-sounds | conch/analysis/mfcc/rastamat.py | https://github.com/mmcauliffe/Conch-sounds/blob/e05535fd08e4b0e47e37a77ef521d05eff1d6bc5/conch/analysis/mfcc/rastamat.py#L35-L66 | def construct_filterbank(num_filters, nfft, sr, min_freq, max_freq):
"""Constructs a mel-frequency filter bank.
Parameters
----------
nfft : int
Number of points in the FFT.
Returns
-------
array
Filter bank to multiply an FFT spectrum to create a mel-frequency
spectrum.
"""
min_mel = freq_to_mel(min_freq)
max_mel = freq_to_mel(max_freq)
mel_points = np.linspace(min_mel, max_mel, num_filters + 2)
bin_freqs = mel_to_freq(mel_points)
# bins = round((nfft - 1) * bin_freqs / sr)
fftfreqs = np.arange(int(nfft / 2 + 1)) / nfft * sr
fbank = np.zeros((num_filters, int(nfft / 2 + 1)))
for i in range(num_filters):
fs = bin_freqs[i + np.arange(3)]
fs = fs[1] + (fs - fs[1])
loslope = (fftfreqs - fs[0]) / (fs[1] - fs[0])
highslope = (fs[2] - fftfreqs) / (fs[2] - fs[1])
fbank[i, :] = np.maximum(np.zeros(loslope.shape), np.minimum(loslope, highslope))
return fbank.transpose() | [
"def",
"construct_filterbank",
"(",
"num_filters",
",",
"nfft",
",",
"sr",
",",
"min_freq",
",",
"max_freq",
")",
":",
"min_mel",
"=",
"freq_to_mel",
"(",
"min_freq",
")",
"max_mel",
"=",
"freq_to_mel",
"(",
"max_freq",
")",
"mel_points",
"=",
"np",
".",
"linspace",
"(",
"min_mel",
",",
"max_mel",
",",
"num_filters",
"+",
"2",
")",
"bin_freqs",
"=",
"mel_to_freq",
"(",
"mel_points",
")",
"# bins = round((nfft - 1) * bin_freqs / sr)",
"fftfreqs",
"=",
"np",
".",
"arange",
"(",
"int",
"(",
"nfft",
"/",
"2",
"+",
"1",
")",
")",
"/",
"nfft",
"*",
"sr",
"fbank",
"=",
"np",
".",
"zeros",
"(",
"(",
"num_filters",
",",
"int",
"(",
"nfft",
"/",
"2",
"+",
"1",
")",
")",
")",
"for",
"i",
"in",
"range",
"(",
"num_filters",
")",
":",
"fs",
"=",
"bin_freqs",
"[",
"i",
"+",
"np",
".",
"arange",
"(",
"3",
")",
"]",
"fs",
"=",
"fs",
"[",
"1",
"]",
"+",
"(",
"fs",
"-",
"fs",
"[",
"1",
"]",
")",
"loslope",
"=",
"(",
"fftfreqs",
"-",
"fs",
"[",
"0",
"]",
")",
"/",
"(",
"fs",
"[",
"1",
"]",
"-",
"fs",
"[",
"0",
"]",
")",
"highslope",
"=",
"(",
"fs",
"[",
"2",
"]",
"-",
"fftfreqs",
")",
"/",
"(",
"fs",
"[",
"2",
"]",
"-",
"fs",
"[",
"1",
"]",
")",
"fbank",
"[",
"i",
",",
":",
"]",
"=",
"np",
".",
"maximum",
"(",
"np",
".",
"zeros",
"(",
"loslope",
".",
"shape",
")",
",",
"np",
".",
"minimum",
"(",
"loslope",
",",
"highslope",
")",
")",
"return",
"fbank",
".",
"transpose",
"(",
")"
] | Constructs a mel-frequency filter bank.
Parameters
----------
nfft : int
Number of points in the FFT.
Returns
-------
array
Filter bank to multiply an FFT spectrum to create a mel-frequency
spectrum. | [
"Constructs",
"a",
"mel",
"-",
"frequency",
"filter",
"bank",
"."
] | python | train |
numenta/htmresearch | htmresearch/support/column_pooler_mixin.py | https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/htmresearch/support/column_pooler_mixin.py#L201-L227 | def mmGetCellActivityPlot(self, title="", showReset=False,
resetShading=0.25, activityType="activeCells"):
"""
Returns plot of the cell activity.
@param title (string) an optional title for the figure
@param showReset (bool) if true, the first set of cell activities
after a reset will have a gray background
@param resetShading (float) if showReset is true, this float specifies the
intensity of the reset background with 0.0
being white and 1.0 being black
@param activityType (string) The type of cell activity to display. Valid
types include "activeCells"
@return (Plot) plot
"""
cellTrace = copy.deepcopy(self._mmTraces[activityType].data)
for i in xrange(len(cellTrace)):
cellTrace[i] = self.getCellIndices(cellTrace[i])
return self.mmGetCellTracePlot(cellTrace, self.numberOfCells(),
activityType, title, showReset,
resetShading) | [
"def",
"mmGetCellActivityPlot",
"(",
"self",
",",
"title",
"=",
"\"\"",
",",
"showReset",
"=",
"False",
",",
"resetShading",
"=",
"0.25",
",",
"activityType",
"=",
"\"activeCells\"",
")",
":",
"cellTrace",
"=",
"copy",
".",
"deepcopy",
"(",
"self",
".",
"_mmTraces",
"[",
"activityType",
"]",
".",
"data",
")",
"for",
"i",
"in",
"xrange",
"(",
"len",
"(",
"cellTrace",
")",
")",
":",
"cellTrace",
"[",
"i",
"]",
"=",
"self",
".",
"getCellIndices",
"(",
"cellTrace",
"[",
"i",
"]",
")",
"return",
"self",
".",
"mmGetCellTracePlot",
"(",
"cellTrace",
",",
"self",
".",
"numberOfCells",
"(",
")",
",",
"activityType",
",",
"title",
",",
"showReset",
",",
"resetShading",
")"
] | Returns plot of the cell activity.
@param title (string) an optional title for the figure
@param showReset (bool) if true, the first set of cell activities
after a reset will have a gray background
@param resetShading (float) if showReset is true, this float specifies the
intensity of the reset background with 0.0
being white and 1.0 being black
@param activityType (string) The type of cell activity to display. Valid
types include "activeCells"
@return (Plot) plot | [
"Returns",
"plot",
"of",
"the",
"cell",
"activity",
"."
] | python | train |
coopie/ttv | ttv.py | https://github.com/coopie/ttv/blob/43e2bcddf58945f27665d4db1362473842eb26f3/ttv.py#L140-L167 | def get_dataset(corpora):
"""
Return a dictionary of subjectID -> [path_to_resource].
"""
# TODO: make filter methods for the files
def make_posix_path(dirpath, filename):
dirpath = posixpath.sep.join(dirpath.split(os.sep))
return posixpath.join(dirpath, filename)
wav_files_in_corpora = filter(
lambda x: x.endswith('.wav'),
sum(
[list(map(lambda x: make_posix_path(corpus, x), os.listdir(corpus))) for corpus in corpora],
[]
),
)
dataset = {}
for wav_file in wav_files_in_corpora:
subjectID = os.path.split(wav_file)[-1].split('.')[0].split('_')[0]
if subjectID in dataset:
dataset[subjectID].append(wav_file)
else:
dataset[subjectID] = [wav_file]
return dataset | [
"def",
"get_dataset",
"(",
"corpora",
")",
":",
"# TODO: make filter methods for the files",
"def",
"make_posix_path",
"(",
"dirpath",
",",
"filename",
")",
":",
"dirpath",
"=",
"posixpath",
".",
"sep",
".",
"join",
"(",
"dirpath",
".",
"split",
"(",
"os",
".",
"sep",
")",
")",
"return",
"posixpath",
".",
"join",
"(",
"dirpath",
",",
"filename",
")",
"wav_files_in_corpora",
"=",
"filter",
"(",
"lambda",
"x",
":",
"x",
".",
"endswith",
"(",
"'.wav'",
")",
",",
"sum",
"(",
"[",
"list",
"(",
"map",
"(",
"lambda",
"x",
":",
"make_posix_path",
"(",
"corpus",
",",
"x",
")",
",",
"os",
".",
"listdir",
"(",
"corpus",
")",
")",
")",
"for",
"corpus",
"in",
"corpora",
"]",
",",
"[",
"]",
")",
",",
")",
"dataset",
"=",
"{",
"}",
"for",
"wav_file",
"in",
"wav_files_in_corpora",
":",
"subjectID",
"=",
"os",
".",
"path",
".",
"split",
"(",
"wav_file",
")",
"[",
"-",
"1",
"]",
".",
"split",
"(",
"'.'",
")",
"[",
"0",
"]",
".",
"split",
"(",
"'_'",
")",
"[",
"0",
"]",
"if",
"subjectID",
"in",
"dataset",
":",
"dataset",
"[",
"subjectID",
"]",
".",
"append",
"(",
"wav_file",
")",
"else",
":",
"dataset",
"[",
"subjectID",
"]",
"=",
"[",
"wav_file",
"]",
"return",
"dataset"
] | Return a dictionary of subjectID -> [path_to_resource]. | [
"Return",
"a",
"dictionary",
"of",
"subjectID",
"-",
">",
"[",
"path_to_resource",
"]",
"."
] | python | train |
axialmarket/fsq | fsq/internal.py | https://github.com/axialmarket/fsq/blob/43b84c292cb8a187599d86753b947cf73248f989/fsq/internal.py#L167-L176 | def check_ttl_max_tries(tries, enqueued_at, max_tries, ttl):
'''Check that the ttl for an item has not expired, and that the item has
not exceeded it's maximum allotted tries'''
if max_tries > 0 and tries >= max_tries:
raise FSQMaxTriesError(errno.EINTR, u'Max tries exceded:'\
u' {0} ({1})'.format(max_tries, tries))
if ttl > 0 and datetime.datetime.now() < enqueued_at + datetime.timedelta(
seconds=ttl):
raise FSQTTLExpiredError(errno.EINTR, u'TTL Expired:'\
u' {0}'.format(ttl)) | [
"def",
"check_ttl_max_tries",
"(",
"tries",
",",
"enqueued_at",
",",
"max_tries",
",",
"ttl",
")",
":",
"if",
"max_tries",
">",
"0",
"and",
"tries",
">=",
"max_tries",
":",
"raise",
"FSQMaxTriesError",
"(",
"errno",
".",
"EINTR",
",",
"u'Max tries exceded:'",
"u' {0} ({1})'",
".",
"format",
"(",
"max_tries",
",",
"tries",
")",
")",
"if",
"ttl",
">",
"0",
"and",
"datetime",
".",
"datetime",
".",
"now",
"(",
")",
"<",
"enqueued_at",
"+",
"datetime",
".",
"timedelta",
"(",
"seconds",
"=",
"ttl",
")",
":",
"raise",
"FSQTTLExpiredError",
"(",
"errno",
".",
"EINTR",
",",
"u'TTL Expired:'",
"u' {0}'",
".",
"format",
"(",
"ttl",
")",
")"
] | Check that the ttl for an item has not expired, and that the item has
not exceeded it's maximum allotted tries | [
"Check",
"that",
"the",
"ttl",
"for",
"an",
"item",
"has",
"not",
"expired",
"and",
"that",
"the",
"item",
"has",
"not",
"exceeded",
"it",
"s",
"maximum",
"allotted",
"tries"
] | python | train |
mitsei/dlkit | dlkit/json_/repository/sessions.py | https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/repository/sessions.py#L5623-L5639 | def get_child_repository_ids(self, repository_id):
"""Gets the ``Ids`` of the children of the given repository.
arg: repository_id (osid.id.Id): the ``Id`` to query
return: (osid.id.IdList) - the children of the repository
raise: NotFound - ``repository_id`` not found
raise: NullArgument - ``repository_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.BinHierarchySession.get_child_bin_ids
if self._catalog_session is not None:
return self._catalog_session.get_child_catalog_ids(catalog_id=repository_id)
return self._hierarchy_session.get_children(id_=repository_id) | [
"def",
"get_child_repository_ids",
"(",
"self",
",",
"repository_id",
")",
":",
"# Implemented from template for",
"# osid.resource.BinHierarchySession.get_child_bin_ids",
"if",
"self",
".",
"_catalog_session",
"is",
"not",
"None",
":",
"return",
"self",
".",
"_catalog_session",
".",
"get_child_catalog_ids",
"(",
"catalog_id",
"=",
"repository_id",
")",
"return",
"self",
".",
"_hierarchy_session",
".",
"get_children",
"(",
"id_",
"=",
"repository_id",
")"
] | Gets the ``Ids`` of the children of the given repository.
arg: repository_id (osid.id.Id): the ``Id`` to query
return: (osid.id.IdList) - the children of the repository
raise: NotFound - ``repository_id`` not found
raise: NullArgument - ``repository_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.* | [
"Gets",
"the",
"Ids",
"of",
"the",
"children",
"of",
"the",
"given",
"repository",
"."
] | python | train |
googledatalab/pydatalab | solutionbox/structured_data/mltoolbox/_structured_data/_package.py | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/solutionbox/structured_data/mltoolbox/_structured_data/_package.py#L67-L84 | def _assert_gcs_files(files):
"""Check files starts wtih gs://.
Args:
files: string to file path, or list of file paths.
"""
if sys.version_info.major > 2:
string_type = (str, bytes) # for python 3 compatibility
else:
string_type = basestring # noqa
if isinstance(files, string_type):
files = [files]
for f in files:
if f is not None and not f.startswith('gs://'):
raise ValueError('File %s is not a gcs path' % f) | [
"def",
"_assert_gcs_files",
"(",
"files",
")",
":",
"if",
"sys",
".",
"version_info",
".",
"major",
">",
"2",
":",
"string_type",
"=",
"(",
"str",
",",
"bytes",
")",
"# for python 3 compatibility",
"else",
":",
"string_type",
"=",
"basestring",
"# noqa",
"if",
"isinstance",
"(",
"files",
",",
"string_type",
")",
":",
"files",
"=",
"[",
"files",
"]",
"for",
"f",
"in",
"files",
":",
"if",
"f",
"is",
"not",
"None",
"and",
"not",
"f",
".",
"startswith",
"(",
"'gs://'",
")",
":",
"raise",
"ValueError",
"(",
"'File %s is not a gcs path'",
"%",
"f",
")"
] | Check files starts wtih gs://.
Args:
files: string to file path, or list of file paths. | [
"Check",
"files",
"starts",
"wtih",
"gs",
":",
"//",
"."
] | python | train |
sosreport/sos | sos/__init__.py | https://github.com/sosreport/sos/blob/2ebc04da53dc871c8dd5243567afa4f8592dca29/sos/__init__.py#L272-L285 | def merge(self, src, skip_default=True):
"""Merge another set of ``SoSOptions`` into this object.
Merge two ``SoSOptions`` objects by setting unset or default
values to their value in the ``src`` object.
:param src: the ``SoSOptions`` object to copy from
:param is_default: ``True`` if new default values are to be set.
"""
for arg in _arg_names:
if not hasattr(src, arg):
continue
if getattr(src, arg) is not None or not skip_default:
self._merge_opt(arg, src, False) | [
"def",
"merge",
"(",
"self",
",",
"src",
",",
"skip_default",
"=",
"True",
")",
":",
"for",
"arg",
"in",
"_arg_names",
":",
"if",
"not",
"hasattr",
"(",
"src",
",",
"arg",
")",
":",
"continue",
"if",
"getattr",
"(",
"src",
",",
"arg",
")",
"is",
"not",
"None",
"or",
"not",
"skip_default",
":",
"self",
".",
"_merge_opt",
"(",
"arg",
",",
"src",
",",
"False",
")"
] | Merge another set of ``SoSOptions`` into this object.
Merge two ``SoSOptions`` objects by setting unset or default
values to their value in the ``src`` object.
:param src: the ``SoSOptions`` object to copy from
:param is_default: ``True`` if new default values are to be set. | [
"Merge",
"another",
"set",
"of",
"SoSOptions",
"into",
"this",
"object",
"."
] | python | train |
sorgerlab/indra | indra/assemblers/sbgn/assembler.py | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/sbgn/assembler.py#L108-L123 | def print_model(self, pretty=True, encoding='utf8'):
"""Return the assembled SBGN model as an XML string.
Parameters
----------
pretty : Optional[bool]
If True, the SBGN string is formatted with indentation (for human
viewing) otherwise no indentation is used. Default: True
Returns
-------
sbgn_str : bytes (str in Python 2)
An XML string representation of the SBGN model.
"""
return lxml.etree.tostring(self.sbgn, pretty_print=pretty,
encoding=encoding, xml_declaration=True) | [
"def",
"print_model",
"(",
"self",
",",
"pretty",
"=",
"True",
",",
"encoding",
"=",
"'utf8'",
")",
":",
"return",
"lxml",
".",
"etree",
".",
"tostring",
"(",
"self",
".",
"sbgn",
",",
"pretty_print",
"=",
"pretty",
",",
"encoding",
"=",
"encoding",
",",
"xml_declaration",
"=",
"True",
")"
] | Return the assembled SBGN model as an XML string.
Parameters
----------
pretty : Optional[bool]
If True, the SBGN string is formatted with indentation (for human
viewing) otherwise no indentation is used. Default: True
Returns
-------
sbgn_str : bytes (str in Python 2)
An XML string representation of the SBGN model. | [
"Return",
"the",
"assembled",
"SBGN",
"model",
"as",
"an",
"XML",
"string",
"."
] | python | train |
cmbruns/pyopenvr | src/openvr/__init__.py | https://github.com/cmbruns/pyopenvr/blob/68395d26bb3df6ab1f0f059c38d441f962938be6/src/openvr/__init__.py#L2658-L2668 | def computeDistortion(self, eEye, fU, fV):
"""
Gets the result of the distortion function for the specified eye and input UVs. UVs go from 0,0 in
the upper left of that eye's viewport and 1,1 in the lower right of that eye's viewport.
Returns true for success. Otherwise, returns false, and distortion coordinates are not suitable.
"""
fn = self.function_table.computeDistortion
pDistortionCoordinates = DistortionCoordinates_t()
result = fn(eEye, fU, fV, byref(pDistortionCoordinates))
return result, pDistortionCoordinates | [
"def",
"computeDistortion",
"(",
"self",
",",
"eEye",
",",
"fU",
",",
"fV",
")",
":",
"fn",
"=",
"self",
".",
"function_table",
".",
"computeDistortion",
"pDistortionCoordinates",
"=",
"DistortionCoordinates_t",
"(",
")",
"result",
"=",
"fn",
"(",
"eEye",
",",
"fU",
",",
"fV",
",",
"byref",
"(",
"pDistortionCoordinates",
")",
")",
"return",
"result",
",",
"pDistortionCoordinates"
] | Gets the result of the distortion function for the specified eye and input UVs. UVs go from 0,0 in
the upper left of that eye's viewport and 1,1 in the lower right of that eye's viewport.
Returns true for success. Otherwise, returns false, and distortion coordinates are not suitable. | [
"Gets",
"the",
"result",
"of",
"the",
"distortion",
"function",
"for",
"the",
"specified",
"eye",
"and",
"input",
"UVs",
".",
"UVs",
"go",
"from",
"0",
"0",
"in",
"the",
"upper",
"left",
"of",
"that",
"eye",
"s",
"viewport",
"and",
"1",
"1",
"in",
"the",
"lower",
"right",
"of",
"that",
"eye",
"s",
"viewport",
".",
"Returns",
"true",
"for",
"success",
".",
"Otherwise",
"returns",
"false",
"and",
"distortion",
"coordinates",
"are",
"not",
"suitable",
"."
] | python | train |
pypa/pipenv | pipenv/vendor/distlib/_backport/tarfile.py | https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/distlib/_backport/tarfile.py#L752-L778 | def read(self, size=None):
"""Read data from the file.
"""
if size is None:
size = self.size - self.position
else:
size = min(size, self.size - self.position)
buf = b""
while size > 0:
while True:
data, start, stop, offset = self.map[self.map_index]
if start <= self.position < stop:
break
else:
self.map_index += 1
if self.map_index == len(self.map):
self.map_index = 0
length = min(size, stop - self.position)
if data:
self.fileobj.seek(offset + (self.position - start))
buf += self.fileobj.read(length)
else:
buf += NUL * length
size -= length
self.position += length
return buf | [
"def",
"read",
"(",
"self",
",",
"size",
"=",
"None",
")",
":",
"if",
"size",
"is",
"None",
":",
"size",
"=",
"self",
".",
"size",
"-",
"self",
".",
"position",
"else",
":",
"size",
"=",
"min",
"(",
"size",
",",
"self",
".",
"size",
"-",
"self",
".",
"position",
")",
"buf",
"=",
"b\"\"",
"while",
"size",
">",
"0",
":",
"while",
"True",
":",
"data",
",",
"start",
",",
"stop",
",",
"offset",
"=",
"self",
".",
"map",
"[",
"self",
".",
"map_index",
"]",
"if",
"start",
"<=",
"self",
".",
"position",
"<",
"stop",
":",
"break",
"else",
":",
"self",
".",
"map_index",
"+=",
"1",
"if",
"self",
".",
"map_index",
"==",
"len",
"(",
"self",
".",
"map",
")",
":",
"self",
".",
"map_index",
"=",
"0",
"length",
"=",
"min",
"(",
"size",
",",
"stop",
"-",
"self",
".",
"position",
")",
"if",
"data",
":",
"self",
".",
"fileobj",
".",
"seek",
"(",
"offset",
"+",
"(",
"self",
".",
"position",
"-",
"start",
")",
")",
"buf",
"+=",
"self",
".",
"fileobj",
".",
"read",
"(",
"length",
")",
"else",
":",
"buf",
"+=",
"NUL",
"*",
"length",
"size",
"-=",
"length",
"self",
".",
"position",
"+=",
"length",
"return",
"buf"
] | Read data from the file. | [
"Read",
"data",
"from",
"the",
"file",
"."
] | python | train |
acutesoftware/AIKIF | aikif/agents/explore/agent_explore_grid.py | https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/agents/explore/agent_explore_grid.py#L24-L41 | def set_world(self, grd, start_y_x, y_x):
"""
tell the agent to move to location y,x
Why is there another grd object in the agent? Because
this is NOT the main grid, rather a copy for the agent
to overwrite with planning routes, etc.
The real grid is initialised in World.__init__() class
"""
self.grd = grd
self.start_y = start_y_x[0]
self.start_x = start_y_x[1]
self.current_y = start_y_x[0]
self.current_x = start_y_x[1]
self.target_y = y_x[0]
self.target_x = y_x[1]
self.backtrack = [0,0] # set only if blocked and agent needs to go back
self.prefer_x = 0 # set only if backtracked as preferred direction x
self.prefer_y = 0 | [
"def",
"set_world",
"(",
"self",
",",
"grd",
",",
"start_y_x",
",",
"y_x",
")",
":",
"self",
".",
"grd",
"=",
"grd",
"self",
".",
"start_y",
"=",
"start_y_x",
"[",
"0",
"]",
"self",
".",
"start_x",
"=",
"start_y_x",
"[",
"1",
"]",
"self",
".",
"current_y",
"=",
"start_y_x",
"[",
"0",
"]",
"self",
".",
"current_x",
"=",
"start_y_x",
"[",
"1",
"]",
"self",
".",
"target_y",
"=",
"y_x",
"[",
"0",
"]",
"self",
".",
"target_x",
"=",
"y_x",
"[",
"1",
"]",
"self",
".",
"backtrack",
"=",
"[",
"0",
",",
"0",
"]",
"# set only if blocked and agent needs to go back",
"self",
".",
"prefer_x",
"=",
"0",
"# set only if backtracked as preferred direction x",
"self",
".",
"prefer_y",
"=",
"0"
] | tell the agent to move to location y,x
Why is there another grd object in the agent? Because
this is NOT the main grid, rather a copy for the agent
to overwrite with planning routes, etc.
The real grid is initialised in World.__init__() class | [
"tell",
"the",
"agent",
"to",
"move",
"to",
"location",
"y",
"x",
"Why",
"is",
"there",
"another",
"grd",
"object",
"in",
"the",
"agent?",
"Because",
"this",
"is",
"NOT",
"the",
"main",
"grid",
"rather",
"a",
"copy",
"for",
"the",
"agent",
"to",
"overwrite",
"with",
"planning",
"routes",
"etc",
".",
"The",
"real",
"grid",
"is",
"initialised",
"in",
"World",
".",
"__init__",
"()",
"class"
] | python | train |
HazyResearch/metal | metal/multitask/mt_end_model.py | https://github.com/HazyResearch/metal/blob/c24e3772e25ac6d0917b8b7af4c1bcb92928f84a/metal/multitask/mt_end_model.py#L281-L294 | def _preprocess_Y(self, Y, k=None):
"""Convert Y to t-length list of probabilistic labels if necessary"""
# If not a list, convert to a singleton list
if not isinstance(Y, list):
if self.t != 1:
msg = "For t > 1, Y must be a list of n-dim or [n, K_t] tensors"
raise ValueError(msg)
Y = [Y]
if not len(Y) == self.t:
msg = f"Expected Y to be a t-length list (t={self.t}), not {len(Y)}"
raise ValueError(msg)
return [EndModel._preprocess_Y(self, Y_t, self.K[t]) for t, Y_t in enumerate(Y)] | [
"def",
"_preprocess_Y",
"(",
"self",
",",
"Y",
",",
"k",
"=",
"None",
")",
":",
"# If not a list, convert to a singleton list",
"if",
"not",
"isinstance",
"(",
"Y",
",",
"list",
")",
":",
"if",
"self",
".",
"t",
"!=",
"1",
":",
"msg",
"=",
"\"For t > 1, Y must be a list of n-dim or [n, K_t] tensors\"",
"raise",
"ValueError",
"(",
"msg",
")",
"Y",
"=",
"[",
"Y",
"]",
"if",
"not",
"len",
"(",
"Y",
")",
"==",
"self",
".",
"t",
":",
"msg",
"=",
"f\"Expected Y to be a t-length list (t={self.t}), not {len(Y)}\"",
"raise",
"ValueError",
"(",
"msg",
")",
"return",
"[",
"EndModel",
".",
"_preprocess_Y",
"(",
"self",
",",
"Y_t",
",",
"self",
".",
"K",
"[",
"t",
"]",
")",
"for",
"t",
",",
"Y_t",
"in",
"enumerate",
"(",
"Y",
")",
"]"
] | Convert Y to t-length list of probabilistic labels if necessary | [
"Convert",
"Y",
"to",
"t",
"-",
"length",
"list",
"of",
"probabilistic",
"labels",
"if",
"necessary"
] | python | train |
tempodb/tempodb-python | tempodb/protocol/objects.py | https://github.com/tempodb/tempodb-python/blob/8ce45231bd728c6c97ef799cf0f1513ea3a9a7d3/tempodb/protocol/objects.py#L438-L448 | def to_dictionary(self):
"""Serialize an object into dictionary form. Useful if you have to
serialize an array of objects into JSON. Otherwise, if you call the
:meth:`to_json` method on each object in the list and then try to
dump the array, you end up with an array with one string."""
j = {}
j['interval'] = {'start': self.start.isoformat(),
'end': self.end.isoformat()}
j['found'] = {'v': self.v, 't': self.t.isoformat()}
return j | [
"def",
"to_dictionary",
"(",
"self",
")",
":",
"j",
"=",
"{",
"}",
"j",
"[",
"'interval'",
"]",
"=",
"{",
"'start'",
":",
"self",
".",
"start",
".",
"isoformat",
"(",
")",
",",
"'end'",
":",
"self",
".",
"end",
".",
"isoformat",
"(",
")",
"}",
"j",
"[",
"'found'",
"]",
"=",
"{",
"'v'",
":",
"self",
".",
"v",
",",
"'t'",
":",
"self",
".",
"t",
".",
"isoformat",
"(",
")",
"}",
"return",
"j"
] | Serialize an object into dictionary form. Useful if you have to
serialize an array of objects into JSON. Otherwise, if you call the
:meth:`to_json` method on each object in the list and then try to
dump the array, you end up with an array with one string. | [
"Serialize",
"an",
"object",
"into",
"dictionary",
"form",
".",
"Useful",
"if",
"you",
"have",
"to",
"serialize",
"an",
"array",
"of",
"objects",
"into",
"JSON",
".",
"Otherwise",
"if",
"you",
"call",
"the",
":",
"meth",
":",
"to_json",
"method",
"on",
"each",
"object",
"in",
"the",
"list",
"and",
"then",
"try",
"to",
"dump",
"the",
"array",
"you",
"end",
"up",
"with",
"an",
"array",
"with",
"one",
"string",
"."
] | python | train |
gem/oq-engine | openquake/engine/engine.py | https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/engine/engine.py#L294-L373 | def run_calc(job_id, oqparam, exports, hazard_calculation_id=None, **kw):
"""
Run a calculation.
:param job_id:
ID of the current job
:param oqparam:
:class:`openquake.commonlib.oqvalidation.OqParam` instance
:param exports:
A comma-separated string of export types.
"""
setproctitle('oq-job-%d' % job_id)
calc = base.calculators(oqparam, calc_id=job_id)
logging.info('%s running %s [--hc=%s]',
getpass.getuser(),
calc.oqparam.inputs['job_ini'],
calc.oqparam.hazard_calculation_id)
logging.info('Using engine version %s', __version__)
msg = check_obsolete_version(oqparam.calculation_mode)
if msg:
logs.LOG.warn(msg)
if OQ_DISTRIBUTE.startswith(('celery', 'zmq')):
set_concurrent_tasks_default(job_id)
calc.from_engine = True
tb = 'None\n'
try:
if not oqparam.hazard_calculation_id:
if 'input_zip' in oqparam.inputs: # starting from an archive
with open(oqparam.inputs['input_zip'], 'rb') as arch:
data = numpy.array(arch.read())
else:
logs.LOG.info('zipping the input files')
bio = io.BytesIO()
oqzip.zip_job(oqparam.inputs['job_ini'], bio, (), oqparam,
logging.debug)
data = numpy.array(bio.getvalue())
del bio
calc.datastore['input/zip'] = data
calc.datastore.set_attrs('input/zip', nbytes=data.nbytes)
del data # save memory
poll_queue(job_id, _PID, poll_time=15)
t0 = time.time()
calc.run(exports=exports,
hazard_calculation_id=hazard_calculation_id,
close=False, **kw)
logs.LOG.info('Exposing the outputs to the database')
expose_outputs(calc.datastore)
duration = time.time() - t0
calc._monitor.flush()
records = views.performance_view(calc.datastore)
logs.dbcmd('save_performance', job_id, records)
calc.datastore.close()
logs.LOG.info('Calculation %d finished correctly in %d seconds',
job_id, duration)
logs.dbcmd('finish', job_id, 'complete')
except BaseException as exc:
if isinstance(exc, MasterKilled):
msg = 'aborted'
else:
msg = 'failed'
tb = traceback.format_exc()
try:
logs.LOG.critical(tb)
logs.dbcmd('finish', job_id, msg)
except BaseException: # an OperationalError may always happen
sys.stderr.write(tb)
raise
finally:
# if there was an error in the calculation, this part may fail;
# in such a situation, we simply log the cleanup error without
# taking further action, so that the real error can propagate
try:
if OQ_DISTRIBUTE.startswith('celery'):
celery_cleanup(TERMINATE)
except BaseException:
# log the finalization error only if there is no real error
if tb == 'None\n':
logs.LOG.error('finalizing', exc_info=True)
return calc | [
"def",
"run_calc",
"(",
"job_id",
",",
"oqparam",
",",
"exports",
",",
"hazard_calculation_id",
"=",
"None",
",",
"*",
"*",
"kw",
")",
":",
"setproctitle",
"(",
"'oq-job-%d'",
"%",
"job_id",
")",
"calc",
"=",
"base",
".",
"calculators",
"(",
"oqparam",
",",
"calc_id",
"=",
"job_id",
")",
"logging",
".",
"info",
"(",
"'%s running %s [--hc=%s]'",
",",
"getpass",
".",
"getuser",
"(",
")",
",",
"calc",
".",
"oqparam",
".",
"inputs",
"[",
"'job_ini'",
"]",
",",
"calc",
".",
"oqparam",
".",
"hazard_calculation_id",
")",
"logging",
".",
"info",
"(",
"'Using engine version %s'",
",",
"__version__",
")",
"msg",
"=",
"check_obsolete_version",
"(",
"oqparam",
".",
"calculation_mode",
")",
"if",
"msg",
":",
"logs",
".",
"LOG",
".",
"warn",
"(",
"msg",
")",
"if",
"OQ_DISTRIBUTE",
".",
"startswith",
"(",
"(",
"'celery'",
",",
"'zmq'",
")",
")",
":",
"set_concurrent_tasks_default",
"(",
"job_id",
")",
"calc",
".",
"from_engine",
"=",
"True",
"tb",
"=",
"'None\\n'",
"try",
":",
"if",
"not",
"oqparam",
".",
"hazard_calculation_id",
":",
"if",
"'input_zip'",
"in",
"oqparam",
".",
"inputs",
":",
"# starting from an archive",
"with",
"open",
"(",
"oqparam",
".",
"inputs",
"[",
"'input_zip'",
"]",
",",
"'rb'",
")",
"as",
"arch",
":",
"data",
"=",
"numpy",
".",
"array",
"(",
"arch",
".",
"read",
"(",
")",
")",
"else",
":",
"logs",
".",
"LOG",
".",
"info",
"(",
"'zipping the input files'",
")",
"bio",
"=",
"io",
".",
"BytesIO",
"(",
")",
"oqzip",
".",
"zip_job",
"(",
"oqparam",
".",
"inputs",
"[",
"'job_ini'",
"]",
",",
"bio",
",",
"(",
")",
",",
"oqparam",
",",
"logging",
".",
"debug",
")",
"data",
"=",
"numpy",
".",
"array",
"(",
"bio",
".",
"getvalue",
"(",
")",
")",
"del",
"bio",
"calc",
".",
"datastore",
"[",
"'input/zip'",
"]",
"=",
"data",
"calc",
".",
"datastore",
".",
"set_attrs",
"(",
"'input/zip'",
",",
"nbytes",
"=",
"data",
".",
"nbytes",
")",
"del",
"data",
"# save memory",
"poll_queue",
"(",
"job_id",
",",
"_PID",
",",
"poll_time",
"=",
"15",
")",
"t0",
"=",
"time",
".",
"time",
"(",
")",
"calc",
".",
"run",
"(",
"exports",
"=",
"exports",
",",
"hazard_calculation_id",
"=",
"hazard_calculation_id",
",",
"close",
"=",
"False",
",",
"*",
"*",
"kw",
")",
"logs",
".",
"LOG",
".",
"info",
"(",
"'Exposing the outputs to the database'",
")",
"expose_outputs",
"(",
"calc",
".",
"datastore",
")",
"duration",
"=",
"time",
".",
"time",
"(",
")",
"-",
"t0",
"calc",
".",
"_monitor",
".",
"flush",
"(",
")",
"records",
"=",
"views",
".",
"performance_view",
"(",
"calc",
".",
"datastore",
")",
"logs",
".",
"dbcmd",
"(",
"'save_performance'",
",",
"job_id",
",",
"records",
")",
"calc",
".",
"datastore",
".",
"close",
"(",
")",
"logs",
".",
"LOG",
".",
"info",
"(",
"'Calculation %d finished correctly in %d seconds'",
",",
"job_id",
",",
"duration",
")",
"logs",
".",
"dbcmd",
"(",
"'finish'",
",",
"job_id",
",",
"'complete'",
")",
"except",
"BaseException",
"as",
"exc",
":",
"if",
"isinstance",
"(",
"exc",
",",
"MasterKilled",
")",
":",
"msg",
"=",
"'aborted'",
"else",
":",
"msg",
"=",
"'failed'",
"tb",
"=",
"traceback",
".",
"format_exc",
"(",
")",
"try",
":",
"logs",
".",
"LOG",
".",
"critical",
"(",
"tb",
")",
"logs",
".",
"dbcmd",
"(",
"'finish'",
",",
"job_id",
",",
"msg",
")",
"except",
"BaseException",
":",
"# an OperationalError may always happen",
"sys",
".",
"stderr",
".",
"write",
"(",
"tb",
")",
"raise",
"finally",
":",
"# if there was an error in the calculation, this part may fail;",
"# in such a situation, we simply log the cleanup error without",
"# taking further action, so that the real error can propagate",
"try",
":",
"if",
"OQ_DISTRIBUTE",
".",
"startswith",
"(",
"'celery'",
")",
":",
"celery_cleanup",
"(",
"TERMINATE",
")",
"except",
"BaseException",
":",
"# log the finalization error only if there is no real error",
"if",
"tb",
"==",
"'None\\n'",
":",
"logs",
".",
"LOG",
".",
"error",
"(",
"'finalizing'",
",",
"exc_info",
"=",
"True",
")",
"return",
"calc"
] | Run a calculation.
:param job_id:
ID of the current job
:param oqparam:
:class:`openquake.commonlib.oqvalidation.OqParam` instance
:param exports:
A comma-separated string of export types. | [
"Run",
"a",
"calculation",
"."
] | python | train |
nutechsoftware/alarmdecoder | alarmdecoder/event/event.py | https://github.com/nutechsoftware/alarmdecoder/blob/b0c014089e24455228cb4402cf30ba98157578cd/alarmdecoder/event/event.py#L37-L45 | def _getfunctionlist(self):
"""(internal use) """
try:
eventhandler = self.obj.__eventhandler__
except AttributeError:
eventhandler = self.obj.__eventhandler__ = {}
return eventhandler.setdefault(self.event, []) | [
"def",
"_getfunctionlist",
"(",
"self",
")",
":",
"try",
":",
"eventhandler",
"=",
"self",
".",
"obj",
".",
"__eventhandler__",
"except",
"AttributeError",
":",
"eventhandler",
"=",
"self",
".",
"obj",
".",
"__eventhandler__",
"=",
"{",
"}",
"return",
"eventhandler",
".",
"setdefault",
"(",
"self",
".",
"event",
",",
"[",
"]",
")"
] | (internal use) | [
"(",
"internal",
"use",
")"
] | python | train |
JdeRobot/base | src/drivers/MAVLinkServer/MAVProxy/pymavlink/mavextra.py | https://github.com/JdeRobot/base/blob/303b18992785b2fe802212f2d758a60873007f1f/src/drivers/MAVLinkServer/MAVProxy/pymavlink/mavextra.py#L520-L526 | def rate_of_turn(speed, bank):
'''return expected rate of turn in degrees/s for given speed in m/s and
bank angle in degrees'''
if abs(speed) < 2 or abs(bank) > 80:
return 0
ret = degrees(9.81*tan(radians(bank))/speed)
return ret | [
"def",
"rate_of_turn",
"(",
"speed",
",",
"bank",
")",
":",
"if",
"abs",
"(",
"speed",
")",
"<",
"2",
"or",
"abs",
"(",
"bank",
")",
">",
"80",
":",
"return",
"0",
"ret",
"=",
"degrees",
"(",
"9.81",
"*",
"tan",
"(",
"radians",
"(",
"bank",
")",
")",
"/",
"speed",
")",
"return",
"ret"
] | return expected rate of turn in degrees/s for given speed in m/s and
bank angle in degrees | [
"return",
"expected",
"rate",
"of",
"turn",
"in",
"degrees",
"/",
"s",
"for",
"given",
"speed",
"in",
"m",
"/",
"s",
"and",
"bank",
"angle",
"in",
"degrees"
] | python | train |
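rate_of_turn above is a single coordinated-turn formula, so a standalone restatement with one worked number may help. The function below simply repeats that formula outside of pymavlink; it is a sketch, not an import from the library.

from math import degrees, radians, tan

def rate_of_turn(speed, bank):
    # Same guard clauses as the record: too slow or too steep returns 0.
    if abs(speed) < 2 or abs(bank) > 80:
        return 0
    # omega = g * tan(bank) / speed, converted from rad/s to deg/s.
    return degrees(9.81 * tan(radians(bank)) / speed)

# 25 m/s at a 30 degree bank: 9.81 * tan(30 deg) / 25 ~= 0.227 rad/s, i.e. about 13 deg/s.
print(round(rate_of_turn(25, 30), 2))  # 12.98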
jedie/DragonPy | PyDC/PyDC/utils.py | https://github.com/jedie/DragonPy/blob/6659e5b5133aab26979a498ee7453495773a4f6c/PyDC/PyDC/utils.py#L654-L670 | def get_word(byte_iterator):
"""
return a uint16 value
>>> g=iter([0x1e, 0x12])
>>> v=get_word(g)
>>> v
7698
>>> hex(v)
'0x1e12'
"""
byte_values = list(itertools.islice(byte_iterator, 2))
try:
word = (byte_values[0] << 8) | byte_values[1]
    except TypeError as err:

raise TypeError("Can't build word from %s: %s" % (repr(byte_values), err))
return word | [
"def",
"get_word",
"(",
"byte_iterator",
")",
":",
"byte_values",
"=",
"list",
"(",
"itertools",
".",
"islice",
"(",
"byte_iterator",
",",
"2",
")",
")",
"try",
":",
"word",
"=",
"(",
"byte_values",
"[",
"0",
"]",
"<<",
"8",
")",
"|",
"byte_values",
"[",
"1",
"]",
"except",
"TypeError",
",",
"err",
":",
"raise",
"TypeError",
"(",
"\"Can't build word from %s: %s\"",
"%",
"(",
"repr",
"(",
"byte_values",
")",
",",
"err",
")",
")",
"return",
"word"
] | return a uint16 value
>>> g=iter([0x1e, 0x12])
>>> v=get_word(g)
>>> v
7698
>>> hex(v)
'0x1e12' | [
"return",
"a",
"uint16",
"value"
] | python | train |
tableau/document-api-python | tableaudocumentapi/connection.py | https://github.com/tableau/document-api-python/blob/9097a5b351622c5dd2653fa94624bc012316d8a4/tableaudocumentapi/connection.py#L71-L83 | def server(self, value):
"""
Set the connection's server property.
Args:
value: New server. String.
Returns:
Nothing.
"""
self._server = value
self._connectionXML.set('server', value) | [
"def",
"server",
"(",
"self",
",",
"value",
")",
":",
"self",
".",
"_server",
"=",
"value",
"self",
".",
"_connectionXML",
".",
"set",
"(",
"'server'",
",",
"value",
")"
] | Set the connection's server property.
Args:
value: New server. String.
Returns:
Nothing. | [
"Set",
"the",
"connection",
"s",
"server",
"property",
"."
] | python | train |
bpannier/simpletr64 | simpletr64/devicetr64.py | https://github.com/bpannier/simpletr64/blob/31081139f4e6c85084a56de1617df73927135466/simpletr64/devicetr64.py#L270-L297 | def getSCDPURL(self, serviceType, default=None):
"""Returns the SCDP (Service Control Protocol Document) URL for a given service type.
When the device definitions have been loaded with :meth:`~simpletr64.DeviceTR64.loadDeviceDefinitions` this
method returns for a given service type/namespace the associated URL to the SCDP. If the device definitions
have not been loaded a default value can be given which gets returned instead. The SCDP specifies all the
interaction functionality a device provides.
:param serviceType: the service type to look up for
:param default: the default return value in case the service type is not found and device definitions are not
loaded
:type default: str or None
:return: the URL/URI
:rtype: str or None
:raises ValueError: if the device did load device definitions and the service type is not known.
.. seealso::
:meth:`~simpletr64.DeviceTR64.loadDeviceDefinitions`
"""
if serviceType in self.__deviceServiceDefinitions.keys():
return self.__deviceServiceDefinitions[serviceType]["scpdURL"]
        # check if definitions have been loaded, then don't return the default
if self.__deviceXMLInitialized:
raise ValueError("Device do not support given serviceType: " + serviceType)
return default | [
"def",
"getSCDPURL",
"(",
"self",
",",
"serviceType",
",",
"default",
"=",
"None",
")",
":",
"if",
"serviceType",
"in",
"self",
".",
"__deviceServiceDefinitions",
".",
"keys",
"(",
")",
":",
"return",
"self",
".",
"__deviceServiceDefinitions",
"[",
"serviceType",
"]",
"[",
"\"scpdURL\"",
"]",
"# check if definitions have been loaded, then dont return the default",
"if",
"self",
".",
"__deviceXMLInitialized",
":",
"raise",
"ValueError",
"(",
"\"Device do not support given serviceType: \"",
"+",
"serviceType",
")",
"return",
"default"
] | Returns the SCDP (Service Control Protocol Document) URL for a given service type.
When the device definitions have been loaded with :meth:`~simpletr64.DeviceTR64.loadDeviceDefinitions` this
method returns for a given service type/namespace the associated URL to the SCDP. If the device definitions
have not been loaded a default value can be given which gets returned instead. The SCDP specifies all the
interaction functionality a device provides.
:param serviceType: the service type to look up for
:param default: the default return value in case the service type is not found and device definitions are not
loaded
:type default: str or None
:return: the URL/URI
:rtype: str or None
:raises ValueError: if the device did load device definitions and the service type is not known.
.. seealso::
:meth:`~simpletr64.DeviceTR64.loadDeviceDefinitions` | [
"Returns",
"the",
"SCDP",
"(",
"Service",
"Control",
"Protocol",
"Document",
")",
"URL",
"for",
"a",
"given",
"service",
"type",
"."
] | python | train |
opennode/waldur-core | waldur_core/core/monkeypatch.py | https://github.com/opennode/waldur-core/blob/d6c17a9592bb6c49c33567542eef8d099605a46a/waldur_core/core/monkeypatch.py#L14-L21 | def subfield_get(self, obj, type=None):
"""
Verbatim copy from:
https://github.com/django/django/blob/1.9.13/django/db/models/fields/subclassing.py#L38
"""
if obj is None:
return self
return obj.__dict__[self.field.name] | [
"def",
"subfield_get",
"(",
"self",
",",
"obj",
",",
"type",
"=",
"None",
")",
":",
"if",
"obj",
"is",
"None",
":",
"return",
"self",
"return",
"obj",
".",
"__dict__",
"[",
"self",
".",
"field",
".",
"name",
"]"
] | Verbatim copy from:
https://github.com/django/django/blob/1.9.13/django/db/models/fields/subclassing.py#L38 | [
"Verbatim",
"copy",
"from",
":",
"https",
":",
"//",
"github",
".",
"com",
"/",
"django",
"/",
"django",
"/",
"blob",
"/",
"1",
".",
"9",
".",
"13",
"/",
"django",
"/",
"db",
"/",
"models",
"/",
"fields",
"/",
"subclassing",
".",
"py#L38"
] | python | train |
molmod/molmod | molmod/ic.py | https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/ic.py#L747-L751 | def _opbend_angle_low(a, b, c, deriv=0):
"""Similar to opbend_angle, but with relative vectors"""
result = _opbend_cos_low(a, b, c, deriv)
sign = np.sign(np.linalg.det([a, b, c]))
return _cos_to_angle(result, deriv, sign) | [
"def",
"_opbend_angle_low",
"(",
"a",
",",
"b",
",",
"c",
",",
"deriv",
"=",
"0",
")",
":",
"result",
"=",
"_opbend_cos_low",
"(",
"a",
",",
"b",
",",
"c",
",",
"deriv",
")",
"sign",
"=",
"np",
".",
"sign",
"(",
"np",
".",
"linalg",
".",
"det",
"(",
"[",
"a",
",",
"b",
",",
"c",
"]",
")",
")",
"return",
"_cos_to_angle",
"(",
"result",
",",
"deriv",
",",
"sign",
")"
] | Similar to opbend_angle, but with relative vectors | [
"Similar",
"to",
"opbend_angle",
"but",
"with",
"relative",
"vectors"
] | python | train |
aws/sagemaker-python-sdk | src/sagemaker/estimator.py | https://github.com/aws/sagemaker-python-sdk/blob/a9e724c7d3f5572b68c3903548c792a59d99799a/src/sagemaker/estimator.py#L523-L531 | def get_vpc_config(self, vpc_config_override=vpc_utils.VPC_CONFIG_DEFAULT):
"""
Returns VpcConfig dict either from this Estimator's subnets and security groups,
or else validate and return an optional override value.
"""
if vpc_config_override is vpc_utils.VPC_CONFIG_DEFAULT:
return vpc_utils.to_dict(self.subnets, self.security_group_ids)
else:
return vpc_utils.sanitize(vpc_config_override) | [
"def",
"get_vpc_config",
"(",
"self",
",",
"vpc_config_override",
"=",
"vpc_utils",
".",
"VPC_CONFIG_DEFAULT",
")",
":",
"if",
"vpc_config_override",
"is",
"vpc_utils",
".",
"VPC_CONFIG_DEFAULT",
":",
"return",
"vpc_utils",
".",
"to_dict",
"(",
"self",
".",
"subnets",
",",
"self",
".",
"security_group_ids",
")",
"else",
":",
"return",
"vpc_utils",
".",
"sanitize",
"(",
"vpc_config_override",
")"
] | Returns VpcConfig dict either from this Estimator's subnets and security groups,
or else validate and return an optional override value. | [
"Returns",
"VpcConfig",
"dict",
"either",
"from",
"this",
"Estimator",
"s",
"subnets",
"and",
"security",
"groups",
"or",
"else",
"validate",
"and",
"return",
"an",
"optional",
"override",
"value",
"."
] | python | train |
gem/oq-engine | openquake/hazardlib/gsim/rietbrock_2013.py | https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hazardlib/gsim/rietbrock_2013.py#L122-L139 | def _get_distance_segment_coefficients(self, rval):
"""
Returns the coefficients describing the distance attenuation shape
for three different distance bins, equations 12a - 12c
"""
# Get distance segment ends
nsites = len(rval)
# Equation 12a
f_0 = np.log10(self.CONSTS["r0"] / rval)
f_0[rval > self.CONSTS["r0"]] = 0.0
# Equation 12b
f_1 = np.log10(rval)
f_1[rval > self.CONSTS["r1"]] = np.log10(self.CONSTS["r1"])
# Equation 12c
f_2 = np.log10(rval / self.CONSTS["r2"])
f_2[rval <= self.CONSTS["r2"]] = 0.0
return f_0, f_1, f_2 | [
"def",
"_get_distance_segment_coefficients",
"(",
"self",
",",
"rval",
")",
":",
"# Get distance segment ends",
"nsites",
"=",
"len",
"(",
"rval",
")",
"# Equation 12a",
"f_0",
"=",
"np",
".",
"log10",
"(",
"self",
".",
"CONSTS",
"[",
"\"r0\"",
"]",
"/",
"rval",
")",
"f_0",
"[",
"rval",
">",
"self",
".",
"CONSTS",
"[",
"\"r0\"",
"]",
"]",
"=",
"0.0",
"# Equation 12b",
"f_1",
"=",
"np",
".",
"log10",
"(",
"rval",
")",
"f_1",
"[",
"rval",
">",
"self",
".",
"CONSTS",
"[",
"\"r1\"",
"]",
"]",
"=",
"np",
".",
"log10",
"(",
"self",
".",
"CONSTS",
"[",
"\"r1\"",
"]",
")",
"# Equation 12c",
"f_2",
"=",
"np",
".",
"log10",
"(",
"rval",
"/",
"self",
".",
"CONSTS",
"[",
"\"r2\"",
"]",
")",
"f_2",
"[",
"rval",
"<=",
"self",
".",
"CONSTS",
"[",
"\"r2\"",
"]",
"]",
"=",
"0.0",
"return",
"f_0",
",",
"f_1",
",",
"f_2"
] | Returns the coefficients describing the distance attenuation shape
for three different distance bins, equations 12a - 12c | [
"Returns",
"the",
"coefficients",
"describing",
"the",
"distance",
"attenuation",
"shape",
"for",
"three",
"different",
"distance",
"bins",
"equations",
"12a",
"-",
"12c"
] | python | train |
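The coefficient function above encodes the three distance bins of equations 12a-12c. The numpy sketch below reproduces that shape on its own; the r0/r1/r2 boundaries are placeholder values for illustration, not the GMPE's actual CONSTS.

import numpy as np

# Placeholder segment boundaries; the published model defines these in its CONSTS dict.
R0, R1, R2 = 10.0, 50.0, 100.0

def distance_segment_coefficients(rval):
    rval = np.asarray(rval, dtype=float)
    # Equation 12a: log10(r0 / r), zeroed beyond r0
    f_0 = np.log10(R0 / rval)
    f_0[rval > R0] = 0.0
    # Equation 12b: log10(r), capped at log10(r1)
    f_1 = np.log10(rval)
    f_1[rval > R1] = np.log10(R1)
    # Equation 12c: log10(r / r2), zeroed up to r2
    f_2 = np.log10(rval / R2)
    f_2[rval <= R2] = 0.0
    return f_0, f_1, f_2

print(distance_segment_coefficients([5.0, 30.0, 200.0]))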
FactoryBoy/factory_boy | factory/declarations.py | https://github.com/FactoryBoy/factory_boy/blob/edaa7c7f5a14065b229927903bd7989cc93cd069/factory/declarations.py#L100-L126 | def deepgetattr(obj, name, default=_UNSPECIFIED):
"""Try to retrieve the given attribute of an object, digging on '.'.
This is an extended getattr, digging deeper if '.' is found.
Args:
obj (object): the object of which an attribute should be read
name (str): the name of an attribute to look up.
default (object): the default value to use if the attribute wasn't found
Returns:
the attribute pointed to by 'name', splitting on '.'.
Raises:
AttributeError: if obj has no 'name' attribute.
"""
try:
if '.' in name:
attr, subname = name.split('.', 1)
return deepgetattr(getattr(obj, attr), subname, default)
else:
return getattr(obj, name)
except AttributeError:
if default is _UNSPECIFIED:
raise
else:
return default | [
"def",
"deepgetattr",
"(",
"obj",
",",
"name",
",",
"default",
"=",
"_UNSPECIFIED",
")",
":",
"try",
":",
"if",
"'.'",
"in",
"name",
":",
"attr",
",",
"subname",
"=",
"name",
".",
"split",
"(",
"'.'",
",",
"1",
")",
"return",
"deepgetattr",
"(",
"getattr",
"(",
"obj",
",",
"attr",
")",
",",
"subname",
",",
"default",
")",
"else",
":",
"return",
"getattr",
"(",
"obj",
",",
"name",
")",
"except",
"AttributeError",
":",
"if",
"default",
"is",
"_UNSPECIFIED",
":",
"raise",
"else",
":",
"return",
"default"
] | Try to retrieve the given attribute of an object, digging on '.'.
This is an extended getattr, digging deeper if '.' is found.
Args:
obj (object): the object of which an attribute should be read
name (str): the name of an attribute to look up.
default (object): the default value to use if the attribute wasn't found
Returns:
the attribute pointed to by 'name', splitting on '.'.
Raises:
AttributeError: if obj has no 'name' attribute. | [
"Try",
"to",
"retrieve",
"the",
"given",
"attribute",
"of",
"an",
"object",
"digging",
"on",
".",
"."
] | python | train |
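deepgetattr above is clearest with a usage sketch. The helper below is a simplified restatement (it always falls back to a default instead of re-raising via the _UNSPECIFIED sentinel), shown on nested SimpleNamespace objects.

from types import SimpleNamespace

def deep_getattr(obj, name, default=None):
    # Walk the dotted path one attribute at a time.
    for part in name.split('.'):
        try:
            obj = getattr(obj, part)
        except AttributeError:
            return default
    return obj

author = SimpleNamespace(profile=SimpleNamespace(city="Lyon"))
print(deep_getattr(author, "profile.city"))             # Lyon
print(deep_getattr(author, "profile.country", "n/a"))   # n/a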
pymoca/pymoca | src/pymoca/backends/xml/sim_scipy.py | https://github.com/pymoca/pymoca/blob/14b5eb7425e96689de6cc5c10f400895d586a978/src/pymoca/backends/xml/sim_scipy.py#L16-L188 | def sim(model: HybridOde, options: Dict = None, # noqa: too-complex
user_callback=None) -> Dict[str, np.array]:
"""
Simulates a Dae model.
:model: The model to simulate
:options: See opt dict below
:user_callback: A routine to call after each integration step,
f(t, x, y, m, p, c) -> ret (ret < 0 means abort)
"""
if model.f_x_rhs.shape[0] < 1:
raise ValueError("there are no ODE equations to simulate, "
"check that the model is explicit")
ic = {}
for f in ['x', 'y', 'm', 'p']:
ic[f] = []
for x in ca.vertsplit(getattr(model, f)):
start = model.prop[x.name()]['start']
value = model.prop[x.name()]['value']
if start is not None:
ic[f].append(ca.reshape(start, x.numel(), 1))
elif value is not None:
ic[f].append(ca.reshape(value, x.numel(), 1))
else:
ic[f].append(ca.DM.zeros(x.numel(), 1))
Warning("using default start value for", x.name())
ic[f] = np.array([ic[f]], dtype=float).T
# set options
opt = {
'x0': ic['x'],
'p': ic['p'],
't0': 0,
'tf': 1,
'dt': None,
'integrator': 'dopri5',
'atol': 1e-6,
'rtol': 1e-6,
'max_step': None,
'record_event_times': True,
'verbose': False,
}
if options is not None:
for k in options.keys():
if k in opt.keys():
opt[k] = options[k]
else:
raise ValueError("unknown option {:s}".format(k))
if opt['dt'] is None:
opt['dt'] = opt['tf']/100
if opt['max_step'] is None:
opt['max_step'] = opt['dt']/2
# create functions
f_y = model.create_function_f_y()
f_c = model.create_function_f_c()
f_m = model.create_function_f_m()
f_x_rhs = model.create_function_f_x_rhs()
f_J = model.create_function_f_J()
f_i = model.create_function_f_i()
# initialize sim loop
t0 = opt['t0']
tf = opt['tf']
x = opt['x0']
ng = np.zeros(model.ng.shape[0])
nu = np.zeros(model.nu.shape[0])
m = ic['m']
p = opt['p']
y0 = ic['y']
pre_c = np.array(f_c(t0, x, y0, m, p, ng, nu))
c = pre_c
y = f_y(t0, x, m, p, c, ng, nu)
dt = opt['dt']
t_vect = np.arange(t0, tf, dt)
n = len(t_vect)
data = {
't': [],
'x': [],
'm': [],
'y': [],
'c': [],
}
# create integrator
integrator = scipy.integrate.ode(f_x_rhs, f_J)
integrator.set_integrator(
opt['integrator'],
first_step=opt['max_step'],
atol=opt['atol'],
rtol=opt['rtol'],
max_step=opt['max_step'],
)
# try to catch events with sol out, (root finding)
def sol_out(t, x):
c = np.array(f_c(t, x, y, m, p, ng, nu))
if np.any(c != pre_c):
# print('event', t)
return -1
return 0
if opt['integrator'] in ['dopri5', 'dopri853']:
integrator.set_solout(sol_out)
# run sim loop
i = 0
dt_f_i = 0
dt_integrate = 0
integrator.set_initial_value(opt['x0'], t0)
while i < n:
t = integrator.t
# resample noise
ng = np.random.randn(model.ng.shape[0])
nu = np.random.randn(model.nu.shape[0])
# call reinit
start = time.time()
x = f_i(t, x, y, m, p, c, pre_c, ng, nu)
dt_f_i += (time.time() - start)
# setup next continuous integration step
integrator.set_initial_value(x, t)
integrator.set_f_params(y, m, p, c, ng, nu)
integrator.set_jac_params(y, m, p, c, ng, nu)
# integrate
t_goal = t0 + i*dt
start = time.time()
integrator.integrate(t_goal)
dt_integrate += (time.time() - start)
x = integrator.y
# compute new conditions
pre_c = c
c = np.array(f_c(t, x, y, m, p, ng, nu))
# compute output
y = f_y(t, x, m, p, c, ng, nu)
# compute discrete states
m = f_m(t, x, y, m, p, c, pre_c, ng, nu)
# store data
if opt['record_event_times'] or (integrator.t - t_goal == 0):
data['t'].append(t)
data['x'].append(ca.vertsplit(x))
data['y'].append(ca.vertsplit(y))
data['m'].append(ca.vertsplit(m))
data['c'].append(ca.vertsplit(c))
if user_callback is not None:
user_callback(t, x, y, m, p, c)
# increment time goal if reached
if integrator.t - t_goal == 0:
# update discrete states
# TODO: make this use sampling
i += 1
for k in data.keys():
data[k] = np.array(data[k])
data['labels'] = {}
for field in ['x', 'y', 'c', 'm']:
data['labels'][field] = [x.name() for x in ca.vertsplit(getattr(model, field))]
if opt['verbose']:
print('dt_f_i\t\t\t:', dt_f_i)
print('dt_integrate\t:', dt_integrate)
return data | [
"def",
"sim",
"(",
"model",
":",
"HybridOde",
",",
"options",
":",
"Dict",
"=",
"None",
",",
"# noqa: too-complex",
"user_callback",
"=",
"None",
")",
"->",
"Dict",
"[",
"str",
",",
"np",
".",
"array",
"]",
":",
"if",
"model",
".",
"f_x_rhs",
".",
"shape",
"[",
"0",
"]",
"<",
"1",
":",
"raise",
"ValueError",
"(",
"\"there are no ODE equations to simulate, \"",
"\"check that the model is explicit\"",
")",
"ic",
"=",
"{",
"}",
"for",
"f",
"in",
"[",
"'x'",
",",
"'y'",
",",
"'m'",
",",
"'p'",
"]",
":",
"ic",
"[",
"f",
"]",
"=",
"[",
"]",
"for",
"x",
"in",
"ca",
".",
"vertsplit",
"(",
"getattr",
"(",
"model",
",",
"f",
")",
")",
":",
"start",
"=",
"model",
".",
"prop",
"[",
"x",
".",
"name",
"(",
")",
"]",
"[",
"'start'",
"]",
"value",
"=",
"model",
".",
"prop",
"[",
"x",
".",
"name",
"(",
")",
"]",
"[",
"'value'",
"]",
"if",
"start",
"is",
"not",
"None",
":",
"ic",
"[",
"f",
"]",
".",
"append",
"(",
"ca",
".",
"reshape",
"(",
"start",
",",
"x",
".",
"numel",
"(",
")",
",",
"1",
")",
")",
"elif",
"value",
"is",
"not",
"None",
":",
"ic",
"[",
"f",
"]",
".",
"append",
"(",
"ca",
".",
"reshape",
"(",
"value",
",",
"x",
".",
"numel",
"(",
")",
",",
"1",
")",
")",
"else",
":",
"ic",
"[",
"f",
"]",
".",
"append",
"(",
"ca",
".",
"DM",
".",
"zeros",
"(",
"x",
".",
"numel",
"(",
")",
",",
"1",
")",
")",
"Warning",
"(",
"\"using default start value for\"",
",",
"x",
".",
"name",
"(",
")",
")",
"ic",
"[",
"f",
"]",
"=",
"np",
".",
"array",
"(",
"[",
"ic",
"[",
"f",
"]",
"]",
",",
"dtype",
"=",
"float",
")",
".",
"T",
"# set options",
"opt",
"=",
"{",
"'x0'",
":",
"ic",
"[",
"'x'",
"]",
",",
"'p'",
":",
"ic",
"[",
"'p'",
"]",
",",
"'t0'",
":",
"0",
",",
"'tf'",
":",
"1",
",",
"'dt'",
":",
"None",
",",
"'integrator'",
":",
"'dopri5'",
",",
"'atol'",
":",
"1e-6",
",",
"'rtol'",
":",
"1e-6",
",",
"'max_step'",
":",
"None",
",",
"'record_event_times'",
":",
"True",
",",
"'verbose'",
":",
"False",
",",
"}",
"if",
"options",
"is",
"not",
"None",
":",
"for",
"k",
"in",
"options",
".",
"keys",
"(",
")",
":",
"if",
"k",
"in",
"opt",
".",
"keys",
"(",
")",
":",
"opt",
"[",
"k",
"]",
"=",
"options",
"[",
"k",
"]",
"else",
":",
"raise",
"ValueError",
"(",
"\"unknown option {:s}\"",
".",
"format",
"(",
"k",
")",
")",
"if",
"opt",
"[",
"'dt'",
"]",
"is",
"None",
":",
"opt",
"[",
"'dt'",
"]",
"=",
"opt",
"[",
"'tf'",
"]",
"/",
"100",
"if",
"opt",
"[",
"'max_step'",
"]",
"is",
"None",
":",
"opt",
"[",
"'max_step'",
"]",
"=",
"opt",
"[",
"'dt'",
"]",
"/",
"2",
"# create functions",
"f_y",
"=",
"model",
".",
"create_function_f_y",
"(",
")",
"f_c",
"=",
"model",
".",
"create_function_f_c",
"(",
")",
"f_m",
"=",
"model",
".",
"create_function_f_m",
"(",
")",
"f_x_rhs",
"=",
"model",
".",
"create_function_f_x_rhs",
"(",
")",
"f_J",
"=",
"model",
".",
"create_function_f_J",
"(",
")",
"f_i",
"=",
"model",
".",
"create_function_f_i",
"(",
")",
"# initialize sim loop",
"t0",
"=",
"opt",
"[",
"'t0'",
"]",
"tf",
"=",
"opt",
"[",
"'tf'",
"]",
"x",
"=",
"opt",
"[",
"'x0'",
"]",
"ng",
"=",
"np",
".",
"zeros",
"(",
"model",
".",
"ng",
".",
"shape",
"[",
"0",
"]",
")",
"nu",
"=",
"np",
".",
"zeros",
"(",
"model",
".",
"nu",
".",
"shape",
"[",
"0",
"]",
")",
"m",
"=",
"ic",
"[",
"'m'",
"]",
"p",
"=",
"opt",
"[",
"'p'",
"]",
"y0",
"=",
"ic",
"[",
"'y'",
"]",
"pre_c",
"=",
"np",
".",
"array",
"(",
"f_c",
"(",
"t0",
",",
"x",
",",
"y0",
",",
"m",
",",
"p",
",",
"ng",
",",
"nu",
")",
")",
"c",
"=",
"pre_c",
"y",
"=",
"f_y",
"(",
"t0",
",",
"x",
",",
"m",
",",
"p",
",",
"c",
",",
"ng",
",",
"nu",
")",
"dt",
"=",
"opt",
"[",
"'dt'",
"]",
"t_vect",
"=",
"np",
".",
"arange",
"(",
"t0",
",",
"tf",
",",
"dt",
")",
"n",
"=",
"len",
"(",
"t_vect",
")",
"data",
"=",
"{",
"'t'",
":",
"[",
"]",
",",
"'x'",
":",
"[",
"]",
",",
"'m'",
":",
"[",
"]",
",",
"'y'",
":",
"[",
"]",
",",
"'c'",
":",
"[",
"]",
",",
"}",
"# create integrator",
"integrator",
"=",
"scipy",
".",
"integrate",
".",
"ode",
"(",
"f_x_rhs",
",",
"f_J",
")",
"integrator",
".",
"set_integrator",
"(",
"opt",
"[",
"'integrator'",
"]",
",",
"first_step",
"=",
"opt",
"[",
"'max_step'",
"]",
",",
"atol",
"=",
"opt",
"[",
"'atol'",
"]",
",",
"rtol",
"=",
"opt",
"[",
"'rtol'",
"]",
",",
"max_step",
"=",
"opt",
"[",
"'max_step'",
"]",
",",
")",
"# try to catch events with sol out, (root finding)",
"def",
"sol_out",
"(",
"t",
",",
"x",
")",
":",
"c",
"=",
"np",
".",
"array",
"(",
"f_c",
"(",
"t",
",",
"x",
",",
"y",
",",
"m",
",",
"p",
",",
"ng",
",",
"nu",
")",
")",
"if",
"np",
".",
"any",
"(",
"c",
"!=",
"pre_c",
")",
":",
"# print('event', t)",
"return",
"-",
"1",
"return",
"0",
"if",
"opt",
"[",
"'integrator'",
"]",
"in",
"[",
"'dopri5'",
",",
"'dopri853'",
"]",
":",
"integrator",
".",
"set_solout",
"(",
"sol_out",
")",
"# run sim loop",
"i",
"=",
"0",
"dt_f_i",
"=",
"0",
"dt_integrate",
"=",
"0",
"integrator",
".",
"set_initial_value",
"(",
"opt",
"[",
"'x0'",
"]",
",",
"t0",
")",
"while",
"i",
"<",
"n",
":",
"t",
"=",
"integrator",
".",
"t",
"# resample noise",
"ng",
"=",
"np",
".",
"random",
".",
"randn",
"(",
"model",
".",
"ng",
".",
"shape",
"[",
"0",
"]",
")",
"nu",
"=",
"np",
".",
"random",
".",
"randn",
"(",
"model",
".",
"nu",
".",
"shape",
"[",
"0",
"]",
")",
"# call reinit",
"start",
"=",
"time",
".",
"time",
"(",
")",
"x",
"=",
"f_i",
"(",
"t",
",",
"x",
",",
"y",
",",
"m",
",",
"p",
",",
"c",
",",
"pre_c",
",",
"ng",
",",
"nu",
")",
"dt_f_i",
"+=",
"(",
"time",
".",
"time",
"(",
")",
"-",
"start",
")",
"# setup next continuous integration step",
"integrator",
".",
"set_initial_value",
"(",
"x",
",",
"t",
")",
"integrator",
".",
"set_f_params",
"(",
"y",
",",
"m",
",",
"p",
",",
"c",
",",
"ng",
",",
"nu",
")",
"integrator",
".",
"set_jac_params",
"(",
"y",
",",
"m",
",",
"p",
",",
"c",
",",
"ng",
",",
"nu",
")",
"# integrate",
"t_goal",
"=",
"t0",
"+",
"i",
"*",
"dt",
"start",
"=",
"time",
".",
"time",
"(",
")",
"integrator",
".",
"integrate",
"(",
"t_goal",
")",
"dt_integrate",
"+=",
"(",
"time",
".",
"time",
"(",
")",
"-",
"start",
")",
"x",
"=",
"integrator",
".",
"y",
"# compute new conditions",
"pre_c",
"=",
"c",
"c",
"=",
"np",
".",
"array",
"(",
"f_c",
"(",
"t",
",",
"x",
",",
"y",
",",
"m",
",",
"p",
",",
"ng",
",",
"nu",
")",
")",
"# compute output",
"y",
"=",
"f_y",
"(",
"t",
",",
"x",
",",
"m",
",",
"p",
",",
"c",
",",
"ng",
",",
"nu",
")",
"# compute discrete states",
"m",
"=",
"f_m",
"(",
"t",
",",
"x",
",",
"y",
",",
"m",
",",
"p",
",",
"c",
",",
"pre_c",
",",
"ng",
",",
"nu",
")",
"# store data",
"if",
"opt",
"[",
"'record_event_times'",
"]",
"or",
"(",
"integrator",
".",
"t",
"-",
"t_goal",
"==",
"0",
")",
":",
"data",
"[",
"'t'",
"]",
".",
"append",
"(",
"t",
")",
"data",
"[",
"'x'",
"]",
".",
"append",
"(",
"ca",
".",
"vertsplit",
"(",
"x",
")",
")",
"data",
"[",
"'y'",
"]",
".",
"append",
"(",
"ca",
".",
"vertsplit",
"(",
"y",
")",
")",
"data",
"[",
"'m'",
"]",
".",
"append",
"(",
"ca",
".",
"vertsplit",
"(",
"m",
")",
")",
"data",
"[",
"'c'",
"]",
".",
"append",
"(",
"ca",
".",
"vertsplit",
"(",
"c",
")",
")",
"if",
"user_callback",
"is",
"not",
"None",
":",
"user_callback",
"(",
"t",
",",
"x",
",",
"y",
",",
"m",
",",
"p",
",",
"c",
")",
"# increment time goal if reached",
"if",
"integrator",
".",
"t",
"-",
"t_goal",
"==",
"0",
":",
"# update discrete states",
"# TODO: make this use sampling",
"i",
"+=",
"1",
"for",
"k",
"in",
"data",
".",
"keys",
"(",
")",
":",
"data",
"[",
"k",
"]",
"=",
"np",
".",
"array",
"(",
"data",
"[",
"k",
"]",
")",
"data",
"[",
"'labels'",
"]",
"=",
"{",
"}",
"for",
"field",
"in",
"[",
"'x'",
",",
"'y'",
",",
"'c'",
",",
"'m'",
"]",
":",
"data",
"[",
"'labels'",
"]",
"[",
"field",
"]",
"=",
"[",
"x",
".",
"name",
"(",
")",
"for",
"x",
"in",
"ca",
".",
"vertsplit",
"(",
"getattr",
"(",
"model",
",",
"field",
")",
")",
"]",
"if",
"opt",
"[",
"'verbose'",
"]",
":",
"print",
"(",
"'dt_f_i\\t\\t\\t:'",
",",
"dt_f_i",
")",
"print",
"(",
"'dt_integrate\\t:'",
",",
"dt_integrate",
")",
"return",
"data"
] | Simulates a Dae model.
:model: The model to simulate
:options: See opt dict below
:user_callback: A routine to call after each integration step,
f(t, x, y, m, p, c) -> ret (ret < 0 means abort) | [
"Simulates",
"a",
"Dae",
"model",
"."
] | python | train |
Opentrons/opentrons | api/src/opentrons/deck_calibration/endpoints.py | https://github.com/Opentrons/opentrons/blob/a7c15cc2636ecb64ab56c7edc1d8a57163aaeadf/api/src/opentrons/deck_calibration/endpoints.py#L470-L504 | async def save_transform(data):
"""
    Calculate the transformation matrix that calibrates the gantry to the deck
:param data: Information obtained from a POST request.
The content type is application/json.
The correct packet form should be as follows:
{
'token': UUID token from current session start
'command': 'save transform'
}
"""
if any([v is None for v in session.points.values()]):
message = "Not all points have been saved"
status = 400
else:
# expected values based on mechanical drawings of the robot
expected_pos = expected_points()
expected = [
expected_pos[p] for p in expected_pos.keys()]
# measured data
actual = [session.points[p] for p in sorted(session.points.keys())]
        # Generate a 2 dimensional transform matrix from the two matrices
flat_matrix = solve(expected, actual)
# Add the z component to form the 3 dimensional transform
calibration_matrix = add_z(flat_matrix, session.z_value)
session.adapter.update_config(gantry_calibration=list(
map(lambda i: list(i), calibration_matrix)))
robot_configs.save_deck_calibration(session.adapter.config)
robot_configs.backup_configuration(session.adapter.config)
message = "Config file saved and backed up"
status = 200
return web.json_response({'message': message}, status=status) | [
"async",
"def",
"save_transform",
"(",
"data",
")",
":",
"if",
"any",
"(",
"[",
"v",
"is",
"None",
"for",
"v",
"in",
"session",
".",
"points",
".",
"values",
"(",
")",
"]",
")",
":",
"message",
"=",
"\"Not all points have been saved\"",
"status",
"=",
"400",
"else",
":",
"# expected values based on mechanical drawings of the robot",
"expected_pos",
"=",
"expected_points",
"(",
")",
"expected",
"=",
"[",
"expected_pos",
"[",
"p",
"]",
"for",
"p",
"in",
"expected_pos",
".",
"keys",
"(",
")",
"]",
"# measured data",
"actual",
"=",
"[",
"session",
".",
"points",
"[",
"p",
"]",
"for",
"p",
"in",
"sorted",
"(",
"session",
".",
"points",
".",
"keys",
"(",
")",
")",
"]",
"# Generate a 2 dimensional transform matrix from the two matricies",
"flat_matrix",
"=",
"solve",
"(",
"expected",
",",
"actual",
")",
"# Add the z component to form the 3 dimensional transform",
"calibration_matrix",
"=",
"add_z",
"(",
"flat_matrix",
",",
"session",
".",
"z_value",
")",
"session",
".",
"adapter",
".",
"update_config",
"(",
"gantry_calibration",
"=",
"list",
"(",
"map",
"(",
"lambda",
"i",
":",
"list",
"(",
"i",
")",
",",
"calibration_matrix",
")",
")",
")",
"robot_configs",
".",
"save_deck_calibration",
"(",
"session",
".",
"adapter",
".",
"config",
")",
"robot_configs",
".",
"backup_configuration",
"(",
"session",
".",
"adapter",
".",
"config",
")",
"message",
"=",
"\"Config file saved and backed up\"",
"status",
"=",
"200",
"return",
"web",
".",
"json_response",
"(",
"{",
"'message'",
":",
"message",
"}",
",",
"status",
"=",
"status",
")"
  ] | Calculate the transformation matrix that calibrates the gantry to the deck
:param data: Information obtained from a POST request.
The content type is application/json.
The correct packet form should be as follows:
{
'token': UUID token from current session start
'command': 'save transform'
} | [
"Calculate",
"the",
"transormation",
"matrix",
"that",
"calibrates",
"the",
"gantry",
"to",
"the",
"deck",
":",
"param",
"data",
":",
"Information",
"obtained",
"from",
"a",
"POST",
"request",
".",
"The",
"content",
"type",
"is",
"application",
"/",
"json",
".",
"The",
"correct",
"packet",
"form",
"should",
"be",
"as",
"follows",
":",
"{",
"token",
":",
"UUID",
"token",
"from",
"current",
"session",
"start",
"command",
":",
"save",
"transform",
"}"
] | python | train |
googlefonts/fontbakery | Lib/fontbakery/profiles/googlefonts.py | https://github.com/googlefonts/fontbakery/blob/b355aea2e619a4477769e060d24c32448aa65399/Lib/fontbakery/profiles/googlefonts.py#L1049-L1075 | def com_google_fonts_check_has_ttfautohint_params(ttFont):
""" Font has ttfautohint params? """
from fontbakery.utils import get_name_entry_strings
def ttfautohint_version(value):
# example string:
#'Version 1.000; ttfautohint (v0.93) -l 8 -r 50 -G 200 -x 14 -w "G"
import re
results = re.search(r'ttfautohint \(v(.*)\) ([^;]*)', value)
if results:
return results.group(1), results.group(2)
version_strings = get_name_entry_strings(ttFont, NameID.VERSION_STRING)
failed = True
for vstring in version_strings:
values = ttfautohint_version(vstring)
if values:
ttfa_version, params = values
if params:
yield PASS, f"Font has ttfautohint params ({params})"
failed = False
else:
yield SKIP, "Font appears to our heuristic as not hinted using ttfautohint."
failed = False
if failed:
yield FAIL, "Font is lacking ttfautohint params on its version strings on the name table." | [
"def",
"com_google_fonts_check_has_ttfautohint_params",
"(",
"ttFont",
")",
":",
"from",
"fontbakery",
".",
"utils",
"import",
"get_name_entry_strings",
"def",
"ttfautohint_version",
"(",
"value",
")",
":",
"# example string:",
"#'Version 1.000; ttfautohint (v0.93) -l 8 -r 50 -G 200 -x 14 -w \"G\"",
"import",
"re",
"results",
"=",
"re",
".",
"search",
"(",
"r'ttfautohint \\(v(.*)\\) ([^;]*)'",
",",
"value",
")",
"if",
"results",
":",
"return",
"results",
".",
"group",
"(",
"1",
")",
",",
"results",
".",
"group",
"(",
"2",
")",
"version_strings",
"=",
"get_name_entry_strings",
"(",
"ttFont",
",",
"NameID",
".",
"VERSION_STRING",
")",
"failed",
"=",
"True",
"for",
"vstring",
"in",
"version_strings",
":",
"values",
"=",
"ttfautohint_version",
"(",
"vstring",
")",
"if",
"values",
":",
"ttfa_version",
",",
"params",
"=",
"values",
"if",
"params",
":",
"yield",
"PASS",
",",
"f\"Font has ttfautohint params ({params})\"",
"failed",
"=",
"False",
"else",
":",
"yield",
"SKIP",
",",
"\"Font appears to our heuristic as not hinted using ttfautohint.\"",
"failed",
"=",
"False",
"if",
"failed",
":",
"yield",
"FAIL",
",",
"\"Font is lacking ttfautohint params on its version strings on the name table.\""
] | Font has ttfautohint params? | [
"Font",
"has",
"ttfautohint",
"params?"
] | python | train |
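The useful bit of the check above is the version-string regex. This standalone snippet runs the same pattern on the sample string quoted in the record's own comment, so the captured groups are visible; the sample string itself is only illustrative.

import re

sample = 'Version 1.000; ttfautohint (v0.93) -l 8 -r 50 -G 200 -x 14 -w "G"'

# Same pattern as the record: capture the ttfautohint version and its parameters.
match = re.search(r'ttfautohint \(v(.*)\) ([^;]*)', sample)
if match:
    print(match.group(1))  # 0.93
    print(match.group(2))  # -l 8 -r 50 -G 200 -x 14 -w "G"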
pyrapt/rapt | rapt/treebrd/grammars/core_grammar.py | https://github.com/pyrapt/rapt/blob/0193a07aafff83a887fdc9e5e0f25eafa5b1b205/rapt/treebrd/grammars/core_grammar.py#L143-L148 | def statement(self):
"""
A terminated relational algebra statement.
"""
return (self.assignment ^ self.expression) + Suppress(
self.syntax.terminator) | [
"def",
"statement",
"(",
"self",
")",
":",
"return",
"(",
"self",
".",
"assignment",
"^",
"self",
".",
"expression",
")",
"+",
"Suppress",
"(",
"self",
".",
"syntax",
".",
"terminator",
")"
] | A terminated relational algebra statement. | [
"A",
"terminated",
"relational",
"algebra",
"statement",
"."
] | python | train |
aacanakin/glim | glim/core.py | https://github.com/aacanakin/glim/blob/71a20ac149a1292c0d6c1dc7414985ea51854f7a/glim/core.py#L56-L68 | def set(self, key, value):
"""
Function deeply sets the key with "." notation
Args
----
key (string): A key with the "." notation.
value (unknown type): A dict or a primitive type.
"""
target = self.registrar
for element in key.split('.')[:-1]:
target = target.setdefault(element, dict())
target[key.split(".")[-1]] = value | [
"def",
"set",
"(",
"self",
",",
"key",
",",
"value",
")",
":",
"target",
"=",
"self",
".",
"registrar",
"for",
"element",
"in",
"key",
".",
"split",
"(",
"'.'",
")",
"[",
":",
"-",
"1",
"]",
":",
"target",
"=",
"target",
".",
"setdefault",
"(",
"element",
",",
"dict",
"(",
")",
")",
"target",
"[",
"key",
".",
"split",
"(",
"\".\"",
")",
"[",
"-",
"1",
"]",
"]",
"=",
"value"
] | Function deeply sets the key with "." notation
Args
----
key (string): A key with the "." notation.
value (unknown type): A dict or a primitive type. | [
"Function",
"deeply",
"sets",
"the",
"key",
"with",
".",
"notation"
] | python | train |
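Registry.set above is a generic deep assignment on nested dicts. The standalone version below shows the "." notation behaviour without the surrounding Registry class; the registrar name is kept only to echo the record.

def deep_set(registrar, key, value):
    # Create intermediate dicts as needed, then set the final key.
    target = registrar
    for element in key.split('.')[:-1]:
        target = target.setdefault(element, dict())
    target[key.split('.')[-1]] = value

config = {}
deep_set(config, 'db.mysql.host', 'localhost')
deep_set(config, 'db.mysql.port', 3306)
print(config)  # {'db': {'mysql': {'host': 'localhost', 'port': 3306}}}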
bimbar/pykwb | pykwb/kwb.py | https://github.com/bimbar/pykwb/blob/3f607c064cc53b8310d22d42506ce817a5b735fe/pykwb/kwb.py#L412-L416 | def run_thread(self):
"""Run the main thread."""
self._run_thread = True
self._thread.setDaemon(True)
self._thread.start() | [
"def",
"run_thread",
"(",
"self",
")",
":",
"self",
".",
"_run_thread",
"=",
"True",
"self",
".",
"_thread",
".",
"setDaemon",
"(",
"True",
")",
"self",
".",
"_thread",
".",
"start",
"(",
")"
] | Run the main thread. | [
"Run",
"the",
"main",
"thread",
"."
] | python | train |
joferkington/mplstereonet | examples/fault_slip_plot.py | https://github.com/joferkington/mplstereonet/blob/f6d78ca49807915d4223e864e12bb24d497cc2d6/examples/fault_slip_plot.py#L35-L52 | def fault_and_striae_plot(ax, strikes, dips, rakes):
"""Makes a fault-and-striae plot (a.k.a. "Ball of String") for normal faults
with the given strikes, dips, and rakes."""
# Plot the planes
lines = ax.plane(strikes, dips, 'k-', lw=0.5)
# Calculate the position of the rake of the lineations, but don't plot yet
x, y = mplstereonet.rake(strikes, dips, rakes)
# Calculate the direction the arrows should point
# These are all normal faults, so the arrows point away from the center
# For thrusts, it would just be u, v = -x/mag, -y/mag
mag = np.hypot(x, y)
u, v = x / mag, y / mag
# Plot the arrows at the rake locations...
arrows = ax.quiver(x, y, u, v, width=1, headwidth=4, units='dots')
return lines, arrows | [
"def",
"fault_and_striae_plot",
"(",
"ax",
",",
"strikes",
",",
"dips",
",",
"rakes",
")",
":",
"# Plot the planes",
"lines",
"=",
"ax",
".",
"plane",
"(",
"strikes",
",",
"dips",
",",
"'k-'",
",",
"lw",
"=",
"0.5",
")",
"# Calculate the position of the rake of the lineations, but don't plot yet",
"x",
",",
"y",
"=",
"mplstereonet",
".",
"rake",
"(",
"strikes",
",",
"dips",
",",
"rakes",
")",
"# Calculate the direction the arrows should point",
"# These are all normal faults, so the arrows point away from the center",
"# For thrusts, it would just be u, v = -x/mag, -y/mag",
"mag",
"=",
"np",
".",
"hypot",
"(",
"x",
",",
"y",
")",
"u",
",",
"v",
"=",
"x",
"/",
"mag",
",",
"y",
"/",
"mag",
"# Plot the arrows at the rake locations...",
"arrows",
"=",
"ax",
".",
"quiver",
"(",
"x",
",",
"y",
",",
"u",
",",
"v",
",",
"width",
"=",
"1",
",",
"headwidth",
"=",
"4",
",",
"units",
"=",
"'dots'",
")",
"return",
"lines",
",",
"arrows"
] | Makes a fault-and-striae plot (a.k.a. "Ball of String") for normal faults
with the given strikes, dips, and rakes. | [
"Makes",
"a",
"fault",
"-",
"and",
"-",
"striae",
"plot",
"(",
"a",
".",
"k",
".",
"a",
".",
"Ball",
"of",
"String",
")",
"for",
"normal",
"faults",
"with",
"the",
"given",
"strikes",
"dips",
"and",
"rakes",
"."
] | python | train |
brocade/pynos | pynos/versions/ver_7/ver_7_1_0/yang/brocade_vswitch.py | https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_7/ver_7_1_0/yang/brocade_vswitch.py#L1036-L1048 | def get_vmpolicy_macaddr_output_vmpolicy_macaddr_dvpg_nn(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_vmpolicy_macaddr = ET.Element("get_vmpolicy_macaddr")
config = get_vmpolicy_macaddr
output = ET.SubElement(get_vmpolicy_macaddr, "output")
vmpolicy_macaddr = ET.SubElement(output, "vmpolicy-macaddr")
dvpg_nn = ET.SubElement(vmpolicy_macaddr, "dvpg-nn")
dvpg_nn.text = kwargs.pop('dvpg_nn')
callback = kwargs.pop('callback', self._callback)
return callback(config) | [
"def",
"get_vmpolicy_macaddr_output_vmpolicy_macaddr_dvpg_nn",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"config",
"=",
"ET",
".",
"Element",
"(",
"\"config\"",
")",
"get_vmpolicy_macaddr",
"=",
"ET",
".",
"Element",
"(",
"\"get_vmpolicy_macaddr\"",
")",
"config",
"=",
"get_vmpolicy_macaddr",
"output",
"=",
"ET",
".",
"SubElement",
"(",
"get_vmpolicy_macaddr",
",",
"\"output\"",
")",
"vmpolicy_macaddr",
"=",
"ET",
".",
"SubElement",
"(",
"output",
",",
"\"vmpolicy-macaddr\"",
")",
"dvpg_nn",
"=",
"ET",
".",
"SubElement",
"(",
"vmpolicy_macaddr",
",",
"\"dvpg-nn\"",
")",
"dvpg_nn",
".",
"text",
"=",
"kwargs",
".",
"pop",
"(",
"'dvpg_nn'",
")",
"callback",
"=",
"kwargs",
".",
"pop",
"(",
"'callback'",
",",
"self",
".",
"_callback",
")",
"return",
"callback",
"(",
"config",
")"
] | Auto Generated Code | [
"Auto",
"Generated",
"Code"
] | python | train |
brainiak/brainiak | brainiak/reprsimil/brsa.py | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/reprsimil/brsa.py#L584-L794 | def fit(self, X, design, nuisance=None, scan_onsets=None, coords=None,
inten=None):
"""Compute the Bayesian RSA
Parameters
----------
X: numpy array, shape=[time_points, voxels]
If you have multiple scans of the same participants that you
want to analyze together, you should concatenate them along
the time dimension after proper preprocessing (e.g. spatial
alignment), and specify the onsets of each scan in scan_onsets.
design: numpy array, shape=[time_points, conditions]
This is the design matrix. It should only include the hypothetic
response for task conditions. You should not include
regressors for a DC component or motion parameters, unless you
want to estimate their pattern similarity with response patterns
to your task conditions. If you want to model head motion,
you should include them in nuisance regressors.
            If you have multiple runs, the design matrix
of all runs should be concatenated along the time dimension,
with every column for one condition across runs.
For example, if you have 3 runs of experiment of one participant,
with each run lasting 200 TR. And you have 4 conditions,
then design should be a 600 x 4 numpy array.
nuisance: optional, numpy array, shape=[time_points, nuisance_factors]
The responses to these regressors will be marginalized out from
each voxel, which means they are considered, but won't be assumed
to share the same pseudo-SNR map with the design matrix.
Therefore, the pseudo-SNR map will only reflect the
relative contribution of design matrix to each voxel.
You can provide time courses such as those for head motion
to this parameter.
Note that if auto_nuisance is set to True, the first
n_nureg principal components of residual (excluding the response
to the design matrix and the user-provided nuisance regressors
and a constant baseline)
will be included as additional nuisance regressor after the
first round of fitting.
If auto_nuisance is set to False, the nuisance regressors supplied
by the users together with DC components will be used as
nuisance time series.
Please do not include time course of constant baseline in nuisance.
scan_onsets: optional, numpy array, shape=[runs,]
This specifies the indices of X which correspond to the onset
of each scanning run. For example, if you have two experimental
runs of the same subject, each with 100 TRs, then scan_onsets
should be [0,100].
If you do not provide the argument, the program will
assume all data are from the same run.
The effect of them is to make the inverse matrix
of the temporal covariance matrix of noise block-diagonal.
coords: optional, numpy array, shape=[voxels,3]
This is the coordinate of each voxel,
used for implementing Gaussian Process prior.
inten: optional, numpy array, shape=[voxel,]
This is the average fMRI intensity in each voxel.
It should be calculated from your data without any preprocessing
such as z-scoring. Because it should reflect
whether a voxel is bright (grey matter) or dark (white matter).
A Gaussian Process kernel defined on both coordinate and intensity
            imposes a smoothness prior on adjacent voxels
but with the same tissue type. The Gaussian Process
is experimental and has shown good performance on
some visual datasets.
"""
logger.info('Running Bayesian RSA')
self.random_state_ = check_random_state(self.random_state)
# setting random seed
logger.debug('RandState set to {}'.format(self.random_state_))
assert not self.GP_inten or (self.GP_inten and self.GP_space),\
            'You must specify GP_space to True '\
'if you want to use GP_inten'
# Check input data
assert_all_finite(X)
assert X.ndim == 2, 'The data should be 2-dimensional ndarray'
assert np.all(np.std(X, axis=0) > 0),\
'The time courses of some voxels do not change at all.'\
' Please make sure all voxels are within the brain'
# check design matrix
assert_all_finite(design)
assert design.ndim == 2,\
'The design matrix should be 2-dimensional ndarray'
assert np.linalg.matrix_rank(design) == design.shape[1], \
'Your design matrix has rank smaller than the number of'\
' columns. Some columns can be explained by linear '\
'combination of other columns. Please check your design matrix.'
assert np.size(design, axis=0) == np.size(X, axis=0),\
'Design matrix and data do not '\
'have the same number of time points.'
assert self.rank is None or self.rank <= design.shape[1],\
'Your design matrix has fewer columns than the rank you set'
# Check the nuisance regressors.
if nuisance is not None:
assert_all_finite(nuisance)
assert nuisance.ndim == 2,\
'The nuisance regressor should be 2-dimensional ndarray'
assert np.linalg.matrix_rank(nuisance) == nuisance.shape[1], \
'The nuisance regressor has rank smaller than the number of'\
'columns. Some columns can be explained by linear '\
'combination of other columns. Please check your nuisance' \
'regressors.'
assert np.size(nuisance, axis=0) == np.size(X, axis=0), \
'Nuisance regressor and data do not have the same '\
'number of time points.'
# check scan_onsets validity
assert scan_onsets is None or\
(np.max(scan_onsets) <= X.shape[0] and np.min(scan_onsets) >= 0),\
'Some scan onsets provided are out of the range of time points.'
# check the size of coords and inten
if self.GP_space:
logger.info('Fitting with Gaussian Process prior on log(SNR)')
assert coords is not None and coords.shape[0] == X.shape[1],\
'Spatial smoothness was requested by setting GP_space. '\
'But the voxel number of coords does not match that of '\
'data X, or voxel coordinates are not provided. '\
'Please make sure that coords is in the shape of '\
'[n_voxel x 3].'
assert coords.ndim == 2,\
'The coordinate matrix should be a 2-d array'
if self.GP_inten:
assert inten is not None and inten.shape[0] == X.shape[1],\
'The voxel number of intensity does not '\
'match that of data X, or intensity not provided.'
assert np.var(inten) > 0,\
'All voxels have the same intensity.'
if (not self.GP_space and coords is not None) or\
(not self.GP_inten and inten is not None):
logger.warning('Coordinates or image intensity provided'
' but GP_space or GP_inten is not set '
'to True. The coordinates or intensity are'
' ignored.')
# Estimate the number of necessary nuisance regressors
if self.auto_nuisance:
if self.n_nureg is None:
logger.info('number of nuisance regressors is determined '
'automatically.')
run_TRs, n_runs = self._run_TR_from_scan_onsets(
X.shape[0], scan_onsets)
ts_dc = self._gen_legendre(run_TRs, [0])
_, ts_base, _ = self._merge_DC_to_base(
ts_dc, nuisance, False)
ts_reg = np.concatenate((ts_base, design), axis=1)
beta_hat = np.linalg.lstsq(ts_reg, X)[0]
residuals = X - np.dot(ts_reg, beta_hat)
self.n_nureg_ = np.max(
[1, Ncomp_SVHT_MG_DLD_approx(residuals,
self.nureg_zscore)])
logger.info('Use {} nuisance regressors to model the spatial '
'correlation in noise.'.format(self.n_nureg_))
self.n_nureg_ = np.int32(self.n_nureg_)
else:
self.n_nureg_ = self.n_nureg
self.n_nureg_ = np.int32(self.n_nureg_)
# Run Bayesian RSA
# Note that we have a change of notation here. Within _fit_RSA_UV,
# design matrix is named X and data is named Y, to reflect the
# generative model that data Y is generated by mixing the response
# X to experiment conditions and other neural activity.
# However, in fit(), we keep the tradition of scikit-learn that
# X is the input data to fit and y, a reserved name not used, is
# the label to map to from X.
if not self.GP_space:
# If GP_space is not requested, then the model is fitted
# without imposing any Gaussian Process prior on log(SNR^2)
self.U_, self.L_, self.nSNR_, self.beta_, self.beta0_,\
self._beta_latent_, self.sigma_, self.rho_, _, _, _,\
self.X0_ = self._fit_RSA_UV(X=design, Y=X, X_base=nuisance,
scan_onsets=scan_onsets)
elif not self.GP_inten:
# If GP_space is requested, but GP_inten is not, a GP prior
# based on spatial locations of voxels will be imposed.
self.U_, self.L_, self.nSNR_, self.beta_, self.beta0_,\
self._beta_latent_, self.sigma_, self.rho_, \
self.lGPspace_, self.bGP_, _, \
self.X0_ = self._fit_RSA_UV(
X=design, Y=X, X_base=nuisance,
scan_onsets=scan_onsets, coords=coords)
else:
# If both self.GP_space and self.GP_inten are True,
# a GP prior based on both location and intensity is imposed.
self.U_, self.L_, self.nSNR_, self.beta_, self.beta0_,\
self._beta_latent_, self.sigma_, self.rho_, \
self.lGPspace_, self.bGP_, self.lGPinten_, self.X0_ = \
self._fit_RSA_UV(X=design, Y=X, X_base=nuisance,
scan_onsets=scan_onsets,
coords=coords, inten=inten)
self.C_ = utils.cov2corr(self.U_)
self.design_ = design.copy()
self._rho_design_, self._sigma2_design_ = \
self._est_AR1(self.design_, same_para=True)
self._rho_X0_, self._sigma2_X0_ = self._est_AR1(self.X0_)
# AR(1) parameters of the design matrix and nuisance regressors,
# which will be used in transform or score.
# Finally, we fit a null model with the same setting except
# that there is no response to X
self.beta0_null_, self.sigma_null_, self.rho_null_, \
self.X0_null_ = self._fit_null(Y=X, X_base=nuisance,
scan_onsets=scan_onsets)
self._rho_X0_null_, self._sigma2_X0_null_ =\
self._est_AR1(self.X0_null_)
return self | [
"def",
"fit",
"(",
"self",
",",
"X",
",",
"design",
",",
"nuisance",
"=",
"None",
",",
"scan_onsets",
"=",
"None",
",",
"coords",
"=",
"None",
",",
"inten",
"=",
"None",
")",
":",
"logger",
".",
"info",
"(",
"'Running Bayesian RSA'",
")",
"self",
".",
"random_state_",
"=",
"check_random_state",
"(",
"self",
".",
"random_state",
")",
"# setting random seed",
"logger",
".",
"debug",
"(",
"'RandState set to {}'",
".",
"format",
"(",
"self",
".",
"random_state_",
")",
")",
"assert",
"not",
"self",
".",
"GP_inten",
"or",
"(",
"self",
".",
"GP_inten",
"and",
"self",
".",
"GP_space",
")",
",",
"'You must speficiy GP_space to True'",
"'if you want to use GP_inten'",
"# Check input data",
"assert_all_finite",
"(",
"X",
")",
"assert",
"X",
".",
"ndim",
"==",
"2",
",",
"'The data should be 2-dimensional ndarray'",
"assert",
"np",
".",
"all",
"(",
"np",
".",
"std",
"(",
"X",
",",
"axis",
"=",
"0",
")",
">",
"0",
")",
",",
"'The time courses of some voxels do not change at all.'",
"' Please make sure all voxels are within the brain'",
"# check design matrix",
"assert_all_finite",
"(",
"design",
")",
"assert",
"design",
".",
"ndim",
"==",
"2",
",",
"'The design matrix should be 2-dimensional ndarray'",
"assert",
"np",
".",
"linalg",
".",
"matrix_rank",
"(",
"design",
")",
"==",
"design",
".",
"shape",
"[",
"1",
"]",
",",
"'Your design matrix has rank smaller than the number of'",
"' columns. Some columns can be explained by linear '",
"'combination of other columns. Please check your design matrix.'",
"assert",
"np",
".",
"size",
"(",
"design",
",",
"axis",
"=",
"0",
")",
"==",
"np",
".",
"size",
"(",
"X",
",",
"axis",
"=",
"0",
")",
",",
"'Design matrix and data do not '",
"'have the same number of time points.'",
"assert",
"self",
".",
"rank",
"is",
"None",
"or",
"self",
".",
"rank",
"<=",
"design",
".",
"shape",
"[",
"1",
"]",
",",
"'Your design matrix has fewer columns than the rank you set'",
"# Check the nuisance regressors.",
"if",
"nuisance",
"is",
"not",
"None",
":",
"assert_all_finite",
"(",
"nuisance",
")",
"assert",
"nuisance",
".",
"ndim",
"==",
"2",
",",
"'The nuisance regressor should be 2-dimensional ndarray'",
"assert",
"np",
".",
"linalg",
".",
"matrix_rank",
"(",
"nuisance",
")",
"==",
"nuisance",
".",
"shape",
"[",
"1",
"]",
",",
"'The nuisance regressor has rank smaller than the number of'",
"'columns. Some columns can be explained by linear '",
"'combination of other columns. Please check your nuisance'",
"'regressors.'",
"assert",
"np",
".",
"size",
"(",
"nuisance",
",",
"axis",
"=",
"0",
")",
"==",
"np",
".",
"size",
"(",
"X",
",",
"axis",
"=",
"0",
")",
",",
"'Nuisance regressor and data do not have the same '",
"'number of time points.'",
"# check scan_onsets validity",
"assert",
"scan_onsets",
"is",
"None",
"or",
"(",
"np",
".",
"max",
"(",
"scan_onsets",
")",
"<=",
"X",
".",
"shape",
"[",
"0",
"]",
"and",
"np",
".",
"min",
"(",
"scan_onsets",
")",
">=",
"0",
")",
",",
"'Some scan onsets provided are out of the range of time points.'",
"# check the size of coords and inten",
"if",
"self",
".",
"GP_space",
":",
"logger",
".",
"info",
"(",
"'Fitting with Gaussian Process prior on log(SNR)'",
")",
"assert",
"coords",
"is",
"not",
"None",
"and",
"coords",
".",
"shape",
"[",
"0",
"]",
"==",
"X",
".",
"shape",
"[",
"1",
"]",
",",
"'Spatial smoothness was requested by setting GP_space. '",
"'But the voxel number of coords does not match that of '",
"'data X, or voxel coordinates are not provided. '",
"'Please make sure that coords is in the shape of '",
"'[n_voxel x 3].'",
"assert",
"coords",
".",
"ndim",
"==",
"2",
",",
"'The coordinate matrix should be a 2-d array'",
"if",
"self",
".",
"GP_inten",
":",
"assert",
"inten",
"is",
"not",
"None",
"and",
"inten",
".",
"shape",
"[",
"0",
"]",
"==",
"X",
".",
"shape",
"[",
"1",
"]",
",",
"'The voxel number of intensity does not '",
"'match that of data X, or intensity not provided.'",
"assert",
"np",
".",
"var",
"(",
"inten",
")",
">",
"0",
",",
"'All voxels have the same intensity.'",
"if",
"(",
"not",
"self",
".",
"GP_space",
"and",
"coords",
"is",
"not",
"None",
")",
"or",
"(",
"not",
"self",
".",
"GP_inten",
"and",
"inten",
"is",
"not",
"None",
")",
":",
"logger",
".",
"warning",
"(",
"'Coordinates or image intensity provided'",
"' but GP_space or GP_inten is not set '",
"'to True. The coordinates or intensity are'",
"' ignored.'",
")",
"# Estimate the number of necessary nuisance regressors",
"if",
"self",
".",
"auto_nuisance",
":",
"if",
"self",
".",
"n_nureg",
"is",
"None",
":",
"logger",
".",
"info",
"(",
"'number of nuisance regressors is determined '",
"'automatically.'",
")",
"run_TRs",
",",
"n_runs",
"=",
"self",
".",
"_run_TR_from_scan_onsets",
"(",
"X",
".",
"shape",
"[",
"0",
"]",
",",
"scan_onsets",
")",
"ts_dc",
"=",
"self",
".",
"_gen_legendre",
"(",
"run_TRs",
",",
"[",
"0",
"]",
")",
"_",
",",
"ts_base",
",",
"_",
"=",
"self",
".",
"_merge_DC_to_base",
"(",
"ts_dc",
",",
"nuisance",
",",
"False",
")",
"ts_reg",
"=",
"np",
".",
"concatenate",
"(",
"(",
"ts_base",
",",
"design",
")",
",",
"axis",
"=",
"1",
")",
"beta_hat",
"=",
"np",
".",
"linalg",
".",
"lstsq",
"(",
"ts_reg",
",",
"X",
")",
"[",
"0",
"]",
"residuals",
"=",
"X",
"-",
"np",
".",
"dot",
"(",
"ts_reg",
",",
"beta_hat",
")",
"self",
".",
"n_nureg_",
"=",
"np",
".",
"max",
"(",
"[",
"1",
",",
"Ncomp_SVHT_MG_DLD_approx",
"(",
"residuals",
",",
"self",
".",
"nureg_zscore",
")",
"]",
")",
"logger",
".",
"info",
"(",
"'Use {} nuisance regressors to model the spatial '",
"'correlation in noise.'",
".",
"format",
"(",
"self",
".",
"n_nureg_",
")",
")",
"self",
".",
"n_nureg_",
"=",
"np",
".",
"int32",
"(",
"self",
".",
"n_nureg_",
")",
"else",
":",
"self",
".",
"n_nureg_",
"=",
"self",
".",
"n_nureg",
"self",
".",
"n_nureg_",
"=",
"np",
".",
"int32",
"(",
"self",
".",
"n_nureg_",
")",
"# Run Bayesian RSA",
"# Note that we have a change of notation here. Within _fit_RSA_UV,",
"# design matrix is named X and data is named Y, to reflect the",
"# generative model that data Y is generated by mixing the response",
"# X to experiment conditions and other neural activity.",
"# However, in fit(), we keep the tradition of scikit-learn that",
"# X is the input data to fit and y, a reserved name not used, is",
"# the label to map to from X.",
"if",
"not",
"self",
".",
"GP_space",
":",
"# If GP_space is not requested, then the model is fitted",
"# without imposing any Gaussian Process prior on log(SNR^2)",
"self",
".",
"U_",
",",
"self",
".",
"L_",
",",
"self",
".",
"nSNR_",
",",
"self",
".",
"beta_",
",",
"self",
".",
"beta0_",
",",
"self",
".",
"_beta_latent_",
",",
"self",
".",
"sigma_",
",",
"self",
".",
"rho_",
",",
"_",
",",
"_",
",",
"_",
",",
"self",
".",
"X0_",
"=",
"self",
".",
"_fit_RSA_UV",
"(",
"X",
"=",
"design",
",",
"Y",
"=",
"X",
",",
"X_base",
"=",
"nuisance",
",",
"scan_onsets",
"=",
"scan_onsets",
")",
"elif",
"not",
"self",
".",
"GP_inten",
":",
"# If GP_space is requested, but GP_inten is not, a GP prior",
"# based on spatial locations of voxels will be imposed.",
"self",
".",
"U_",
",",
"self",
".",
"L_",
",",
"self",
".",
"nSNR_",
",",
"self",
".",
"beta_",
",",
"self",
".",
"beta0_",
",",
"self",
".",
"_beta_latent_",
",",
"self",
".",
"sigma_",
",",
"self",
".",
"rho_",
",",
"self",
".",
"lGPspace_",
",",
"self",
".",
"bGP_",
",",
"_",
",",
"self",
".",
"X0_",
"=",
"self",
".",
"_fit_RSA_UV",
"(",
"X",
"=",
"design",
",",
"Y",
"=",
"X",
",",
"X_base",
"=",
"nuisance",
",",
"scan_onsets",
"=",
"scan_onsets",
",",
"coords",
"=",
"coords",
")",
"else",
":",
"# If both self.GP_space and self.GP_inten are True,",
"# a GP prior based on both location and intensity is imposed.",
"self",
".",
"U_",
",",
"self",
".",
"L_",
",",
"self",
".",
"nSNR_",
",",
"self",
".",
"beta_",
",",
"self",
".",
"beta0_",
",",
"self",
".",
"_beta_latent_",
",",
"self",
".",
"sigma_",
",",
"self",
".",
"rho_",
",",
"self",
".",
"lGPspace_",
",",
"self",
".",
"bGP_",
",",
"self",
".",
"lGPinten_",
",",
"self",
".",
"X0_",
"=",
"self",
".",
"_fit_RSA_UV",
"(",
"X",
"=",
"design",
",",
"Y",
"=",
"X",
",",
"X_base",
"=",
"nuisance",
",",
"scan_onsets",
"=",
"scan_onsets",
",",
"coords",
"=",
"coords",
",",
"inten",
"=",
"inten",
")",
"self",
".",
"C_",
"=",
"utils",
".",
"cov2corr",
"(",
"self",
".",
"U_",
")",
"self",
".",
"design_",
"=",
"design",
".",
"copy",
"(",
")",
"self",
".",
"_rho_design_",
",",
"self",
".",
"_sigma2_design_",
"=",
"self",
".",
"_est_AR1",
"(",
"self",
".",
"design_",
",",
"same_para",
"=",
"True",
")",
"self",
".",
"_rho_X0_",
",",
"self",
".",
"_sigma2_X0_",
"=",
"self",
".",
"_est_AR1",
"(",
"self",
".",
"X0_",
")",
"# AR(1) parameters of the design matrix and nuisance regressors,",
"# which will be used in transform or score.",
"# Finally, we fit a null model with the same setting except",
"# that there is no response to X",
"self",
".",
"beta0_null_",
",",
"self",
".",
"sigma_null_",
",",
"self",
".",
"rho_null_",
",",
"self",
".",
"X0_null_",
"=",
"self",
".",
"_fit_null",
"(",
"Y",
"=",
"X",
",",
"X_base",
"=",
"nuisance",
",",
"scan_onsets",
"=",
"scan_onsets",
")",
"self",
".",
"_rho_X0_null_",
",",
"self",
".",
"_sigma2_X0_null_",
"=",
"self",
".",
"_est_AR1",
"(",
"self",
".",
"X0_null_",
")",
"return",
"self"
] | Compute the Bayesian RSA
Parameters
----------
X: numpy array, shape=[time_points, voxels]
If you have multiple scans of the same participants that you
want to analyze together, you should concatenate them along
the time dimension after proper preprocessing (e.g. spatial
alignment), and specify the onsets of each scan in scan_onsets.
design: numpy array, shape=[time_points, conditions]
This is the design matrix. It should only include the hypothesized
response for task conditions. You should not include
regressors for a DC component or motion parameters, unless you
want to estimate their pattern similarity with response patterns
to your task conditions. If you want to model head motion,
you should include them in nuisance regressors.
If you have multiple runs, the design matrix
of all runs should be concatenated along the time dimension,
with every column for one condition across runs.
For example, if you have 3 experimental runs for one participant,
each lasting 200 TRs, and you have 4 conditions,
then design should be a 600 x 4 numpy array.
nuisance: optional, numpy array, shape=[time_points, nuisance_factors]
The responses to these regressors will be marginalized out from
each voxel, which means they are considered, but won't be assumed
to share the same pseudo-SNR map with the design matrix.
Therefore, the pseudo-SNR map will only reflect the
relative contribution of design matrix to each voxel.
You can provide time courses such as those for head motion
to this parameter.
Note that if auto_nuisance is set to True, the first
n_nureg principal components of residual (excluding the response
to the design matrix and the user-provided nuisance regressors
and a constant baseline)
will be included as additional nuisance regressor after the
first round of fitting.
If auto_nuisance is set to False, the nuisance regressors supplied
by the users together with DC components will be used as
nuisance time series.
Please do not include time course of constant baseline in nuisance.
scan_onsets: optional, numpy array, shape=[runs,]
This specifies the indices of X which correspond to the onset
of each scanning run. For example, if you have two experimental
runs of the same subject, each with 100 TRs, then scan_onsets
should be [0,100].
If you do not provide the argument, the program will
assume all data are from the same run.
Their effect is to make the inverse of the temporal
covariance matrix of the noise block-diagonal.
coords: optional, numpy array, shape=[voxels,3]
This is the coordinate of each voxel,
used for implementing Gaussian Process prior.
inten: optional, numpy array, shape=[voxel,]
This is the average fMRI intensity in each voxel.
It should be calculated from your data without any preprocessing
such as z-scoring, because it should reflect
whether a voxel is bright (grey matter) or dark (white matter).
A Gaussian Process kernel defined on both coordinate and intensity
imposes a smoothness prior on adjacent voxels
but with the same tissue type. The Gaussian Process
is experimental and has shown good performance on
some visual datasets. | [
"Compute",
"the",
"Bayesian",
"RSA"
] | python | train |
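A minimal usage sketch for the fit() method documented in the record above. The import path (brainiak.reprsimil.brsa.BRSA) and the default constructor settings are assumptions based on the BrainIAK package this record appears to come from, and the arrays are random placeholders, so the fit is purely illustrative:

import numpy as np
from brainiak.reprsimil.brsa import BRSA   # assumed import path, not stated in the record

n_t, n_v, n_c = 600, 200, 4             # time points, voxels, task conditions
fmri = np.random.randn(n_t, n_v)        # stand-in data, shape [time_points, voxels]
design = np.random.randn(n_t, n_c)      # stand-in design matrix, shape [time_points, conditions]
scan_onsets = np.array([0, 200, 400])   # three runs of 200 TRs each

brsa = BRSA()                           # default settings (no GP prior on log(SNR))
brsa.fit(X=fmri, design=design, scan_onsets=scan_onsets)
print(brsa.C_)                          # estimated condition-by-condition correlation matrix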
limodou/uliweb | uliweb/lib/werkzeug/script.py | https://github.com/limodou/uliweb/blob/34472f25e4bc0b954a35346672f94e84ef18b076/uliweb/lib/werkzeug/script.py#L186-L192 | def find_actions(namespace, action_prefix):
"""Find all the actions in the namespace."""
actions = {}
for key, value in iteritems(namespace):
if key.startswith(action_prefix):
actions[key[len(action_prefix):]] = analyse_action(value)
return actions | [
"def",
"find_actions",
"(",
"namespace",
",",
"action_prefix",
")",
":",
"actions",
"=",
"{",
"}",
"for",
"key",
",",
"value",
"in",
"iteritems",
"(",
"namespace",
")",
":",
"if",
"key",
".",
"startswith",
"(",
"action_prefix",
")",
":",
"actions",
"[",
"key",
"[",
"len",
"(",
"action_prefix",
")",
":",
"]",
"]",
"=",
"analyse_action",
"(",
"value",
")",
"return",
"actions"
] | Find all the actions in the namespace. | [
"Find",
"all",
"the",
"actions",
"in",
"the",
"namespace",
"."
] | python | train |
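A small sketch of how find_actions() above is typically used by the legacy werkzeug.script module. That module was deprecated and later removed from Werkzeug, so treat the import as an assumption about an old release; note that analyse_action() requires every action argument to have a default value:

from werkzeug import script   # assumes an old Werkzeug release that still ships werkzeug.script

def action_runserver(hostname='localhost', port=5000):
    """Start a development server."""
    print('serving on %s:%d' % (hostname, port))

def action_initdb():
    """Initialise the database."""
    print('creating tables')

# Maps every 'action_*' callable in the namespace to its analysed form,
# keyed by the name with the prefix stripped: {'runserver': ..., 'initdb': ...}
actions = script.find_actions(globals(), 'action_')
print(sorted(actions))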
biocore/burrito-fillings | bfillings/mafft_v7.py | https://github.com/biocore/burrito-fillings/blob/02ab71a46119b40793bd56a4ae00ca15f6dc3329/bfillings/mafft_v7.py#L204-L225 | def _input_as_seqs(self, data):
"""Format a list of seq as input.
Parameters
----------
data: list of strings
Each string is a sequence to be aligned.
Returns
-------
A temp file name that contains the sequences.
See Also
--------
burrito.util.CommandLineApplication
"""
lines = []
for i, s in enumerate(data):
# will number the sequences 1,2,3,etc.
lines.append(''.join(['>', str(i+1)]))
lines.append(s)
return self._input_as_lines(lines) | [
"def",
"_input_as_seqs",
"(",
"self",
",",
"data",
")",
":",
"lines",
"=",
"[",
"]",
"for",
"i",
",",
"s",
"in",
"enumerate",
"(",
"data",
")",
":",
"# will number the sequences 1,2,3,etc.",
"lines",
".",
"append",
"(",
"''",
".",
"join",
"(",
"[",
"'>'",
",",
"str",
"(",
"i",
"+",
"1",
")",
"]",
")",
")",
"lines",
".",
"append",
"(",
"s",
")",
"return",
"self",
".",
"_input_as_lines",
"(",
"lines",
")"
] | Format a list of seq as input.
Parameters
----------
data: list of strings
Each string is a sequence to be aligned.
Returns
-------
A temp file name that contains the sequences.
See Also
--------
burrito.util.CommandLineApplication | [
"Format",
"a",
"list",
"of",
"seq",
"as",
"input",
"."
] | python | train |
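The numbering scheme applied by _input_as_seqs() above can be shown without the Mafft wrapper itself; this stand-alone snippet reproduces only the labelling logic (the real method additionally writes the lines to a temporary input file via _input_as_lines):

seqs = ['ACGT', 'GATTACA']
lines = []
for i, s in enumerate(seqs):
    lines.append('>' + str(i + 1))   # sequences are labelled 1, 2, 3, ... in order
    lines.append(s)
print('\n'.join(lines))
# >1
# ACGT
# >2
# GATTACA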
spyder-ide/spyder | spyder/preferences/configdialog.py | https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/preferences/configdialog.py#L749-L791 | def create_fontgroup(self, option=None, text=None, title=None,
tip=None, fontfilters=None, without_group=False):
"""Option=None -> setting plugin font"""
if title:
fontlabel = QLabel(title)
else:
fontlabel = QLabel(_("Font"))
fontbox = QFontComboBox()
if fontfilters is not None:
fontbox.setFontFilters(fontfilters)
sizelabel = QLabel(" "+_("Size"))
sizebox = QSpinBox()
sizebox.setRange(7, 100)
self.fontboxes[(fontbox, sizebox)] = option
layout = QHBoxLayout()
for subwidget in (fontlabel, fontbox, sizelabel, sizebox):
layout.addWidget(subwidget)
layout.addStretch(1)
widget = QWidget(self)
widget.fontlabel = fontlabel
widget.sizelabel = sizelabel
widget.fontbox = fontbox
widget.sizebox = sizebox
widget.setLayout(layout)
if not without_group:
if text is None:
text = _("Font style")
group = QGroupBox(text)
group.setLayout(layout)
if tip is not None:
group.setToolTip(tip)
return group
else:
return widget | [
"def",
"create_fontgroup",
"(",
"self",
",",
"option",
"=",
"None",
",",
"text",
"=",
"None",
",",
"title",
"=",
"None",
",",
"tip",
"=",
"None",
",",
"fontfilters",
"=",
"None",
",",
"without_group",
"=",
"False",
")",
":",
"if",
"title",
":",
"fontlabel",
"=",
"QLabel",
"(",
"title",
")",
"else",
":",
"fontlabel",
"=",
"QLabel",
"(",
"_",
"(",
"\"Font\"",
")",
")",
"fontbox",
"=",
"QFontComboBox",
"(",
")",
"if",
"fontfilters",
"is",
"not",
"None",
":",
"fontbox",
".",
"setFontFilters",
"(",
"fontfilters",
")",
"sizelabel",
"=",
"QLabel",
"(",
"\" \"",
"+",
"_",
"(",
"\"Size\"",
")",
")",
"sizebox",
"=",
"QSpinBox",
"(",
")",
"sizebox",
".",
"setRange",
"(",
"7",
",",
"100",
")",
"self",
".",
"fontboxes",
"[",
"(",
"fontbox",
",",
"sizebox",
")",
"]",
"=",
"option",
"layout",
"=",
"QHBoxLayout",
"(",
")",
"for",
"subwidget",
"in",
"(",
"fontlabel",
",",
"fontbox",
",",
"sizelabel",
",",
"sizebox",
")",
":",
"layout",
".",
"addWidget",
"(",
"subwidget",
")",
"layout",
".",
"addStretch",
"(",
"1",
")",
"widget",
"=",
"QWidget",
"(",
"self",
")",
"widget",
".",
"fontlabel",
"=",
"fontlabel",
"widget",
".",
"sizelabel",
"=",
"sizelabel",
"widget",
".",
"fontbox",
"=",
"fontbox",
"widget",
".",
"sizebox",
"=",
"sizebox",
"widget",
".",
"setLayout",
"(",
"layout",
")",
"if",
"not",
"without_group",
":",
"if",
"text",
"is",
"None",
":",
"text",
"=",
"_",
"(",
"\"Font style\"",
")",
"group",
"=",
"QGroupBox",
"(",
"text",
")",
"group",
".",
"setLayout",
"(",
"layout",
")",
"if",
"tip",
"is",
"not",
"None",
":",
"group",
".",
"setToolTip",
"(",
"tip",
")",
"return",
"group",
"else",
":",
"return",
"widget"
] | Option=None -> setting plugin font | [
"Option",
"=",
"None",
"-",
">",
"setting",
"plugin",
"font"
] | python | train |
MakerReduxCorp/PLOD | PLOD/__init__.py | https://github.com/MakerReduxCorp/PLOD/blob/707502cd928e5be6bd5e46d7f6de7da0e188cf1e/PLOD/__init__.py#L251-L278 | def insert(self, new_entry):
'''Insert a new entry to the end of the list of dictionaries.
This entry retains the original index tracking but adds this
entry incrementally at the end.
>>> test = [
... {"name": "Jim", "age": 18, "income": 93000, "wigs": 68 },
... {"name": "Larry", "age": 18, "wigs": [3, 2, 9]},
... {"name": "Joe", "age": 20, "income": 15000, "wigs": [1, 2, 3]},
... {"name": "Bill", "age": 19, "income": 29000 },
... ]
>>> entryA = {"name": "Willie", "age": 77}
>>> print PLOD(test).insert(entryA).returnString()
[
{age: 18, income: 93000, name: 'Jim' , wigs: 68},
{age: 18, income: None , name: 'Larry' , wigs: [3, 2, 9]},
{age: 20, income: 15000, name: 'Joe' , wigs: [1, 2, 3]},
{age: 19, income: 29000, name: 'Bill' , wigs: None },
{age: 77, income: None , name: 'Willie', wigs: None }
]
:param new_entry:
The new list entry to insert.
'''
self.index_track.append(len(self.table))
self.table.append(new_entry)
return self | [
"def",
"insert",
"(",
"self",
",",
"new_entry",
")",
":",
"self",
".",
"index_track",
".",
"append",
"(",
"len",
"(",
"self",
".",
"table",
")",
")",
"self",
".",
"table",
".",
"append",
"(",
"new_entry",
")",
"return",
"self"
] | Insert a new entry to the end of the list of dictionaries.
This entry retains the original index tracking but adds this
entry incrementally at the end.
>>> test = [
... {"name": "Jim", "age": 18, "income": 93000, "wigs": 68 },
... {"name": "Larry", "age": 18, "wigs": [3, 2, 9]},
... {"name": "Joe", "age": 20, "income": 15000, "wigs": [1, 2, 3]},
... {"name": "Bill", "age": 19, "income": 29000 },
... ]
>>> entryA = {"name": "Willie", "age": 77}
>>> print PLOD(test).insert(entryA).returnString()
[
{age: 18, income: 93000, name: 'Jim' , wigs: 68},
{age: 18, income: None , name: 'Larry' , wigs: [3, 2, 9]},
{age: 20, income: 15000, name: 'Joe' , wigs: [1, 2, 3]},
{age: 19, income: 29000, name: 'Bill' , wigs: None },
{age: 77, income: None , name: 'Willie', wigs: None }
]
:param new_entry:
The new list entry to insert. | [
"Insert",
"a",
"new",
"entry",
"to",
"the",
"end",
"of",
"the",
"list",
"of",
"dictionaries",
"."
] | python | train |
Robpol86/sphinxcontrib-versioning | sphinxcontrib/versioning/sphinx_.py | https://github.com/Robpol86/sphinxcontrib-versioning/blob/920edec0ac764081b583a2ecf4e6952762b9dbf2/sphinxcontrib/versioning/sphinx_.py#L172-L204 | def _build(argv, config, versions, current_name, is_root):
"""Build Sphinx docs via multiprocessing for isolation.
:param tuple argv: Arguments to pass to Sphinx.
:param sphinxcontrib.versioning.lib.Config config: Runtime configuration.
:param sphinxcontrib.versioning.versions.Versions versions: Versions class instance.
:param str current_name: The ref name of the current version being built.
:param bool is_root: Is this build in the web root?
"""
# Patch.
application.Config = ConfigInject
if config.show_banner:
EventHandlers.BANNER_GREATEST_TAG = config.banner_greatest_tag
EventHandlers.BANNER_MAIN_VERSION = config.banner_main_ref
EventHandlers.BANNER_RECENT_TAG = config.banner_recent_tag
EventHandlers.SHOW_BANNER = True
EventHandlers.CURRENT_VERSION = current_name
EventHandlers.IS_ROOT = is_root
EventHandlers.VERSIONS = versions
SC_VERSIONING_VERSIONS[:] = [p for r in versions.remotes for p in sorted(r.items()) if p[0] not in ('sha', 'date')]
# Update argv.
if config.verbose > 1:
argv += ('-v',) * (config.verbose - 1)
if config.no_colors:
argv += ('-N',)
if config.overflow:
argv += config.overflow
# Build.
result = build_main(argv)
if result != 0:
raise SphinxError | [
"def",
"_build",
"(",
"argv",
",",
"config",
",",
"versions",
",",
"current_name",
",",
"is_root",
")",
":",
"# Patch.",
"application",
".",
"Config",
"=",
"ConfigInject",
"if",
"config",
".",
"show_banner",
":",
"EventHandlers",
".",
"BANNER_GREATEST_TAG",
"=",
"config",
".",
"banner_greatest_tag",
"EventHandlers",
".",
"BANNER_MAIN_VERSION",
"=",
"config",
".",
"banner_main_ref",
"EventHandlers",
".",
"BANNER_RECENT_TAG",
"=",
"config",
".",
"banner_recent_tag",
"EventHandlers",
".",
"SHOW_BANNER",
"=",
"True",
"EventHandlers",
".",
"CURRENT_VERSION",
"=",
"current_name",
"EventHandlers",
".",
"IS_ROOT",
"=",
"is_root",
"EventHandlers",
".",
"VERSIONS",
"=",
"versions",
"SC_VERSIONING_VERSIONS",
"[",
":",
"]",
"=",
"[",
"p",
"for",
"r",
"in",
"versions",
".",
"remotes",
"for",
"p",
"in",
"sorted",
"(",
"r",
".",
"items",
"(",
")",
")",
"if",
"p",
"[",
"0",
"]",
"not",
"in",
"(",
"'sha'",
",",
"'date'",
")",
"]",
"# Update argv.",
"if",
"config",
".",
"verbose",
">",
"1",
":",
"argv",
"+=",
"(",
"'-v'",
",",
")",
"*",
"(",
"config",
".",
"verbose",
"-",
"1",
")",
"if",
"config",
".",
"no_colors",
":",
"argv",
"+=",
"(",
"'-N'",
",",
")",
"if",
"config",
".",
"overflow",
":",
"argv",
"+=",
"config",
".",
"overflow",
"# Build.",
"result",
"=",
"build_main",
"(",
"argv",
")",
"if",
"result",
"!=",
"0",
":",
"raise",
"SphinxError"
] | Build Sphinx docs via multiprocessing for isolation.
:param tuple argv: Arguments to pass to Sphinx.
:param sphinxcontrib.versioning.lib.Config config: Runtime configuration.
:param sphinxcontrib.versioning.versions.Versions versions: Versions class instance.
:param str current_name: The ref name of the current version being built.
:param bool is_root: Is this build in the web root? | [
"Build",
"Sphinx",
"docs",
"via",
"multiprocessing",
"for",
"isolation",
"."
] | python | train |
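_build() above is an internal helper that sphinxcontrib-versioning runs in a child process so that the monkey-patching of Sphinx classes and event handlers cannot leak between version builds. A generic sketch of that isolation pattern follows; the stand-in build function is hypothetical, not the library's API:

import multiprocessing

def run_isolated(target, *args):
    """Run a build callable in its own process and report its exit code."""
    proc = multiprocessing.Process(target=target, args=args)
    proc.start()
    proc.join()
    return proc.exitcode

def fake_build(outdir):          # hypothetical stand-in for the real _build(...)
    print('building docs into', outdir)

if __name__ == '__main__':
    print('exit code:', run_isolated(fake_build, 'docs/_build/html'))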
lexibank/pylexibank | src/pylexibank/cldf.py | https://github.com/lexibank/pylexibank/blob/c28e7f122f20de1232623dd7003cb5b01535e581/src/pylexibank/cldf.py#L106-L147 | def add_lexemes(self, **kw):
"""
:return: list of dicts corresponding to newly created Lexemes
"""
lexemes = []
# Do we have morpheme segmentation on top of phonemes?
with_morphemes = '+' in self['FormTable', 'Segments'].separator
for i, form in enumerate(self.dataset.split_forms(kw, kw['Value'])):
kw_ = kw.copy()
if form:
if form != kw_['Value']:
self.dataset.log.debug(
'iter_forms split: "{0}" -> "{1}"'.format(kw_['Value'], form))
if form:
kw_.setdefault('Segments', self.tokenize(kw_, form) or [])
kw_.update(ID=self.lexeme_id(kw), Form=form)
lexemes.append(self._add_object(self.dataset.lexeme_class, **kw_))
if kw_['Segments']:
analysis = self.dataset.tr_analyses.setdefault(
kw_['Language_ID'], Analysis())
try:
segments = kw_['Segments']
if with_morphemes:
segments = list(chain(*[s.split() for s in segments]))
_, _bipa, _sc, _analysis = analyze(segments, analysis)
# update the list of `bad_words` if necessary; we precompute a
# list of data types in `_bipa` just to make the conditional
# checking easier
_bipa_types = [type(s) for s in _bipa]
if pyclts.models.UnknownSound in _bipa_types or '?' in _sc:
self.dataset.tr_bad_words.append(kw_)
except ValueError: # pragma: no cover
self.dataset.tr_invalid_words.append(kw_)
except (KeyError, AttributeError): # pragma: no cover
print(kw_['Form'], kw_)
raise
return lexemes | [
"def",
"add_lexemes",
"(",
"self",
",",
"*",
"*",
"kw",
")",
":",
"lexemes",
"=",
"[",
"]",
"# Do we have morpheme segmentation on top of phonemes?",
"with_morphemes",
"=",
"'+'",
"in",
"self",
"[",
"'FormTable'",
",",
"'Segments'",
"]",
".",
"separator",
"for",
"i",
",",
"form",
"in",
"enumerate",
"(",
"self",
".",
"dataset",
".",
"split_forms",
"(",
"kw",
",",
"kw",
"[",
"'Value'",
"]",
")",
")",
":",
"kw_",
"=",
"kw",
".",
"copy",
"(",
")",
"if",
"form",
":",
"if",
"form",
"!=",
"kw_",
"[",
"'Value'",
"]",
":",
"self",
".",
"dataset",
".",
"log",
".",
"debug",
"(",
"'iter_forms split: \"{0}\" -> \"{1}\"'",
".",
"format",
"(",
"kw_",
"[",
"'Value'",
"]",
",",
"form",
")",
")",
"if",
"form",
":",
"kw_",
".",
"setdefault",
"(",
"'Segments'",
",",
"self",
".",
"tokenize",
"(",
"kw_",
",",
"form",
")",
"or",
"[",
"]",
")",
"kw_",
".",
"update",
"(",
"ID",
"=",
"self",
".",
"lexeme_id",
"(",
"kw",
")",
",",
"Form",
"=",
"form",
")",
"lexemes",
".",
"append",
"(",
"self",
".",
"_add_object",
"(",
"self",
".",
"dataset",
".",
"lexeme_class",
",",
"*",
"*",
"kw_",
")",
")",
"if",
"kw_",
"[",
"'Segments'",
"]",
":",
"analysis",
"=",
"self",
".",
"dataset",
".",
"tr_analyses",
".",
"setdefault",
"(",
"kw_",
"[",
"'Language_ID'",
"]",
",",
"Analysis",
"(",
")",
")",
"try",
":",
"segments",
"=",
"kw_",
"[",
"'Segments'",
"]",
"if",
"with_morphemes",
":",
"segments",
"=",
"list",
"(",
"chain",
"(",
"*",
"[",
"s",
".",
"split",
"(",
")",
"for",
"s",
"in",
"segments",
"]",
")",
")",
"_",
",",
"_bipa",
",",
"_sc",
",",
"_analysis",
"=",
"analyze",
"(",
"segments",
",",
"analysis",
")",
"# update the list of `bad_words` if necessary; we precompute a",
"# list of data types in `_bipa` just to make the conditional",
"# checking easier",
"_bipa_types",
"=",
"[",
"type",
"(",
"s",
")",
"for",
"s",
"in",
"_bipa",
"]",
"if",
"pyclts",
".",
"models",
".",
"UnknownSound",
"in",
"_bipa_types",
"or",
"'?'",
"in",
"_sc",
":",
"self",
".",
"dataset",
".",
"tr_bad_words",
".",
"append",
"(",
"kw_",
")",
"except",
"ValueError",
":",
"# pragma: no cover",
"self",
".",
"dataset",
".",
"tr_invalid_words",
".",
"append",
"(",
"kw_",
")",
"except",
"(",
"KeyError",
",",
"AttributeError",
")",
":",
"# pragma: no cover",
"print",
"(",
"kw_",
"[",
"'Form'",
"]",
",",
"kw_",
")",
"raise",
"return",
"lexemes"
] | :return: list of dicts corresponding to newly created Lexemes | [
":",
"return",
":",
"list",
"of",
"dicts",
"corresponding",
"to",
"newly",
"created",
"Lexemes"
] | python | train |
ToFuProject/tofu | tofu/data/_core.py | https://github.com/ToFuProject/tofu/blob/39d6b2e7ced9e13666572dfd37e19403f1d6ff8d/tofu/data/_core.py#L1431-L1543 | def select_ch(self, val=None, key=None, log='any', touch=None, out=bool):
""" Return a channels index array
Return a boolean or integer index array, hereafter called 'ind'
The array refers to the reference channel/'X' vector self.ddataRef['X']
There are 3 different ways of selecting channels, by refering to:
- The 'X' vector/array values in self.dataRef['X']
- The dict of channels keys/values (if self.dchans != None)
- which element each LOS touches (if self.lCam != None)
Parameters
----------
val : None / str / float / np.array / list / tuple
The value against which to discriminate.
Behaviour depends on whether key is provided:
- key is None => val compares vs self.ddataRef['X']
- key provided => val compares vs self.dchans[key]
If key is None, the behaviour is similar to self.select_indt():
- None : ind matches all channels
- float : ind is True only for X closest to val
- np.ndarray : ind is True only for X closest to val
- list (len()==2): ind is True for X inside [val[0],val[1]]
- tuple (len()==2): ind is True for X outside ]val[0];val[1][
key : None / str
If provided, dict key to indicate which self.dchans[key] to use
log : str
If key provided, val can be a list of criteria
Then, log indicates whether all / any / none should be matched
touch : None
If key and val are None, return the indices of the LOS touching the
elements indicated in touch.
Requires that self.dgeom['lCam'] is not None (tf.geom.Cam.select())
out : type
Specifies the type of the output index array:
- bool : return a boolean array of shape (self.ddataRef['nch'],)
- int : return the array as integer indices
Return
------
ind : np.ndarray
The array of indices, of dtype specified by the keyword argument out
"""
assert out in [int,bool]
assert log in ['any','all','not']
lc = [val is None, key is None, touch is None]
lC = [all(lc), all(lc[:2]) and not lc[2],
not lc[0] and all(lc[1:]), not any(lc[:2]) and lc[2]]
assert np.sum(lC)==1
if lC[0]:
# get all channels
ind = np.ones((self._ddataRef['nch'],),dtype=bool)
elif lC[1]:
# get touch
if self._dgeom['lCam'] is None:
msg = "self.dgeom['lCam'] must be set to use touch !"
raise Exception(msg)
if any([type(cc) is str for cc in self._dgeom['lCam']]):
msg = "self.dgeom['lCam'] contains pathfiles !"
msg += "\n => Run self.strip(0)"
raise Exception(msg)
ind = []
for cc in self._dgeom['lCam']:
ind.append(cc.select(touch=touch, log=log, out=bool))
if len(ind)==1:
ind = ind[0]
else:
ind = np.concatenate(tuple(ind))
elif lC[2]:
# get values on X
if self._ddataRef['nnch']==1:
ind = _select_ind(val, self._ddataRef['X'], self._ddataRef['nch'])
else:
ind = np.zeros((self._ddataRef['nt'],self._ddataRef['nch']),dtype=bool)
for ii in range(0,self._ddataRef['nnch']):
iind = self._ddataRef['indtX']==ii
ind[iind,:] = _select_ind(val, self._ddataRef['X'],
self._ddataRef['nch'])[np.newaxis,:]
else:
if not (type(key) is str and key in self._dchans.keys()):
msg = "Provided key not valid!\n"
msg += " - key: %s\n"%str(key)
msg += "Please provide a valid key of self.dchans():\n"
msg += " - " + "\n - ".join(self._dchans.keys())
raise Exception(msg)
ltypes = [str,int,float,np.int64,np.float64]
C0 = type(val) in ltypes
C1 = type(val) in [list,tuple,np.ndarray]
assert C0 or C1
if C0:
val = [val]
else:
assert all([type(vv) in ltypes for vv in val])
ind = np.vstack([self._dchans[key]==ii for ii in val])
if log=='any':
ind = np.any(ind,axis=0)
elif log=='all':
ind = np.all(ind,axis=0)
else:
ind = ~np.any(ind,axis=0)
if out is int:
ind = ind.nonzero()[0]
return ind | [
"def",
"select_ch",
"(",
"self",
",",
"val",
"=",
"None",
",",
"key",
"=",
"None",
",",
"log",
"=",
"'any'",
",",
"touch",
"=",
"None",
",",
"out",
"=",
"bool",
")",
":",
"assert",
"out",
"in",
"[",
"int",
",",
"bool",
"]",
"assert",
"log",
"in",
"[",
"'any'",
",",
"'all'",
",",
"'not'",
"]",
"lc",
"=",
"[",
"val",
"is",
"None",
",",
"key",
"is",
"None",
",",
"touch",
"is",
"None",
"]",
"lC",
"=",
"[",
"all",
"(",
"lc",
")",
",",
"all",
"(",
"lc",
"[",
":",
"2",
"]",
")",
"and",
"not",
"lc",
"[",
"2",
"]",
",",
"not",
"lc",
"[",
"0",
"]",
"and",
"all",
"(",
"lc",
"[",
"1",
":",
"]",
")",
",",
"not",
"any",
"(",
"lc",
"[",
":",
"2",
"]",
")",
"and",
"lc",
"[",
"2",
"]",
"]",
"assert",
"np",
".",
"sum",
"(",
"lC",
")",
"==",
"1",
"if",
"lC",
"[",
"0",
"]",
":",
"# get all channels",
"ind",
"=",
"np",
".",
"ones",
"(",
"(",
"self",
".",
"_ddataRef",
"[",
"'nch'",
"]",
",",
")",
",",
"dtype",
"=",
"bool",
")",
"elif",
"lC",
"[",
"1",
"]",
":",
"# get touch",
"if",
"self",
".",
"_dgeom",
"[",
"'lCam'",
"]",
"is",
"None",
":",
"msg",
"=",
"\"self.dgeom['lCam'] must be set to use touch !\"",
"raise",
"Exception",
"(",
"msg",
")",
"if",
"any",
"(",
"[",
"type",
"(",
"cc",
")",
"is",
"str",
"for",
"cc",
"in",
"self",
".",
"_dgeom",
"[",
"'lCam'",
"]",
"]",
")",
":",
"msg",
"=",
"\"self.dgeom['lCam'] contains pathfiles !\"",
"msg",
"+=",
"\"\\n => Run self.strip(0)\"",
"raise",
"Exception",
"(",
"msg",
")",
"ind",
"=",
"[",
"]",
"for",
"cc",
"in",
"self",
".",
"_dgeom",
"[",
"'lCam'",
"]",
":",
"ind",
".",
"append",
"(",
"cc",
".",
"select",
"(",
"touch",
"=",
"touch",
",",
"log",
"=",
"log",
",",
"out",
"=",
"bool",
")",
")",
"if",
"len",
"(",
"ind",
")",
"==",
"1",
":",
"ind",
"=",
"ind",
"[",
"0",
"]",
"else",
":",
"ind",
"=",
"np",
".",
"concatenate",
"(",
"tuple",
"(",
"ind",
")",
")",
"elif",
"lC",
"[",
"2",
"]",
":",
"# get values on X",
"if",
"self",
".",
"_ddataRef",
"[",
"'nnch'",
"]",
"==",
"1",
":",
"ind",
"=",
"_select_ind",
"(",
"val",
",",
"self",
".",
"_ddataRef",
"[",
"'X'",
"]",
",",
"self",
".",
"_ddataRef",
"[",
"'nch'",
"]",
")",
"else",
":",
"ind",
"=",
"np",
".",
"zeros",
"(",
"(",
"self",
".",
"_ddataRef",
"[",
"'nt'",
"]",
",",
"self",
".",
"_ddataRef",
"[",
"'nch'",
"]",
")",
",",
"dtype",
"=",
"bool",
")",
"for",
"ii",
"in",
"range",
"(",
"0",
",",
"self",
".",
"_ddataRef",
"[",
"'nnch'",
"]",
")",
":",
"iind",
"=",
"self",
".",
"_ddataRef",
"[",
"'indtX'",
"]",
"==",
"ii",
"ind",
"[",
"iind",
",",
":",
"]",
"=",
"_select_ind",
"(",
"val",
",",
"self",
".",
"_ddataRef",
"[",
"'X'",
"]",
",",
"self",
".",
"_ddataRef",
"[",
"'nch'",
"]",
")",
"[",
"np",
".",
"newaxis",
",",
":",
"]",
"else",
":",
"if",
"not",
"(",
"type",
"(",
"key",
")",
"is",
"str",
"and",
"key",
"in",
"self",
".",
"_dchans",
".",
"keys",
"(",
")",
")",
":",
"msg",
"=",
"\"Provided key not valid!\\n\"",
"msg",
"+=",
"\" - key: %s\\n\"",
"%",
"str",
"(",
"key",
")",
"msg",
"+=",
"\"Please provide a valid key of self.dchans():\\n\"",
"msg",
"+=",
"\" - \"",
"+",
"\"\\n - \"",
".",
"join",
"(",
"self",
".",
"_dchans",
".",
"keys",
"(",
")",
")",
"raise",
"Exception",
"(",
"msg",
")",
"ltypes",
"=",
"[",
"str",
",",
"int",
",",
"float",
",",
"np",
".",
"int64",
",",
"np",
".",
"float64",
"]",
"C0",
"=",
"type",
"(",
"val",
")",
"in",
"ltypes",
"C1",
"=",
"type",
"(",
"val",
")",
"in",
"[",
"list",
",",
"tuple",
",",
"np",
".",
"ndarray",
"]",
"assert",
"C0",
"or",
"C1",
"if",
"C0",
":",
"val",
"=",
"[",
"val",
"]",
"else",
":",
"assert",
"all",
"(",
"[",
"type",
"(",
"vv",
")",
"in",
"ltypes",
"for",
"vv",
"in",
"val",
"]",
")",
"ind",
"=",
"np",
".",
"vstack",
"(",
"[",
"self",
".",
"_dchans",
"[",
"key",
"]",
"==",
"ii",
"for",
"ii",
"in",
"val",
"]",
")",
"if",
"log",
"==",
"'any'",
":",
"ind",
"=",
"np",
".",
"any",
"(",
"ind",
",",
"axis",
"=",
"0",
")",
"elif",
"log",
"==",
"'all'",
":",
"ind",
"=",
"np",
".",
"all",
"(",
"ind",
",",
"axis",
"=",
"0",
")",
"else",
":",
"ind",
"=",
"~",
"np",
".",
"any",
"(",
"ind",
",",
"axis",
"=",
"0",
")",
"if",
"out",
"is",
"int",
":",
"ind",
"=",
"ind",
".",
"nonzero",
"(",
")",
"[",
"0",
"]",
"return",
"ind"
] | Return a channels index array
Return a boolean or integer index array, hereafter called 'ind'
The array refers to the reference channel/'X' vector self.ddataRef['X']
There are 3 different ways of selecting channels, by refering to:
- The 'X' vector/array values in self.dataRef['X']
- The dict of channels keys/values (if self.dchans != None)
- which element each LOS touches (if self.lCam != None)
Parameters
----------
val : None / str / float / np.array / list / tuple
The value against which to discriminate.
Behaviour depends on whether key is provided:
- key is None => val compares vs self.ddataRef['X']
- key provided => val compares vs self.dchans[key]
If key is None, the behaviour is similar to self.select_indt():
- None : ind matches all channels
- float : ind is True only for X closest to val
- np.ndarray : ind is True only for X closest to val
- list (len()==2): ind is True for X inside [val[0],val[1]]
- tuple (len()==2): ind is True for X outside ]val[0];val[1][
key : None / str
If provided, dict key to indicate which self.dchans[key] to use
log : str
If key provided, val can be a list of criteria
Then, log indicates whether all / any / none should be matched
touch : None
If key and val are None, return the indices of the LOS touching the
elements indicated in touch.
Requires that self.dgeom['lCam'] is not None (tf.geom.Cam.select())
out : type
Specifies the type of the output index array:
- bool : return a boolean array of shape (self.ddataRef['nch'],)
- int : return the array as integer indices
Return
------
ind : np.ndarray
The array of indices, of dtype specified by the keyword argument out | [
"Return",
"a",
"channels",
"index",
"array"
] | python | train |
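The val-matching rules described in the record above (None / float / list / tuple) can be mimicked with plain numpy. This is not the tofu implementation, only a self-contained illustration of the documented behaviour:

import numpy as np

X = np.linspace(0.0, 1.0, 11)   # stand-in for the reference 'X' vector

def select(val, X):
    if val is None:                              # match every channel
        return np.ones(X.shape, dtype=bool)
    if np.isscalar(val):                         # only the point closest to val
        ind = np.zeros(X.shape, dtype=bool)
        ind[np.argmin(np.abs(X - val))] = True
        return ind
    if isinstance(val, list):                    # inside [val[0], val[1]]
        return (X >= val[0]) & (X <= val[1])
    if isinstance(val, tuple):                   # outside ]val[0]; val[1][
        return (X < val[0]) | (X > val[1])

print(select(0.33, X).nonzero()[0])         # [3]
print(select([0.2, 0.5], X).nonzero()[0])   # [2 3 4 5]
print(select((0.2, 0.5), X).nonzero()[0])   # [0 1 6 7 8 9 10]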
tensorpack/tensorpack | tensorpack/utils/concurrency.py | https://github.com/tensorpack/tensorpack/blob/d7a13cb74c9066bc791d7aafc3b744b60ee79a9f/tensorpack/utils/concurrency.py#L68-L74 | def queue_get_stoppable(self, q):
""" Take obj from queue, but will give up when the thread is stopped"""
while not self.stopped():
try:
return q.get(timeout=5)
except queue.Empty:
pass | [
"def",
"queue_get_stoppable",
"(",
"self",
",",
"q",
")",
":",
"while",
"not",
"self",
".",
"stopped",
"(",
")",
":",
"try",
":",
"return",
"q",
".",
"get",
"(",
"timeout",
"=",
"5",
")",
"except",
"queue",
".",
"Empty",
":",
"pass"
] | Take obj from queue, but will give up when the thread is stopped | [
"Take",
"obj",
"from",
"queue",
"but",
"will",
"give",
"up",
"when",
"the",
"thread",
"is",
"stopped"
] | python | train |
bulkan/robotframework-requests | src/RequestsLibrary/RequestsKeywords.py | https://github.com/bulkan/robotframework-requests/blob/11baa3277f1cb728712e26d996200703c15254a8/src/RequestsLibrary/RequestsKeywords.py#L550-L607 | def post_request(
self,
alias,
uri,
data=None,
json=None,
params=None,
headers=None,
files=None,
allow_redirects=None,
timeout=None):
""" Send a POST request on the session object found using the
given `alias`
``alias`` that will be used to identify the Session object in the cache
``uri`` to send the POST request to
``data`` a dictionary of key-value pairs that will be urlencoded
and sent as POST data
or binary data that is sent as the raw body content
or passed as such for multipart form data if ``files`` is also
defined
``json`` a value that will be json encoded
and sent as POST data if files or data is not specified
``params`` url parameters to append to the uri
``headers`` a dictionary of headers to use with the request
``files`` a dictionary of file names containing file data to POST to the server
``allow_redirects`` Boolean. Set to True if POST/PUT/DELETE redirect following is allowed.
``timeout`` connection timeout
"""
session = self._cache.switch(alias)
if not files:
data = self._format_data_according_to_header(session, data, headers)
redir = True if allow_redirects is None else allow_redirects
response = self._body_request(
"post",
session,
uri,
data,
json,
params,
files,
headers,
redir,
timeout)
dataStr = self._format_data_to_log_string_according_to_header(data, headers)
logger.info('Post Request using : alias=%s, uri=%s, data=%s, headers=%s, files=%s, allow_redirects=%s '
% (alias, uri, dataStr, headers, files, redir))
return response | [
"def",
"post_request",
"(",
"self",
",",
"alias",
",",
"uri",
",",
"data",
"=",
"None",
",",
"json",
"=",
"None",
",",
"params",
"=",
"None",
",",
"headers",
"=",
"None",
",",
"files",
"=",
"None",
",",
"allow_redirects",
"=",
"None",
",",
"timeout",
"=",
"None",
")",
":",
"session",
"=",
"self",
".",
"_cache",
".",
"switch",
"(",
"alias",
")",
"if",
"not",
"files",
":",
"data",
"=",
"self",
".",
"_format_data_according_to_header",
"(",
"session",
",",
"data",
",",
"headers",
")",
"redir",
"=",
"True",
"if",
"allow_redirects",
"is",
"None",
"else",
"allow_redirects",
"response",
"=",
"self",
".",
"_body_request",
"(",
"\"post\"",
",",
"session",
",",
"uri",
",",
"data",
",",
"json",
",",
"params",
",",
"files",
",",
"headers",
",",
"redir",
",",
"timeout",
")",
"dataStr",
"=",
"self",
".",
"_format_data_to_log_string_according_to_header",
"(",
"data",
",",
"headers",
")",
"logger",
".",
"info",
"(",
"'Post Request using : alias=%s, uri=%s, data=%s, headers=%s, files=%s, allow_redirects=%s '",
"%",
"(",
"alias",
",",
"uri",
",",
"dataStr",
",",
"headers",
",",
"files",
",",
"redir",
")",
")",
"return",
"response"
] | Send a POST request on the session object found using the
given `alias`
``alias`` that will be used to identify the Session object in the cache
``uri`` to send the POST request to
``data`` a dictionary of key-value pairs that will be urlencoded
and sent as POST data
or binary data that is sent as the raw body content
or passed as such for multipart form data if ``files`` is also
defined
``json`` a value that will be json encoded
and sent as POST data if files or data is not specified
``params`` url parameters to append to the uri
``headers`` a dictionary of headers to use with the request
``files`` a dictionary of file names containing file data to POST to the server
``allow_redirects`` Boolean. Set to True if POST/PUT/DELETE redirect following is allowed.
``timeout`` connection timeout | [
"Send",
"a",
"POST",
"request",
"on",
"the",
"session",
"object",
"found",
"using",
"the",
"given",
"alias"
] | python | train |
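In a Robot Framework suite the keyword documented above is normally written as `Create Session` / `Post Request` lines; for illustration the same calls can be driven from plain Python. The create_session signature and the public echo endpoint are assumptions, and running the library outside Robot Framework is not its normal mode of use:

from RequestsLibrary import RequestsKeywords

rk = RequestsKeywords()
rk.create_session('api', 'https://httpbin.org')              # alias, base URL (assumed signature)
resp = rk.post_request('api', '/post', data={'name': 'value'})
print(resp.status_code)
print(resp.json().get('form'))                                # echoed form data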
dbrattli/OSlash | oslash/observable.py | https://github.com/dbrattli/OSlash/blob/ffdc714c5d454f7519f740254de89f70850929eb/oslash/observable.py#L39-L45 | def map(self, mapper: Callable[[Any], Any]) -> 'Observable':
r"""Map a function over an observable.
Haskell: fmap f m = Cont $ \c -> runCont m (c . f)
"""
source = self
return Observable(lambda on_next: source.subscribe(compose(on_next, mapper))) | [
"def",
"map",
"(",
"self",
",",
"mapper",
":",
"Callable",
"[",
"[",
"Any",
"]",
",",
"Any",
"]",
")",
"->",
"'Observable'",
":",
"source",
"=",
"self",
"return",
"Observable",
"(",
"lambda",
"on_next",
":",
"source",
".",
"subscribe",
"(",
"compose",
"(",
"on_next",
",",
"mapper",
")",
")",
")"
] | r"""Map a function over an observable.
Haskell: fmap f m = Cont $ \c -> runCont m (c . f) | [
"r",
"Map",
"a",
"function",
"over",
"an",
"observable",
"."
] | python | train |
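A tiny sketch of map() above. The constructor-takes-a-subscribe-function style is inferred from the method body itself, so treat the exact API as an assumption:

from oslash.observable import Observable

source = Observable(lambda on_next: on_next(21))   # continuation-style producer
doubled = source.map(lambda x: x * 2)
doubled.subscribe(print)                           # prints 42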
saltstack/salt | salt/cloud/clouds/gce.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/gce.py#L645-L701 | def delete_network(kwargs=None, call=None):
'''
Permanently delete a network.
CLI Example:
.. code-block:: bash
salt-cloud -f delete_network gce name=mynet
'''
if call != 'function':
raise SaltCloudSystemExit(
'The delete_network function must be called with -f or --function.'
)
if not kwargs or 'name' not in kwargs:
log.error(
'A name must be specified when deleting a network.'
)
return False
name = kwargs['name']
conn = get_conn()
__utils__['cloud.fire_event'](
'event',
'deleting network',
'salt/cloud/net/deleting',
args={
'name': name,
},
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
try:
result = conn.ex_destroy_network(
conn.ex_get_network(name)
)
except ResourceNotFoundError as exc:
log.error(
'Nework %s was not found. Exception was: %s',
name, exc, exc_info_on_loglevel=logging.DEBUG
)
return False
__utils__['cloud.fire_event'](
'event',
'deleted network',
'salt/cloud/net/deleted',
args={
'name': name,
},
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
return result | [
"def",
"delete_network",
"(",
"kwargs",
"=",
"None",
",",
"call",
"=",
"None",
")",
":",
"if",
"call",
"!=",
"'function'",
":",
"raise",
"SaltCloudSystemExit",
"(",
"'The delete_network function must be called with -f or --function.'",
")",
"if",
"not",
"kwargs",
"or",
"'name'",
"not",
"in",
"kwargs",
":",
"log",
".",
"error",
"(",
"'A name must be specified when deleting a network.'",
")",
"return",
"False",
"name",
"=",
"kwargs",
"[",
"'name'",
"]",
"conn",
"=",
"get_conn",
"(",
")",
"__utils__",
"[",
"'cloud.fire_event'",
"]",
"(",
"'event'",
",",
"'deleting network'",
",",
"'salt/cloud/net/deleting'",
",",
"args",
"=",
"{",
"'name'",
":",
"name",
",",
"}",
",",
"sock_dir",
"=",
"__opts__",
"[",
"'sock_dir'",
"]",
",",
"transport",
"=",
"__opts__",
"[",
"'transport'",
"]",
")",
"try",
":",
"result",
"=",
"conn",
".",
"ex_destroy_network",
"(",
"conn",
".",
"ex_get_network",
"(",
"name",
")",
")",
"except",
"ResourceNotFoundError",
"as",
"exc",
":",
"log",
".",
"error",
"(",
"'Nework %s was not found. Exception was: %s'",
",",
"name",
",",
"exc",
",",
"exc_info_on_loglevel",
"=",
"logging",
".",
"DEBUG",
")",
"return",
"False",
"__utils__",
"[",
"'cloud.fire_event'",
"]",
"(",
"'event'",
",",
"'deleted network'",
",",
"'salt/cloud/net/deleted'",
",",
"args",
"=",
"{",
"'name'",
":",
"name",
",",
"}",
",",
"sock_dir",
"=",
"__opts__",
"[",
"'sock_dir'",
"]",
",",
"transport",
"=",
"__opts__",
"[",
"'transport'",
"]",
")",
"return",
"result"
] | Permanently delete a network.
CLI Example:
.. code-block:: bash
salt-cloud -f delete_network gce name=mynet | [
"Permanently",
"delete",
"a",
"network",
"."
] | python | train |
aleju/imgaug | imgaug/augmentables/polys.py | https://github.com/aleju/imgaug/blob/786be74aa855513840113ea523c5df495dc6a8af/imgaug/augmentables/polys.py#L836-L890 | def exterior_almost_equals(self, other, max_distance=1e-6, points_per_edge=8):
"""
Estimate if this and other polygon's exterior are almost identical.
The two exteriors can have different numbers of points, but any point
randomly sampled on the exterior of one polygon should be close to the
closest point on the exterior of the other polygon.
Note that this method works approximately. One can come up with
polygons with fairly different shapes that will still be estimated as
equal by this method. In practice however this should be unlikely to be
the case. The probability for something like that goes down as the
interpolation parameter is increased.
Parameters
----------
other : imgaug.Polygon or (N,2) ndarray or list of tuple
The other polygon with which to compare the exterior.
If this is an ndarray, it is assumed to represent an exterior.
It must then have dtype ``float32`` and shape ``(N,2)`` with the
second dimension denoting xy-coordinates.
If this is a list of tuples, it is assumed to represent an exterior.
Each tuple then must contain exactly two numbers, denoting
xy-coordinates.
max_distance : number, optional
The maximum euclidean distance between a point on one polygon and
the closest point on the other polygon. If the distance is exceeded
for any such pair, the two exteriors are not viewed as equal. The
points are other the points contained in the polygon's exterior
ndarray or interpolated points between these.
points_per_edge : int, optional
How many points to interpolate on each edge.
Returns
-------
bool
Whether the two polygon's exteriors can be viewed as equal
(approximate test).
"""
if isinstance(other, list):
other = Polygon(np.float32(other))
elif ia.is_np_array(other):
other = Polygon(other)
else:
assert isinstance(other, Polygon)
other = other
return self.to_line_string(closed=True).coords_almost_equals(
other.to_line_string(closed=True),
max_distance=max_distance,
points_per_edge=points_per_edge
) | [
"def",
"exterior_almost_equals",
"(",
"self",
",",
"other",
",",
"max_distance",
"=",
"1e-6",
",",
"points_per_edge",
"=",
"8",
")",
":",
"if",
"isinstance",
"(",
"other",
",",
"list",
")",
":",
"other",
"=",
"Polygon",
"(",
"np",
".",
"float32",
"(",
"other",
")",
")",
"elif",
"ia",
".",
"is_np_array",
"(",
"other",
")",
":",
"other",
"=",
"Polygon",
"(",
"other",
")",
"else",
":",
"assert",
"isinstance",
"(",
"other",
",",
"Polygon",
")",
"other",
"=",
"other",
"return",
"self",
".",
"to_line_string",
"(",
"closed",
"=",
"True",
")",
".",
"coords_almost_equals",
"(",
"other",
".",
"to_line_string",
"(",
"closed",
"=",
"True",
")",
",",
"max_distance",
"=",
"max_distance",
",",
"points_per_edge",
"=",
"points_per_edge",
")"
] | Estimate if this and other polygon's exterior are almost identical.
The two exteriors can have different numbers of points, but any point
randomly sampled on the exterior of one polygon should be close to the
closest point on the exterior of the other polygon.
Note that this method works approximately. One can come up with
polygons with fairly different shapes that will still be estimated as
equal by this method. In practice however this should be unlikely to be
the case. The probability for something like that goes down as the
interpolation parameter is increased.
Parameters
----------
other : imgaug.Polygon or (N,2) ndarray or list of tuple
The other polygon with which to compare the exterior.
If this is an ndarray, it is assumed to represent an exterior.
It must then have dtype ``float32`` and shape ``(N,2)`` with the
second dimension denoting xy-coordinates.
If this is a list of tuples, it is assumed to represent an exterior.
Each tuple then must contain exactly two numbers, denoting
xy-coordinates.
max_distance : number, optional
The maximum euclidean distance between a point on one polygon and
the closest point on the other polygon. If the distance is exceeded
for any such pair, the two exteriors are not viewed as equal. The
points are either the points contained in the polygon's exterior
ndarray or interpolated points between these.
points_per_edge : int, optional
How many points to interpolate on each edge.
Returns
-------
bool
Whether the two polygon's exteriors can be viewed as equal
(approximate test). | [
"Estimate",
"if",
"this",
"and",
"other",
"polygon",
"s",
"exterior",
"are",
"almost",
"identical",
"."
] | python | valid |
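A short self-contained check of exterior_almost_equals() from the record above, using the Polygon class from the same imgaug module:

from imgaug.augmentables.polys import Polygon

square = Polygon([(0.0, 0.0), (1.0, 0.0), (1.0, 1.0), (0.0, 1.0)])
# Same square, with an extra redundant point on one edge and a tiny vertex offset.
almost = Polygon([(0.0, 0.0), (0.5, 0.0), (1.0, 0.0), (1.0, 1.0), (0.0, 1.0 + 1e-8)])

print(square.exterior_almost_equals(almost, max_distance=1e-4))           # True
print(square.exterior_almost_equals([(0, 0), (2, 0), (2, 2), (0, 2)]))    # False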
materialsproject/pymatgen | pymatgen/phonon/plotter.py | https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/phonon/plotter.py#L116-L191 | def get_plot(self, xlim=None, ylim=None, units="thz"):
"""
Get a matplotlib plot showing the DOS.
Args:
xlim: Specifies the x-axis limits. Set to None for automatic
determination.
ylim: Specifies the y-axis limits.
units: units for the frequencies. Accepted values thz, ev, mev, ha, cm-1, cm^-1.
"""
u = freq_units(units)
ncolors = max(3, len(self._doses))
ncolors = min(9, ncolors)
import palettable
colors = palettable.colorbrewer.qualitative.Set1_9.mpl_colors
y = None
alldensities = []
allfrequencies = []
plt = pretty_plot(12, 8)
# Note that this complicated processing of frequencies is to allow for
# stacked plots in matplotlib.
for key, dos in self._doses.items():
frequencies = dos['frequencies'] * u.factor
densities = dos['densities']
if y is None:
y = np.zeros(frequencies.shape)
if self.stack:
y += densities
newdens = y.copy()
else:
newdens = densities
allfrequencies.append(frequencies)
alldensities.append(newdens)
keys = list(self._doses.keys())
keys.reverse()
alldensities.reverse()
allfrequencies.reverse()
allpts = []
for i, (key, frequencies, densities) in enumerate(zip(keys, allfrequencies, alldensities)):
allpts.extend(list(zip(frequencies, densities)))
if self.stack:
plt.fill(frequencies, densities, color=colors[i % ncolors],
label=str(key))
else:
plt.plot(frequencies, densities, color=colors[i % ncolors],
label=str(key), linewidth=3)
if xlim:
plt.xlim(xlim)
if ylim:
plt.ylim(ylim)
else:
xlim = plt.xlim()
relevanty = [p[1] for p in allpts
if xlim[0] < p[0] < xlim[1]]
plt.ylim((min(relevanty), max(relevanty)))
ylim = plt.ylim()
plt.plot([0, 0], ylim, 'k--', linewidth=2)
plt.xlabel(r'$\mathrm{{Frequencies\ ({})}}$'.format(u.label))
plt.ylabel(r'$\mathrm{Density\ of\ states}$')
plt.legend()
leg = plt.gca().get_legend()
ltext = leg.get_texts() # all the text.Text instance in the legend
plt.setp(ltext, fontsize=30)
plt.tight_layout()
return plt | [
"def",
"get_plot",
"(",
"self",
",",
"xlim",
"=",
"None",
",",
"ylim",
"=",
"None",
",",
"units",
"=",
"\"thz\"",
")",
":",
"u",
"=",
"freq_units",
"(",
"units",
")",
"ncolors",
"=",
"max",
"(",
"3",
",",
"len",
"(",
"self",
".",
"_doses",
")",
")",
"ncolors",
"=",
"min",
"(",
"9",
",",
"ncolors",
")",
"import",
"palettable",
"colors",
"=",
"palettable",
".",
"colorbrewer",
".",
"qualitative",
".",
"Set1_9",
".",
"mpl_colors",
"y",
"=",
"None",
"alldensities",
"=",
"[",
"]",
"allfrequencies",
"=",
"[",
"]",
"plt",
"=",
"pretty_plot",
"(",
"12",
",",
"8",
")",
"# Note that this complicated processing of frequencies is to allow for",
"# stacked plots in matplotlib.",
"for",
"key",
",",
"dos",
"in",
"self",
".",
"_doses",
".",
"items",
"(",
")",
":",
"frequencies",
"=",
"dos",
"[",
"'frequencies'",
"]",
"*",
"u",
".",
"factor",
"densities",
"=",
"dos",
"[",
"'densities'",
"]",
"if",
"y",
"is",
"None",
":",
"y",
"=",
"np",
".",
"zeros",
"(",
"frequencies",
".",
"shape",
")",
"if",
"self",
".",
"stack",
":",
"y",
"+=",
"densities",
"newdens",
"=",
"y",
".",
"copy",
"(",
")",
"else",
":",
"newdens",
"=",
"densities",
"allfrequencies",
".",
"append",
"(",
"frequencies",
")",
"alldensities",
".",
"append",
"(",
"newdens",
")",
"keys",
"=",
"list",
"(",
"self",
".",
"_doses",
".",
"keys",
"(",
")",
")",
"keys",
".",
"reverse",
"(",
")",
"alldensities",
".",
"reverse",
"(",
")",
"allfrequencies",
".",
"reverse",
"(",
")",
"allpts",
"=",
"[",
"]",
"for",
"i",
",",
"(",
"key",
",",
"frequencies",
",",
"densities",
")",
"in",
"enumerate",
"(",
"zip",
"(",
"keys",
",",
"allfrequencies",
",",
"alldensities",
")",
")",
":",
"allpts",
".",
"extend",
"(",
"list",
"(",
"zip",
"(",
"frequencies",
",",
"densities",
")",
")",
")",
"if",
"self",
".",
"stack",
":",
"plt",
".",
"fill",
"(",
"frequencies",
",",
"densities",
",",
"color",
"=",
"colors",
"[",
"i",
"%",
"ncolors",
"]",
",",
"label",
"=",
"str",
"(",
"key",
")",
")",
"else",
":",
"plt",
".",
"plot",
"(",
"frequencies",
",",
"densities",
",",
"color",
"=",
"colors",
"[",
"i",
"%",
"ncolors",
"]",
",",
"label",
"=",
"str",
"(",
"key",
")",
",",
"linewidth",
"=",
"3",
")",
"if",
"xlim",
":",
"plt",
".",
"xlim",
"(",
"xlim",
")",
"if",
"ylim",
":",
"plt",
".",
"ylim",
"(",
"ylim",
")",
"else",
":",
"xlim",
"=",
"plt",
".",
"xlim",
"(",
")",
"relevanty",
"=",
"[",
"p",
"[",
"1",
"]",
"for",
"p",
"in",
"allpts",
"if",
"xlim",
"[",
"0",
"]",
"<",
"p",
"[",
"0",
"]",
"<",
"xlim",
"[",
"1",
"]",
"]",
"plt",
".",
"ylim",
"(",
"(",
"min",
"(",
"relevanty",
")",
",",
"max",
"(",
"relevanty",
")",
")",
")",
"ylim",
"=",
"plt",
".",
"ylim",
"(",
")",
"plt",
".",
"plot",
"(",
"[",
"0",
",",
"0",
"]",
",",
"ylim",
",",
"'k--'",
",",
"linewidth",
"=",
"2",
")",
"plt",
".",
"xlabel",
"(",
"r'$\\mathrm{{Frequencies\\ ({})}}$'",
".",
"format",
"(",
"u",
".",
"label",
")",
")",
"plt",
".",
"ylabel",
"(",
"r'$\\mathrm{Density\\ of\\ states}$'",
")",
"plt",
".",
"legend",
"(",
")",
"leg",
"=",
"plt",
".",
"gca",
"(",
")",
".",
"get_legend",
"(",
")",
"ltext",
"=",
"leg",
".",
"get_texts",
"(",
")",
"# all the text.Text instance in the legend",
"plt",
".",
"setp",
"(",
"ltext",
",",
"fontsize",
"=",
"30",
")",
"plt",
".",
"tight_layout",
"(",
")",
"return",
"plt"
] | Get a matplotlib plot showing the DOS.
Args:
xlim: Specifies the x-axis limits. Set to None for automatic
determination.
ylim: Specifies the y-axis limits.
units: units for the frequencies. Accepted values thz, ev, mev, ha, cm-1, cm^-1. | [
"Get",
"a",
"matplotlib",
"plot",
"showing",
"the",
"DOS",
"."
] | python | train |
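A hedged sketch of get_plot() in the record above, building a toy phonon DOS by hand. The PhononDos(frequencies, densities) constructor and the plotter defaults are assumptions about the pymatgen API:

import numpy as np
from pymatgen.phonon.dos import PhononDos             # assumed location of the DOS class
from pymatgen.phonon.plotter import PhononDosPlotter

freqs = np.linspace(0.0, 10.0, 200)                   # THz
dens = np.exp(-(freqs - 5.0) ** 2)                    # toy Gaussian density of states

plotter = PhononDosPlotter(stack=False, sigma=None)
plotter.add_dos("toy DOS", PhononDos(freqs, dens))
plt = plotter.get_plot(units="thz")                   # also accepts "cm-1", "ev", "mev", ...
plt.savefig("toy_dos.png")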
tensorflow/tensor2tensor | tensor2tensor/utils/hparam.py | https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/hparam.py#L482-L504 | def parse(self, values):
"""Override existing hyperparameter values, parsing new values from a string.
See parse_values for more detail on the allowed format for values.
Args:
values: String. Comma separated list of `name=value` pairs where 'value'
must follow the syntax described above.
Returns:
The `HParams` instance.
Raises:
ValueError: If `values` cannot be parsed or a hyperparameter in `values`
doesn't exist.
"""
type_map = {}
for name, t in self._hparam_types.items():
param_type, _ = t
type_map[name] = param_type
values_map = parse_values(values, type_map)
return self.override_from_dict(values_map) | [
"def",
"parse",
"(",
"self",
",",
"values",
")",
":",
"type_map",
"=",
"{",
"}",
"for",
"name",
",",
"t",
"in",
"self",
".",
"_hparam_types",
".",
"items",
"(",
")",
":",
"param_type",
",",
"_",
"=",
"t",
"type_map",
"[",
"name",
"]",
"=",
"param_type",
"values_map",
"=",
"parse_values",
"(",
"values",
",",
"type_map",
")",
"return",
"self",
".",
"override_from_dict",
"(",
"values_map",
")"
] | Override existing hyperparameter values, parsing new values from a string.
See parse_values for more detail on the allowed format for values.
Args:
values: String. Comma separated list of `name=value` pairs where 'value'
must follow the syntax described above.
Returns:
The `HParams` instance.
Raises:
ValueError: If `values` cannot be parsed or a hyperparameter in `values`
doesn't exist. | [
"Override",
"existing",
"hyperparameter",
"values",
"parsing",
"new",
"values",
"from",
"a",
"string",
"."
] | python | train |
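A minimal illustration of parse() above with tensor2tensor's HParams, which mirrors the old tf.contrib.training.HParams API:

from tensor2tensor.utils.hparam import HParams

hp = HParams(learning_rate=0.1, num_layers=2)
hp.parse('learning_rate=0.3,num_layers=4')    # override from a CLI-style string
print(hp.learning_rate, hp.num_layers)        # 0.3 4
print(hp.values())                            # {'learning_rate': 0.3, 'num_layers': 4}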
chrippa/python-librtmp | librtmp/rtmp.py | https://github.com/chrippa/python-librtmp/blob/6efefd5edd76cad7a3b53f7c87c1c7350448224d/librtmp/rtmp.py#L242-L271 | def read_packet(self):
"""Reads a RTMP packet from the server.
Returns a :class:`RTMPPacket`.
Raises :exc:`RTMPError` on error.
Raises :exc:`RTMPTimeoutError` on timeout.
Usage::
>>> packet = conn.read_packet()
>>> packet.body
b'packet body ...'
"""
packet = ffi.new("RTMPPacket*")
packet_complete = False
while not packet_complete:
res = librtmp.RTMP_ReadPacket(self.rtmp, packet)
if res < 1:
if librtmp.RTMP_IsTimedout(self.rtmp):
raise RTMPTimeoutError("Timed out while reading packet")
else:
raise RTMPError("Failed to read packet")
packet_complete = packet.m_nBytesRead == packet.m_nBodySize
return RTMPPacket._from_pointer(packet) | [
"def",
"read_packet",
"(",
"self",
")",
":",
"packet",
"=",
"ffi",
".",
"new",
"(",
"\"RTMPPacket*\"",
")",
"packet_complete",
"=",
"False",
"while",
"not",
"packet_complete",
":",
"res",
"=",
"librtmp",
".",
"RTMP_ReadPacket",
"(",
"self",
".",
"rtmp",
",",
"packet",
")",
"if",
"res",
"<",
"1",
":",
"if",
"librtmp",
".",
"RTMP_IsTimedout",
"(",
"self",
".",
"rtmp",
")",
":",
"raise",
"RTMPTimeoutError",
"(",
"\"Timed out while reading packet\"",
")",
"else",
":",
"raise",
"RTMPError",
"(",
"\"Failed to read packet\"",
")",
"packet_complete",
"=",
"packet",
".",
"m_nBytesRead",
"==",
"packet",
".",
"m_nBodySize",
"return",
"RTMPPacket",
".",
"_from_pointer",
"(",
"packet",
")"
] | Reads a RTMP packet from the server.
Returns a :class:`RTMPPacket`.
Raises :exc:`RTMPError` on error.
Raises :exc:`RTMPTimeoutError` on timeout.
Usage::
>>> packet = conn.read_packet()
>>> packet.body
b'packet body ...' | [
"Reads",
"a",
"RTMP",
"packet",
"from",
"the",
"server",
"."
] | python | train |
yougov/mongo-connector | mongo_connector/connector.py | https://github.com/yougov/mongo-connector/blob/557cafd4b54c848cd54ef28a258391a154650cb4/mongo_connector/connector.py#L272-L314 | def read_oplog_progress(self):
"""Reads oplog progress from file provided by user.
This method is only called once before any threads are spawned.
"""
if self.oplog_checkpoint is None:
return None
# Check for empty file
try:
if os.stat(self.oplog_checkpoint).st_size == 0:
LOG.info("MongoConnector: Empty oplog progress file.")
return None
except OSError:
return None
with open(self.oplog_checkpoint, "r") as progress_file:
try:
data = json.load(progress_file)
except ValueError:
LOG.exception(
'Cannot read oplog progress file "%s". '
"It may be corrupt after Mongo Connector was shut down"
"uncleanly. You can try to recover from a backup file "
'(may be called "%s.backup") or create a new progress file '
"starting at the current moment in time by running "
"mongo-connector --no-dump <other options>. "
"You may also be trying to read an oplog progress file "
"created with the old format for sharded clusters. "
"See https://github.com/10gen-labs/mongo-connector/wiki"
"/Oplog-Progress-File for complete documentation."
% (self.oplog_checkpoint, self.oplog_checkpoint)
)
return
# data format:
# [name, timestamp] = replica set
# [[name, timestamp], [name, timestamp], ...] = sharded cluster
if not isinstance(data[0], list):
data = [data]
with self.oplog_progress:
self.oplog_progress.dict = dict(
(name, util.long_to_bson_ts(timestamp)) for name, timestamp in data
) | [
"def",
"read_oplog_progress",
"(",
"self",
")",
":",
"if",
"self",
".",
"oplog_checkpoint",
"is",
"None",
":",
"return",
"None",
"# Check for empty file",
"try",
":",
"if",
"os",
".",
"stat",
"(",
"self",
".",
"oplog_checkpoint",
")",
".",
"st_size",
"==",
"0",
":",
"LOG",
".",
"info",
"(",
"\"MongoConnector: Empty oplog progress file.\"",
")",
"return",
"None",
"except",
"OSError",
":",
"return",
"None",
"with",
"open",
"(",
"self",
".",
"oplog_checkpoint",
",",
"\"r\"",
")",
"as",
"progress_file",
":",
"try",
":",
"data",
"=",
"json",
".",
"load",
"(",
"progress_file",
")",
"except",
"ValueError",
":",
"LOG",
".",
"exception",
"(",
"'Cannot read oplog progress file \"%s\". '",
"\"It may be corrupt after Mongo Connector was shut down\"",
"\"uncleanly. You can try to recover from a backup file \"",
"'(may be called \"%s.backup\") or create a new progress file '",
"\"starting at the current moment in time by running \"",
"\"mongo-connector --no-dump <other options>. \"",
"\"You may also be trying to read an oplog progress file \"",
"\"created with the old format for sharded clusters. \"",
"\"See https://github.com/10gen-labs/mongo-connector/wiki\"",
"\"/Oplog-Progress-File for complete documentation.\"",
"%",
"(",
"self",
".",
"oplog_checkpoint",
",",
"self",
".",
"oplog_checkpoint",
")",
")",
"return",
"# data format:",
"# [name, timestamp] = replica set",
"# [[name, timestamp], [name, timestamp], ...] = sharded cluster",
"if",
"not",
"isinstance",
"(",
"data",
"[",
"0",
"]",
",",
"list",
")",
":",
"data",
"=",
"[",
"data",
"]",
"with",
"self",
".",
"oplog_progress",
":",
"self",
".",
"oplog_progress",
".",
"dict",
"=",
"dict",
"(",
"(",
"name",
",",
"util",
".",
"long_to_bson_ts",
"(",
"timestamp",
")",
")",
"for",
"name",
",",
"timestamp",
"in",
"data",
")"
] | Reads oplog progress from file provided by user.
This method is only called once before any threads are spawned. | [
"Reads",
"oplog",
"progress",
"from",
"file",
"provided",
"by",
"user",
".",
"This",
"method",
"is",
"only",
"called",
"once",
"before",
"any",
"threads",
"are",
"spanwed",
"."
] | python | train |
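
Illustrative sketch for the read_oplog_progress record above (not part of the record itself): the two on-disk layouts the method accepts, with a made-up file name and timestamp.

import json

# Replica-set layout: a single [name, timestamp] pair on disk.
with open("oplog.timestamp", "w") as progress_file:
    json.dump(["rs0", 6743954238871312385], progress_file)

with open("oplog.timestamp") as progress_file:
    data = json.load(progress_file)

# Mirror the normalisation in read_oplog_progress(): a single pair is wrapped
# so both the replica-set and sharded-cluster layouts become a list of pairs.
if not isinstance(data[0], list):
    data = [data]

# The real method converts each long into a BSON Timestamp via
# util.long_to_bson_ts(); the raw longs are kept here to stay dependency-free.
print(dict(data))  # {'rs0': 6743954238871312385}
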
petl-developers/petl | petl/io/whoosh.py | https://github.com/petl-developers/petl/blob/1d33ca055f7e04e0d28a772041c9fd30c8d415d6/petl/io/whoosh.py#L257-L334 | def searchtextindex(index_or_dirname, query, limit=10, indexname=None,
docnum_field=None, score_field=None, fieldboosts=None,
search_kwargs=None):
"""
Search a Whoosh index using a query. E.g.::
>>> import petl as etl
>>> import os
>>> # set up an index and load some documents via the Whoosh API
... from whoosh.index import create_in
>>> from whoosh.fields import *
>>> schema = Schema(title=TEXT(stored=True), path=ID(stored=True),
... content=TEXT)
>>> dirname = 'example.whoosh'
>>> if not os.path.exists(dirname):
... os.mkdir(dirname)
...
>>> index = create_in('example.whoosh', schema)
>>> writer = index.writer()
>>> writer.add_document(title=u"Oranges", path=u"/a",
... content=u"This is the first document we've added!")
>>> writer.add_document(title=u"Apples", path=u"/b",
... content=u"The second document is even more "
... u"interesting!")
>>> writer.commit()
>>> # demonstrate the use of searchtextindex()
... table1 = etl.searchtextindex('example.whoosh', 'oranges')
>>> table1
+------+-----------+
| path | title |
+======+===========+
| '/a' | 'Oranges' |
+------+-----------+
>>> table2 = etl.searchtextindex('example.whoosh', 'doc*')
>>> table2
+------+-----------+
| path | title |
+======+===========+
| '/a' | 'Oranges' |
+------+-----------+
| '/b' | 'Apples' |
+------+-----------+
Keyword arguments:
index_or_dirname
Either an instance of `whoosh.index.Index` or a string containing the
directory path where the index is to be stored.
query
Either a string or an instance of `whoosh.query.Query`. If a string,
it will be parsed as a multi-field query, i.e., any terms not bound
to a specific field will match **any** field.
limit
Return at most `limit` results.
indexname
String containing the name of the index, if multiple indexes are stored
in the same directory.
docnum_field
If not None, an extra field will be added to the output table containing
the internal document number stored in the index. The name of the field
will be the value of this argument.
score_field
If not None, an extra field will be added to the output table containing
the score of the result. The name of the field will be the value of this
argument.
fieldboosts
An optional dictionary mapping field names to boosts.
search_kwargs
Any extra keyword arguments to be passed through to the Whoosh
`search()` method.
"""
return SearchTextIndexView(index_or_dirname, query, limit=limit,
indexname=indexname, docnum_field=docnum_field,
score_field=score_field, fieldboosts=fieldboosts,
search_kwargs=search_kwargs) | [
"def",
"searchtextindex",
"(",
"index_or_dirname",
",",
"query",
",",
"limit",
"=",
"10",
",",
"indexname",
"=",
"None",
",",
"docnum_field",
"=",
"None",
",",
"score_field",
"=",
"None",
",",
"fieldboosts",
"=",
"None",
",",
"search_kwargs",
"=",
"None",
")",
":",
"return",
"SearchTextIndexView",
"(",
"index_or_dirname",
",",
"query",
",",
"limit",
"=",
"limit",
",",
"indexname",
"=",
"indexname",
",",
"docnum_field",
"=",
"docnum_field",
",",
"score_field",
"=",
"score_field",
",",
"fieldboosts",
"=",
"fieldboosts",
",",
"search_kwargs",
"=",
"search_kwargs",
")"
] | Search a Whoosh index using a query. E.g.::
>>> import petl as etl
>>> import os
>>> # set up an index and load some documents via the Whoosh API
... from whoosh.index import create_in
>>> from whoosh.fields import *
>>> schema = Schema(title=TEXT(stored=True), path=ID(stored=True),
... content=TEXT)
>>> dirname = 'example.whoosh'
>>> if not os.path.exists(dirname):
... os.mkdir(dirname)
...
>>> index = create_in('example.whoosh', schema)
>>> writer = index.writer()
>>> writer.add_document(title=u"Oranges", path=u"/a",
... content=u"This is the first document we've added!")
>>> writer.add_document(title=u"Apples", path=u"/b",
... content=u"The second document is even more "
... u"interesting!")
>>> writer.commit()
>>> # demonstrate the use of searchtextindex()
... table1 = etl.searchtextindex('example.whoosh', 'oranges')
>>> table1
+------+-----------+
| path | title |
+======+===========+
| '/a' | 'Oranges' |
+------+-----------+
>>> table2 = etl.searchtextindex('example.whoosh', 'doc*')
>>> table2
+------+-----------+
| path | title |
+======+===========+
| '/a' | 'Oranges' |
+------+-----------+
| '/b' | 'Apples' |
+------+-----------+
Keyword arguments:
index_or_dirname
Either an instance of `whoosh.index.Index` or a string containing the
directory path where the index is to be stored.
query
Either a string or an instance of `whoosh.query.Query`. If a string,
it will be parsed as a multi-field query, i.e., any terms not bound
to a specific field will match **any** field.
limit
Return at most `limit` results.
indexname
String containing the name of the index, if multiple indexes are stored
in the same directory.
docnum_field
If not None, an extra field will be added to the output table containing
the internal document number stored in the index. The name of the field
will be the value of this argument.
score_field
If not None, an extra field will be added to the output table containing
the score of the result. The name of the field will be the value of this
argument.
fieldboosts
An optional dictionary mapping field names to boosts.
search_kwargs
Any extra keyword arguments to be passed through to the Whoosh
`search()` method. | [
"Search",
"a",
"Whoosh",
"index",
"using",
"a",
"query",
".",
"E",
".",
"g",
".",
"::"
] | python | train |
tensorflow/cleverhans | cleverhans/utils_pytorch.py | https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans/utils_pytorch.py#L41-L94 | def convert_pytorch_model_to_tf(model, out_dims=None):
"""
Convert a pytorch model into a tensorflow op that allows backprop
:param model: A pytorch nn.Module object
:param out_dims: The number of output dimensions (classes) for the model
:return: A model function that maps an input (tf.Tensor) to the
output of the model (tf.Tensor)
"""
warnings.warn("convert_pytorch_model_to_tf is deprecated, switch to"
+ " dedicated PyTorch support provided by CleverHans v4.")
torch_state = {
'logits': None,
'x': None,
}
if not out_dims:
out_dims = list(model.modules())[-1].out_features
def _fprop_fn(x_np):
"""TODO: write this"""
x_tensor = torch.Tensor(x_np)
if torch.cuda.is_available():
x_tensor = x_tensor.cuda()
torch_state['x'] = Variable(x_tensor, requires_grad=True)
torch_state['logits'] = model(torch_state['x'])
return torch_state['logits'].data.cpu().numpy()
def _bprop_fn(x_np, grads_in_np):
"""TODO: write this"""
_fprop_fn(x_np)
grads_in_tensor = torch.Tensor(grads_in_np)
if torch.cuda.is_available():
grads_in_tensor = grads_in_tensor.cuda()
# Run our backprop through our logits to our xs
loss = torch.sum(torch_state['logits'] * grads_in_tensor)
loss.backward()
return torch_state['x'].grad.cpu().data.numpy()
def _tf_gradient_fn(op, grads_in):
"""TODO: write this"""
return tf.py_func(_bprop_fn, [op.inputs[0], grads_in],
Tout=[tf.float32])
def tf_model_fn(x_op):
"""TODO: write this"""
out = _py_func_with_gradient(_fprop_fn, [x_op], Tout=[tf.float32],
stateful=True,
grad_func=_tf_gradient_fn)[0]
out.set_shape([None, out_dims])
return out
return tf_model_fn | [
"def",
"convert_pytorch_model_to_tf",
"(",
"model",
",",
"out_dims",
"=",
"None",
")",
":",
"warnings",
".",
"warn",
"(",
"\"convert_pytorch_model_to_tf is deprecated, switch to\"",
"+",
"\" dedicated PyTorch support provided by CleverHans v4.\"",
")",
"torch_state",
"=",
"{",
"'logits'",
":",
"None",
",",
"'x'",
":",
"None",
",",
"}",
"if",
"not",
"out_dims",
":",
"out_dims",
"=",
"list",
"(",
"model",
".",
"modules",
"(",
")",
")",
"[",
"-",
"1",
"]",
".",
"out_features",
"def",
"_fprop_fn",
"(",
"x_np",
")",
":",
"\"\"\"TODO: write this\"\"\"",
"x_tensor",
"=",
"torch",
".",
"Tensor",
"(",
"x_np",
")",
"if",
"torch",
".",
"cuda",
".",
"is_available",
"(",
")",
":",
"x_tensor",
"=",
"x_tensor",
".",
"cuda",
"(",
")",
"torch_state",
"[",
"'x'",
"]",
"=",
"Variable",
"(",
"x_tensor",
",",
"requires_grad",
"=",
"True",
")",
"torch_state",
"[",
"'logits'",
"]",
"=",
"model",
"(",
"torch_state",
"[",
"'x'",
"]",
")",
"return",
"torch_state",
"[",
"'logits'",
"]",
".",
"data",
".",
"cpu",
"(",
")",
".",
"numpy",
"(",
")",
"def",
"_bprop_fn",
"(",
"x_np",
",",
"grads_in_np",
")",
":",
"\"\"\"TODO: write this\"\"\"",
"_fprop_fn",
"(",
"x_np",
")",
"grads_in_tensor",
"=",
"torch",
".",
"Tensor",
"(",
"grads_in_np",
")",
"if",
"torch",
".",
"cuda",
".",
"is_available",
"(",
")",
":",
"grads_in_tensor",
"=",
"grads_in_tensor",
".",
"cuda",
"(",
")",
"# Run our backprop through our logits to our xs",
"loss",
"=",
"torch",
".",
"sum",
"(",
"torch_state",
"[",
"'logits'",
"]",
"*",
"grads_in_tensor",
")",
"loss",
".",
"backward",
"(",
")",
"return",
"torch_state",
"[",
"'x'",
"]",
".",
"grad",
".",
"cpu",
"(",
")",
".",
"data",
".",
"numpy",
"(",
")",
"def",
"_tf_gradient_fn",
"(",
"op",
",",
"grads_in",
")",
":",
"\"\"\"TODO: write this\"\"\"",
"return",
"tf",
".",
"py_func",
"(",
"_bprop_fn",
",",
"[",
"op",
".",
"inputs",
"[",
"0",
"]",
",",
"grads_in",
"]",
",",
"Tout",
"=",
"[",
"tf",
".",
"float32",
"]",
")",
"def",
"tf_model_fn",
"(",
"x_op",
")",
":",
"\"\"\"TODO: write this\"\"\"",
"out",
"=",
"_py_func_with_gradient",
"(",
"_fprop_fn",
",",
"[",
"x_op",
"]",
",",
"Tout",
"=",
"[",
"tf",
".",
"float32",
"]",
",",
"stateful",
"=",
"True",
",",
"grad_func",
"=",
"_tf_gradient_fn",
")",
"[",
"0",
"]",
"out",
".",
"set_shape",
"(",
"[",
"None",
",",
"out_dims",
"]",
")",
"return",
"out",
"return",
"tf_model_fn"
] | Convert a pytorch model into a tensorflow op that allows backprop
:param model: A pytorch nn.Module object
:param out_dims: The number of output dimensions (classes) for the model
:return: A model function that maps an input (tf.Tensor) to the
output of the model (tf.Tensor) | [
"Convert",
"a",
"pytorch",
"model",
"into",
"a",
"tensorflow",
"op",
"that",
"allows",
"backprop",
":",
"param",
"model",
":",
"A",
"pytorch",
"nn",
".",
"Module",
"object",
":",
"param",
"out_dims",
":",
"The",
"number",
"of",
"output",
"dimensions",
"(",
"classes",
")",
"for",
"the",
"model",
":",
"return",
":",
"A",
"model",
"function",
"that",
"maps",
"an",
"input",
"(",
"tf",
".",
"Tensor",
")",
"to",
"the",
"output",
"of",
"the",
"model",
"(",
"tf",
".",
"Tensor",
")"
] | python | train |
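
A hedged usage sketch for the convert_pytorch_model_to_tf record above; it assumes graph-mode TensorFlow 1.x and PyTorch are installed, and the toy model and shapes are invented for the example.

import numpy as np
import tensorflow as tf
import torch.nn as nn
from cleverhans.utils_pytorch import convert_pytorch_model_to_tf

# Toy PyTorch classifier; out_dims matches its final Linear layer.
torch_model = nn.Sequential(nn.Linear(784, 128), nn.ReLU(), nn.Linear(128, 10))
tf_model_fn = convert_pytorch_model_to_tf(torch_model, out_dims=10)

x_op = tf.placeholder(tf.float32, shape=(None, 784))
logits_op = tf_model_fn(x_op)  # forward pass runs PyTorch under tf.py_func

with tf.Session() as sess:
    logits = sess.run(logits_op,
                      feed_dict={x_op: np.zeros((2, 784), dtype=np.float32)})
    print(logits.shape)  # (2, 10)
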
pandas-dev/pandas | pandas/io/formats/style.py | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/formats/style.py#L883-L931 | def background_gradient(self, cmap='PuBu', low=0, high=0, axis=0,
subset=None, text_color_threshold=0.408):
"""
Color the background in a gradient according to
the data in each column (optionally row).
Requires matplotlib.
Parameters
----------
cmap : str or colormap
matplotlib colormap
low, high : float
compress the range by these values.
axis : {0 or 'index', 1 or 'columns', None}, default 0
apply to each column (``axis=0`` or ``'index'``), to each row
(``axis=1`` or ``'columns'``), or to the entire DataFrame at once
with ``axis=None``.
subset : IndexSlice
a valid slice for ``data`` to limit the style application to.
text_color_threshold : float or int
luminance threshold for determining text color. Facilitates text
visibility across varying background colors. From 0 to 1.
0 = all text is dark colored, 1 = all text is light colored.
.. versionadded:: 0.24.0
Returns
-------
self : Styler
Raises
------
ValueError
If ``text_color_threshold`` is not a value from 0 to 1.
Notes
-----
Set ``text_color_threshold`` or tune ``low`` and ``high`` to keep the
text legible by not using the entire range of the color map. The range
of the data is extended by ``low * (x.max() - x.min())`` and ``high *
(x.max() - x.min())`` before normalizing.
"""
subset = _maybe_numeric_slice(self.data, subset)
subset = _non_reducing_slice(subset)
self.apply(self._background_gradient, cmap=cmap, subset=subset,
axis=axis, low=low, high=high,
text_color_threshold=text_color_threshold)
return self | [
"def",
"background_gradient",
"(",
"self",
",",
"cmap",
"=",
"'PuBu'",
",",
"low",
"=",
"0",
",",
"high",
"=",
"0",
",",
"axis",
"=",
"0",
",",
"subset",
"=",
"None",
",",
"text_color_threshold",
"=",
"0.408",
")",
":",
"subset",
"=",
"_maybe_numeric_slice",
"(",
"self",
".",
"data",
",",
"subset",
")",
"subset",
"=",
"_non_reducing_slice",
"(",
"subset",
")",
"self",
".",
"apply",
"(",
"self",
".",
"_background_gradient",
",",
"cmap",
"=",
"cmap",
",",
"subset",
"=",
"subset",
",",
"axis",
"=",
"axis",
",",
"low",
"=",
"low",
",",
"high",
"=",
"high",
",",
"text_color_threshold",
"=",
"text_color_threshold",
")",
"return",
"self"
] | Color the background in a gradient according to
the data in each column (optionally row).
Requires matplotlib.
Parameters
----------
cmap : str or colormap
matplotlib colormap
low, high : float
compress the range by these values.
axis : {0 or 'index', 1 or 'columns', None}, default 0
apply to each column (``axis=0`` or ``'index'``), to each row
(``axis=1`` or ``'columns'``), or to the entire DataFrame at once
with ``axis=None``.
subset : IndexSlice
a valid slice for ``data`` to limit the style application to.
text_color_threshold : float or int
luminance threshold for determining text color. Facilitates text
visibility across varying background colors. From 0 to 1.
0 = all text is dark colored, 1 = all text is light colored.
.. versionadded:: 0.24.0
Returns
-------
self : Styler
Raises
------
ValueError
If ``text_color_threshold`` is not a value from 0 to 1.
Notes
-----
Set ``text_color_threshold`` or tune ``low`` and ``high`` to keep the
text legible by not using the entire range of the color map. The range
of the data is extended by ``low * (x.max() - x.min())`` and ``high *
(x.max() - x.min())`` before normalizing. | [
"Color",
"the",
"background",
"in",
"a",
"gradient",
"according",
"to",
"the",
"data",
"in",
"each",
"column",
"(",
"optionally",
"row",
")",
"."
] | python | train |
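
A short usage sketch for the background_gradient record above (requires matplotlib); the DataFrame values are arbitrary and render() reflects the older Styler API this record belongs to.

import pandas as pd

df = pd.DataFrame({"a": [1, 2, 3], "b": [10, 20, 30]})

# Shade each column independently (axis=0, the default above) and compress the
# colour range a little so text stays readable.
styled = df.style.background_gradient(cmap="PuBu", low=0.1, high=0.1)
html = styled.render()  # styles are emitted as inline CSS in the HTML output
print(html[:80])
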
materialsproject/pymatgen | pymatgen/analysis/elasticity/elastic.py | https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/analysis/elasticity/elastic.py#L65-L79 | def calculate_stress(self, strain):
"""
Calculates a given elastic tensor's contribution to the
stress using Einstein summation
Args:
strain (3x3 array-like): matrix corresponding to strain
"""
strain = np.array(strain)
if strain.shape == (6,):
strain = Strain.from_voigt(strain)
assert strain.shape == (3, 3), "Strain must be 3x3 or voigt-notation"
stress_matrix = self.einsum_sequence([strain]*(self.order - 1)) \
/ factorial(self.order - 1)
return Stress(stress_matrix) | [
"def",
"calculate_stress",
"(",
"self",
",",
"strain",
")",
":",
"strain",
"=",
"np",
".",
"array",
"(",
"strain",
")",
"if",
"strain",
".",
"shape",
"==",
"(",
"6",
",",
")",
":",
"strain",
"=",
"Strain",
".",
"from_voigt",
"(",
"strain",
")",
"assert",
"strain",
".",
"shape",
"==",
"(",
"3",
",",
"3",
")",
",",
"\"Strain must be 3x3 or voigt-notation\"",
"stress_matrix",
"=",
"self",
".",
"einsum_sequence",
"(",
"[",
"strain",
"]",
"*",
"(",
"self",
".",
"order",
"-",
"1",
")",
")",
"/",
"factorial",
"(",
"self",
".",
"order",
"-",
"1",
")",
"return",
"Stress",
"(",
"stress_matrix",
")"
] | Calculates a given elastic tensor's contribution to the
stress using Einstein summation
Args:
strain (3x3 array-like): matrix corresponding to strain | [
"Calculate",
"s",
"a",
"given",
"elastic",
"tensor",
"s",
"contribution",
"to",
"the",
"stress",
"using",
"Einstein",
"summation"
] | python | train |
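
A plain-NumPy sketch of the contraction calculate_stress() performs for the common second-order (rank-4 stiffness) case; the toy isotropic tensor and the small strain below are made up.

import numpy as np
from math import factorial

order = 2
lam, mu = 1.0, 0.5
eye = np.eye(3)

# Toy isotropic stiffness: lambda * d_ij d_kl + mu * (d_ik d_jl + d_il d_jk).
c_ijkl = (lam * np.einsum("ij,kl->ijkl", eye, eye)
          + mu * (np.einsum("ik,jl->ijkl", eye, eye)
                  + np.einsum("il,jk->ijkl", eye, eye)))

strain = np.array([[0.01, 0.0, 0.0],
                   [0.0, 0.0, 0.0],
                   [0.0, 0.0, 0.0]])

# One einsum over (order - 1) copies of the strain, divided by (order - 1)!.
stress = np.einsum("ijkl,kl->ij", c_ijkl, strain) / factorial(order - 1)
print(np.round(stress, 4))
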
pytroll/satpy | satpy/readers/aapp_l1b.py | https://github.com/pytroll/satpy/blob/1f21d20ac686b745fb0da9b4030d139893e066dd/satpy/readers/aapp_l1b.py#L158-L191 | def get_angles(self, angle_id):
"""Get sun-satellite viewing angles"""
tic = datetime.now()
sunz40km = self._data["ang"][:, :, 0] * 1e-2
satz40km = self._data["ang"][:, :, 1] * 1e-2
azidiff40km = self._data["ang"][:, :, 2] * 1e-2
try:
from geotiepoints.interpolator import Interpolator
except ImportError:
logger.warning("Could not interpolate sun-sat angles, "
"python-geotiepoints missing.")
self.sunz, self.satz, self.azidiff = sunz40km, satz40km, azidiff40km
else:
cols40km = np.arange(24, 2048, 40)
cols1km = np.arange(2048)
lines = sunz40km.shape[0]
rows40km = np.arange(lines)
rows1km = np.arange(lines)
along_track_order = 1
cross_track_order = 3
satint = Interpolator(
[sunz40km, satz40km, azidiff40km], (rows40km, cols40km),
(rows1km, cols1km), along_track_order, cross_track_order)
self.sunz, self.satz, self.azidiff = satint.interpolate()
logger.debug("Interpolate sun-sat angles: time %s",
str(datetime.now() - tic))
return create_xarray(getattr(self, ANGLES[angle_id])) | [
"def",
"get_angles",
"(",
"self",
",",
"angle_id",
")",
":",
"tic",
"=",
"datetime",
".",
"now",
"(",
")",
"sunz40km",
"=",
"self",
".",
"_data",
"[",
"\"ang\"",
"]",
"[",
":",
",",
":",
",",
"0",
"]",
"*",
"1e-2",
"satz40km",
"=",
"self",
".",
"_data",
"[",
"\"ang\"",
"]",
"[",
":",
",",
":",
",",
"1",
"]",
"*",
"1e-2",
"azidiff40km",
"=",
"self",
".",
"_data",
"[",
"\"ang\"",
"]",
"[",
":",
",",
":",
",",
"2",
"]",
"*",
"1e-2",
"try",
":",
"from",
"geotiepoints",
".",
"interpolator",
"import",
"Interpolator",
"except",
"ImportError",
":",
"logger",
".",
"warning",
"(",
"\"Could not interpolate sun-sat angles, \"",
"\"python-geotiepoints missing.\"",
")",
"self",
".",
"sunz",
",",
"self",
".",
"satz",
",",
"self",
".",
"azidiff",
"=",
"sunz40km",
",",
"satz40km",
",",
"azidiff40km",
"else",
":",
"cols40km",
"=",
"np",
".",
"arange",
"(",
"24",
",",
"2048",
",",
"40",
")",
"cols1km",
"=",
"np",
".",
"arange",
"(",
"2048",
")",
"lines",
"=",
"sunz40km",
".",
"shape",
"[",
"0",
"]",
"rows40km",
"=",
"np",
".",
"arange",
"(",
"lines",
")",
"rows1km",
"=",
"np",
".",
"arange",
"(",
"lines",
")",
"along_track_order",
"=",
"1",
"cross_track_order",
"=",
"3",
"satint",
"=",
"Interpolator",
"(",
"[",
"sunz40km",
",",
"satz40km",
",",
"azidiff40km",
"]",
",",
"(",
"rows40km",
",",
"cols40km",
")",
",",
"(",
"rows1km",
",",
"cols1km",
")",
",",
"along_track_order",
",",
"cross_track_order",
")",
"self",
".",
"sunz",
",",
"self",
".",
"satz",
",",
"self",
".",
"azidiff",
"=",
"satint",
".",
"interpolate",
"(",
")",
"logger",
".",
"debug",
"(",
"\"Interpolate sun-sat angles: time %s\"",
",",
"str",
"(",
"datetime",
".",
"now",
"(",
")",
"-",
"tic",
")",
")",
"return",
"create_xarray",
"(",
"getattr",
"(",
"self",
",",
"ANGLES",
"[",
"angle_id",
"]",
")",
")"
] | Get sun-satellite viewing angles | [
"Get",
"sun",
"-",
"satellite",
"viewing",
"angles"
] | python | train |
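
The tie-point expansion pattern used in get_angles(), shown with synthetic 40 km angle fields (requires python-geotiepoints); the grid geometry mirrors the call in the record, while the angle values and line count are invented.

import numpy as np
from geotiepoints.interpolator import Interpolator

lines = 12
cols40km = np.arange(24, 2048, 40)          # 51 tie-point columns
cols1km = np.arange(2048)
rows40km = rows1km = np.arange(lines)

sunz40km = np.random.uniform(30, 60, (lines, cols40km.size))
satz40km = np.random.uniform(0, 70, (lines, cols40km.size))
azidiff40km = np.random.uniform(0, 180, (lines, cols40km.size))

# Same positional arguments as in the record: linear along track, cubic across.
satint = Interpolator([sunz40km, satz40km, azidiff40km],
                      (rows40km, cols40km), (rows1km, cols1km), 1, 3)
sunz, satz, azidiff = satint.interpolate()
print(sunz.shape)  # (12, 2048)
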
cloud9ers/gurumate | environment/lib/python2.7/site-packages/IPython/utils/_process_posix.py | https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/utils/_process_posix.py#L119-L187 | def system(self, cmd):
"""Execute a command in a subshell.
Parameters
----------
cmd : str
A command to be executed in the system shell.
Returns
-------
int : child's exitstatus
"""
# Get likely encoding for the output.
enc = DEFAULT_ENCODING
# Patterns to match on the output, for pexpect. We read input and
# allow either a short timeout or EOF
patterns = [pexpect.TIMEOUT, pexpect.EOF]
# the index of the EOF pattern in the list.
# even though we know it's 1, this call means we don't have to worry if
# we change the above list, and forget to change this value:
EOF_index = patterns.index(pexpect.EOF)
# The size of the output stored so far in the process output buffer.
# Since pexpect only appends to this buffer, each time we print we
# record how far we've printed, so that next time we only print *new*
# content from the buffer.
out_size = 0
try:
# Since we're not really searching the buffer for text patterns, we
# can set pexpect's search window to be tiny and it won't matter.
# We only search for the 'patterns' timeout or EOF, which aren't in
# the text itself.
#child = pexpect.spawn(pcmd, searchwindowsize=1)
if hasattr(pexpect, 'spawnb'):
child = pexpect.spawnb(self.sh, args=['-c', cmd]) # Pexpect-U
else:
child = pexpect.spawn(self.sh, args=['-c', cmd]) # Vanilla Pexpect
flush = sys.stdout.flush
while True:
# res is the index of the pattern that caused the match, so we
# know whether we've finished (if we matched EOF) or not
res_idx = child.expect_list(patterns, self.read_timeout)
print(child.before[out_size:].decode(enc, 'replace'), end='')
flush()
if res_idx==EOF_index:
break
# Update the pointer to what we've already printed
out_size = len(child.before)
except KeyboardInterrupt:
# We need to send ^C to the process. The ascii code for '^C' is 3
# (the character is known as ETX for 'End of Text', see
# curses.ascii.ETX).
child.sendline(chr(3))
# Read and print any more output the program might produce on its
# way out.
try:
out_size = len(child.before)
child.expect_list(patterns, self.terminate_timeout)
print(child.before[out_size:].decode(enc, 'replace'), end='')
sys.stdout.flush()
except KeyboardInterrupt:
# Impatient users tend to type it multiple times
pass
finally:
# Ensure the subprocess really is terminated
child.terminate(force=True)
# add isalive check, to ensure exitstatus is set:
child.isalive()
return child.exitstatus | [
"def",
"system",
"(",
"self",
",",
"cmd",
")",
":",
"# Get likely encoding for the output.",
"enc",
"=",
"DEFAULT_ENCODING",
"# Patterns to match on the output, for pexpect. We read input and",
"# allow either a short timeout or EOF",
"patterns",
"=",
"[",
"pexpect",
".",
"TIMEOUT",
",",
"pexpect",
".",
"EOF",
"]",
"# the index of the EOF pattern in the list.",
"# even though we know it's 1, this call means we don't have to worry if",
"# we change the above list, and forget to change this value:",
"EOF_index",
"=",
"patterns",
".",
"index",
"(",
"pexpect",
".",
"EOF",
")",
"# The size of the output stored so far in the process output buffer.",
"# Since pexpect only appends to this buffer, each time we print we",
"# record how far we've printed, so that next time we only print *new*",
"# content from the buffer.",
"out_size",
"=",
"0",
"try",
":",
"# Since we're not really searching the buffer for text patterns, we",
"# can set pexpect's search window to be tiny and it won't matter.",
"# We only search for the 'patterns' timeout or EOF, which aren't in",
"# the text itself.",
"#child = pexpect.spawn(pcmd, searchwindowsize=1)",
"if",
"hasattr",
"(",
"pexpect",
",",
"'spawnb'",
")",
":",
"child",
"=",
"pexpect",
".",
"spawnb",
"(",
"self",
".",
"sh",
",",
"args",
"=",
"[",
"'-c'",
",",
"cmd",
"]",
")",
"# Pexpect-U",
"else",
":",
"child",
"=",
"pexpect",
".",
"spawn",
"(",
"self",
".",
"sh",
",",
"args",
"=",
"[",
"'-c'",
",",
"cmd",
"]",
")",
"# Vanilla Pexpect",
"flush",
"=",
"sys",
".",
"stdout",
".",
"flush",
"while",
"True",
":",
"# res is the index of the pattern that caused the match, so we",
"# know whether we've finished (if we matched EOF) or not",
"res_idx",
"=",
"child",
".",
"expect_list",
"(",
"patterns",
",",
"self",
".",
"read_timeout",
")",
"print",
"(",
"child",
".",
"before",
"[",
"out_size",
":",
"]",
".",
"decode",
"(",
"enc",
",",
"'replace'",
")",
",",
"end",
"=",
"''",
")",
"flush",
"(",
")",
"if",
"res_idx",
"==",
"EOF_index",
":",
"break",
"# Update the pointer to what we've already printed",
"out_size",
"=",
"len",
"(",
"child",
".",
"before",
")",
"except",
"KeyboardInterrupt",
":",
"# We need to send ^C to the process. The ascii code for '^C' is 3",
"# (the character is known as ETX for 'End of Text', see",
"# curses.ascii.ETX).",
"child",
".",
"sendline",
"(",
"chr",
"(",
"3",
")",
")",
"# Read and print any more output the program might produce on its",
"# way out.",
"try",
":",
"out_size",
"=",
"len",
"(",
"child",
".",
"before",
")",
"child",
".",
"expect_list",
"(",
"patterns",
",",
"self",
".",
"terminate_timeout",
")",
"print",
"(",
"child",
".",
"before",
"[",
"out_size",
":",
"]",
".",
"decode",
"(",
"enc",
",",
"'replace'",
")",
",",
"end",
"=",
"''",
")",
"sys",
".",
"stdout",
".",
"flush",
"(",
")",
"except",
"KeyboardInterrupt",
":",
"# Impatient users tend to type it multiple times",
"pass",
"finally",
":",
"# Ensure the subprocess really is terminated",
"child",
".",
"terminate",
"(",
"force",
"=",
"True",
")",
"# add isalive check, to ensure exitstatus is set:",
"child",
".",
"isalive",
"(",
")",
"return",
"child",
".",
"exitstatus"
] | Execute a command in a subshell.
Parameters
----------
cmd : str
A command to be executed in the system shell.
Returns
-------
int : child's exitstatus | [
"Execute",
"a",
"command",
"in",
"a",
"subshell",
"."
] | python | test |
fkarb/xltable | xltable/worksheet.py | https://github.com/fkarb/xltable/blob/7a592642d27ad5ee90d2aa8c26338abaa9d84bea/xltable/worksheet.py#L189-L278 | def _get_all_styles(self):
"""
return a dictionary of {(row, col) -> CellStyle}
for all cells that use a non-default style.
"""
_styles = {}
def _get_style(bold=False, bg_col=None, border=None):
if (bold, bg_col, border) not in _styles:
_styles[(bold, bg_col, border)] = CellStyle(bold=bold,
bg_color=bg_col,
border=border)
return _styles[(bold, bg_col, border)]
ws_styles = {}
for table, (row, col) in self.__tables.values():
for r in range(row, row + table.header_height):
for c in range(col, col + table.width):
if isinstance(table.header_style, dict):
col_name = table.dataframe.columns[c - col]
style = table.header_style.get(col_name, _get_style(bold=True))
else:
style = table.header_style or _get_style(bold=True)
ws_styles[(r, c)] = style
for c in range(col, col + table.row_labels_width):
for r in range(row + table.header_height, row + table.height):
if isinstance(table.index_style, dict):
row_name = table.dataframe.index[r - row]
style = table.index_style.get(row_name, _get_style(bold=True))
else:
style = table.index_style or _get_style(bold=True)
ws_styles[(r, c)] = style
if table.style.stripe_colors or table.style.border:
num_bg_cols = len(table.style.stripe_colors) if \
table.style.stripe_colors else 1
bg_cols = table.style.stripe_colors if \
table.style.stripe_colors else None
for i, row_offset in enumerate(range(table.header_height,
table.height)):
for c in range(col, col + table.width):
bg_col = bg_cols[i % num_bg_cols] if bg_cols else None
style = _get_style(bold=None, bg_col=bg_col, border=table.style.border)
if (row + row_offset, c) in ws_styles:
style = style + ws_styles[(row + row_offset, c)]
ws_styles[(row + row_offset, c)] = style
for col_name, col_style in table.column_styles.items():
try:
col_offset = table.get_column_offset(col_name)
except KeyError:
continue
for i, r in enumerate(range(row + table.header_height, row + table.height)):
style = col_style
if (r, col + col_offset) in ws_styles:
style = ws_styles[(r, col + col_offset)] + style
ws_styles[(r, col + col_offset)] = style
for row_name, row_style in table.row_styles.items():
try:
row_offset = table.get_row_offset(row_name)
except KeyError:
continue
for i, c in enumerate(range(col + table.row_labels_width, col + table.width)):
style = row_style
if (row + row_offset, c) in ws_styles:
style = ws_styles[(row + row_offset, c)] + style
ws_styles[(row + row_offset, c)] = style
for (row_name, col_name), cell_style in table.cell_styles.items():
try:
col_offset = table.get_column_offset(col_name)
row_offset = table.get_row_offset(row_name)
except KeyError:
continue
style = cell_style
if (row + row_offset, col + col_offset) in ws_styles:
style = ws_styles[(row + row_offset, col + col_offset)] + style
ws_styles[(row + row_offset, col + col_offset)] = style
for (row, col), value in self.__values.items():
if isinstance(value, Value):
style = value.style
if style:
if (row, col) in ws_styles:
style = style + ws_styles[(row, col)]
ws_styles[(row, col)] = style
return ws_styles | [
"def",
"_get_all_styles",
"(",
"self",
")",
":",
"_styles",
"=",
"{",
"}",
"def",
"_get_style",
"(",
"bold",
"=",
"False",
",",
"bg_col",
"=",
"None",
",",
"border",
"=",
"None",
")",
":",
"if",
"(",
"bold",
",",
"bg_col",
",",
"border",
")",
"not",
"in",
"_styles",
":",
"_styles",
"[",
"(",
"bold",
",",
"bg_col",
",",
"border",
")",
"]",
"=",
"CellStyle",
"(",
"bold",
"=",
"bold",
",",
"bg_color",
"=",
"bg_col",
",",
"border",
"=",
"border",
")",
"return",
"_styles",
"[",
"(",
"bold",
",",
"bg_col",
",",
"border",
")",
"]",
"ws_styles",
"=",
"{",
"}",
"for",
"table",
",",
"(",
"row",
",",
"col",
")",
"in",
"self",
".",
"__tables",
".",
"values",
"(",
")",
":",
"for",
"r",
"in",
"range",
"(",
"row",
",",
"row",
"+",
"table",
".",
"header_height",
")",
":",
"for",
"c",
"in",
"range",
"(",
"col",
",",
"col",
"+",
"table",
".",
"width",
")",
":",
"if",
"isinstance",
"(",
"table",
".",
"header_style",
",",
"dict",
")",
":",
"col_name",
"=",
"table",
".",
"dataframe",
".",
"columns",
"[",
"c",
"-",
"col",
"]",
"style",
"=",
"table",
".",
"header_style",
".",
"get",
"(",
"col_name",
",",
"_get_style",
"(",
"bold",
"=",
"True",
")",
")",
"else",
":",
"style",
"=",
"table",
".",
"header_style",
"or",
"_get_style",
"(",
"bold",
"=",
"True",
")",
"ws_styles",
"[",
"(",
"r",
",",
"c",
")",
"]",
"=",
"style",
"for",
"c",
"in",
"range",
"(",
"col",
",",
"col",
"+",
"table",
".",
"row_labels_width",
")",
":",
"for",
"r",
"in",
"range",
"(",
"row",
"+",
"table",
".",
"header_height",
",",
"row",
"+",
"table",
".",
"height",
")",
":",
"if",
"isinstance",
"(",
"table",
".",
"index_style",
",",
"dict",
")",
":",
"row_name",
"=",
"table",
".",
"dataframe",
".",
"index",
"[",
"r",
"-",
"row",
"]",
"style",
"=",
"table",
".",
"index_style",
".",
"get",
"(",
"row_name",
",",
"_get_style",
"(",
"bold",
"=",
"True",
")",
")",
"else",
":",
"style",
"=",
"table",
".",
"index_style",
"or",
"_get_style",
"(",
"bold",
"=",
"True",
")",
"ws_styles",
"[",
"(",
"r",
",",
"c",
")",
"]",
"=",
"style",
"if",
"table",
".",
"style",
".",
"stripe_colors",
"or",
"table",
".",
"style",
".",
"border",
":",
"num_bg_cols",
"=",
"len",
"(",
"table",
".",
"style",
".",
"stripe_colors",
")",
"if",
"table",
".",
"style",
".",
"stripe_colors",
"else",
"1",
"bg_cols",
"=",
"table",
".",
"style",
".",
"stripe_colors",
"if",
"table",
".",
"style",
".",
"stripe_colors",
"else",
"None",
"for",
"i",
",",
"row_offset",
"in",
"enumerate",
"(",
"range",
"(",
"table",
".",
"header_height",
",",
"table",
".",
"height",
")",
")",
":",
"for",
"c",
"in",
"range",
"(",
"col",
",",
"col",
"+",
"table",
".",
"width",
")",
":",
"bg_col",
"=",
"bg_cols",
"[",
"i",
"%",
"num_bg_cols",
"]",
"if",
"bg_cols",
"else",
"None",
"style",
"=",
"_get_style",
"(",
"bold",
"=",
"None",
",",
"bg_col",
"=",
"bg_col",
",",
"border",
"=",
"table",
".",
"style",
".",
"border",
")",
"if",
"(",
"row",
"+",
"row_offset",
",",
"c",
")",
"in",
"ws_styles",
":",
"style",
"=",
"style",
"+",
"ws_styles",
"[",
"(",
"row",
"+",
"row_offset",
",",
"c",
")",
"]",
"ws_styles",
"[",
"(",
"row",
"+",
"row_offset",
",",
"c",
")",
"]",
"=",
"style",
"for",
"col_name",
",",
"col_style",
"in",
"table",
".",
"column_styles",
".",
"items",
"(",
")",
":",
"try",
":",
"col_offset",
"=",
"table",
".",
"get_column_offset",
"(",
"col_name",
")",
"except",
"KeyError",
":",
"continue",
"for",
"i",
",",
"r",
"in",
"enumerate",
"(",
"range",
"(",
"row",
"+",
"table",
".",
"header_height",
",",
"row",
"+",
"table",
".",
"height",
")",
")",
":",
"style",
"=",
"col_style",
"if",
"(",
"r",
",",
"col",
"+",
"col_offset",
")",
"in",
"ws_styles",
":",
"style",
"=",
"ws_styles",
"[",
"(",
"r",
",",
"col",
"+",
"col_offset",
")",
"]",
"+",
"style",
"ws_styles",
"[",
"(",
"r",
",",
"col",
"+",
"col_offset",
")",
"]",
"=",
"style",
"for",
"row_name",
",",
"row_style",
"in",
"table",
".",
"row_styles",
".",
"items",
"(",
")",
":",
"try",
":",
"row_offset",
"=",
"table",
".",
"get_row_offset",
"(",
"row_name",
")",
"except",
"KeyError",
":",
"continue",
"for",
"i",
",",
"c",
"in",
"enumerate",
"(",
"range",
"(",
"col",
"+",
"table",
".",
"row_labels_width",
",",
"col",
"+",
"table",
".",
"width",
")",
")",
":",
"style",
"=",
"row_style",
"if",
"(",
"row",
"+",
"row_offset",
",",
"c",
")",
"in",
"ws_styles",
":",
"style",
"=",
"ws_styles",
"[",
"(",
"row",
"+",
"row_offset",
",",
"c",
")",
"]",
"+",
"style",
"ws_styles",
"[",
"(",
"row",
"+",
"row_offset",
",",
"c",
")",
"]",
"=",
"style",
"for",
"(",
"row_name",
",",
"col_name",
")",
",",
"cell_style",
"in",
"table",
".",
"cell_styles",
".",
"items",
"(",
")",
":",
"try",
":",
"col_offset",
"=",
"table",
".",
"get_column_offset",
"(",
"col_name",
")",
"row_offset",
"=",
"table",
".",
"get_row_offset",
"(",
"row_name",
")",
"except",
"KeyError",
":",
"continue",
"style",
"=",
"cell_style",
"if",
"(",
"row",
"+",
"row_offset",
",",
"col",
"+",
"col_offset",
")",
"in",
"ws_styles",
":",
"style",
"=",
"ws_styles",
"[",
"(",
"row",
"+",
"row_offset",
",",
"col",
"+",
"col_offset",
")",
"]",
"+",
"style",
"ws_styles",
"[",
"(",
"row",
"+",
"row_offset",
",",
"col",
"+",
"col_offset",
")",
"]",
"=",
"style",
"for",
"(",
"row",
",",
"col",
")",
",",
"value",
"in",
"self",
".",
"__values",
".",
"items",
"(",
")",
":",
"if",
"isinstance",
"(",
"value",
",",
"Value",
")",
":",
"style",
"=",
"value",
".",
"style",
"if",
"style",
":",
"if",
"(",
"row",
",",
"col",
")",
"in",
"ws_styles",
":",
"style",
"=",
"style",
"+",
"ws_styles",
"[",
"(",
"row",
",",
"col",
")",
"]",
"ws_styles",
"[",
"(",
"row",
",",
"col",
")",
"]",
"=",
"style",
"return",
"ws_styles"
] | return a dictionary of {(row, col) -> CellStyle}
for all cells that use a non-default style. | [
"return",
"a",
"dictionary",
"of",
"{",
"(",
"row",
"col",
")",
"-",
">",
"CellStyle",
"}",
"for",
"all",
"cells",
"that",
"use",
"a",
"non",
"-",
"default",
"style",
"."
] | python | train |
idlesign/steampak | steampak/webapi/resources/apps.py | https://github.com/idlesign/steampak/blob/cb3f2c737e272b0360802d947e388df7e34f50f3/steampak/webapi/resources/apps.py#L21-L37 | def get_filter_cardborder(*cardborder_type):
"""Returns game cards URL filter for a given cardborder
type (TAG_CARDBORDER_NORMAL / TAG_CARDBORDER_FOIL).
To be used in URL_GAMECARDS.
:param str|unicode cardborder_type:
:rtype: str|unicode
"""
filter_ = []
for type_ in cardborder_type:
if not type_:
continue
filter_.append('category_' + APPID_CARDS + '_cardborder[]=tag_' + type_)
return '&'.join(filter_) | [
"def",
"get_filter_cardborder",
"(",
"*",
"cardborder_type",
")",
":",
"filter_",
"=",
"[",
"]",
"for",
"type_",
"in",
"cardborder_type",
":",
"if",
"not",
"type_",
":",
"continue",
"filter_",
".",
"append",
"(",
"'category_'",
"+",
"APPID_CARDS",
"+",
"'_cardborder[]=tag_'",
"+",
"type_",
")",
"return",
"'&'",
".",
"join",
"(",
"filter_",
")"
] | Returns game cards URL filter for a given cardborder
type (TAG_CARDBORDER_NORMAL / TAG_CARDBORDER_FOIL).
To be used in URL_GAMECARDS.
:param str|unicode cardborder_type:
:rtype: str|unicode | [
"Returns",
"game",
"cards",
"URL",
"filter",
"for",
"a",
"given",
"cardborder",
"type",
"(",
"TAG_CARDBORDER_NORMAL",
"/",
"TAG_CARDBORDER_FOIL",
")",
"."
] | python | train |
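
A standalone sketch of the filter string get_filter_cardborder() builds; the app id and tag spellings below are placeholders, since the module constants are not shown in the record.

APPID_CARDS = "753"                      # assumed id of the Steam cards app
TAG_CARDBORDER_NORMAL = "cardborder_0"   # assumed tag spelling
TAG_CARDBORDER_FOIL = "cardborder_1"     # assumed tag spelling

def get_filter_cardborder(*cardborder_type):
    filter_ = []
    for type_ in cardborder_type:
        if not type_:
            continue
        filter_.append("category_" + APPID_CARDS + "_cardborder[]=tag_" + type_)
    return "&".join(filter_)

print(get_filter_cardborder(TAG_CARDBORDER_NORMAL, TAG_CARDBORDER_FOIL))
# category_753_cardborder[]=tag_cardborder_0&category_753_cardborder[]=tag_cardborder_1
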
Cog-Creators/Red-Lavalink | lavalink/player_manager.py | https://github.com/Cog-Creators/Red-Lavalink/blob/5b3fc6eb31ee5db8bd2b633a523cf69749957111/lavalink/player_manager.py#L120-L132 | async def move_to(self, channel: discord.VoiceChannel):
"""
Moves this player to a voice channel.
Parameters
----------
channel : discord.VoiceChannel
"""
if channel.guild != self.channel.guild:
raise TypeError("Cannot move to a different guild.")
self.channel = channel
await self.connect() | [
"async",
"def",
"move_to",
"(",
"self",
",",
"channel",
":",
"discord",
".",
"VoiceChannel",
")",
":",
"if",
"channel",
".",
"guild",
"!=",
"self",
".",
"channel",
".",
"guild",
":",
"raise",
"TypeError",
"(",
"\"Cannot move to a different guild.\"",
")",
"self",
".",
"channel",
"=",
"channel",
"await",
"self",
".",
"connect",
"(",
")"
] | Moves this player to a voice channel.
Parameters
----------
channel : discord.VoiceChannel | [
"Moves",
"this",
"player",
"to",
"a",
"voice",
"channel",
"."
] | python | train |
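
A hedged sketch of move_to() inside a bot command; the surrounding scaffolding (the context object and the module-level get_player helper) is assumed rather than taken from the record.

import lavalink

async def summon(ctx):
    # Assumes a player was already created for this guild.
    player = lavalink.get_player(ctx.guild.id)
    target = ctx.author.voice.channel          # a discord.VoiceChannel
    try:
        await player.move_to(target)           # reconnects in the new channel
    except TypeError:
        await ctx.send("That channel belongs to a different guild.")
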
scanny/python-pptx | pptx/shapes/connector.py | https://github.com/scanny/python-pptx/blob/d6ab8234f8b03953d2f831ff9394b1852db34130/pptx/shapes/connector.py#L88-L96 | def begin_y(self):
"""
Return the Y-position of the begin point of this connector, in
English Metric Units (as a |Length| object).
"""
cxnSp = self._element
y, cy, flipV = cxnSp.y, cxnSp.cy, cxnSp.flipV
begin_y = y+cy if flipV else y
return Emu(begin_y) | [
"def",
"begin_y",
"(",
"self",
")",
":",
"cxnSp",
"=",
"self",
".",
"_element",
"y",
",",
"cy",
",",
"flipV",
"=",
"cxnSp",
".",
"y",
",",
"cxnSp",
".",
"cy",
",",
"cxnSp",
".",
"flipV",
"begin_y",
"=",
"y",
"+",
"cy",
"if",
"flipV",
"else",
"y",
"return",
"Emu",
"(",
"begin_y",
")"
] | Return the Y-position of the begin point of this connector, in
English Metric Units (as a |Length| object). | [
"Return",
"the",
"Y",
"-",
"position",
"of",
"the",
"begin",
"point",
"of",
"this",
"connector",
"in",
"English",
"Metric",
"Units",
"(",
"as",
"a",
"|Length|",
"object",
")",
"."
] | python | train |
data-8/datascience | datascience/maps.py | https://github.com/data-8/datascience/blob/4cee38266903ca169cea4a53b8cc39502d85c464/datascience/maps.py#L89-L97 | def copy(self):
"""
Copies the current Map into a new one and returns it.
"""
m = Map(features=self._features, width=self._width,
height=self._height, **self._attrs)
m._folium_map = self._folium_map
return m | [
"def",
"copy",
"(",
"self",
")",
":",
"m",
"=",
"Map",
"(",
"features",
"=",
"self",
".",
"_features",
",",
"width",
"=",
"self",
".",
"_width",
",",
"height",
"=",
"self",
".",
"_height",
",",
"*",
"*",
"self",
".",
"_attrs",
")",
"m",
".",
"_folium_map",
"=",
"self",
".",
"_folium_map",
"return",
"m"
] | Copies the current Map into a new one and returns it. | [
"Copies",
"the",
"current",
"Map",
"into",
"a",
"new",
"one",
"and",
"returns",
"it",
"."
] | python | train |
pgmpy/pgmpy | pgmpy/readwrite/ProbModelXML.py | https://github.com/pgmpy/pgmpy/blob/9381a66aba3c3871d3ccd00672b148d17d63239e/pgmpy/readwrite/ProbModelXML.py#L160-L183 | def write_probmodelxml(model, path, encoding='utf-8', prettyprint=True):
"""
Write model in ProbModelXML format to path.
Parameters
----------
model : A NetworkX graph
Bayesian network or Markov network
path : file or string
File or filename to write.
Filenames ending in .gz or .bz2 will be compressed.
encoding : string (optional)
Encoding for text data.
prettyprint : bool (optional)
If True use line breaks and indenting in output XML.
Examples
--------
>>> G = nx.path_graph(4)
>>> pgmpy.readwrite.write_probmodelxml(G, "test.probmodelxml")
"""
writer = ProbModelXMLWriter(model, path, encoding=encoding,
prettyprint=prettyprint)
writer.dump(path) | [
"def",
"write_probmodelxml",
"(",
"model",
",",
"path",
",",
"encoding",
"=",
"'utf-8'",
",",
"prettyprint",
"=",
"True",
")",
":",
"writer",
"=",
"ProbModelXMLWriter",
"(",
"model",
",",
"path",
",",
"encoding",
"=",
"encoding",
",",
"prettyprint",
"=",
"prettyprint",
")",
"writer",
".",
"dump",
"(",
"path",
")"
] | Write model in ProbModelXML format to path.
Parameters
----------
model : A NetworkX graph
Bayesian network or Markov network
path : file or string
File or filename to write.
Filenames ending in .gz or .bz2 will be compressed.
encoding : string (optional)
Encoding for text data.
prettyprint : bool (optional)
If True use line breaks and indenting in output XML.
Examples
--------
>>> G = nx.path_graph(4)
>>> pgmpy.readwrite.write_probmodelxml(G, "test.probmodelxml") | [
"Write",
"model",
"in",
"ProbModelXML",
"format",
"to",
"path",
"."
] | python | train |
quantopian/zipline | zipline/data/resample.py | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/data/resample.py#L69-L100 | def minute_to_session(column, close_locs, data, out):
"""
Resample an array with minute data into an array with session data.
This function assumes that the minute data is the exact length of all
minutes in the sessions in the output.
Parameters
----------
column : str
The `open`, `high`, `low`, `close`, or `volume` column.
close_locs : array[intp]
The locations in `data` which are the market close minutes.
data : array[float64|uint32]
The minute data to be sampled into session data.
The first value should align with the market open of the first session,
containing values for all minutes for all sessions. With the last value
being the market close of the last session.
out : array[float64|uint32]
The output array into which to write the sampled sessions.
"""
if column == 'open':
_minute_to_session_open(close_locs, data, out)
elif column == 'high':
_minute_to_session_high(close_locs, data, out)
elif column == 'low':
_minute_to_session_low(close_locs, data, out)
elif column == 'close':
_minute_to_session_close(close_locs, data, out)
elif column == 'volume':
_minute_to_session_volume(close_locs, data, out)
return out | [
"def",
"minute_to_session",
"(",
"column",
",",
"close_locs",
",",
"data",
",",
"out",
")",
":",
"if",
"column",
"==",
"'open'",
":",
"_minute_to_session_open",
"(",
"close_locs",
",",
"data",
",",
"out",
")",
"elif",
"column",
"==",
"'high'",
":",
"_minute_to_session_high",
"(",
"close_locs",
",",
"data",
",",
"out",
")",
"elif",
"column",
"==",
"'low'",
":",
"_minute_to_session_low",
"(",
"close_locs",
",",
"data",
",",
"out",
")",
"elif",
"column",
"==",
"'close'",
":",
"_minute_to_session_close",
"(",
"close_locs",
",",
"data",
",",
"out",
")",
"elif",
"column",
"==",
"'volume'",
":",
"_minute_to_session_volume",
"(",
"close_locs",
",",
"data",
",",
"out",
")",
"return",
"out"
] | Resample an array with minute data into an array with session data.
This function assumes that the minute data is the exact length of all
minutes in the sessions in the output.
Parameters
----------
column : str
The `open`, `high`, `low`, `close`, or `volume` column.
close_locs : array[intp]
The locations in `data` which are the market close minutes.
data : array[float64|uint32]
The minute data to be sampled into session data.
The first value should align with the market open of the first session,
containing values for all minutes for all sessions. With the last value
being the market close of the last session.
out : array[float64|uint32]
The output array into which to write the sampled sessions. | [
"Resample",
"an",
"array",
"with",
"minute",
"data",
"into",
"an",
"array",
"with",
"session",
"data",
"."
] | python | train |
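
A toy call sketch for minute_to_session() with two three-minute "sessions" (assumes zipline is installed); the prices are made up and the expected output takes the session close to be the value at each close minute.

import numpy as np
from zipline.data.resample import minute_to_session

close_prices = np.array([10.0, 10.5, 10.2,    # session 1
                         11.0, 10.8, 11.3])   # session 2
close_locs = np.array([2, 5], dtype=np.intp)  # market-close minutes
out = np.empty(2, dtype=np.float64)

minute_to_session('close', close_locs, close_prices, out)
print(out)  # expected: [10.2, 11.3]
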
graphql-python/graphql-core-next | graphql/type/schema.py | https://github.com/graphql-python/graphql-core-next/blob/073dce3f002f897d40f9348ffd8f107815160540/graphql/type/schema.py#L264-L276 | def type_map_directive_reducer(
map_: TypeMap, directive: GraphQLDirective = None
) -> TypeMap:
"""Reducer function for creating the type map from given directives."""
# Directives are not validated until validate_schema() is called.
if not is_directive(directive):
return map_
directive = cast(GraphQLDirective, directive)
return reduce(
lambda prev_map, arg: type_map_reducer(prev_map, arg.type), # type: ignore
directive.args.values(),
map_,
) | [
"def",
"type_map_directive_reducer",
"(",
"map_",
":",
"TypeMap",
",",
"directive",
":",
"GraphQLDirective",
"=",
"None",
")",
"->",
"TypeMap",
":",
"# Directives are not validated until validate_schema() is called.",
"if",
"not",
"is_directive",
"(",
"directive",
")",
":",
"return",
"map_",
"directive",
"=",
"cast",
"(",
"GraphQLDirective",
",",
"directive",
")",
"return",
"reduce",
"(",
"lambda",
"prev_map",
",",
"arg",
":",
"type_map_reducer",
"(",
"prev_map",
",",
"arg",
".",
"type",
")",
",",
"# type: ignore",
"directive",
".",
"args",
".",
"values",
"(",
")",
",",
"map_",
",",
")"
] | Reducer function for creating the type map from given directives. | [
"Reducer",
"function",
"for",
"creating",
"the",
"type",
"map",
"from",
"given",
"directives",
"."
] | python | train |
brainiak/brainiak | brainiak/funcalign/srm.py | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/funcalign/srm.py#L235-L266 | def transform(self, X, y=None):
"""Use the model to transform matrix to Shared Response space
Parameters
----------
X : list of 2D arrays, element i has shape=[voxels_i, samples_i]
Each element in the list contains the fMRI data of one subject
note that number of voxels and samples can vary across subjects
y : not used (as it is unsupervised learning)
Returns
-------
s : list of 2D arrays, element i has shape=[features_i, samples_i]
Shared responses from input data (X)
"""
# Check if the model exist
if hasattr(self, 'w_') is False:
raise NotFittedError("The model fit has not been run yet.")
# Check the number of subjects
if len(X) != len(self.w_):
raise ValueError("The number of subjects does not match the one"
" in the model.")
s = [None] * len(X)
for subject in range(len(X)):
if X[subject] is not None:
s[subject] = self.w_[subject].T.dot(X[subject])
return s | [
"def",
"transform",
"(",
"self",
",",
"X",
",",
"y",
"=",
"None",
")",
":",
"# Check if the model exist",
"if",
"hasattr",
"(",
"self",
",",
"'w_'",
")",
"is",
"False",
":",
"raise",
"NotFittedError",
"(",
"\"The model fit has not been run yet.\"",
")",
"# Check the number of subjects",
"if",
"len",
"(",
"X",
")",
"!=",
"len",
"(",
"self",
".",
"w_",
")",
":",
"raise",
"ValueError",
"(",
"\"The number of subjects does not match the one\"",
"\" in the model.\"",
")",
"s",
"=",
"[",
"None",
"]",
"*",
"len",
"(",
"X",
")",
"for",
"subject",
"in",
"range",
"(",
"len",
"(",
"X",
")",
")",
":",
"if",
"X",
"[",
"subject",
"]",
"is",
"not",
"None",
":",
"s",
"[",
"subject",
"]",
"=",
"self",
".",
"w_",
"[",
"subject",
"]",
".",
"T",
".",
"dot",
"(",
"X",
"[",
"subject",
"]",
")",
"return",
"s"
] | Use the model to transform matrix to Shared Response space
Parameters
----------
X : list of 2D arrays, element i has shape=[voxels_i, samples_i]
Each element in the list contains the fMRI data of one subject
note that number of voxels and samples can vary across subjects
y : not used (as it is unsupervised learning)
Returns
-------
s : list of 2D arrays, element i has shape=[features_i, samples_i]
Shared responses from input data (X) | [
"Use",
"the",
"model",
"to",
"transform",
"matrix",
"to",
"Shared",
"Response",
"space"
] | python | train |
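
A hedged fit/transform sketch for the SRM record above using random data; the subject, voxel and TR counts are made up and brainiak is assumed to be installed.

import numpy as np
from brainiak.funcalign.srm import SRM

n_subjects, n_trs = 3, 100
# Voxel counts may differ per subject; samples (TRs) must match for fit().
data = [np.random.randn(np.random.randint(900, 1100), n_trs)
        for _ in range(n_subjects)]

srm = SRM(n_iter=10, features=20)
srm.fit(data)                      # learns one w_ basis per subject
shared = srm.transform(data)       # list of (features, samples) arrays
print(shared[0].shape)             # (20, 100)
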
glue-viz/glue-vispy-viewers | glue_vispy_viewers/extern/vispy/geometry/calculations.py | https://github.com/glue-viz/glue-vispy-viewers/blob/54a4351d98c1f90dfb1a557d1b447c1f57470eea/glue_vispy_viewers/extern/vispy/geometry/calculations.py#L16-L50 | def _fast_cross_3d(x, y):
"""Compute cross product between list of 3D vectors
Much faster than np.cross() when the number of cross products
becomes large (>500). This is because np.cross() methods become
less memory efficient at this stage.
Parameters
----------
x : array
Input array 1.
y : array
Input array 2.
Returns
-------
z : array
Cross product of x and y.
Notes
-----
x and y must both be 2D row vectors. One must have length 1, or both
lengths must match.
"""
assert x.ndim == 2
assert y.ndim == 2
assert x.shape[1] == 3
assert y.shape[1] == 3
assert (x.shape[0] == 1 or y.shape[0] == 1) or x.shape[0] == y.shape[0]
if max([x.shape[0], y.shape[0]]) >= 500:
return np.c_[x[:, 1] * y[:, 2] - x[:, 2] * y[:, 1],
x[:, 2] * y[:, 0] - x[:, 0] * y[:, 2],
x[:, 0] * y[:, 1] - x[:, 1] * y[:, 0]]
else:
return np.cross(x, y) | [
"def",
"_fast_cross_3d",
"(",
"x",
",",
"y",
")",
":",
"assert",
"x",
".",
"ndim",
"==",
"2",
"assert",
"y",
".",
"ndim",
"==",
"2",
"assert",
"x",
".",
"shape",
"[",
"1",
"]",
"==",
"3",
"assert",
"y",
".",
"shape",
"[",
"1",
"]",
"==",
"3",
"assert",
"(",
"x",
".",
"shape",
"[",
"0",
"]",
"==",
"1",
"or",
"y",
".",
"shape",
"[",
"0",
"]",
"==",
"1",
")",
"or",
"x",
".",
"shape",
"[",
"0",
"]",
"==",
"y",
".",
"shape",
"[",
"0",
"]",
"if",
"max",
"(",
"[",
"x",
".",
"shape",
"[",
"0",
"]",
",",
"y",
".",
"shape",
"[",
"0",
"]",
"]",
")",
">=",
"500",
":",
"return",
"np",
".",
"c_",
"[",
"x",
"[",
":",
",",
"1",
"]",
"*",
"y",
"[",
":",
",",
"2",
"]",
"-",
"x",
"[",
":",
",",
"2",
"]",
"*",
"y",
"[",
":",
",",
"1",
"]",
",",
"x",
"[",
":",
",",
"2",
"]",
"*",
"y",
"[",
":",
",",
"0",
"]",
"-",
"x",
"[",
":",
",",
"0",
"]",
"*",
"y",
"[",
":",
",",
"2",
"]",
",",
"x",
"[",
":",
",",
"0",
"]",
"*",
"y",
"[",
":",
",",
"1",
"]",
"-",
"x",
"[",
":",
",",
"1",
"]",
"*",
"y",
"[",
":",
",",
"0",
"]",
"]",
"else",
":",
"return",
"np",
".",
"cross",
"(",
"x",
",",
"y",
")"
] | Compute cross product between list of 3D vectors
Much faster than np.cross() when the number of cross products
becomes large (>500). This is because np.cross() methods become
less memory efficient at this stage.
Parameters
----------
x : array
Input array 1.
y : array
Input array 2.
Returns
-------
z : array
Cross product of x and y.
Notes
-----
x and y must both be 2D row vectors. One must have length 1, or both
lengths must match. | [
"Compute",
"cross",
"product",
"between",
"list",
"of",
"3D",
"vectors"
] | python | train |
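
A quick equivalence check of _fast_cross_3d() against np.cross; the helper is private, so the import path simply mirrors the record and assumes the bundled package is importable.

import numpy as np
from glue_vispy_viewers.extern.vispy.geometry.calculations import _fast_cross_3d

x = np.random.rand(1000, 3)   # large enough to take the memory-friendly branch
y = np.random.rand(1000, 3)
assert np.allclose(_fast_cross_3d(x, y), np.cross(x, y))

x1 = np.random.rand(1, 3)     # a single row broadcast against many also works
assert np.allclose(_fast_cross_3d(x1, y), np.cross(x1, y))
print("matches np.cross")
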
xtuml/pyxtuml | bridgepoint/ooaofooa.py | https://github.com/xtuml/pyxtuml/blob/7dd9343b9a0191d1db1887ab9288d0a026608d9a/bridgepoint/ooaofooa.py#L567-L573 | def load_component(resource, name=None, load_globals=True):
'''
Load and return a model from a *resource*. The resource may be either a
filename, a path, or a list of filenames and/or paths.
'''
loader = _mk_loader(resource, load_globals)
return loader.build_component() | [
"def",
"load_component",
"(",
"resource",
",",
"name",
"=",
"None",
",",
"load_globals",
"=",
"True",
")",
":",
"loader",
"=",
"_mk_loader",
"(",
"resource",
",",
"load_globals",
")",
"return",
"loader",
".",
"build_component",
"(",
")"
] | Load and return a model from a *resource*. The resource may be either a
filename, a path, or a list of filenames and/or paths. | [
"Load",
"and",
"return",
"a",
"model",
"from",
"a",
"*",
"resource",
"*",
".",
"The",
"resource",
"may",
"be",
"either",
"a",
"filename",
"a",
"path",
"or",
"a",
"list",
"of",
"filenames",
"and",
"/",
"or",
"paths",
"."
] | python | test |
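
A minimal usage sketch for load_component(); the file and directory names are illustrative, not taken from the record.

from bridgepoint import ooaofooa

# A single model file...
component = ooaofooa.load_component('MicrowaveOven.xtuml')

# ...or several files and/or directories combined into one component.
component = ooaofooa.load_component(['globals.xtuml', 'models/'])
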
veltzer/pypitools | pypitools/scripts/upload.py | https://github.com/veltzer/pypitools/blob/5f097be21e9bc65578eed5b6b7855c1945540701/pypitools/scripts/upload.py#L32-L42 | def main():
"""
upload a package to pypi or gemfury
:return:
"""
setup_main()
config = ConfigData(clean=True)
try:
config.upload()
finally:
config.clean_after_if_needed() | [
"def",
"main",
"(",
")",
":",
"setup_main",
"(",
")",
"config",
"=",
"ConfigData",
"(",
"clean",
"=",
"True",
")",
"try",
":",
"config",
".",
"upload",
"(",
")",
"finally",
":",
"config",
".",
"clean_after_if_needed",
"(",
")"
] | upload a package to pypi or gemfury
:return: | [
"upload",
"a",
"package",
"to",
"pypi",
"or",
"gemfury",
":",
"return",
":"
] | python | train |
ARMmbed/mbed-cloud-sdk-python | src/mbed_cloud/_backends/iam/apis/aggregator_account_admin_api.py | https://github.com/ARMmbed/mbed-cloud-sdk-python/blob/c0af86fb2cdd4dc7ed26f236139241067d293509/src/mbed_cloud/_backends/iam/apis/aggregator_account_admin_api.py#L488-L509 | def check_account_api_key(self, account_id, api_key, **kwargs): # noqa: E501
"""Check the API key. # noqa: E501
An endpoint for checking API key. **Example usage:** `curl -X POST https://api.us-east-1.mbedcloud.com/v3/accounts/{accountID}/api-keys/{apiKey} -H 'Authorization: Bearer API_KEY'` # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass asynchronous=True
>>> thread = api.check_account_api_key(account_id, api_key, asynchronous=True)
>>> result = thread.get()
:param asynchronous bool
:param str account_id: Account ID. (required)
:param str api_key: The API key to be checked. (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('asynchronous'):
return self.check_account_api_key_with_http_info(account_id, api_key, **kwargs) # noqa: E501
else:
(data) = self.check_account_api_key_with_http_info(account_id, api_key, **kwargs) # noqa: E501
return data | [
"def",
"check_account_api_key",
"(",
"self",
",",
"account_id",
",",
"api_key",
",",
"*",
"*",
"kwargs",
")",
":",
"# noqa: E501",
"kwargs",
"[",
"'_return_http_data_only'",
"]",
"=",
"True",
"if",
"kwargs",
".",
"get",
"(",
"'asynchronous'",
")",
":",
"return",
"self",
".",
"check_account_api_key_with_http_info",
"(",
"account_id",
",",
"api_key",
",",
"*",
"*",
"kwargs",
")",
"# noqa: E501",
"else",
":",
"(",
"data",
")",
"=",
"self",
".",
"check_account_api_key_with_http_info",
"(",
"account_id",
",",
"api_key",
",",
"*",
"*",
"kwargs",
")",
"# noqa: E501",
"return",
"data"
] | Check the API key. # noqa: E501
An endpoint for checking API key. **Example usage:** `curl -X POST https://api.us-east-1.mbedcloud.com/v3/accounts/{accountID}/api-keys/{apiKey} -H 'Authorization: Bearer API_KEY'` # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass asynchronous=True
>>> thread = api.check_account_api_key(account_id, api_key, asynchronous=True)
>>> result = thread.get()
:param asynchronous bool
:param str account_id: Account ID. (required)
:param str api_key: The API key to be checked. (required)
:return: None
If the method is called asynchronously,
returns the request thread. | [
"Check",
"the",
"API",
"key",
".",
"#",
"noqa",
":",
"E501"
] | python | train |
pecan/pecan | pecan/core.py | https://github.com/pecan/pecan/blob/833d0653fa0e6bbfb52545b091c30182105f4a82/pecan/core.py#L427-L552 | def find_controller(self, state):
'''
The main request handler for Pecan applications.
'''
# get a sorted list of hooks, by priority (no controller hooks yet)
req = state.request
pecan_state = req.pecan
# store the routing path for the current application to allow hooks to
# modify it
pecan_state['routing_path'] = path = req.path_info
# handle "on_route" hooks
self.handle_hooks(self.hooks, 'on_route', state)
# lookup the controller, respecting content-type as requested
# by the file extension on the URI
pecan_state['extension'] = None
# attempt to guess the content type based on the file extension
if self.guess_content_type_from_ext \
and not pecan_state['content_type'] \
and '.' in path:
_, extension = splitext(path.rstrip('/'))
# preface with a letter to ensure compat for 2.5
potential_type = guess_type('x' + extension)[0]
if extension and potential_type is not None:
path = ''.join(path.rsplit(extension, 1))
pecan_state['extension'] = extension
pecan_state['content_type'] = potential_type
controller, remainder = self.route(req, self.root, path)
cfg = _cfg(controller)
if cfg.get('generic_handler'):
raise exc.HTTPNotFound
# handle generic controllers
im_self = None
if cfg.get('generic'):
im_self = six.get_method_self(controller)
handlers = cfg['generic_handlers']
controller = handlers.get(req.method, handlers['DEFAULT'])
handle_security(controller, im_self)
cfg = _cfg(controller)
# add the controller to the state so that hooks can use it
state.controller = controller
# if unsure ask the controller for the default content type
content_types = cfg.get('content_types', {})
if not pecan_state['content_type']:
# attempt to find a best match based on accept headers (if they
# exist)
accept = getattr(req.accept, 'header_value', '*/*') or '*/*'
if accept == '*/*' or (
accept.startswith('text/html,') and
list(content_types.keys()) in self.SIMPLEST_CONTENT_TYPES):
pecan_state['content_type'] = cfg.get(
'content_type',
'text/html'
)
else:
best_default = acceptparse.MIMEAccept(
accept
).best_match(
content_types.keys()
)
if best_default is None:
msg = "Controller '%s' defined does not support " + \
"content_type '%s'. Supported type(s): %s"
logger.error(
msg % (
controller.__name__,
pecan_state['content_type'],
content_types.keys()
)
)
raise exc.HTTPNotAcceptable()
pecan_state['content_type'] = best_default
elif cfg.get('content_type') is not None and \
pecan_state['content_type'] not in content_types:
msg = "Controller '%s' defined does not support content_type " + \
"'%s'. Supported type(s): %s"
logger.error(
msg % (
controller.__name__,
pecan_state['content_type'],
content_types.keys()
)
)
raise exc.HTTPNotFound
# fetch any parameters
if req.method == 'GET':
params = req.GET
elif req.content_type in ('application/json',
'application/javascript'):
try:
if not isinstance(req.json, dict):
raise TypeError('%s is not a dict' % req.json)
params = NestedMultiDict(req.GET, req.json)
except (TypeError, ValueError):
params = req.params
else:
params = req.params
# fetch the arguments for the controller
args, varargs, kwargs = self.get_args(
state,
params.mixed(),
remainder,
cfg['argspec'],
im_self
)
state.arguments = Arguments(args, varargs, kwargs)
# handle "before" hooks
self.handle_hooks(self.determine_hooks(controller), 'before', state)
return controller, args + varargs, kwargs | [
"def",
"find_controller",
"(",
"self",
",",
"state",
")",
":",
"# get a sorted list of hooks, by priority (no controller hooks yet)",
"req",
"=",
"state",
".",
"request",
"pecan_state",
"=",
"req",
".",
"pecan",
"# store the routing path for the current application to allow hooks to",
"# modify it",
"pecan_state",
"[",
"'routing_path'",
"]",
"=",
"path",
"=",
"req",
".",
"path_info",
"# handle \"on_route\" hooks",
"self",
".",
"handle_hooks",
"(",
"self",
".",
"hooks",
",",
"'on_route'",
",",
"state",
")",
"# lookup the controller, respecting content-type as requested",
"# by the file extension on the URI",
"pecan_state",
"[",
"'extension'",
"]",
"=",
"None",
"# attempt to guess the content type based on the file extension",
"if",
"self",
".",
"guess_content_type_from_ext",
"and",
"not",
"pecan_state",
"[",
"'content_type'",
"]",
"and",
"'.'",
"in",
"path",
":",
"_",
",",
"extension",
"=",
"splitext",
"(",
"path",
".",
"rstrip",
"(",
"'/'",
")",
")",
"# preface with a letter to ensure compat for 2.5",
"potential_type",
"=",
"guess_type",
"(",
"'x'",
"+",
"extension",
")",
"[",
"0",
"]",
"if",
"extension",
"and",
"potential_type",
"is",
"not",
"None",
":",
"path",
"=",
"''",
".",
"join",
"(",
"path",
".",
"rsplit",
"(",
"extension",
",",
"1",
")",
")",
"pecan_state",
"[",
"'extension'",
"]",
"=",
"extension",
"pecan_state",
"[",
"'content_type'",
"]",
"=",
"potential_type",
"controller",
",",
"remainder",
"=",
"self",
".",
"route",
"(",
"req",
",",
"self",
".",
"root",
",",
"path",
")",
"cfg",
"=",
"_cfg",
"(",
"controller",
")",
"if",
"cfg",
".",
"get",
"(",
"'generic_handler'",
")",
":",
"raise",
"exc",
".",
"HTTPNotFound",
"# handle generic controllers",
"im_self",
"=",
"None",
"if",
"cfg",
".",
"get",
"(",
"'generic'",
")",
":",
"im_self",
"=",
"six",
".",
"get_method_self",
"(",
"controller",
")",
"handlers",
"=",
"cfg",
"[",
"'generic_handlers'",
"]",
"controller",
"=",
"handlers",
".",
"get",
"(",
"req",
".",
"method",
",",
"handlers",
"[",
"'DEFAULT'",
"]",
")",
"handle_security",
"(",
"controller",
",",
"im_self",
")",
"cfg",
"=",
"_cfg",
"(",
"controller",
")",
"# add the controller to the state so that hooks can use it",
"state",
".",
"controller",
"=",
"controller",
"# if unsure ask the controller for the default content type",
"content_types",
"=",
"cfg",
".",
"get",
"(",
"'content_types'",
",",
"{",
"}",
")",
"if",
"not",
"pecan_state",
"[",
"'content_type'",
"]",
":",
"# attempt to find a best match based on accept headers (if they",
"# exist)",
"accept",
"=",
"getattr",
"(",
"req",
".",
"accept",
",",
"'header_value'",
",",
"'*/*'",
")",
"or",
"'*/*'",
"if",
"accept",
"==",
"'*/*'",
"or",
"(",
"accept",
".",
"startswith",
"(",
"'text/html,'",
")",
"and",
"list",
"(",
"content_types",
".",
"keys",
"(",
")",
")",
"in",
"self",
".",
"SIMPLEST_CONTENT_TYPES",
")",
":",
"pecan_state",
"[",
"'content_type'",
"]",
"=",
"cfg",
".",
"get",
"(",
"'content_type'",
",",
"'text/html'",
")",
"else",
":",
"best_default",
"=",
"acceptparse",
".",
"MIMEAccept",
"(",
"accept",
")",
".",
"best_match",
"(",
"content_types",
".",
"keys",
"(",
")",
")",
"if",
"best_default",
"is",
"None",
":",
"msg",
"=",
"\"Controller '%s' defined does not support \"",
"+",
"\"content_type '%s'. Supported type(s): %s\"",
"logger",
".",
"error",
"(",
"msg",
"%",
"(",
"controller",
".",
"__name__",
",",
"pecan_state",
"[",
"'content_type'",
"]",
",",
"content_types",
".",
"keys",
"(",
")",
")",
")",
"raise",
"exc",
".",
"HTTPNotAcceptable",
"(",
")",
"pecan_state",
"[",
"'content_type'",
"]",
"=",
"best_default",
"elif",
"cfg",
".",
"get",
"(",
"'content_type'",
")",
"is",
"not",
"None",
"and",
"pecan_state",
"[",
"'content_type'",
"]",
"not",
"in",
"content_types",
":",
"msg",
"=",
"\"Controller '%s' defined does not support content_type \"",
"+",
"\"'%s'. Supported type(s): %s\"",
"logger",
".",
"error",
"(",
"msg",
"%",
"(",
"controller",
".",
"__name__",
",",
"pecan_state",
"[",
"'content_type'",
"]",
",",
"content_types",
".",
"keys",
"(",
")",
")",
")",
"raise",
"exc",
".",
"HTTPNotFound",
"# fetch any parameters",
"if",
"req",
".",
"method",
"==",
"'GET'",
":",
"params",
"=",
"req",
".",
"GET",
"elif",
"req",
".",
"content_type",
"in",
"(",
"'application/json'",
",",
"'application/javascript'",
")",
":",
"try",
":",
"if",
"not",
"isinstance",
"(",
"req",
".",
"json",
",",
"dict",
")",
":",
"raise",
"TypeError",
"(",
"'%s is not a dict'",
"%",
"req",
".",
"json",
")",
"params",
"=",
"NestedMultiDict",
"(",
"req",
".",
"GET",
",",
"req",
".",
"json",
")",
"except",
"(",
"TypeError",
",",
"ValueError",
")",
":",
"params",
"=",
"req",
".",
"params",
"else",
":",
"params",
"=",
"req",
".",
"params",
"# fetch the arguments for the controller",
"args",
",",
"varargs",
",",
"kwargs",
"=",
"self",
".",
"get_args",
"(",
"state",
",",
"params",
".",
"mixed",
"(",
")",
",",
"remainder",
",",
"cfg",
"[",
"'argspec'",
"]",
",",
"im_self",
")",
"state",
".",
"arguments",
"=",
"Arguments",
"(",
"args",
",",
"varargs",
",",
"kwargs",
")",
"# handle \"before\" hooks",
"self",
".",
"handle_hooks",
"(",
"self",
".",
"determine_hooks",
"(",
"controller",
")",
",",
"'before'",
",",
"state",
")",
"return",
"controller",
",",
"args",
"+",
"varargs",
",",
"kwargs"
] | The main request handler for Pecan applications. | [
"The",
"main",
"request",
"handler",
"for",
"Pecan",
"applications",
"."
] | python | train |
ajk8/hatchery | hatchery/executor.py | https://github.com/ajk8/hatchery/blob/e068c9f5366d2c98225babb03d4cde36c710194f/hatchery/executor.py#L66-L85 | def call(cmd_args, suppress_output=False):
""" Call an arbitary command and return the exit value, stdout, and stderr as a tuple
Command can be passed in as either a string or iterable
>>> result = call('hatchery', suppress_output=True)
>>> result.exitval
0
>>> result = call(['hatchery', 'notreal'])
>>> result.exitval
1
"""
if not funcy.is_list(cmd_args) and not funcy.is_tuple(cmd_args):
cmd_args = shlex.split(cmd_args)
logger.info('executing `{}`'.format(' '.join(cmd_args)))
call_request = CallRequest(cmd_args, suppress_output=suppress_output)
call_result = call_request.run()
if call_result.exitval:
logger.error('`{}` returned error code {}'.format(' '.join(cmd_args), call_result.exitval))
return call_result | [
"def",
"call",
"(",
"cmd_args",
",",
"suppress_output",
"=",
"False",
")",
":",
"if",
"not",
"funcy",
".",
"is_list",
"(",
"cmd_args",
")",
"and",
"not",
"funcy",
".",
"is_tuple",
"(",
"cmd_args",
")",
":",
"cmd_args",
"=",
"shlex",
".",
"split",
"(",
"cmd_args",
")",
"logger",
".",
"info",
"(",
"'executing `{}`'",
".",
"format",
"(",
"' '",
".",
"join",
"(",
"cmd_args",
")",
")",
")",
"call_request",
"=",
"CallRequest",
"(",
"cmd_args",
",",
"suppress_output",
"=",
"suppress_output",
")",
"call_result",
"=",
"call_request",
".",
"run",
"(",
")",
"if",
"call_result",
".",
"exitval",
":",
"logger",
".",
"error",
"(",
"'`{}` returned error code {}'",
".",
"format",
"(",
"' '",
".",
"join",
"(",
"cmd_args",
")",
",",
"call_result",
".",
"exitval",
")",
")",
"return",
"call_result"
] | Call an arbitary command and return the exit value, stdout, and stderr as a tuple
Command can be passed in as either a string or iterable
>>> result = call('hatchery', suppress_output=True)
>>> result.exitval
0
>>> result = call(['hatchery', 'notreal'])
>>> result.exitval
1 | [
"Call",
"an",
"arbitary",
"command",
"and",
"return",
"the",
"exit",
"value",
"stdout",
"and",
"stderr",
"as",
"a",
"tuple"
] | python | train |
saltstack/salt | salt/modules/nftables.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/nftables.py#L370-L426 | def get_rule_handle(table='filter', chain=None, rule=None, family='ipv4'):
'''
Get the handle for a particular rule
This function accepts a rule in a standard nftables command format,
starting with the chain. Trying to force users to adapt to a new
method of creating rules would be irritating at best, and we
already have a parser that can handle it.
CLI Example:
.. code-block:: bash
salt '*' nftables.get_rule_handle filter input \\
rule='tcp dport 22 log accept'
IPv6:
salt '*' nftables.get_rule_handle filter input \\
rule='tcp dport 22 log accept' \\
family=ipv6
'''
ret = {'comment': '',
'result': False}
if not chain:
ret['comment'] = 'Chain needs to be specified'
return ret
if not rule:
ret['comment'] = 'Rule needs to be specified'
return ret
res = check_table(table, family=family)
if not res['result']:
return res
res = check_chain(table, chain, family=family)
if not res['result']:
return res
res = check(table, chain, rule, family=family)
if not res['result']:
return res
nft_family = _NFTABLES_FAMILIES[family]
cmd = '{0} --numeric --numeric --numeric --handle list chain {1} {2} {3}'.\
format(_nftables_cmd(), nft_family, table, chain)
out = __salt__['cmd.run'](cmd, python_shell=False)
rules = re.split('\n+', out)
pat = re.compile(r'{0} # handle (?P<handle>\d+)'.format(rule))
for r in rules:
match = pat.search(r)
if match:
return {'result': True, 'handle': match.group('handle')}
return {'result': False,
'comment': 'Could not find rule {0}'.format(rule)} | [
"def",
"get_rule_handle",
"(",
"table",
"=",
"'filter'",
",",
"chain",
"=",
"None",
",",
"rule",
"=",
"None",
",",
"family",
"=",
"'ipv4'",
")",
":",
"ret",
"=",
"{",
"'comment'",
":",
"''",
",",
"'result'",
":",
"False",
"}",
"if",
"not",
"chain",
":",
"ret",
"[",
"'comment'",
"]",
"=",
"'Chain needs to be specified'",
"return",
"ret",
"if",
"not",
"rule",
":",
"ret",
"[",
"'comment'",
"]",
"=",
"'Rule needs to be specified'",
"return",
"ret",
"res",
"=",
"check_table",
"(",
"table",
",",
"family",
"=",
"family",
")",
"if",
"not",
"res",
"[",
"'result'",
"]",
":",
"return",
"res",
"res",
"=",
"check_chain",
"(",
"table",
",",
"chain",
",",
"family",
"=",
"family",
")",
"if",
"not",
"res",
"[",
"'result'",
"]",
":",
"return",
"res",
"res",
"=",
"check",
"(",
"table",
",",
"chain",
",",
"rule",
",",
"family",
"=",
"family",
")",
"if",
"not",
"res",
"[",
"'result'",
"]",
":",
"return",
"res",
"nft_family",
"=",
"_NFTABLES_FAMILIES",
"[",
"family",
"]",
"cmd",
"=",
"'{0} --numeric --numeric --numeric --handle list chain {1} {2} {3}'",
".",
"format",
"(",
"_nftables_cmd",
"(",
")",
",",
"nft_family",
",",
"table",
",",
"chain",
")",
"out",
"=",
"__salt__",
"[",
"'cmd.run'",
"]",
"(",
"cmd",
",",
"python_shell",
"=",
"False",
")",
"rules",
"=",
"re",
".",
"split",
"(",
"'\\n+'",
",",
"out",
")",
"pat",
"=",
"re",
".",
"compile",
"(",
"r'{0} # handle (?P<handle>\\d+)'",
".",
"format",
"(",
"rule",
")",
")",
"for",
"r",
"in",
"rules",
":",
"match",
"=",
"pat",
".",
"search",
"(",
"r",
")",
"if",
"match",
":",
"return",
"{",
"'result'",
":",
"True",
",",
"'handle'",
":",
"match",
".",
"group",
"(",
"'handle'",
")",
"}",
"return",
"{",
"'result'",
":",
"False",
",",
"'comment'",
":",
"'Could not find rule {0}'",
".",
"format",
"(",
"rule",
")",
"}"
] | Get the handle for a particular rule
This function accepts a rule in a standard nftables command format,
starting with the chain. Trying to force users to adapt to a new
method of creating rules would be irritating at best, and we
already have a parser that can handle it.
CLI Example:
.. code-block:: bash
salt '*' nftables.get_rule_handle filter input \\
rule='tcp dport 22 log accept'
IPv6:
salt '*' nftables.get_rule_handle filter input \\
rule='tcp dport 22 log accept' \\
family=ipv6 | [
"Get",
"the",
"handle",
"for",
"a",
"particular",
"rule"
] | python | train |
evocell/rabifier | rabifier/rabmyfire.py | https://github.com/evocell/rabifier/blob/a5be3d516517e555bde463b94f06aeed106d19b8/rabifier/rabmyfire.py#L73-L88 | def has_rabf_motif(self):
"""Checks if the sequence has enough RabF motifs within the G domain
If there exists more than one G domain in the sequence enough RabF motifs is required in at least one
of those domains to classify the sequence as a Rab.
"""
if self.rabf_motifs:
for gdomain in self.gdomain_regions:
beg, end = map(int, gdomain.split('-'))
motifs = [x for x in self.rabf_motifs if x[1] >= beg and x[2] <= end]
if motifs:
matches = int(pairwise2.align.globalxx('12345', ''.join(str(x[0]) for x in motifs))[0][2])
if matches >= self.motif_number:
return True
return False | [
"def",
"has_rabf_motif",
"(",
"self",
")",
":",
"if",
"self",
".",
"rabf_motifs",
":",
"for",
"gdomain",
"in",
"self",
".",
"gdomain_regions",
":",
"beg",
",",
"end",
"=",
"map",
"(",
"int",
",",
"gdomain",
".",
"split",
"(",
"'-'",
")",
")",
"motifs",
"=",
"[",
"x",
"for",
"x",
"in",
"self",
".",
"rabf_motifs",
"if",
"x",
"[",
"1",
"]",
">=",
"beg",
"and",
"x",
"[",
"2",
"]",
"<=",
"end",
"]",
"if",
"motifs",
":",
"matches",
"=",
"int",
"(",
"pairwise2",
".",
"align",
".",
"globalxx",
"(",
"'12345'",
",",
"''",
".",
"join",
"(",
"str",
"(",
"x",
"[",
"0",
"]",
")",
"for",
"x",
"in",
"motifs",
")",
")",
"[",
"0",
"]",
"[",
"2",
"]",
")",
"if",
"matches",
">=",
"self",
".",
"motif_number",
":",
"return",
"True",
"return",
"False"
] | Checks if the sequence has enough RabF motifs within the G domain
If there exists more than one G domain in the sequence enough RabF motifs is required in at least one
of those domains to classify the sequence as a Rab. | [
"Checks",
"if",
"the",
"sequence",
"has",
"enough",
"RabF",
"motifs",
"within",
"the",
"G",
"domain"
] | python | train |
pandas-dev/pandas | pandas/core/arrays/categorical.py | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/arrays/categorical.py#L942-L983 | def reorder_categories(self, new_categories, ordered=None, inplace=False):
"""
Reorder categories as specified in new_categories.
`new_categories` need to include all old categories and no new category
items.
Parameters
----------
new_categories : Index-like
The categories in new order.
ordered : bool, optional
Whether or not the categorical is treated as a ordered categorical.
If not given, do not change the ordered information.
inplace : bool, default False
Whether or not to reorder the categories inplace or return a copy of
this categorical with reordered categories.
Returns
-------
cat : Categorical with reordered categories or None if inplace.
Raises
------
ValueError
If the new categories do not contain all old category items or any
new ones
See Also
--------
rename_categories
add_categories
remove_categories
remove_unused_categories
set_categories
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if set(self.dtype.categories) != set(new_categories):
raise ValueError("items in new_categories are not the same as in "
"old categories")
return self.set_categories(new_categories, ordered=ordered,
inplace=inplace) | [
"def",
"reorder_categories",
"(",
"self",
",",
"new_categories",
",",
"ordered",
"=",
"None",
",",
"inplace",
"=",
"False",
")",
":",
"inplace",
"=",
"validate_bool_kwarg",
"(",
"inplace",
",",
"'inplace'",
")",
"if",
"set",
"(",
"self",
".",
"dtype",
".",
"categories",
")",
"!=",
"set",
"(",
"new_categories",
")",
":",
"raise",
"ValueError",
"(",
"\"items in new_categories are not the same as in \"",
"\"old categories\"",
")",
"return",
"self",
".",
"set_categories",
"(",
"new_categories",
",",
"ordered",
"=",
"ordered",
",",
"inplace",
"=",
"inplace",
")"
] | Reorder categories as specified in new_categories.
`new_categories` need to include all old categories and no new category
items.
Parameters
----------
new_categories : Index-like
The categories in new order.
ordered : bool, optional
Whether or not the categorical is treated as a ordered categorical.
If not given, do not change the ordered information.
inplace : bool, default False
Whether or not to reorder the categories inplace or return a copy of
this categorical with reordered categories.
Returns
-------
cat : Categorical with reordered categories or None if inplace.
Raises
------
ValueError
If the new categories do not contain all old category items or any
new ones
See Also
--------
rename_categories
add_categories
remove_categories
remove_unused_categories
set_categories | [
"Reorder",
"categories",
"as",
"specified",
"in",
"new_categories",
"."
] | python | train |
crytic/slither | slither/detectors/reentrancy/reentrancy.py | https://github.com/crytic/slither/blob/04c147f7e50223c6af458ca430befae747ccd259/slither/detectors/reentrancy/reentrancy.py#L95-L179 | def _explore(self, node, visited, skip_father=None):
"""
Explore the CFG and look for re-entrancy
Heuristic: There is a re-entrancy if a state variable is written
after an external call
node.context will contains the external calls executed
It contains the calls executed in father nodes
if node.context is not empty, and variables are written, a re-entrancy is possible
"""
if node in visited:
return
visited = visited + [node]
# First we add the external calls executed in previous nodes
# send_eth returns the list of calls sending value
# calls returns the list of calls that can callback
# read returns the variable read
# read_prior_calls returns the variable read prior a call
fathers_context = {'send_eth':set(), 'calls':set(), 'read':set(), 'read_prior_calls':{}}
for father in node.fathers:
if self.KEY in father.context:
fathers_context['send_eth'] |= set([s for s in father.context[self.KEY]['send_eth'] if s!=skip_father])
fathers_context['calls'] |= set([c for c in father.context[self.KEY]['calls'] if c!=skip_father])
fathers_context['read'] |= set(father.context[self.KEY]['read'])
fathers_context['read_prior_calls'] = union_dict(fathers_context['read_prior_calls'], father.context[self.KEY]['read_prior_calls'])
# Exclude path that dont bring further information
if node in self.visited_all_paths:
if all(call in self.visited_all_paths[node]['calls'] for call in fathers_context['calls']):
if all(send in self.visited_all_paths[node]['send_eth'] for send in fathers_context['send_eth']):
if all(read in self.visited_all_paths[node]['read'] for read in fathers_context['read']):
if dict_are_equal(self.visited_all_paths[node]['read_prior_calls'], fathers_context['read_prior_calls']):
return
else:
self.visited_all_paths[node] = {'send_eth':set(), 'calls':set(), 'read':set(), 'read_prior_calls':{}}
self.visited_all_paths[node]['send_eth'] = set(self.visited_all_paths[node]['send_eth'] | fathers_context['send_eth'])
self.visited_all_paths[node]['calls'] = set(self.visited_all_paths[node]['calls'] | fathers_context['calls'])
self.visited_all_paths[node]['read'] = set(self.visited_all_paths[node]['read'] | fathers_context['read'])
self.visited_all_paths[node]['read_prior_calls'] = union_dict(self.visited_all_paths[node]['read_prior_calls'], fathers_context['read_prior_calls'])
node.context[self.KEY] = fathers_context
state_vars_read = set(node.state_variables_read)
# All the state variables written
state_vars_written = set(node.state_variables_written)
slithir_operations = []
# Add the state variables written in internal calls
for internal_call in node.internal_calls:
# Filter to Function, as internal_call can be a solidity call
if isinstance(internal_call, Function):
state_vars_written |= set(internal_call.all_state_variables_written())
state_vars_read |= set(internal_call.all_state_variables_read())
slithir_operations += internal_call.all_slithir_operations()
contains_call = False
node.context[self.KEY]['written'] = set(state_vars_written)
if self._can_callback(node.irs + slithir_operations):
node.context[self.KEY]['calls'] = set(node.context[self.KEY]['calls'] | {node})
node.context[self.KEY]['read_prior_calls'][node] = set(node.context[self.KEY]['read_prior_calls'].get(node, set()) | node.context[self.KEY]['read'] |state_vars_read)
contains_call = True
if self._can_send_eth(node.irs + slithir_operations):
node.context[self.KEY]['send_eth'] = set(node.context[self.KEY]['send_eth'] | {node})
node.context[self.KEY]['read'] = set(node.context[self.KEY]['read'] | state_vars_read)
sons = node.sons
if contains_call and node.type in [NodeType.IF, NodeType.IFLOOP]:
if self._filter_if(node):
son = sons[0]
self._explore(son, visited, node)
sons = sons[1:]
else:
son = sons[1]
self._explore(son, visited, node)
sons = [sons[0]]
for son in sons:
self._explore(son, visited) | [
"def",
"_explore",
"(",
"self",
",",
"node",
",",
"visited",
",",
"skip_father",
"=",
"None",
")",
":",
"if",
"node",
"in",
"visited",
":",
"return",
"visited",
"=",
"visited",
"+",
"[",
"node",
"]",
"# First we add the external calls executed in previous nodes",
"# send_eth returns the list of calls sending value",
"# calls returns the list of calls that can callback",
"# read returns the variable read",
"# read_prior_calls returns the variable read prior a call",
"fathers_context",
"=",
"{",
"'send_eth'",
":",
"set",
"(",
")",
",",
"'calls'",
":",
"set",
"(",
")",
",",
"'read'",
":",
"set",
"(",
")",
",",
"'read_prior_calls'",
":",
"{",
"}",
"}",
"for",
"father",
"in",
"node",
".",
"fathers",
":",
"if",
"self",
".",
"KEY",
"in",
"father",
".",
"context",
":",
"fathers_context",
"[",
"'send_eth'",
"]",
"|=",
"set",
"(",
"[",
"s",
"for",
"s",
"in",
"father",
".",
"context",
"[",
"self",
".",
"KEY",
"]",
"[",
"'send_eth'",
"]",
"if",
"s",
"!=",
"skip_father",
"]",
")",
"fathers_context",
"[",
"'calls'",
"]",
"|=",
"set",
"(",
"[",
"c",
"for",
"c",
"in",
"father",
".",
"context",
"[",
"self",
".",
"KEY",
"]",
"[",
"'calls'",
"]",
"if",
"c",
"!=",
"skip_father",
"]",
")",
"fathers_context",
"[",
"'read'",
"]",
"|=",
"set",
"(",
"father",
".",
"context",
"[",
"self",
".",
"KEY",
"]",
"[",
"'read'",
"]",
")",
"fathers_context",
"[",
"'read_prior_calls'",
"]",
"=",
"union_dict",
"(",
"fathers_context",
"[",
"'read_prior_calls'",
"]",
",",
"father",
".",
"context",
"[",
"self",
".",
"KEY",
"]",
"[",
"'read_prior_calls'",
"]",
")",
"# Exclude path that dont bring further information",
"if",
"node",
"in",
"self",
".",
"visited_all_paths",
":",
"if",
"all",
"(",
"call",
"in",
"self",
".",
"visited_all_paths",
"[",
"node",
"]",
"[",
"'calls'",
"]",
"for",
"call",
"in",
"fathers_context",
"[",
"'calls'",
"]",
")",
":",
"if",
"all",
"(",
"send",
"in",
"self",
".",
"visited_all_paths",
"[",
"node",
"]",
"[",
"'send_eth'",
"]",
"for",
"send",
"in",
"fathers_context",
"[",
"'send_eth'",
"]",
")",
":",
"if",
"all",
"(",
"read",
"in",
"self",
".",
"visited_all_paths",
"[",
"node",
"]",
"[",
"'read'",
"]",
"for",
"read",
"in",
"fathers_context",
"[",
"'read'",
"]",
")",
":",
"if",
"dict_are_equal",
"(",
"self",
".",
"visited_all_paths",
"[",
"node",
"]",
"[",
"'read_prior_calls'",
"]",
",",
"fathers_context",
"[",
"'read_prior_calls'",
"]",
")",
":",
"return",
"else",
":",
"self",
".",
"visited_all_paths",
"[",
"node",
"]",
"=",
"{",
"'send_eth'",
":",
"set",
"(",
")",
",",
"'calls'",
":",
"set",
"(",
")",
",",
"'read'",
":",
"set",
"(",
")",
",",
"'read_prior_calls'",
":",
"{",
"}",
"}",
"self",
".",
"visited_all_paths",
"[",
"node",
"]",
"[",
"'send_eth'",
"]",
"=",
"set",
"(",
"self",
".",
"visited_all_paths",
"[",
"node",
"]",
"[",
"'send_eth'",
"]",
"|",
"fathers_context",
"[",
"'send_eth'",
"]",
")",
"self",
".",
"visited_all_paths",
"[",
"node",
"]",
"[",
"'calls'",
"]",
"=",
"set",
"(",
"self",
".",
"visited_all_paths",
"[",
"node",
"]",
"[",
"'calls'",
"]",
"|",
"fathers_context",
"[",
"'calls'",
"]",
")",
"self",
".",
"visited_all_paths",
"[",
"node",
"]",
"[",
"'read'",
"]",
"=",
"set",
"(",
"self",
".",
"visited_all_paths",
"[",
"node",
"]",
"[",
"'read'",
"]",
"|",
"fathers_context",
"[",
"'read'",
"]",
")",
"self",
".",
"visited_all_paths",
"[",
"node",
"]",
"[",
"'read_prior_calls'",
"]",
"=",
"union_dict",
"(",
"self",
".",
"visited_all_paths",
"[",
"node",
"]",
"[",
"'read_prior_calls'",
"]",
",",
"fathers_context",
"[",
"'read_prior_calls'",
"]",
")",
"node",
".",
"context",
"[",
"self",
".",
"KEY",
"]",
"=",
"fathers_context",
"state_vars_read",
"=",
"set",
"(",
"node",
".",
"state_variables_read",
")",
"# All the state variables written",
"state_vars_written",
"=",
"set",
"(",
"node",
".",
"state_variables_written",
")",
"slithir_operations",
"=",
"[",
"]",
"# Add the state variables written in internal calls",
"for",
"internal_call",
"in",
"node",
".",
"internal_calls",
":",
"# Filter to Function, as internal_call can be a solidity call",
"if",
"isinstance",
"(",
"internal_call",
",",
"Function",
")",
":",
"state_vars_written",
"|=",
"set",
"(",
"internal_call",
".",
"all_state_variables_written",
"(",
")",
")",
"state_vars_read",
"|=",
"set",
"(",
"internal_call",
".",
"all_state_variables_read",
"(",
")",
")",
"slithir_operations",
"+=",
"internal_call",
".",
"all_slithir_operations",
"(",
")",
"contains_call",
"=",
"False",
"node",
".",
"context",
"[",
"self",
".",
"KEY",
"]",
"[",
"'written'",
"]",
"=",
"set",
"(",
"state_vars_written",
")",
"if",
"self",
".",
"_can_callback",
"(",
"node",
".",
"irs",
"+",
"slithir_operations",
")",
":",
"node",
".",
"context",
"[",
"self",
".",
"KEY",
"]",
"[",
"'calls'",
"]",
"=",
"set",
"(",
"node",
".",
"context",
"[",
"self",
".",
"KEY",
"]",
"[",
"'calls'",
"]",
"|",
"{",
"node",
"}",
")",
"node",
".",
"context",
"[",
"self",
".",
"KEY",
"]",
"[",
"'read_prior_calls'",
"]",
"[",
"node",
"]",
"=",
"set",
"(",
"node",
".",
"context",
"[",
"self",
".",
"KEY",
"]",
"[",
"'read_prior_calls'",
"]",
".",
"get",
"(",
"node",
",",
"set",
"(",
")",
")",
"|",
"node",
".",
"context",
"[",
"self",
".",
"KEY",
"]",
"[",
"'read'",
"]",
"|",
"state_vars_read",
")",
"contains_call",
"=",
"True",
"if",
"self",
".",
"_can_send_eth",
"(",
"node",
".",
"irs",
"+",
"slithir_operations",
")",
":",
"node",
".",
"context",
"[",
"self",
".",
"KEY",
"]",
"[",
"'send_eth'",
"]",
"=",
"set",
"(",
"node",
".",
"context",
"[",
"self",
".",
"KEY",
"]",
"[",
"'send_eth'",
"]",
"|",
"{",
"node",
"}",
")",
"node",
".",
"context",
"[",
"self",
".",
"KEY",
"]",
"[",
"'read'",
"]",
"=",
"set",
"(",
"node",
".",
"context",
"[",
"self",
".",
"KEY",
"]",
"[",
"'read'",
"]",
"|",
"state_vars_read",
")",
"sons",
"=",
"node",
".",
"sons",
"if",
"contains_call",
"and",
"node",
".",
"type",
"in",
"[",
"NodeType",
".",
"IF",
",",
"NodeType",
".",
"IFLOOP",
"]",
":",
"if",
"self",
".",
"_filter_if",
"(",
"node",
")",
":",
"son",
"=",
"sons",
"[",
"0",
"]",
"self",
".",
"_explore",
"(",
"son",
",",
"visited",
",",
"node",
")",
"sons",
"=",
"sons",
"[",
"1",
":",
"]",
"else",
":",
"son",
"=",
"sons",
"[",
"1",
"]",
"self",
".",
"_explore",
"(",
"son",
",",
"visited",
",",
"node",
")",
"sons",
"=",
"[",
"sons",
"[",
"0",
"]",
"]",
"for",
"son",
"in",
"sons",
":",
"self",
".",
"_explore",
"(",
"son",
",",
"visited",
")"
] | Explore the CFG and look for re-entrancy
Heuristic: There is a re-entrancy if a state variable is written
after an external call
node.context will contains the external calls executed
It contains the calls executed in father nodes
if node.context is not empty, and variables are written, a re-entrancy is possible | [
"Explore",
"the",
"CFG",
"and",
"look",
"for",
"re",
"-",
"entrancy",
"Heuristic",
":",
"There",
"is",
"a",
"re",
"-",
"entrancy",
"if",
"a",
"state",
"variable",
"is",
"written",
"after",
"an",
"external",
"call"
] | python | train |
cmbruns/pyopenvr | src/openvr/__init__.py | https://github.com/cmbruns/pyopenvr/blob/68395d26bb3df6ab1f0f059c38d441f962938be6/src/openvr/__init__.py#L4680-L4688 | def getOverlayColor(self, ulOverlayHandle):
"""Gets the color tint of the overlay quad."""
fn = self.function_table.getOverlayColor
pfRed = c_float()
pfGreen = c_float()
pfBlue = c_float()
result = fn(ulOverlayHandle, byref(pfRed), byref(pfGreen), byref(pfBlue))
return result, pfRed.value, pfGreen.value, pfBlue.value | [
"def",
"getOverlayColor",
"(",
"self",
",",
"ulOverlayHandle",
")",
":",
"fn",
"=",
"self",
".",
"function_table",
".",
"getOverlayColor",
"pfRed",
"=",
"c_float",
"(",
")",
"pfGreen",
"=",
"c_float",
"(",
")",
"pfBlue",
"=",
"c_float",
"(",
")",
"result",
"=",
"fn",
"(",
"ulOverlayHandle",
",",
"byref",
"(",
"pfRed",
")",
",",
"byref",
"(",
"pfGreen",
")",
",",
"byref",
"(",
"pfBlue",
")",
")",
"return",
"result",
",",
"pfRed",
".",
"value",
",",
"pfGreen",
".",
"value",
",",
"pfBlue",
".",
"value"
] | Gets the color tint of the overlay quad. | [
"Gets",
"the",
"color",
"tint",
"of",
"the",
"overlay",
"quad",
"."
] | python | train |
cloud-custodian/cloud-custodian | c7n/log.py | https://github.com/cloud-custodian/cloud-custodian/blob/52ef732eb3d7bc939d1579faf519314814695c08/c7n/log.py#L100-L121 | def emit(self, message):
"""Send logs"""
# We're sending messages asynchronously, bubble to caller when
# we've detected an error on the message. This isn't great,
# but options once we've gone async without a deferred/promise
# aren't great.
if self.transport and self.transport.error:
raise self.transport.error
# Sanity safety, people do like to recurse by attaching to
# root log :-(
if message.name.startswith('boto'):
return
msg = self.format_message(message)
if not self.transport:
self.start_transports()
self.buf.append(msg)
self.flush_buffers(
(message.created - self.last_seen >= self.batch_interval))
self.last_seen = message.created | [
"def",
"emit",
"(",
"self",
",",
"message",
")",
":",
"# We're sending messages asynchronously, bubble to caller when",
"# we've detected an error on the message. This isn't great,",
"# but options once we've gone async without a deferred/promise",
"# aren't great.",
"if",
"self",
".",
"transport",
"and",
"self",
".",
"transport",
".",
"error",
":",
"raise",
"self",
".",
"transport",
".",
"error",
"# Sanity safety, people do like to recurse by attaching to",
"# root log :-(",
"if",
"message",
".",
"name",
".",
"startswith",
"(",
"'boto'",
")",
":",
"return",
"msg",
"=",
"self",
".",
"format_message",
"(",
"message",
")",
"if",
"not",
"self",
".",
"transport",
":",
"self",
".",
"start_transports",
"(",
")",
"self",
".",
"buf",
".",
"append",
"(",
"msg",
")",
"self",
".",
"flush_buffers",
"(",
"(",
"message",
".",
"created",
"-",
"self",
".",
"last_seen",
">=",
"self",
".",
"batch_interval",
")",
")",
"self",
".",
"last_seen",
"=",
"message",
".",
"created"
] | Send logs | [
"Send",
"logs"
] | python | train |
mitsei/dlkit | dlkit/handcar/repository/objects.py | https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/handcar/repository/objects.py#L580-L598 | def set_public_domain(self, public_domain=None):
"""Sets the public domain flag.
:param public_domain: the public domain status
:type public_domain: ``boolean``
:raise: ``NoAccess`` -- ``Metadata.isReadOnly()`` is ``true``
*compliance: mandatory -- This method must be implemented.*
"""
if public_domain is None:
raise NullArgument()
metadata = Metadata(**settings.METADATA['public_domain'])
if metadata.is_read_only():
raise NoAccess()
if self._is_valid_input(public_domain, metadata, array=False):
self._my_map['publicDomain'] = public_domain
else:
raise InvalidArgument() | [
"def",
"set_public_domain",
"(",
"self",
",",
"public_domain",
"=",
"None",
")",
":",
"if",
"public_domain",
"is",
"None",
":",
"raise",
"NullArgument",
"(",
")",
"metadata",
"=",
"Metadata",
"(",
"*",
"*",
"settings",
".",
"METADATA",
"[",
"'public_domain'",
"]",
")",
"if",
"metadata",
".",
"is_read_only",
"(",
")",
":",
"raise",
"NoAccess",
"(",
")",
"if",
"self",
".",
"_is_valid_input",
"(",
"public_domain",
",",
"metadata",
",",
"array",
"=",
"False",
")",
":",
"self",
".",
"_my_map",
"[",
"'publicDomain'",
"]",
"=",
"public_domain",
"else",
":",
"raise",
"InvalidArgument",
"(",
")"
] | Sets the public domain flag.
:param public_domain: the public domain status
:type public_domain: ``boolean``
:raise: ``NoAccess`` -- ``Metadata.isReadOnly()`` is ``true``
*compliance: mandatory -- This method must be implemented.* | [
"Sets",
"the",
"public",
"domain",
"flag",
"."
] | python | train |
dbcli/cli_helpers | cli_helpers/tabular_output/output_formatter.py | https://github.com/dbcli/cli_helpers/blob/3ebd891ac0c02bad061182dbcb54a47fb21980ae/cli_helpers/tabular_output/output_formatter.py#L156-L159 | def _get_column_types(self, data):
"""Get a list of the data types for each column in *data*."""
columns = list(zip_longest(*data))
return [self._get_column_type(column) for column in columns] | [
"def",
"_get_column_types",
"(",
"self",
",",
"data",
")",
":",
"columns",
"=",
"list",
"(",
"zip_longest",
"(",
"*",
"data",
")",
")",
"return",
"[",
"self",
".",
"_get_column_type",
"(",
"column",
")",
"for",
"column",
"in",
"columns",
"]"
] | Get a list of the data types for each column in *data*. | [
"Get",
"a",
"list",
"of",
"the",
"data",
"types",
"for",
"each",
"column",
"in",
"*",
"data",
"*",
"."
] | python | test |
PmagPy/PmagPy | programs/fishrot.py | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/programs/fishrot.py#L8-L63 | def main():
"""
NAME
fishrot.py
DESCRIPTION
generates set of Fisher distributed data from specified distribution
SYNTAX
fishrot.py [-h][-i][command line options]
OPTIONS
-h prints help message and quits
-i for interactive entry
-k kappa specify kappa, default is 20
-n N specify N, default is 100
-D D specify mean Dec, default is 0
-I I specify mean Inc, default is 90
where:
kappa: fisher distribution concentration parameter
N: number of directions desired
OUTPUT
dec, inc
"""
N,kappa,D,I=100,20.,0.,90.
if len(sys.argv)!=0 and '-h' in sys.argv:
print(main.__doc__)
sys.exit()
elif '-i' in sys.argv:
ans=input(' Kappa: ')
kappa=float(ans)
ans=input(' N: ')
N=int(ans)
ans=input(' Mean Dec: ')
D=float(ans)
ans=input(' Mean Inc: ')
I=float(ans)
else:
if '-k' in sys.argv:
ind=sys.argv.index('-k')
kappa=float(sys.argv[ind+1])
if '-n' in sys.argv:
ind=sys.argv.index('-n')
N=int(sys.argv[ind+1])
if '-D' in sys.argv:
ind=sys.argv.index('-D')
D=float(sys.argv[ind+1])
if '-I' in sys.argv:
ind=sys.argv.index('-I')
I=float(sys.argv[ind+1])
for k in range(N):
dec,inc= pmag.fshdev(kappa) # send kappa to fshdev
drot,irot=pmag.dodirot(dec,inc,D,I)
print('%7.1f %7.1f ' % (drot,irot)) | [
"def",
"main",
"(",
")",
":",
"N",
",",
"kappa",
",",
"D",
",",
"I",
"=",
"100",
",",
"20.",
",",
"0.",
",",
"90.",
"if",
"len",
"(",
"sys",
".",
"argv",
")",
"!=",
"0",
"and",
"'-h'",
"in",
"sys",
".",
"argv",
":",
"print",
"(",
"main",
".",
"__doc__",
")",
"sys",
".",
"exit",
"(",
")",
"elif",
"'-i'",
"in",
"sys",
".",
"argv",
":",
"ans",
"=",
"input",
"(",
"' Kappa: '",
")",
"kappa",
"=",
"float",
"(",
"ans",
")",
"ans",
"=",
"input",
"(",
"' N: '",
")",
"N",
"=",
"int",
"(",
"ans",
")",
"ans",
"=",
"input",
"(",
"' Mean Dec: '",
")",
"D",
"=",
"float",
"(",
"ans",
")",
"ans",
"=",
"input",
"(",
"' Mean Inc: '",
")",
"I",
"=",
"float",
"(",
"ans",
")",
"else",
":",
"if",
"'-k'",
"in",
"sys",
".",
"argv",
":",
"ind",
"=",
"sys",
".",
"argv",
".",
"index",
"(",
"'-k'",
")",
"kappa",
"=",
"float",
"(",
"sys",
".",
"argv",
"[",
"ind",
"+",
"1",
"]",
")",
"if",
"'-n'",
"in",
"sys",
".",
"argv",
":",
"ind",
"=",
"sys",
".",
"argv",
".",
"index",
"(",
"'-n'",
")",
"N",
"=",
"int",
"(",
"sys",
".",
"argv",
"[",
"ind",
"+",
"1",
"]",
")",
"if",
"'-D'",
"in",
"sys",
".",
"argv",
":",
"ind",
"=",
"sys",
".",
"argv",
".",
"index",
"(",
"'-D'",
")",
"D",
"=",
"float",
"(",
"sys",
".",
"argv",
"[",
"ind",
"+",
"1",
"]",
")",
"if",
"'-I'",
"in",
"sys",
".",
"argv",
":",
"ind",
"=",
"sys",
".",
"argv",
".",
"index",
"(",
"'-I'",
")",
"I",
"=",
"float",
"(",
"sys",
".",
"argv",
"[",
"ind",
"+",
"1",
"]",
")",
"for",
"k",
"in",
"range",
"(",
"N",
")",
":",
"dec",
",",
"inc",
"=",
"pmag",
".",
"fshdev",
"(",
"kappa",
")",
"# send kappa to fshdev",
"drot",
",",
"irot",
"=",
"pmag",
".",
"dodirot",
"(",
"dec",
",",
"inc",
",",
"D",
",",
"I",
")",
"print",
"(",
"'%7.1f %7.1f '",
"%",
"(",
"drot",
",",
"irot",
")",
")"
] | NAME
fishrot.py
DESCRIPTION
generates set of Fisher distributed data from specified distribution
SYNTAX
fishrot.py [-h][-i][command line options]
OPTIONS
-h prints help message and quits
-i for interactive entry
-k kappa specify kappa, default is 20
-n N specify N, default is 100
-D D specify mean Dec, default is 0
-I I specify mean Inc, default is 90
where:
kappa: fisher distribution concentration parameter
N: number of directions desired
OUTPUT
dec, inc | [
"NAME",
"fishrot",
".",
"py"
] | python | train |
mickbad/mblibs | mblibs/fast.py | https://github.com/mickbad/mblibs/blob/c1f423ef107c94e2ab6bd253e9148f6056e0ef75/mblibs/fast.py#L529-L531 | def debug(self, text):
""" Ajout d'un message de log de type DEBUG """
self.logger.debug("{}{}".format(self.message_prefix, text)) | [
"def",
"debug",
"(",
"self",
",",
"text",
")",
":",
"self",
".",
"logger",
".",
"debug",
"(",
"\"{}{}\"",
".",
"format",
"(",
"self",
".",
"message_prefix",
",",
"text",
")",
")"
] | Ajout d'un message de log de type DEBUG | [
"Ajout",
"d",
"un",
"message",
"de",
"log",
"de",
"type",
"DEBUG"
] | python | train |