repo | path | url | code | docstring | language | partition | avg_line_len
---|---|---|---|---|---|---|---
google/openhtf
|
openhtf/util/logs.py
|
https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/util/logs.py#L188-L193
|
def log_once(log_func, msg, *args, **kwargs):
""""Logs a message only once."""
if msg not in _LOG_ONCE_SEEN:
log_func(msg, *args, **kwargs)
# Key on the message, ignoring args. This should fit most use cases.
_LOG_ONCE_SEEN.add(msg)
|
Logs a message only once.
|
python
|
train
| 40.5 |
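A minimal usage sketch of `log_once`, restating the module-level `_LOG_ONCE_SEEN` set the function keys on:

import logging

_LOG_ONCE_SEEN = set()

def log_once(log_func, msg, *args, **kwargs):
    """Logs a message only once."""
    if msg not in _LOG_ONCE_SEEN:
        log_func(msg, *args, **kwargs)
        # Key on the message, ignoring args.
        _LOG_ONCE_SEEN.add(msg)

logging.basicConfig(level=logging.INFO, format='%(message)s')
log_once(logging.info, 'falling back to default configuration')
log_once(logging.info, 'falling back to default configuration')  # suppressed: already seen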
log2timeline/dfvfs
|
examples/source_analyzer.py
|
https://github.com/log2timeline/dfvfs/blob/2b3ccd115f9901d89f383397d4a1376a873c83c4/examples/source_analyzer.py#L283-L337
|
def Main():
"""The main program function.
Returns:
bool: True if successful or False if not.
"""
argument_parser = argparse.ArgumentParser(description=(
'Calculates a message digest hash for every file in a directory or '
'storage media image.'))
argument_parser.add_argument(
'source', nargs='?', action='store', metavar='image.raw', default=None,
help=('path of the directory or filename of a storage media image '
'containing the file.'))
argument_parser.add_argument(
'--no-auto-recurse', '--no_auto_recurse', dest='no_auto_recurse',
action='store_true', default=False, help=(
'Indicate that the source scanner should not auto-recurse.'))
options = argument_parser.parse_args()
if not options.source:
print('Source value is missing.')
print('')
argument_parser.print_help()
print('')
return False
logging.basicConfig(
level=logging.INFO, format='[%(levelname)s] %(message)s')
output_writer = StdoutWriter()
if not output_writer.Open():
print('Unable to open output writer.')
print('')
return False
return_value = True
source_analyzer = SourceAnalyzer(auto_recurse=not options.no_auto_recurse)
try:
source_analyzer.Analyze(options.source, output_writer)
print('Completed.')
except KeyboardInterrupt:
return_value = False
print('Aborted by user.')
output_writer.Close()
return return_value
|
The main program function.
Returns:
bool: True if successful or False if not.
|
python
|
train
| 25.490909 |
zimeon/iiif
|
iiif/static.py
|
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/static.py#L331-L368
|
def setup_destination(self):
"""Setup output directory based on self.dst and self.identifier.
Returns the output directory name on success, raises an exception on
failure.
"""
# Do we have a separate identifier?
if (not self.identifier):
# No separate identifier specified, split off the last path segment
# of the source name, strip the extension to get the identifier
self.identifier = os.path.splitext(os.path.split(self.src)[1])[0]
# Done if dryrun, else setup self.dst first
if (self.dryrun):
return
if (not self.dst):
raise IIIFStaticError("No destination directory specified!")
dst = self.dst
if (os.path.isdir(dst)):
# Exists, OK
pass
elif (os.path.isfile(dst)):
raise IIIFStaticError(
"Can't write to directory %s: a file of that name exists" % dst)
else:
os.makedirs(dst)
# Second, create identifier based subdir if necessary
outd = os.path.join(dst, self.identifier)
if (os.path.isdir(outd)):
# Nothing for now, perhaps should delete?
self.logger.warning(
"Output directory %s already exists, adding/updating files" % outd)
pass
elif (os.path.isfile(outd)):
raise IIIFStaticError(
"Can't write to directory %s: a file of that name exists" % outd)
else:
os.makedirs(outd)
self.logger.debug("Output directory %s" % outd)
|
Setup output directory based on self.dst and self.identifier.
Returns the output directory name on success, raises an exception on
failure.
|
python
|
train
| 41.105263 |
hendrix/hendrix
|
hendrix/options.py
|
https://github.com/hendrix/hendrix/blob/175af011a7e5822b772bfec0e11a46466bb8688d/hendrix/options.py#L7-L26
|
def cleanOptions(options):
"""
Takes an options dict and returns a tuple containing the daemonize boolean,
the reload boolean, and the parsed list of cleaned options as would be
expected to be passed to hx
"""
_reload = options.pop('reload')
dev = options.pop('dev')
opts = []
store_true = [
'--nocache', '--global_cache', '--quiet', '--loud'
]
store_false = []
for key, value in options.items():
key = '--' + key
if (key in store_true and value) or (key in store_false and not value):
opts += [key, ]
elif value:
opts += [key, str(value)]
return _reload, opts
|
Takes an options dict and returns a tuple containing the daemonize boolean,
the reload boolean, and the parsed list of cleaned options as would be
expected to be passed to hx
|
python
|
train
| 32.45 |
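A hypothetical call showing the return shape of `cleanOptions`; the `http_port` key is invented for the example:

def cleanOptions(options):
    _reload = options.pop('reload')
    dev = options.pop('dev')
    opts = []
    store_true = ['--nocache', '--global_cache', '--quiet', '--loud']
    store_false = []
    for key, value in options.items():
        key = '--' + key
        if (key in store_true and value) or (key in store_false and not value):
            opts += [key]
        elif value:
            opts += [key, str(value)]
    return _reload, opts

# 'http_port' is a made-up option key for illustration.
_reload, opts = cleanOptions({'reload': True, 'dev': False, 'quiet': True, 'http_port': 8000})
print(_reload, opts)  # True ['--quiet', '--http_port', '8000']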
log2timeline/plaso
|
plaso/parsers/esedb_plugins/interface.py
|
https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/parsers/esedb_plugins/interface.py#L99-L123
|
def _ConvertValueBinaryDataToUBInt64(self, value):
"""Converts a binary data value into an integer.
Args:
value (bytes): binary data value containing an unsigned 64-bit big-endian
integer.
Returns:
int: integer representation of binary data value or None if value is
not set.
Raises:
ParseError: if the integer value cannot be parsed.
"""
if not value:
return None
integer_map = self._GetDataTypeMap('uint64be')
try:
return self._ReadStructureFromByteStream(value, 0, integer_map)
except (ValueError, errors.ParseError) as exception:
raise errors.ParseError(
'Unable to parse integer value with error: {0!s}'.format(
exception))
|
Converts a binary data value into an integer.
Args:
value (bytes): binary data value containing an unsigned 64-bit big-endian
integer.
Returns:
int: integer representation of binary data value or None if value is
not set.
Raises:
ParseError: if the integer value cannot be parsed.
|
python
|
train
| 29 |
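Outside plaso's dtfabric data-type maps, the same decode (unsigned 64-bit big-endian, None for empty input) can be sketched with the standard library's `struct` module:

import struct

def to_ubint64(value):
    """Decode bytes as an unsigned 64-bit big-endian integer, or None if empty."""
    if not value:
        return None
    # struct.error is raised if value is not exactly 8 bytes.
    return struct.unpack('>Q', value)[0]

print(to_ubint64(b'\x00\x00\x00\x00\x00\x00\x01\x00'))  # 256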
pyblish/pyblish-maya
|
pyblish_maya/lib.py
|
https://github.com/pyblish/pyblish-maya/blob/75db8b5d8de9d53ae95e74195a788b5f6db2cb5f/pyblish_maya/lib.py#L26-L50
|
def setup(menu=True):
"""Setup integration
Registers Pyblish for Maya plug-ins and appends an item to the File-menu
Attributes:
console (bool): Display console with GUI
port (int, optional): Port from which to start looking for an
available port to connect with Pyblish QML, default
provided by Pyblish Integration.
"""
if self._has_been_setup:
teardown()
register_plugins()
register_host()
if menu:
add_to_filemenu()
self._has_menu = True
self._has_been_setup = True
print("Pyblish loaded successfully.")
|
Setup integration
Registers Pyblish for Maya plug-ins and appends an item to the File-menu
Attributes:
console (bool): Display console with GUI
port (int, optional): Port from which to start looking for an
available port to connect with Pyblish QML, default
provided by Pyblish Integration.
|
python
|
test
| 23.68 |
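The bare `self` in this module-level function is not a typo in context: pyblish modules commonly bind the module object itself to `self` so module-level functions can share state. A minimal sketch of that idiom (an assumption about how `lib.py` sets it up, not a verbatim excerpt):

import sys

self = sys.modules[__name__]  # the module object doubles as a state holder
self._has_been_setup = False

def setup():
    if self._has_been_setup:
        print('already set up')
        return
    self._has_been_setup = True
    print('set up once')

setup()  # set up once
setup()  # already set up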
apache/incubator-mxnet
|
python/mxnet/contrib/onnx/mx2onnx/_op_translations.py
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/contrib/onnx/mx2onnx/_op_translations.py#L148-L159
|
def create_basic_op_node(op_name, node, kwargs):
"""Helper function to create a basic operator
node that doesn't contain op specific attrs"""
name, input_nodes, _ = get_inputs(node, kwargs)
node = onnx.helper.make_node(
op_name,
input_nodes,
[name],
name=name
)
return [node]
|
Helper function to create a basic operator
node that doesn't contain op specific attrs
|
python
|
train
| 26.75 |
tensorlayer/tensorlayer
|
examples/text_classification/tutorial_imdb_fasttext.py
|
https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/examples/text_classification/tutorial_imdb_fasttext.py#L112-L120
|
def load_and_preprocess_imdb_data(n_gram=None):
"""Load IMDb data and augment with hashed n-gram features."""
X_train, y_train, X_test, y_test = tl.files.load_imdb_dataset(nb_words=VOCAB_SIZE)
if n_gram is not None:
X_train = np.array([augment_with_ngrams(x, VOCAB_SIZE, N_BUCKETS, n=n_gram) for x in X_train])
X_test = np.array([augment_with_ngrams(x, VOCAB_SIZE, N_BUCKETS, n=n_gram) for x in X_test])
return X_train, y_train, X_test, y_test
|
Load IMDb data and augment with hashed n-gram features.
|
python
|
valid
| 52.111111 |
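`augment_with_ngrams` is defined earlier in the tutorial; a plausible sketch of the hashing-trick idea behind it (the n-gram hashing shown here is an assumption, not the tutorial's exact code):

def augment_with_ngrams(unigrams, unigram_vocab_size, n_buckets, n=2):
    """Append hashed n-gram ids after the unigram ids (hashing trick)."""
    grams = zip(*[unigrams[i:] for i in range(n)])  # consecutive n-grams
    hashed = [unigram_vocab_size + hash(g) % n_buckets for g in grams]
    return list(unigrams) + hashed

# Bucket ids land in [100, 110); exact values depend on PYTHONHASHSEED.
print(augment_with_ngrams([3, 7, 7, 2], unigram_vocab_size=100, n_buckets=10))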
Azure/blobxfer
|
blobxfer/models/crypto.py
|
https://github.com/Azure/blobxfer/blob/3eccbe7530cc6a20ab2d30f9e034b6f021817f34/blobxfer/models/crypto.py#L169-L188
|
def create_new_metadata(self, rsa_public_key):
# type: (EncryptionMetadata,
# cryptography.hazmat.primitives.asymmetric.rsa.RSAPublicKey)
# -> None
"""Create new metadata entries for encryption (upload)
:param EncryptionMetadata self: this
:param cryptography.hazmat.primitives.asymmetric.rsa.RSAPublicKey:
rsa public key
"""
self._rsa_public_key = rsa_public_key
self._symkey = os.urandom(
blobxfer.operations.crypto._AES256_KEYLENGTH_BYTES)
self._signkey = os.urandom(
blobxfer.operations.crypto._AES256_KEYLENGTH_BYTES)
self.content_encryption_iv = os.urandom(AES256_BLOCKSIZE_BYTES)
self.encryption_agent = EncryptionAgent(
encryption_algorithm=EncryptionMetadata._ENCRYPTION_ALGORITHM,
protocol=EncryptionMetadata._ENCRYPTION_PROTOCOL_VERSION,
)
self.encryption_mode = EncryptionMetadata._ENCRYPTION_MODE
|
Create new metadata entries for encryption (upload)
:param EncryptionMetadata self: this
:param cryptography.hazmat.primitives.asymmetric.rsa.RSAPublicKey:
rsa public key
|
python
|
train
| 48.8 |
ArduPilot/MAVProxy
|
MAVProxy/modules/mavproxy_gasheli.py
|
https://github.com/ArduPilot/MAVProxy/blob/f50bdeff33064876f7dc8dc4683d278ff47f75d5/MAVProxy/modules/mavproxy_gasheli.py#L83-L96
|
def idle_task(self):
'''run periodic tasks'''
if self.starting_motor:
if self.gasheli_settings.ignition_disable_time > 0:
elapsed = time.time() - self.motor_t1
if elapsed >= self.gasheli_settings.ignition_disable_time:
self.module('rc').set_override_chan(self.gasheli_settings.ignition_chan-1, self.old_override)
self.starting_motor = False
if self.stopping_motor:
elapsed = time.time() - self.motor_t1
if elapsed >= self.gasheli_settings.ignition_stop_time:
# hand back control to RC
self.module('rc').set_override_chan(self.gasheli_settings.ignition_chan-1, self.old_override)
self.stopping_motor = False
|
run periodic tasks
|
python
|
train
| 55.214286 |
Esri/ArcREST
|
src/arcrest/manageorg/_community.py
|
https://github.com/Esri/ArcREST/blob/ab240fde2b0200f61d4a5f6df033516e53f2f416/src/arcrest/manageorg/_community.py#L987-L1004
|
def applications(self):
"""returns all the group applications to join"""
url = self._url + "/applications"
params = {"f" : "json"}
res = self._get(url=url,
param_dict=params,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
items = []
if "applications" in res.keys():
for apps in res['applications']:
items.append(
self.Application(url="%s/%s" % (self._url, apps['username']),
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
)
return items
|
returns all the group applications to join
|
python
|
train
| 43.777778 |
tox-dev/tox-venv
|
src/tox_venv/hooks.py
|
https://github.com/tox-dev/tox-venv/blob/e740a96c81e076d850065e6a8444ae1cd833468b/src/tox_venv/hooks.py#L8-L69
|
def real_python3(python, version_dict):
"""
Determine the path of the real python executable, which is then used for
venv creation. This is necessary, because an active virtualenv environment
will cause venv creation to malfunction. By getting the path of the real
executable, this issue is bypassed.
The provided `python` path may be either:
- A real python executable
- A virtual python executable (with venv)
- A virtual python executable (with virtualenv)
If the virtual environment was created with virtualenv, the `sys` module
will have a `real_prefix` attribute, which points to the directory where
the real python files are installed.
If `real_prefix` is not present, the environment was not created with
virtualenv, and the python executable is safe to use.
The `version_dict` is used for attempting to derive the real executable
path. This is necessary when the name of the virtual python executable
does not exist in the Python installation's directory. For example, if
the `basepython` is explicitly set to `python`, tox will use this name
instead of attempting `pythonX.Y`. In many cases, Python 3 installations
do not contain an executable named `python`, so we attempt to derive this
from the version info. e.g., `python3.6.5`, `python3.6`, then `python3`.
"""
args = [python, '-c', 'import sys; print(sys.real_prefix)']
# get python prefix
try:
output = subprocess.check_output(args, stderr=subprocess.STDOUT)
prefix = output.decode('UTF-8').strip()
except subprocess.CalledProcessError:
# process fails, implies *not* in active virtualenv
return python
# determine absolute binary path
if os.name == 'nt': # pragma: no cover
paths = [os.path.join(prefix, os.path.basename(python))]
else:
paths = [os.path.join(prefix, 'bin', python) for python in [
os.path.basename(python),
'python%(major)d.%(minor)d.%(micro)d' % version_dict,
'python%(major)d.%(minor)d' % version_dict,
'python%(major)d' % version_dict,
'python',
]]
for path in paths:
if os.path.isfile(path):
break
else:
path = None
# the executable path must exist
assert path, '\n- '.join(['Could not find interpreter. Attempted:'] + paths)
v1 = subprocess.check_output([python, '--version'])
v2 = subprocess.check_output([path, '--version'])
assert v1 == v2, 'Expected versions to match (%s != %s).' % (v1, v2)
return path
|
Determine the path of the real python executable, which is then used for
venv creation. This is necessary, because an active virtualenv environment
will cause venv creation to malfunction. By getting the path of the real
executable, this issue is bypassed.
The provided `python` path may be either:
- A real python executable
- A virtual python executable (with venv)
- A virtual python executable (with virtualenv)
If the virtual environment was created with virtualenv, the `sys` module
will have a `real_prefix` attribute, which points to the directory where
the real python files are installed.
If `real_prefix` is not present, the environment was not created with
virtualenv, and the python executable is safe to use.
The `version_dict` is used for attempting to derive the real executable
path. This is necessary when the name of the virtual python executable
does not exist in the Python installation's directory. For example, if
the `basepython` is explicitly set to `python`, tox will use this name
instead of attempting `pythonX.Y`. In many cases, Python 3 installations
do not contain an executable named `python`, so we attempt to derive this
from the version info. e.g., `python3.6.5`, `python3.6`, then `python3`.
|
python
|
train
| 40.935484 |
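The virtualenv check at the heart of this can be run standalone; note that `sys.real_prefix` is only set by virtualenv (pre-20 versions), while `venv` environments are detected by comparing `sys.base_prefix` to `sys.prefix`:

import sys

def in_virtual_environment():
    # virtualenv (<20) sets sys.real_prefix; venv leaves base_prefix differing.
    return hasattr(sys, 'real_prefix') or sys.base_prefix != sys.prefix

print(in_virtual_environment())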
pyapi-gitlab/pyapi-gitlab
|
gitlab/__init__.py
|
https://github.com/pyapi-gitlab/pyapi-gitlab/blob/f74b6fb5c13cecae9524997847e928905cc60acf/gitlab/__init__.py#L188-L205
|
def delete_project(self, id):
"""
Delete a project from the Gitlab server
Gitlab currently returns a Boolean True if deleted, and as such we return an
empty Dictionary
:param id: The ID of the project or NAMESPACE/PROJECT_NAME
:return: Dictionary
:raise: HttpError: If invalid response returned
"""
url = '/projects/{id}'.format(id=id)
response = self.delete(url)
if response is True:
return {}
else:
return response
|
Delete a project from the Gitlab server
Gitlab currently returns a Boolean True if deleted, and as such we return an
empty Dictionary
:param id: The ID of the project or NAMESPACE/PROJECT_NAME
:return: Dictionary
:raise: HttpError: If invalid response returned
|
python
|
train
| 29.166667 |
dade-ai/snipy
|
snipy/iterflow.py
|
https://github.com/dade-ai/snipy/blob/408520867179f99b3158b57520e2619f3fecd69b/snipy/iterflow.py#L359-L381
|
def shuffle(qsize=1024, iterable=None):
"""
add example
:param qsize:
:param iterable:
:return:
"""
@iterflow
def shuffleit(it):
from random import randrange
q = []
for i, d in enumerate(it):
q.insert(randrange(0, len(q) + 1), d)
if i < qsize:
continue
yield q.pop(randrange(0, len(q)))
while q:
yield q.pop(randrange(0, len(q)))
return shuffleit if iterable is None else shuffleit(iterable)
|
add example
:param qsize:
:param iterable:
:return:
|
python
|
valid
| 21.956522 |
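Stripped of the module's `@iterflow` decorator, the buffered shuffle runs standalone (a small `qsize` here so the effect is visible):

from random import randrange

def buffered_shuffle(it, qsize=4):
    """Yield items in near-random order using a bounded buffer."""
    q = []
    for i, d in enumerate(it):
        q.insert(randrange(0, len(q) + 1), d)
        if i < qsize:
            continue
        yield q.pop(randrange(0, len(q)))
    while q:
        yield q.pop(randrange(0, len(q)))

print(list(buffered_shuffle(range(10))))  # a permutation of 0..9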
SheffieldML/GPyOpt
|
GPyOpt/models/gpmodel.py
|
https://github.com/SheffieldML/GPyOpt/blob/255539dc5927819ca701e44fe3d76cd4864222fa/GPyOpt/models/gpmodel.py#L48-L74
|
def _create_model(self, X, Y):
"""
Creates the model given some input data X and Y.
"""
# --- define kernel
self.input_dim = X.shape[1]
if self.kernel is None:
kern = GPy.kern.Matern52(self.input_dim, variance=1., ARD=self.ARD) #+ GPy.kern.Bias(self.input_dim)
else:
kern = self.kernel
self.kernel = None
# --- define model
noise_var = Y.var()*0.01 if self.noise_var is None else self.noise_var
if not self.sparse:
self.model = GPy.models.GPRegression(X, Y, kernel=kern, noise_var=noise_var)
else:
self.model = GPy.models.SparseGPRegression(X, Y, kernel=kern, num_inducing=self.num_inducing)
# --- restrict variance if exact evaluations of the objective
if self.exact_feval:
self.model.Gaussian_noise.constrain_fixed(1e-6, warning=False)
else:
# --- We make sure we do not get ridiculously small residual noise variance
self.model.Gaussian_noise.constrain_bounded(1e-9, 1e6, warning=False)
|
Creates the model given some input data X and Y.
|
python
|
train
| 39.814815 |
pantsbuild/pants
|
src/python/pants/backend/native/config/environment.py
|
https://github.com/pantsbuild/pants/blob/b72e650da0df685824ffdcc71988b8c282d0962d/src/python/pants/backend/native/config/environment.py#L89-L122
|
def sequence(self, other, exclude_list_fields=None):
"""Return a copy of this object which combines all the fields common to both `self` and `other`.
List fields will be concatenated.
The return type of this method is the type of `self` (or whatever `.copy()` returns), but the
`other` argument can be any `_ExtensibleAlgebraic` instance.
"""
exclude_list_fields = frozenset(exclude_list_fields or [])
overwrite_kwargs = {}
nonexistent_excluded_fields = exclude_list_fields - self._list_fields
if nonexistent_excluded_fields:
raise self.AlgebraicDataError(
"Fields {} to exclude from a sequence() were not found in this object's list fields: {}. "
"This object is {}, the other object is {}."
.format(nonexistent_excluded_fields, self._list_fields, self, other))
shared_list_fields = (self._list_fields
& other._list_fields
- exclude_list_fields)
if not shared_list_fields:
raise self.AlgebraicDataError(
"Objects to sequence have no shared fields after excluding {}. "
"This object is {}, with list fields: {}. "
"The other object is {}, with list fields: {}."
.format(exclude_list_fields, self, self._list_fields, other, other._list_fields))
for list_field_name in shared_list_fields:
lhs_value = getattr(self, list_field_name)
rhs_value = getattr(other, list_field_name)
overwrite_kwargs[list_field_name] = lhs_value + rhs_value
return self.copy(**overwrite_kwargs)
|
Return a copy of this object which combines all the fields common to both `self` and `other`.
List fields will be concatenated.
The return type of this method is the type of `self` (or whatever `.copy()` returns), but the
`other` argument can be any `_ExtensibleAlgebraic` instance.
|
python
|
train
| 45.029412 |
annoviko/pyclustering
|
pyclustering/utils/__init__.py
|
https://github.com/annoviko/pyclustering/blob/98aa0dd89fd36f701668fb1eb29c8fb5662bf7d0/pyclustering/utils/__init__.py#L201-L230
|
def average_neighbor_distance(points, num_neigh):
"""!
@brief Returns the average distance for establishing links between the specified number of nearest neighbors.
@param[in] points (list): Input data, list of points where each point represented by list.
@param[in] num_neigh (uint): Number of neighbors that should be used for distance calculation.
@return (double) Average distance for establishing links between 'num_neigh' neighbors in data set 'points'.
"""
    if num_neigh > len(points) - 1:
        raise NameError('Impossible to calculate average distance to neighbors when number of objects is less than number of neighbors.')
    dist_matrix = [ [ 0.0 for i in range(len(points)) ] for j in range(len(points)) ]
    for i in range(0, len(points), 1):
        for j in range(i + 1, len(points), 1):
            distance = euclidean_distance(points[i], points[j])
            dist_matrix[i][j] = distance
            dist_matrix[j][i] = distance
        dist_matrix[i] = sorted(dist_matrix[i])
    total_distance = 0
    for i in range(0, len(points), 1):
        # start from 0 - first element is distance to itself.
        for j in range(0, num_neigh, 1):
            total_distance += dist_matrix[i][j + 1]
    return (total_distance / (num_neigh * len(points)))
|
!
@brief Returns the average distance for establishing links between the specified number of nearest neighbors.
@param[in] points (list): Input data, list of points where each point represented by list.
@param[in] num_neigh (uint): Number of neighbors that should be used for distance calculation.
@return (double) Average distance for establishing links between 'num_neigh' neighbors in data set 'points'.
|
python
|
valid
| 44.6 |
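A standalone check, assuming `average_neighbor_distance` above is in scope and supplying the `euclidean_distance` helper it calls:

import math

def euclidean_distance(a, b):
    return math.sqrt(sum((x - y) ** 2 for x, y in zip(a, b)))

points = [[0.0, 0.0], [1.0, 0.0], [0.0, 1.0], [1.0, 1.0]]
# Each corner's two nearest neighbours sit at distance 1.0.
print(average_neighbor_distance(points, 2))  # 1.0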
cloudtools/stacker
|
stacker/context.py
|
https://github.com/cloudtools/stacker/blob/ad6013a03a560c46ba3c63c4d153336273e6da5d/stacker/context.py#L186-L204
|
def set_hook_data(self, key, data):
"""Set hook data for the given key.
Args:
key(str): The key to store the hook data in.
data(:class:`collections.Mapping`): A dictionary of data to store,
as returned from a hook.
"""
if not isinstance(data, collections.Mapping):
raise ValueError("Hook (key: %s) data must be an instance of "
"collections.Mapping (a dictionary for "
"example)." % key)
if key in self.hook_data:
raise KeyError("Hook data for key %s already exists, each hook "
"must have a unique data_key.", key)
self.hook_data[key] = data
|
Set hook data for the given key.
Args:
key(str): The key to store the hook data in.
data(:class:`collections.Mapping`): A dictionary of data to store,
as returned from a hook.
|
python
|
train
| 38.157895 |
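The same guard pattern in a minimal form; note that on Python 3.3+ the ABC lives in `collections.abc`, and `collections.Mapping` was removed in 3.10:

from collections.abc import Mapping

hook_data = {}

def set_hook_data(key, data):
    if not isinstance(data, Mapping):
        raise ValueError('Hook (key: %s) data must be a mapping.' % key)
    if key in hook_data:
        raise KeyError('Hook data for key %s already exists.' % key)
    hook_data[key] = data

set_hook_data('vpc', {'id': 'vpc-123'})
print(hook_data)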
artefactual-labs/agentarchives
|
agentarchives/atom/client.py
|
https://github.com/artefactual-labs/agentarchives/blob/af19ade56a90c64069cf46b50972fe72b6f10a45/agentarchives/atom/client.py#L749-L757
|
def delete_record(self, record_id):
"""
Delete a record with record_id.
"""
self._delete(
urljoin(self.base_url, "informationobjects/{}".format(record_id)),
expected_response=204,
)
return {"status": "Deleted"}
|
Delete a record with record_id.
|
python
|
train
| 30.444444 |
bukun/TorCMS
|
torcms/handlers/reply_handler.py
|
https://github.com/bukun/TorCMS/blob/6567c7fe2604a1d646d4570c017840958630ed2b/torcms/handlers/reply_handler.py#L66-L80
|
def get_by_id(self, reply_id):
'''
Get the reply by id.
'''
reply = MReply.get_by_uid(reply_id)
logger.info('get_reply: {0}'.format(reply_id))
self.render('misc/reply/show_reply.html',
reply=reply,
username=reply.user_name,
date=reply.date,
vote=reply.vote,
uid=reply.uid,
userinfo=self.userinfo,
kwd={})
|
Get the reply by id.
|
python
|
train
| 31.933333 |
pantsbuild/pants
|
src/python/pants/build_graph/target.py
|
https://github.com/pantsbuild/pants/blob/b72e650da0df685824ffdcc71988b8c282d0962d/src/python/pants/build_graph/target.py#L685-L736
|
def strict_dependencies(self, dep_context):
"""
:param dep_context: A DependencyContext with configuration for the request.
:return: targets that this target "strictly" depends on. This set of dependencies contains
only directly declared dependencies, with two exceptions:
1) aliases are expanded transitively
2) the strict_dependencies of targets exported targets exported by
strict_dependencies (transitively).
:rtype: list of Target
"""
strict_deps = self._cached_strict_dependencies_map.get(dep_context, None)
if strict_deps is None:
default_predicate = self._closure_dep_predicate({self}, **dep_context.target_closure_kwargs)
# TODO(#5977): this branch needs testing!
if not default_predicate:
def default_predicate(*args, **kwargs):
return True
def dep_predicate(source, dependency):
if not default_predicate(source, dependency):
return False
# Always expand aliases.
if type(source) in dep_context.alias_types:
return True
# Traverse other dependencies if they are exported.
if source._dep_is_exported(dependency):
return True
return False
dep_addresses = [d.address for d in self.dependencies
if default_predicate(self, d)
]
result = self._build_graph.transitive_subgraph_of_addresses_bfs(
addresses=dep_addresses,
dep_predicate=dep_predicate
)
strict_deps = OrderedSet()
for declared in result:
if type(declared) in dep_context.alias_types:
continue
if isinstance(declared, dep_context.types_with_closure):
strict_deps.update(declared.closure(
bfs=True,
**dep_context.target_closure_kwargs))
strict_deps.add(declared)
strict_deps = list(strict_deps)
self._cached_strict_dependencies_map[dep_context] = strict_deps
return strict_deps
|
:param dep_context: A DependencyContext with configuration for the request.
:return: targets that this target "strictly" depends on. This set of dependencies contains
only directly declared dependencies, with two exceptions:
1) aliases are expanded transitively
2) the strict_dependencies of targets exported targets exported by
strict_dependencies (transitively).
:rtype: list of Target
|
python
|
train
| 37.461538 |
unt-libraries/edtf-validate
|
edtf_validate/valid_edtf.py
|
https://github.com/unt-libraries/edtf-validate/blob/d6d63141919a66aea4ff1c31fa0cb8ff744ef9d9/edtf_validate/valid_edtf.py#L254-L261
|
def replace_u_start_month(month):
"""Find the earliest legitimate month."""
month = month.lstrip('-')
if month == 'uu' or month == '0u':
return '01'
if month == 'u0':
return '10'
return month.replace('u', '0')
|
Find the earliest legitimate month.
|
python
|
train
| 29.75 |
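A few examples of the mapping over EDTF month strings:

def replace_u_start_month(month):
    """Find the earliest legitimate month."""
    month = month.lstrip('-')
    if month == 'uu' or month == '0u':
        return '01'
    if month == 'u0':
        return '10'
    return month.replace('u', '0')

for m in ('uu', '0u', 'u0', '1u', '-06'):
    print(m, '->', replace_u_start_month(m))
# uu -> 01, 0u -> 01, u0 -> 10, 1u -> 10, -06 -> 06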
mapillary/mapillary_tools
|
mapillary_tools/exif_read.py
|
https://github.com/mapillary/mapillary_tools/blob/816785e90c589cae6e8e34a5530ce8417d29591c/mapillary_tools/exif_read.py#L136-L148
|
def extract_altitude(self):
'''
Extract altitude
'''
altitude_ref = {
0: 1,
1: -1}
fields = ['GPS GPSAltitude', 'EXIF GPS GPSAltitude']
refs = ['GPS GPSAltitudeRef', 'EXIF GPS GPSAltitudeRef']
altitude, _ = self._extract_alternative_fields(fields, 0, float)
ref = 0 if not any([True for x in refs if x in self.tags]) else [
self.tags[x].values for x in refs if x in self.tags][0][0]
return altitude * altitude_ref[ref]
|
Extract altitude
|
python
|
train
| 39.538462 |
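The dense ref-resolution expression, written out with a minimal stand-in for exifread's tag objects (the `Tag` class here is invented for the example):

class Tag:
    def __init__(self, values):
        self.values = values

tags = {'GPS GPSAltitude': Tag([12.5]), 'GPS GPSAltitudeRef': Tag([1])}
refs = ['GPS GPSAltitudeRef', 'EXIF GPS GPSAltitudeRef']
altitude_ref = {0: 1, 1: -1}

present = [tags[x].values for x in refs if x in tags]
ref = present[0][0] if present else 0   # default 0: above sea level
altitude = 12.5
print(altitude * altitude_ref[ref])     # -12.5 (ref 1 means below sea level)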
StackStorm/pybind
|
pybind/nos/v7_2_0/firmware/peripheral_update/__init__.py
|
https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/nos/v7_2_0/firmware/peripheral_update/__init__.py#L92-L113
|
def _set_microcode(self, v, load=False):
"""
Setter method for microcode, mapped from YANG variable /firmware/peripheral_update/microcode (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_microcode is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_microcode() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=microcode.microcode, is_container='container', presence=False, yang_name="microcode", rest_name="microcode", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Microcode image'}}, namespace='urn:brocade.com:mgmt:brocade-firmware', defining_module='brocade-firmware', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """microcode must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=microcode.microcode, is_container='container', presence=False, yang_name="microcode", rest_name="microcode", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Microcode image'}}, namespace='urn:brocade.com:mgmt:brocade-firmware', defining_module='brocade-firmware', yang_type='container', is_config=True)""",
})
self.__microcode = t
if hasattr(self, '_set'):
self._set()
|
Setter method for microcode, mapped from YANG variable /firmware/peripheral_update/microcode (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_microcode is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_microcode() directly.
|
python
|
train
| 72 |
frejanordsiek/hdf5storage
|
hdf5storage/utilities.py
|
https://github.com/frejanordsiek/hdf5storage/blob/539275141dd3a4efbbbfd9bdb978f3ed59e3f05d/hdf5storage/utilities.py#L1036-L1174
|
def convert_to_numpy_bytes(data, length=None):
""" Decodes data to Numpy UTF-8 econded string (bytes\_).
Decodes `data` to a Numpy UTF-8 encoded string, which is
``numpy.bytes_``, or an array of them in which case it will be ASCII
encoded instead. If it can't be decoded, it is returned as
is. Unsigned integers, Python string types (``str``, ``bytes``), and
``numpy.unicode_`` (UTF-32) are supported.
For an array of unsigned integers, it may be desirable to make an
array with strings of some specified length as opposed to an array
of the same size with each element being a one element string. This
naturally arises when converting strings to unsigned integer types
in the first place, so it needs to be reversible. The `length`
parameter specifies how many to group together into a string
(desired string length). For 1d arrays, this is along its only
dimension. For higher dimensional arrays, it is done along each row
(across columns). So, for a 3x10x5 input array of uints and a
`length` of 5, the output array would be a 3x2x5 of 5 element
strings.
Parameters
----------
data : some type
Data to decode into a Numpy UTF-8 encoded string/s.
length : int or None, optional
The number of consecutive elements (in the case of unsigned
integer `data`) to compose each string in the output array from.
``None`` indicates the full amount for a 1d array or the number
of columns (full length of row) for a higher dimension array.
Returns
-------
b : numpy.bytes\_ or numpy.ndarray of numpy.bytes\_ or data
If `data` can be decoded into a ``numpy.bytes_`` or a
``numpy.ndarray`` of them, the decoded version is returned.
Otherwise, `data` is returned unchanged.
See Also
--------
convert_to_str
convert_to_numpy_str
numpy.bytes\_
"""
# The method of conversion depends on its type.
if isinstance(data, np.bytes_) or (isinstance(data, np.ndarray) \
and data.dtype.char == 'S'):
# It is already an np.bytes_ or array of them, so nothing needs
# to be done.
return data
elif isinstance(data, (bytes, bytearray)):
# Easily converted through constructor.
return np.bytes_(data)
elif (sys.hexversion >= 0x03000000 and isinstance(data, str)) \
or (sys.hexversion < 0x03000000 \
and isinstance(data, unicode)):
return np.bytes_(data.encode('UTF-8'))
elif isinstance(data, (np.uint16, np.uint32)):
# They are single UTF-16 or UTF-32 scalars, and are easily
# converted to a UTF-8 string and then passed through the
# constructor.
return np.bytes_(convert_to_str(data).encode('UTF-8'))
elif isinstance(data, np.uint8):
# It is just the uint8 version of the character, so it just
        # needs to have the dtype essentially changed by having its
# bytes read into ndarray.
return np.ndarray(shape=tuple(), dtype='S1',
buffer=data.flatten().tostring())[()]
elif isinstance(data, np.ndarray) and data.dtype.char == 'U':
# We just need to convert it elementwise.
new_data = np.zeros(shape=data.shape,
dtype='S' + str(data.dtype.itemsize))
for index, x in np.ndenumerate(data):
new_data[index] = np.bytes_(x.encode('UTF-8'))
return new_data
elif isinstance(data, np.ndarray) \
and data.dtype.name in ('uint8', 'uint16', 'uint32'):
# It is an ndarray of some uint type. How it is converted
# depends on its shape. If its shape is just (), then it is just
# a scalar wrapped in an array, which can be converted by
# recursing the scalar value back into this function.
shape = list(data.shape)
if len(shape) == 0:
return convert_to_numpy_bytes(data[()])
# As there are more than one element, it gets a bit more
# complicated. We need to take the subarrays of the specified
# length along columns (1D arrays will be treated as row arrays
# here), each of those converted to an str_ scalar (normal
# string) and stuffed into a new array.
#
# If the length was not given, it needs to be set to full. Then
# the shape of the new array needs to be calculated (divide the
# appropriate dimension, which depends on the number of
        # dimensions).
if len(shape) == 1:
if length is None:
length2 = shape[0]
new_shape = (shape[0],)
else:
length2 = length
new_shape = (shape[0]//length2,)
else:
if length is None:
length2 = shape[-1]
else:
length2 = length
new_shape = copy.deepcopy(shape)
new_shape[-1] //= length2
# The new array can be made as all zeros (nulls) with enough
        # padding to hold everything (dtype='SL' where 'L' is the
# length). It will start out as a 1d array and be reshaped into
# the proper shape later (makes indexing easier).
new_data = np.zeros(shape=(np.prod(new_shape),),
dtype='S'+str(length2))
# With data flattened into a 1d array, we just need to take
# length sized chunks, convert them (if they are uint8 or 16,
# then decode to str first, if they are uint32, put them as an
# input buffer for an ndarray of type 'U').
data = data.flatten()
for i in range(0, new_data.shape[0]):
chunk = data[(i*length2):((i+1)*length2)]
if data.dtype.name == 'uint8':
new_data[i] = np.ndarray(shape=tuple(),
dtype=new_data.dtype,
buffer=chunk.tostring())[()]
else:
new_data[i] = np.bytes_( \
convert_to_str(chunk).encode('UTF-8'))
# Only thing is left is to reshape it.
return new_data.reshape(tuple(new_shape))
else:
# Couldn't figure out what it is, so nothing can be done but
# return it as is.
return data
|
[
"def",
"convert_to_numpy_bytes",
"(",
"data",
",",
"length",
"=",
"None",
")",
":",
"# The method of conversion depends on its type.",
"if",
"isinstance",
"(",
"data",
",",
"np",
".",
"bytes_",
")",
"or",
"(",
"isinstance",
"(",
"data",
",",
"np",
".",
"ndarray",
")",
"and",
"data",
".",
"dtype",
".",
"char",
"==",
"'S'",
")",
":",
"# It is already an np.bytes_ or array of them, so nothing needs",
"# to be done.",
"return",
"data",
"elif",
"isinstance",
"(",
"data",
",",
"(",
"bytes",
",",
"bytearray",
")",
")",
":",
"# Easily converted through constructor.",
"return",
"np",
".",
"bytes_",
"(",
"data",
")",
"elif",
"(",
"sys",
".",
"hexversion",
">=",
"0x03000000",
"and",
"isinstance",
"(",
"data",
",",
"str",
")",
")",
"or",
"(",
"sys",
".",
"hexversion",
"<",
"0x03000000",
"and",
"isinstance",
"(",
"data",
",",
"unicode",
")",
")",
":",
"return",
"np",
".",
"bytes_",
"(",
"data",
".",
"encode",
"(",
"'UTF-8'",
")",
")",
"elif",
"isinstance",
"(",
"data",
",",
"(",
"np",
".",
"uint16",
",",
"np",
".",
"uint32",
")",
")",
":",
"# They are single UTF-16 or UTF-32 scalars, and are easily",
"# converted to a UTF-8 string and then passed through the",
"# constructor.",
"return",
"np",
".",
"bytes_",
"(",
"convert_to_str",
"(",
"data",
")",
".",
"encode",
"(",
"'UTF-8'",
")",
")",
"elif",
"isinstance",
"(",
"data",
",",
"np",
".",
"uint8",
")",
":",
"# It is just the uint8 version of the character, so it just",
"# needs to be have the dtype essentially changed by having its",
"# bytes read into ndarray.",
"return",
"np",
".",
"ndarray",
"(",
"shape",
"=",
"tuple",
"(",
")",
",",
"dtype",
"=",
"'S1'",
",",
"buffer",
"=",
"data",
".",
"flatten",
"(",
")",
".",
"tostring",
"(",
")",
")",
"[",
"(",
")",
"]",
"elif",
"isinstance",
"(",
"data",
",",
"np",
".",
"ndarray",
")",
"and",
"data",
".",
"dtype",
".",
"char",
"==",
"'U'",
":",
"# We just need to convert it elementwise.",
"new_data",
"=",
"np",
".",
"zeros",
"(",
"shape",
"=",
"data",
".",
"shape",
",",
"dtype",
"=",
"'S'",
"+",
"str",
"(",
"data",
".",
"dtype",
".",
"itemsize",
")",
")",
"for",
"index",
",",
"x",
"in",
"np",
".",
"ndenumerate",
"(",
"data",
")",
":",
"new_data",
"[",
"index",
"]",
"=",
"np",
".",
"bytes_",
"(",
"x",
".",
"encode",
"(",
"'UTF-8'",
")",
")",
"return",
"new_data",
"elif",
"isinstance",
"(",
"data",
",",
"np",
".",
"ndarray",
")",
"and",
"data",
".",
"dtype",
".",
"name",
"in",
"(",
"'uint8'",
",",
"'uint16'",
",",
"'uint32'",
")",
":",
"# It is an ndarray of some uint type. How it is converted",
"# depends on its shape. If its shape is just (), then it is just",
"# a scalar wrapped in an array, which can be converted by",
"# recursing the scalar value back into this function.",
"shape",
"=",
"list",
"(",
"data",
".",
"shape",
")",
"if",
"len",
"(",
"shape",
")",
"==",
"0",
":",
"return",
"convert_to_numpy_bytes",
"(",
"data",
"[",
"(",
")",
"]",
")",
"# As there are more than one element, it gets a bit more",
"# complicated. We need to take the subarrays of the specified",
"# length along columns (1D arrays will be treated as row arrays",
"# here), each of those converted to an str_ scalar (normal",
"# string) and stuffed into a new array.",
"#",
"# If the length was not given, it needs to be set to full. Then",
"# the shape of the new array needs to be calculated (divide the",
"# appropriate dimension, which depends on the number of",
"# dimentions).",
"if",
"len",
"(",
"shape",
")",
"==",
"1",
":",
"if",
"length",
"is",
"None",
":",
"length2",
"=",
"shape",
"[",
"0",
"]",
"new_shape",
"=",
"(",
"shape",
"[",
"0",
"]",
",",
")",
"else",
":",
"length2",
"=",
"length",
"new_shape",
"=",
"(",
"shape",
"[",
"0",
"]",
"//",
"length2",
",",
")",
"else",
":",
"if",
"length",
"is",
"None",
":",
"length2",
"=",
"shape",
"[",
"-",
"1",
"]",
"else",
":",
"length2",
"=",
"length",
"new_shape",
"=",
"copy",
".",
"deepcopy",
"(",
"shape",
")",
"new_shape",
"[",
"-",
"1",
"]",
"//=",
"length2",
"# The new array can be made as all zeros (nulls) with enough",
"# padding to hold everything (dtype='UL' where 'L' is the",
"# length). It will start out as a 1d array and be reshaped into",
"# the proper shape later (makes indexing easier).",
"new_data",
"=",
"np",
".",
"zeros",
"(",
"shape",
"=",
"(",
"np",
".",
"prod",
"(",
"new_shape",
")",
",",
")",
",",
"dtype",
"=",
"'S'",
"+",
"str",
"(",
"length2",
")",
")",
"# With data flattened into a 1d array, we just need to take",
"# length sized chunks, convert them (if they are uint8 or 16,",
"# then decode to str first, if they are uint32, put them as an",
"# input buffer for an ndarray of type 'U').",
"data",
"=",
"data",
".",
"flatten",
"(",
")",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"new_data",
".",
"shape",
"[",
"0",
"]",
")",
":",
"chunk",
"=",
"data",
"[",
"(",
"i",
"*",
"length2",
")",
":",
"(",
"(",
"i",
"+",
"1",
")",
"*",
"length2",
")",
"]",
"if",
"data",
".",
"dtype",
".",
"name",
"==",
"'uint8'",
":",
"new_data",
"[",
"i",
"]",
"=",
"np",
".",
"ndarray",
"(",
"shape",
"=",
"tuple",
"(",
")",
",",
"dtype",
"=",
"new_data",
".",
"dtype",
",",
"buffer",
"=",
"chunk",
".",
"tostring",
"(",
")",
")",
"[",
"(",
")",
"]",
"else",
":",
"new_data",
"[",
"i",
"]",
"=",
"np",
".",
"bytes_",
"(",
"convert_to_str",
"(",
"chunk",
")",
".",
"encode",
"(",
"'UTF-8'",
")",
")",
"# Only thing is left is to reshape it.",
"return",
"new_data",
".",
"reshape",
"(",
"tuple",
"(",
"new_shape",
")",
")",
"else",
":",
"# Couldn't figure out what it is, so nothing can be done but",
"# return it as is.",
"return",
"data"
] |
Decodes data to Numpy UTF-8 encoded string (bytes\_).
Decodes `data` to a Numpy UTF-8 encoded string, which is
``numpy.bytes_``, or an array of them in which case it will be ASCII
encoded instead. If it can't be decoded, it is returned as
is. Unsigned integers, Python string types (``str``, ``bytes``), and
``numpy.unicode_`` (UTF-32) are supported.
For an array of unsigned integers, it may be desirable to make an
array with strings of some specified length as opposed to an array
of the same size with each element being a one element string. This
naturally arises when converting strings to unsigned integer types
in the first place, so it needs to be reversible. The `length`
parameter specifies how many to group together into a string
(desired string length). For 1d arrays, this is along its only
dimension. For higher dimensional arrays, it is done along each row
(across columns). So, for a 3x10x5 input array of uints and a
`length` of 5, the output array would be a 3x2x5 of 5 element
strings.
Parameters
----------
data : some type
Data to decode into a Numpy UTF-8 encoded string/s.
length : int or None, optional
The number of consecutive elements (in the case of unsigned
integer `data`) to compose each string in the output array from.
``None`` indicates the full amount for a 1d array or the number
of columns (full length of row) for a higher dimension array.
Returns
-------
b : numpy.bytes\_ or numpy.ndarray of numpy.bytes\_ or data
If `data` can be decoded into a ``numpy.bytes_`` or a
``numpy.ndarray`` of them, the decoded version is returned.
Otherwise, `data` is returned unchanged.
See Also
--------
convert_to_str
convert_to_numpy_str
numpy.bytes\_
|
[
"Decodes",
"data",
"to",
"Numpy",
"UTF",
"-",
"8",
"econded",
"string",
"(",
"bytes",
"\\",
"_",
")",
"."
] |
python
|
train
| 44.517986 |
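A minimal usage sketch of the uint-grouping behavior described in the docstring above (a sketch under assumptions: hdf5storage and NumPy are installed, the sample bytes are illustrative, and because the library internally calls ndarray.tostring(), a pre-2.0 NumPy may be required):

import numpy as np
from hdf5storage.utilities import convert_to_numpy_bytes

# Ten uint8 code points grouped into two 5-byte strings via length=5.
codes = np.frombuffer(b'helloworld', dtype=np.uint8)
print(convert_to_numpy_bytes(codes, length=5))
# expected: [b'hello' b'world'] with dtype '|S5'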
klmitch/framer
|
framer/framers.py
|
https://github.com/klmitch/framer/blob/bd34cee9737793dab61d1d8973930b64bd08acb4/framer/framers.py#L745-L762
|
def to_bytes(self, frame, state):
"""
Convert a single frame into bytes that can be transmitted on
the stream.
:param frame: The frame to convert. Should be the same type
of object returned by ``to_frame()``.
:param state: An instance of ``FramerState``. This object may
be used to track information across calls to the
method.
:returns: Bytes that may be transmitted on the stream.
"""
# Generate and return the frame
return (self.begin +
self.nop.join(six.binary_type(frame).split(self.prefix)) +
self.end)
|
[
"def",
"to_bytes",
"(",
"self",
",",
"frame",
",",
"state",
")",
":",
"# Generate and return the frame",
"return",
"(",
"self",
".",
"begin",
"+",
"self",
".",
"nop",
".",
"join",
"(",
"six",
".",
"binary_type",
"(",
"frame",
")",
".",
"split",
"(",
"self",
".",
"prefix",
")",
")",
"+",
"self",
".",
"end",
")"
] |
Convert a single frame into bytes that can be transmitted on
the stream.
:param frame: The frame to convert. Should be the same type
of object returned by ``to_frame()``.
:param state: An instance of ``FramerState``. This object may
be used to track information across calls to the
method.
:returns: Bytes that may be transmitted on the stream.
|
[
"Convert",
"a",
"single",
"frame",
"into",
"bytes",
"that",
"can",
"be",
"transmitted",
"on",
"the",
"stream",
"."
] |
python
|
train
| 37 |
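A standalone sketch of the escape-and-delimit idea behind to_bytes; DummyFramer and its byte values are hypothetical stand-ins, not framer's real classes (assumes six is installed):

import six

class DummyFramer(object):
    begin, end = b'\x01', b'\x02'       # frame delimiters (assumed values)
    prefix, nop = b'\x1b', b'\x1b\x00'  # escape byte and its replacement (assumed values)

    def to_bytes(self, frame, state=None):
        # Same expression as above: replace each escape byte, then wrap in delimiters.
        return (self.begin +
                self.nop.join(six.binary_type(frame).split(self.prefix)) +
                self.end)

print(DummyFramer().to_bytes(b'ab\x1bcd'))  # b'\x01ab\x1b\x00cd\x02'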
ryanpetrello/python-zombie
|
zombie/proxy/client.py
|
https://github.com/ryanpetrello/python-zombie/blob/638916572d8ee5ebbdb2dcfc5000a952e99f280f/zombie/proxy/client.py#L25-L36
|
def encode_args(args, extra=False):
"""
Encode a list of arguments
"""
if not args:
return ''
methodargs = ', '.join([encode(a) for a in args])
if extra:
methodargs += ', '
return methodargs
|
[
"def",
"encode_args",
"(",
"args",
",",
"extra",
"=",
"False",
")",
":",
"if",
"not",
"args",
":",
"return",
"''",
"methodargs",
"=",
"', '",
".",
"join",
"(",
"[",
"encode",
"(",
"a",
")",
"for",
"a",
"in",
"args",
"]",
")",
"if",
"extra",
":",
"methodargs",
"+=",
"', '",
"return",
"methodargs"
] |
Encode a list of arguments
|
[
"Encode",
"a",
"list",
"of",
"arguments"
] |
python
|
train
| 18.75 |
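A minimal usage sketch (assumes python-zombie is installed; the expected output assumes the encode() helper JSON-encodes each argument, which is not shown in this record):

from zombie.proxy.client import encode_args

print(encode_args(['#nav', 10]))          # e.g. '"#nav", 10'
print(encode_args(['#nav'], extra=True))  # e.g. '"#nav", '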
pgmpy/pgmpy
|
pgmpy/factors/continuous/ContinuousFactor.py
|
https://github.com/pgmpy/pgmpy/blob/9381a66aba3c3871d3ccd00672b148d17d63239e/pgmpy/factors/continuous/ContinuousFactor.py#L151-L180
|
def discretize(self, method, *args, **kwargs):
"""
Discretizes the continuous distribution into discrete
probability masses using various methods.
Parameters
----------
method : A Discretizer Class from pgmpy.discretize
*args, **kwargs:
The parameters to be given to the Discretizer Class.
Returns
-------
    An n-D array or a DiscreteFactor object according to the discretization
method used.
Examples
--------
>>> import numpy as np
>>> from scipy.special import beta
>>> from pgmpy.factors.continuous import ContinuousFactor
>>> from pgmpy.factors.continuous import RoundingDiscretizer
>>> def dirichlet_pdf(x, y):
... return (np.power(x, 1) * np.power(y, 2)) / beta(x, y)
>>> dirichlet_factor = ContinuousFactor(['x', 'y'], dirichlet_pdf)
>>> dirichlet_factor.discretize(RoundingDiscretizer, low=1, high=2, cardinality=5)
# TODO: finish this
"""
return method(self, *args, **kwargs).get_discrete_values()
|
[
"def",
"discretize",
"(",
"self",
",",
"method",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"method",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
".",
"get_discrete_values",
"(",
")"
] |
Discretizes the continuous distribution into discrete
probability masses using various methods.
Parameters
----------
method : A Discretizer Class from pgmpy.discretize
*args, **kwargs:
The parameters to be given to the Discretizer Class.
Returns
-------
An n-D array or a DiscreteFactor object according to the discretization
method used.
Examples
--------
>>> import numpy as np
>>> from scipy.special import beta
>>> from pgmpy.factors.continuous import ContinuousFactor
>>> from pgmpy.factors.continuous import RoundingDiscretizer
>>> def dirichlet_pdf(x, y):
... return (np.power(x, 1) * np.power(y, 2)) / beta(x, y)
>>> dirichlet_factor = ContinuousFactor(['x', 'y'], dirichlet_pdf)
>>> dirichlet_factor.discretize(RoundingDiscretizer, low=1, high=2, cardinality=5)
# TODO: finish this
|
[
"Discretizes",
"the",
"continuous",
"distribution",
"into",
"discrete",
"probability",
"masses",
"using",
"various",
"methods",
"."
] |
python
|
train
| 36.233333 |
rameshg87/pyremotevbox
|
pyremotevbox/ZSI/wstools/WSDLTools.py
|
https://github.com/rameshg87/pyremotevbox/blob/123dffff27da57c8faa3ac1dd4c68b1cf4558b1a/pyremotevbox/ZSI/wstools/WSDLTools.py#L1391-L1404
|
def GetWSAActionInput(operation):
"""Find wsa:Action attribute, and return value or the default."""
attr = operation.input.action
if attr is not None:
return attr
portType = operation.getPortType()
targetNamespace = portType.getTargetNamespace()
ptName = portType.name
msgName = operation.input.name
if not msgName:
msgName = operation.name + 'Request'
if targetNamespace.endswith('/'):
return '%s%s/%s' %(targetNamespace, ptName, msgName)
return '%s/%s/%s' %(targetNamespace, ptName, msgName)
|
[
"def",
"GetWSAActionInput",
"(",
"operation",
")",
":",
"attr",
"=",
"operation",
".",
"input",
".",
"action",
"if",
"attr",
"is",
"not",
"None",
":",
"return",
"attr",
"portType",
"=",
"operation",
".",
"getPortType",
"(",
")",
"targetNamespace",
"=",
"portType",
".",
"getTargetNamespace",
"(",
")",
"ptName",
"=",
"portType",
".",
"name",
"msgName",
"=",
"operation",
".",
"input",
".",
"name",
"if",
"not",
"msgName",
":",
"msgName",
"=",
"operation",
".",
"name",
"+",
"'Request'",
"if",
"targetNamespace",
".",
"endswith",
"(",
"'/'",
")",
":",
"return",
"'%s%s/%s'",
"%",
"(",
"targetNamespace",
",",
"ptName",
",",
"msgName",
")",
"return",
"'%s/%s/%s'",
"%",
"(",
"targetNamespace",
",",
"ptName",
",",
"msgName",
")"
] |
Find wsa:Action attribute, and return value or the default.
|
[
"Find",
"wsa",
":",
"Action",
"attribute",
"and",
"return",
"value",
"or",
"the",
"default",
"."
] |
python
|
train
| 38.857143 |
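A standalone sketch of just the default-action fallback at the end of GetWSAActionInput (no WSDL objects needed; the namespace and names are illustrative):

targetNamespace = 'http://example.org/stock'
ptName, msgName = 'StockQuotePortType', 'GetLastTradePriceRequest'
if targetNamespace.endswith('/'):
    action = '%s%s/%s' % (targetNamespace, ptName, msgName)
else:
    action = '%s/%s/%s' % (targetNamespace, ptName, msgName)
print(action)  # http://example.org/stock/StockQuotePortType/GetLastTradePriceRequest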
mar10/wsgidav
|
wsgidav/util.py
|
https://github.com/mar10/wsgidav/blob/cec0d84222fc24bea01be1cea91729001963f172/wsgidav/util.py#L956-L996
|
def get_etag(file_path):
"""Return a strong Entity Tag for a (file)path.
http://www.webdav.org/specs/rfc4918.html#etag
Returns the following as entity tags::
Non-file - md5(pathname)
Win32 - md5(pathname)-lastmodifiedtime-filesize
Others - inode-lastmodifiedtime-filesize
"""
# (At least on Vista) os.path.exists returns False, if a file name contains
# special characters, even if it is correctly UTF-8 encoded.
# So we convert to unicode. On the other hand, md5() needs a byte string.
if compat.is_bytes(file_path):
unicodeFilePath = to_unicode_safe(file_path)
else:
unicodeFilePath = file_path
file_path = file_path.encode("utf8")
if not os.path.isfile(unicodeFilePath):
return md5(file_path).hexdigest()
if sys.platform == "win32":
statresults = os.stat(unicodeFilePath)
return (
md5(file_path).hexdigest()
+ "-"
+ str(statresults[stat.ST_MTIME])
+ "-"
+ str(statresults[stat.ST_SIZE])
)
else:
statresults = os.stat(unicodeFilePath)
return (
str(statresults[stat.ST_INO])
+ "-"
+ str(statresults[stat.ST_MTIME])
+ "-"
+ str(statresults[stat.ST_SIZE])
)
|
[
"def",
"get_etag",
"(",
"file_path",
")",
":",
"# (At least on Vista) os.path.exists returns False, if a file name contains",
"# special characters, even if it is correctly UTF-8 encoded.",
"# So we convert to unicode. On the other hand, md5() needs a byte string.",
"if",
"compat",
".",
"is_bytes",
"(",
"file_path",
")",
":",
"unicodeFilePath",
"=",
"to_unicode_safe",
"(",
"file_path",
")",
"else",
":",
"unicodeFilePath",
"=",
"file_path",
"file_path",
"=",
"file_path",
".",
"encode",
"(",
"\"utf8\"",
")",
"if",
"not",
"os",
".",
"path",
".",
"isfile",
"(",
"unicodeFilePath",
")",
":",
"return",
"md5",
"(",
"file_path",
")",
".",
"hexdigest",
"(",
")",
"if",
"sys",
".",
"platform",
"==",
"\"win32\"",
":",
"statresults",
"=",
"os",
".",
"stat",
"(",
"unicodeFilePath",
")",
"return",
"(",
"md5",
"(",
"file_path",
")",
".",
"hexdigest",
"(",
")",
"+",
"\"-\"",
"+",
"str",
"(",
"statresults",
"[",
"stat",
".",
"ST_MTIME",
"]",
")",
"+",
"\"-\"",
"+",
"str",
"(",
"statresults",
"[",
"stat",
".",
"ST_SIZE",
"]",
")",
")",
"else",
":",
"statresults",
"=",
"os",
".",
"stat",
"(",
"unicodeFilePath",
")",
"return",
"(",
"str",
"(",
"statresults",
"[",
"stat",
".",
"ST_INO",
"]",
")",
"+",
"\"-\"",
"+",
"str",
"(",
"statresults",
"[",
"stat",
".",
"ST_MTIME",
"]",
")",
"+",
"\"-\"",
"+",
"str",
"(",
"statresults",
"[",
"stat",
".",
"ST_SIZE",
"]",
")",
")"
] |
Return a strong Entity Tag for a (file)path.
http://www.webdav.org/specs/rfc4918.html#etag
Returns the following as entity tags::
Non-file - md5(pathname)
Win32 - md5(pathname)-lastmodifiedtime-filesize
Others - inode-lastmodifiedtime-filesize
|
[
"Return",
"a",
"strong",
"Entity",
"Tag",
"for",
"a",
"(",
"file",
")",
"path",
"."
] |
python
|
valid
| 31.536585 |
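A minimal usage sketch (assumes WsgiDAV is installed with this module layout; the exact tag depends on platform and file metadata):

from wsgidav.util import get_etag

print(get_etag(__file__))         # e.g. '9126479-1553182008-2141' (inode-mtime-size on POSIX)
print(get_etag('/no/such/path'))  # falls back to md5 of the path, since it is not a file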
Clinical-Genomics/scout
|
scout/parse/hpo.py
|
https://github.com/Clinical-Genomics/scout/blob/90a551e2e1653a319e654c2405c2866f93d0ebb9/scout/parse/hpo.py#L23-L40
|
def parse_hpo_gene(hpo_line):
"""Parse hpo gene information
Args:
        hpo_line(str): An iterable with hpo phenotype lines
    Returns:
hpo_info(dict)
"""
if not len(hpo_line) > 3:
return {}
hpo_line = hpo_line.rstrip().split('\t')
hpo_info = {}
hpo_info['hgnc_symbol'] = hpo_line[1]
hpo_info['description'] = hpo_line[2]
hpo_info['hpo_id'] = hpo_line[3]
return hpo_info
|
[
"def",
"parse_hpo_gene",
"(",
"hpo_line",
")",
":",
"if",
"not",
"len",
"(",
"hpo_line",
")",
">",
"3",
":",
"return",
"{",
"}",
"hpo_line",
"=",
"hpo_line",
".",
"rstrip",
"(",
")",
".",
"split",
"(",
"'\\t'",
")",
"hpo_info",
"=",
"{",
"}",
"hpo_info",
"[",
"'hgnc_symbol'",
"]",
"=",
"hpo_line",
"[",
"1",
"]",
"hpo_info",
"[",
"'description'",
"]",
"=",
"hpo_line",
"[",
"2",
"]",
"hpo_info",
"[",
"'hpo_id'",
"]",
"=",
"hpo_line",
"[",
"3",
"]",
"return",
"hpo_info"
] |
Parse hpo gene information
Args:
hpo_line(str): An iterable with hpo phenotype lines
Returns:
hpo_info(dict)
|
[
"Parse",
"hpo",
"gene",
"information",
"Args",
":",
"hpo_line",
"(",
"str",
")",
":",
"A",
"iterable",
"with",
"hpo",
"phenotype",
"lines",
"Yields",
":",
"hpo_info",
"(",
"dict",
")"
] |
python
|
test
| 24.5 |
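A minimal sketch with a made-up tab-separated HPO genes-to-phenotype line (assumes scout is installed; the gene and term are illustrative):

from scout.parse.hpo import parse_hpo_gene

line = '2\tADK\tadenosine kinase\tHP:0001250\n'
print(parse_hpo_gene(line))
# {'hgnc_symbol': 'ADK', 'description': 'adenosine kinase', 'hpo_id': 'HP:0001250'}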
jeongyoonlee/Kaggler
|
kaggler/data_io.py
|
https://github.com/jeongyoonlee/Kaggler/blob/20661105b61958dc9a3c529c1d3b2313ab23ae32/kaggler/data_io.py#L68-L77
|
def save_libsvm(X, y, path):
"""Save data as a LibSVM file.
Args:
X (numpy or scipy sparse matrix): Data matrix
y (numpy array): Target vector.
        path (str): Path to the LibSVM file to save data.
"""
dump_svmlight_file(X, y, path, zero_based=False)
|
[
"def",
"save_libsvm",
"(",
"X",
",",
"y",
",",
"path",
")",
":",
"dump_svmlight_file",
"(",
"X",
",",
"y",
",",
"path",
",",
"zero_based",
"=",
"False",
")"
] |
Save data as a LibSVM file.
Args:
X (numpy or scipy sparse matrix): Data matrix
y (numpy array): Target vector.
path (str): Path to the LibSVM file to save data.
|
[
"Save",
"data",
"as",
"a",
"LibSVM",
"file",
"."
] |
python
|
train
| 27.6 |
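A minimal usage sketch (assumes kaggler and scikit-learn are installed; the matrix values are illustrative):

import numpy as np
from kaggler.data_io import save_libsvm

X = np.array([[1.0, 0.0], [0.0, 2.0]])
y = np.array([0, 1])
save_libsvm(X, y, 'sample.libsvm')  # writes one-based feature indices (zero_based=False)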
dls-controls/pymalcolm
|
malcolm/core/process.py
|
https://github.com/dls-controls/pymalcolm/blob/80ea667e4da26365a6cebc0249f52fdc744bd983/malcolm/core/process.py#L152-L172
|
def spawn(self, function, *args, **kwargs):
# type: (Callable[..., Any], *Any, **Any) -> Spawned
"""Runs the function in a worker thread, returning a Result object
Args:
function: Function to run
args: Positional arguments to run the function with
kwargs: Keyword arguments to run the function with
Returns:
Spawned: Something you can call wait(timeout) on to see when it's
finished executing
"""
assert self.state != STOPPED, "Can't spawn when process stopped"
spawned = Spawned(function, args, kwargs)
self._spawned.append(spawned)
self._spawn_count += 1
# Filter out things that are ready to avoid memory leaks
if self._spawn_count > SPAWN_CLEAR_COUNT:
self._clear_spawn_list()
return spawned
|
[
"def",
"spawn",
"(",
"self",
",",
"function",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"# type: (Callable[..., Any], *Any, **Any) -> Spawned",
"assert",
"self",
".",
"state",
"!=",
"STOPPED",
",",
"\"Can't spawn when process stopped\"",
"spawned",
"=",
"Spawned",
"(",
"function",
",",
"args",
",",
"kwargs",
")",
"self",
".",
"_spawned",
".",
"append",
"(",
"spawned",
")",
"self",
".",
"_spawn_count",
"+=",
"1",
"# Filter out things that are ready to avoid memory leaks",
"if",
"self",
".",
"_spawn_count",
">",
"SPAWN_CLEAR_COUNT",
":",
"self",
".",
"_clear_spawn_list",
"(",
")",
"return",
"spawned"
] |
Runs the function in a worker thread, returning a Result object
Args:
function: Function to run
args: Positional arguments to run the function with
kwargs: Keyword arguments to run the function with
Returns:
Spawned: Something you can call wait(timeout) on to see when it's
finished executing
|
[
"Runs",
"the",
"function",
"in",
"a",
"worker",
"thread",
"returning",
"a",
"Result",
"object"
] |
python
|
train
| 40.428571 |
edx/XBlock
|
xblock/runtime.py
|
https://github.com/edx/XBlock/blob/368bf46e2c0ee69bbb21817f428c4684936e18ee/xblock/runtime.py#L638-L651
|
def get_block(self, usage_id, for_parent=None):
"""
Create an XBlock instance in this runtime.
The `usage_id` is used to find the XBlock class and data.
"""
def_id = self.id_reader.get_definition_id(usage_id)
try:
block_type = self.id_reader.get_block_type(def_id)
except NoSuchDefinition:
raise NoSuchUsage(repr(usage_id))
keys = ScopeIds(self.user_id, block_type, def_id, usage_id)
block = self.construct_xblock(block_type, keys, for_parent=for_parent)
return block
|
[
"def",
"get_block",
"(",
"self",
",",
"usage_id",
",",
"for_parent",
"=",
"None",
")",
":",
"def_id",
"=",
"self",
".",
"id_reader",
".",
"get_definition_id",
"(",
"usage_id",
")",
"try",
":",
"block_type",
"=",
"self",
".",
"id_reader",
".",
"get_block_type",
"(",
"def_id",
")",
"except",
"NoSuchDefinition",
":",
"raise",
"NoSuchUsage",
"(",
"repr",
"(",
"usage_id",
")",
")",
"keys",
"=",
"ScopeIds",
"(",
"self",
".",
"user_id",
",",
"block_type",
",",
"def_id",
",",
"usage_id",
")",
"block",
"=",
"self",
".",
"construct_xblock",
"(",
"block_type",
",",
"keys",
",",
"for_parent",
"=",
"for_parent",
")",
"return",
"block"
] |
Create an XBlock instance in this runtime.
The `usage_id` is used to find the XBlock class and data.
|
[
"Create",
"an",
"XBlock",
"instance",
"in",
"this",
"runtime",
"."
] |
python
|
train
| 39.928571 |
jd/tenacity
|
tenacity/before.py
|
https://github.com/jd/tenacity/blob/354c40b7dc8e728c438668100dd020b65c84dfc6/tenacity/before.py#L24-L32
|
def before_log(logger, log_level):
"""Before call strategy that logs to some logger the attempt."""
def log_it(retry_state):
logger.log(log_level,
"Starting call to '%s', this is the %s time calling it.",
_utils.get_callback_name(retry_state.fn),
_utils.to_ordinal(retry_state.attempt_number))
return log_it
|
[
"def",
"before_log",
"(",
"logger",
",",
"log_level",
")",
":",
"def",
"log_it",
"(",
"retry_state",
")",
":",
"logger",
".",
"log",
"(",
"log_level",
",",
"\"Starting call to '%s', this is the %s time calling it.\"",
",",
"_utils",
".",
"get_callback_name",
"(",
"retry_state",
".",
"fn",
")",
",",
"_utils",
".",
"to_ordinal",
"(",
"retry_state",
".",
"attempt_number",
")",
")",
"return",
"log_it"
] |
Before call strategy that logs to some logger the attempt.
|
[
"Before",
"call",
"strategy",
"that",
"logs",
"to",
"some",
"logger",
"the",
"attempt",
"."
] |
python
|
train
| 41.888889 |
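A minimal usage sketch (assumes tenacity is installed; the exact logged callback name comes from get_callback_name and may vary):

import logging
from tenacity import retry, stop_after_attempt, before_log

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

@retry(stop=stop_after_attempt(3), before=before_log(logger, logging.INFO))
def flaky():
    raise RuntimeError('still failing')

# Calling flaky() would log lines such as:
# "Starting call to '<module>.flaky', this is the 1st time calling it."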
evhub/coconut
|
coconut/compiler/compiler.py
|
https://github.com/evhub/coconut/blob/ff97177344e7604e89a0a98a977a87ed2a56fc6d/coconut/compiler/compiler.py#L1944-L1946
|
def async_comp_check(self, original, loc, tokens):
"""Check for Python 3.6 async comprehension."""
return self.check_py("36", "async comprehension", original, loc, tokens)
|
[
"def",
"async_comp_check",
"(",
"self",
",",
"original",
",",
"loc",
",",
"tokens",
")",
":",
"return",
"self",
".",
"check_py",
"(",
"\"36\"",
",",
"\"async comprehension\"",
",",
"original",
",",
"loc",
",",
"tokens",
")"
] |
Check for Python 3.6 async comprehension.
|
[
"Check",
"for",
"Python",
"3",
".",
"6",
"async",
"comprehension",
"."
] |
python
|
train
| 61.666667 |
qualisys/qualisys_python_sdk
|
examples/basic_example.py
|
https://github.com/qualisys/qualisys_python_sdk/blob/127d7eeebc2b38b5cafdfa5d1d0198437fedd274/examples/basic_example.py#L11-L17
|
def on_packet(packet):
""" Callback function that is called everytime a data packet arrives from QTM """
print("Framenumber: {}".format(packet.framenumber))
header, markers = packet.get_3d_markers()
print("Component info: {}".format(header))
for marker in markers:
print("\t", marker)
|
[
"def",
"on_packet",
"(",
"packet",
")",
":",
"print",
"(",
"\"Framenumber: {}\"",
".",
"format",
"(",
"packet",
".",
"framenumber",
")",
")",
"header",
",",
"markers",
"=",
"packet",
".",
"get_3d_markers",
"(",
")",
"print",
"(",
"\"Component info: {}\"",
".",
"format",
"(",
"header",
")",
")",
"for",
"marker",
"in",
"markers",
":",
"print",
"(",
"\"\\t\"",
",",
"marker",
")"
] |
Callback function that is called every time a data packet arrives from QTM
|
[
"Callback",
"function",
"that",
"is",
"called",
"everytime",
"a",
"data",
"packet",
"arrives",
"from",
"QTM"
] |
python
|
valid
| 43.714286 |
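A minimal sketch of wiring the callback into a stream, following the pattern of the rest of this example script (assumes the qtm package is installed and a QTM server is reachable; the address is illustrative):

import asyncio
import qtm

async def setup():
    connection = await qtm.connect('127.0.0.1')
    await connection.stream_frames(components=['3d'], on_packet=on_packet)

# asyncio.get_event_loop().run_until_complete(setup())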
dw/mitogen
|
mitogen/parent.py
|
https://github.com/dw/mitogen/blob/a7fdb55e1300a7e0a5e404b09eb730cf9a525da7/mitogen/parent.py#L1880-L1895
|
def _propagate_up(self, handle, target_id, name=None):
"""
In a non-master context, propagate an update towards the master.
:param int handle:
:data:`mitogen.core.ADD_ROUTE` or :data:`mitogen.core.DEL_ROUTE`
:param int target_id:
ID of the connecting or disconnecting context.
:param str name:
For :data:`mitogen.core.ADD_ROUTE`, the name of the new context
assigned by its parent. This is used by parents to assign the
:attr:`mitogen.core.Context.name` attribute.
"""
if self.parent:
stream = self.router.stream_by_id(self.parent.context_id)
self._send_one(stream, handle, target_id, name)
|
[
"def",
"_propagate_up",
"(",
"self",
",",
"handle",
",",
"target_id",
",",
"name",
"=",
"None",
")",
":",
"if",
"self",
".",
"parent",
":",
"stream",
"=",
"self",
".",
"router",
".",
"stream_by_id",
"(",
"self",
".",
"parent",
".",
"context_id",
")",
"self",
".",
"_send_one",
"(",
"stream",
",",
"handle",
",",
"target_id",
",",
"name",
")"
] |
In a non-master context, propagate an update towards the master.
:param int handle:
:data:`mitogen.core.ADD_ROUTE` or :data:`mitogen.core.DEL_ROUTE`
:param int target_id:
ID of the connecting or disconnecting context.
:param str name:
For :data:`mitogen.core.ADD_ROUTE`, the name of the new context
assigned by its parent. This is used by parents to assign the
:attr:`mitogen.core.Context.name` attribute.
|
[
"In",
"a",
"non",
"-",
"master",
"context",
"propagate",
"an",
"update",
"towards",
"the",
"master",
"."
] |
python
|
train
| 44.75 |
saltstack/salt
|
salt/utils/cache.py
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/cache.py#L172-L195
|
def _read(self):
'''
Read in from disk
'''
if msgpack is None:
log.error('Cache cannot be read from the disk: msgpack is missing')
elif not os.path.exists(self._path):
log.debug('Cache path does not exist for reading: %s', self._path)
else:
try:
with salt.utils.files.fopen(self._path, 'rb') as fp_:
cache = salt.utils.data.decode(msgpack.load(fp_, encoding=__salt_system_encoding__))
if "CacheDisk_cachetime" in cache: # new format
self._dict = cache["CacheDisk_data"]
self._key_cache_time = cache["CacheDisk_cachetime"]
else: # old format
self._dict = cache
timestamp = os.path.getmtime(self._path)
for key in self._dict:
self._key_cache_time[key] = timestamp
if log.isEnabledFor(logging.DEBUG):
log.debug('Disk cache retrieved: %s', cache)
except (IOError, OSError) as err:
log.error('Error while reading disk cache from %s: %s', self._path, err)
|
[
"def",
"_read",
"(",
"self",
")",
":",
"if",
"msgpack",
"is",
"None",
":",
"log",
".",
"error",
"(",
"'Cache cannot be read from the disk: msgpack is missing'",
")",
"elif",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"self",
".",
"_path",
")",
":",
"log",
".",
"debug",
"(",
"'Cache path does not exist for reading: %s'",
",",
"self",
".",
"_path",
")",
"else",
":",
"try",
":",
"with",
"salt",
".",
"utils",
".",
"files",
".",
"fopen",
"(",
"self",
".",
"_path",
",",
"'rb'",
")",
"as",
"fp_",
":",
"cache",
"=",
"salt",
".",
"utils",
".",
"data",
".",
"decode",
"(",
"msgpack",
".",
"load",
"(",
"fp_",
",",
"encoding",
"=",
"__salt_system_encoding__",
")",
")",
"if",
"\"CacheDisk_cachetime\"",
"in",
"cache",
":",
"# new format",
"self",
".",
"_dict",
"=",
"cache",
"[",
"\"CacheDisk_data\"",
"]",
"self",
".",
"_key_cache_time",
"=",
"cache",
"[",
"\"CacheDisk_cachetime\"",
"]",
"else",
":",
"# old format",
"self",
".",
"_dict",
"=",
"cache",
"timestamp",
"=",
"os",
".",
"path",
".",
"getmtime",
"(",
"self",
".",
"_path",
")",
"for",
"key",
"in",
"self",
".",
"_dict",
":",
"self",
".",
"_key_cache_time",
"[",
"key",
"]",
"=",
"timestamp",
"if",
"log",
".",
"isEnabledFor",
"(",
"logging",
".",
"DEBUG",
")",
":",
"log",
".",
"debug",
"(",
"'Disk cache retrieved: %s'",
",",
"cache",
")",
"except",
"(",
"IOError",
",",
"OSError",
")",
"as",
"err",
":",
"log",
".",
"error",
"(",
"'Error while reading disk cache from %s: %s'",
",",
"self",
".",
"_path",
",",
"err",
")"
] |
Read in from disk
|
[
"Read",
"in",
"from",
"disk"
] |
python
|
train
| 48.666667 |
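A standalone sketch of the "new format" on-disk layout this reader understands (assumes the msgpack package is installed; the file name and key are illustrative, and a current msgpack API is used instead of the deprecated encoding= argument above):

import time
import msgpack

cache = {'CacheDisk_data': {'key': 'value'},
         'CacheDisk_cachetime': {'key': time.time()}}
with open('disk_cache.p', 'wb') as fp_:
    fp_.write(msgpack.packb(cache))
with open('disk_cache.p', 'rb') as fp_:
    print(msgpack.unpackb(fp_.read(), raw=False)['CacheDisk_data'])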
vmalyi/adb_android
|
adb_android/adb_android.py
|
https://github.com/vmalyi/adb_android/blob/de53dc54f27b14dc8c2ae64b136a60a59e1a1cb1/adb_android/adb_android.py#L28-L47
|
def bugreport(dest_file="default.log"):
"""
Prints dumpsys, dumpstate, and logcat data to the screen, for the purposes of bug reporting
:return: result of _exec_command() execution
"""
adb_full_cmd = [v.ADB_COMMAND_PREFIX, v.ADB_COMMAND_BUGREPORT]
try:
dest_file_handler = open(dest_file, "w")
except IOError:
print("IOError: Failed to create a log file")
# We have to check if device is available or not before executing this command
# as adb bugreport will wait-for-device infinitely and does not come out of
# loop
    # Execute only if a device is available
if _isDeviceAvailable():
result = _exec_command_to_file(adb_full_cmd, dest_file_handler)
return (result, "Success: Bug report saved to: " + dest_file)
else:
return (0, "Device Not Found")
|
[
"def",
"bugreport",
"(",
"dest_file",
"=",
"\"default.log\"",
")",
":",
"adb_full_cmd",
"=",
"[",
"v",
".",
"ADB_COMMAND_PREFIX",
",",
"v",
".",
"ADB_COMMAND_BUGREPORT",
"]",
"try",
":",
"dest_file_handler",
"=",
"open",
"(",
"dest_file",
",",
"\"w\"",
")",
"except",
"IOError",
":",
"print",
"(",
"\"IOError: Failed to create a log file\"",
")",
"# We have to check if device is available or not before executing this command",
"# as adb bugreport will wait-for-device infinitely and does not come out of ",
"# loop",
"# Execute only if device is available only",
"if",
"_isDeviceAvailable",
"(",
")",
":",
"result",
"=",
"_exec_command_to_file",
"(",
"adb_full_cmd",
",",
"dest_file_handler",
")",
"return",
"(",
"result",
",",
"\"Success: Bug report saved to: \"",
"+",
"dest_file",
")",
"else",
":",
"return",
"(",
"0",
",",
"\"Device Not Found\"",
")"
] |
Prints dumpsys, dumpstate, and logcat data to the screen, for the purposes of bug reporting
:return: result of _exec_command() execution
|
[
"Prints",
"dumpsys",
"dumpstate",
"and",
"logcat",
"data",
"to",
"the",
"screen",
"for",
"the",
"purposes",
"of",
"bug",
"reporting",
":",
"return",
":",
"result",
"of",
"_exec_command",
"()",
"execution"
] |
python
|
train
| 41.35 |
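A minimal usage sketch (assumes adb_android is installed and adb is on the PATH; the log file name is illustrative):

from adb_android import adb_android

result, message = adb_android.bugreport('bugreport.log')
print(message)  # 'Success: Bug report saved to: bugreport.log' or 'Device Not Found'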
bcbio/bcbio-nextgen
|
bcbio/variation/validate.py
|
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/validate.py#L305-L342
|
def _pick_best_quality_score(vrn_file):
"""Flexible quality score selection, picking the best available.
Implementation based on discussion:
https://github.com/bcbio/bcbio-nextgen/commit/a538cecd86c0000d17d3f9d4f8ac9d2da04f9884#commitcomment-14539249
(RTG=AVR/GATK=VQSLOD/MuTect=t_lod_fstar, otherwise GQ, otherwise QUAL, otherwise DP.)
For MuTect, it's not clear how to get t_lod_fstar, the right quality score, into VCF cleanly.
MuTect2 has TLOD in the INFO field.
"""
# pysam fails on checking reference contigs if input is empty
if not vcfutils.vcf_has_variants(vrn_file):
return "DP"
to_check = 25
scores = collections.defaultdict(int)
try:
in_handle = VariantFile(vrn_file)
except ValueError:
raise ValueError("Failed to parse input file in preparation for validation: %s" % vrn_file)
with contextlib.closing(in_handle) as val_in:
for i, rec in enumerate(val_in):
if i > to_check:
break
if "VQSLOD" in rec.info and rec.info.get("VQSLOD") is not None:
scores["INFO=VQSLOD"] += 1
if "TLOD" in rec.info and rec.info.get("TLOD") is not None:
scores["INFO=TLOD"] += 1
for skey in ["AVR", "GQ", "DP"]:
if len(rec.samples) > 0 and rec.samples[0].get(skey) is not None:
scores[skey] += 1
if rec.qual:
scores["QUAL"] += 1
for key in ["AVR", "INFO=VQSLOD", "INFO=TLOD", "GQ", "QUAL", "DP"]:
if scores[key] > 0:
return key
raise ValueError("Did not find quality score for validation from %s" % vrn_file)
|
[
"def",
"_pick_best_quality_score",
"(",
"vrn_file",
")",
":",
"# pysam fails on checking reference contigs if input is empty",
"if",
"not",
"vcfutils",
".",
"vcf_has_variants",
"(",
"vrn_file",
")",
":",
"return",
"\"DP\"",
"to_check",
"=",
"25",
"scores",
"=",
"collections",
".",
"defaultdict",
"(",
"int",
")",
"try",
":",
"in_handle",
"=",
"VariantFile",
"(",
"vrn_file",
")",
"except",
"ValueError",
":",
"raise",
"ValueError",
"(",
"\"Failed to parse input file in preparation for validation: %s\"",
"%",
"vrn_file",
")",
"with",
"contextlib",
".",
"closing",
"(",
"in_handle",
")",
"as",
"val_in",
":",
"for",
"i",
",",
"rec",
"in",
"enumerate",
"(",
"val_in",
")",
":",
"if",
"i",
">",
"to_check",
":",
"break",
"if",
"\"VQSLOD\"",
"in",
"rec",
".",
"info",
"and",
"rec",
".",
"info",
".",
"get",
"(",
"\"VQSLOD\"",
")",
"is",
"not",
"None",
":",
"scores",
"[",
"\"INFO=VQSLOD\"",
"]",
"+=",
"1",
"if",
"\"TLOD\"",
"in",
"rec",
".",
"info",
"and",
"rec",
".",
"info",
".",
"get",
"(",
"\"TLOD\"",
")",
"is",
"not",
"None",
":",
"scores",
"[",
"\"INFO=TLOD\"",
"]",
"+=",
"1",
"for",
"skey",
"in",
"[",
"\"AVR\"",
",",
"\"GQ\"",
",",
"\"DP\"",
"]",
":",
"if",
"len",
"(",
"rec",
".",
"samples",
")",
">",
"0",
"and",
"rec",
".",
"samples",
"[",
"0",
"]",
".",
"get",
"(",
"skey",
")",
"is",
"not",
"None",
":",
"scores",
"[",
"skey",
"]",
"+=",
"1",
"if",
"rec",
".",
"qual",
":",
"scores",
"[",
"\"QUAL\"",
"]",
"+=",
"1",
"for",
"key",
"in",
"[",
"\"AVR\"",
",",
"\"INFO=VQSLOD\"",
",",
"\"INFO=TLOD\"",
",",
"\"GQ\"",
",",
"\"QUAL\"",
",",
"\"DP\"",
"]",
":",
"if",
"scores",
"[",
"key",
"]",
">",
"0",
":",
"return",
"key",
"raise",
"ValueError",
"(",
"\"Did not find quality score for validation from %s\"",
"%",
"vrn_file",
")"
] |
Flexible quality score selection, picking the best available.
Implementation based on discussion:
https://github.com/bcbio/bcbio-nextgen/commit/a538cecd86c0000d17d3f9d4f8ac9d2da04f9884#commitcomment-14539249
(RTG=AVR/GATK=VQSLOD/MuTect=t_lod_fstar, otherwise GQ, otherwise QUAL, otherwise DP.)
For MuTect, it's not clear how to get t_lod_fstar, the right quality score, into VCF cleanly.
MuTect2 has TLOD in the INFO field.
|
[
"Flexible",
"quality",
"score",
"selection",
"picking",
"the",
"best",
"available",
"."
] |
python
|
train
| 43.184211 |
glue-viz/glue-vispy-viewers
|
glue_vispy_viewers/extern/vispy/color/color_array.py
|
https://github.com/glue-viz/glue-vispy-viewers/blob/54a4351d98c1f90dfb1a557d1b447c1f57470eea/glue_vispy_viewers/extern/vispy/color/color_array.py#L313-L317
|
def value(self, val):
"""Set the color using length-N array of (from HSV)"""
hsv = self._hsv
hsv[:, 2] = _array_clip_val(val)
self.rgba = _hsv_to_rgb(hsv)
|
[
"def",
"value",
"(",
"self",
",",
"val",
")",
":",
"hsv",
"=",
"self",
".",
"_hsv",
"hsv",
"[",
":",
",",
"2",
"]",
"=",
"_array_clip_val",
"(",
"val",
")",
"self",
".",
"rgba",
"=",
"_hsv_to_rgb",
"(",
"hsv",
")"
] |
Set the color using length-N array of values (from HSV)
|
[
"Set",
"the",
"color",
"using",
"length",
"-",
"N",
"array",
"of",
"(",
"from",
"HSV",
")"
] |
python
|
train
| 36.4 |
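A minimal usage sketch (assumes vispy is installed, whose color_array module this file vendors, and that the setter accepts a scalar as well as a length-N array):

from vispy.color import ColorArray

c = ColorArray(['red', 'green'])
c.value = 0.5  # halve the HSV value channel; rgba is recomputed
print(c.rgba)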
HewlettPackard/python-hpOneView
|
hpOneView/resources/resource.py
|
https://github.com/HewlettPackard/python-hpOneView/blob/3c6219723ef25e6e0c83d44a89007f89bc325b89/hpOneView/resources/resource.py#L1427-L1455
|
def patch_request(self, id_or_uri, body, timeout=-1, custom_headers=None):
"""
Uses the PATCH to update a resource.
Only one operation can be performed in each PATCH call.
Args:
id_or_uri: Can be either the resource ID or the resource URI.
body: Patch request body
timeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation
in OneView; it just stops waiting for its completion.
Returns:
Updated resource.
"""
uri = self.build_uri(id_or_uri)
logger.debug('Patch resource (uri = %s, data = %s)' % (uri, body))
custom_headers_copy = custom_headers.copy() if custom_headers else {}
if self._connection._apiVersion >= 300 and 'Content-Type' not in custom_headers_copy:
custom_headers_copy['Content-Type'] = 'application/json-patch+json'
task, entity = self._connection.patch(uri, body, custom_headers=custom_headers_copy)
if not task:
return entity
return self._task_monitor.wait_for_task(task, timeout)
|
[
"def",
"patch_request",
"(",
"self",
",",
"id_or_uri",
",",
"body",
",",
"timeout",
"=",
"-",
"1",
",",
"custom_headers",
"=",
"None",
")",
":",
"uri",
"=",
"self",
".",
"build_uri",
"(",
"id_or_uri",
")",
"logger",
".",
"debug",
"(",
"'Patch resource (uri = %s, data = %s)'",
"%",
"(",
"uri",
",",
"body",
")",
")",
"custom_headers_copy",
"=",
"custom_headers",
".",
"copy",
"(",
")",
"if",
"custom_headers",
"else",
"{",
"}",
"if",
"self",
".",
"_connection",
".",
"_apiVersion",
">=",
"300",
"and",
"'Content-Type'",
"not",
"in",
"custom_headers_copy",
":",
"custom_headers_copy",
"[",
"'Content-Type'",
"]",
"=",
"'application/json-patch+json'",
"task",
",",
"entity",
"=",
"self",
".",
"_connection",
".",
"patch",
"(",
"uri",
",",
"body",
",",
"custom_headers",
"=",
"custom_headers_copy",
")",
"if",
"not",
"task",
":",
"return",
"entity",
"return",
"self",
".",
"_task_monitor",
".",
"wait_for_task",
"(",
"task",
",",
"timeout",
")"
] |
Uses the PATCH to update a resource.
Only one operation can be performed in each PATCH call.
Args:
id_or_uri: Can be either the resource ID or the resource URI.
body: Patch request body
timeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation
in OneView; it just stops waiting for its completion.
Returns:
Updated resource.
|
[
"Uses",
"the",
"PATCH",
"to",
"update",
"a",
"resource",
"."
] |
python
|
train
| 38.551724 |
Microsoft/azure-devops-python-api
|
azure-devops/azure/devops/v5_0/work/work_client.py
|
https://github.com/Microsoft/azure-devops-python-api/blob/4777ffda2f5052fabbaddb2abe9cb434e0cf1aa8/azure-devops/azure/devops/v5_0/work/work_client.py#L1186-L1219
|
def update_team_days_off(self, days_off_patch, team_context, iteration_id):
"""UpdateTeamDaysOff.
Set a team's days off for an iteration
    :param :class:`<TeamSettingsDaysOffPatch> <azure.devops.v5_0.work.models.TeamSettingsDaysOffPatch>` days_off_patch: Team's days off patch containing a list of start and end dates
:param :class:`<TeamContext> <azure.devops.v5_0.work.models.TeamContext>` team_context: The team context for the operation
:param str iteration_id: ID of the iteration
:rtype: :class:`<TeamSettingsDaysOff> <azure.devops.v5_0.work.models.TeamSettingsDaysOff>`
"""
project = None
team = None
if team_context is not None:
if team_context.project_id:
project = team_context.project_id
else:
project = team_context.project
if team_context.team_id:
team = team_context.team_id
else:
team = team_context.team
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'string')
if team is not None:
route_values['team'] = self._serialize.url('team', team, 'string')
if iteration_id is not None:
route_values['iterationId'] = self._serialize.url('iteration_id', iteration_id, 'str')
content = self._serialize.body(days_off_patch, 'TeamSettingsDaysOffPatch')
response = self._send(http_method='PATCH',
location_id='2d4faa2e-9150-4cbf-a47a-932b1b4a0773',
version='5.0',
route_values=route_values,
content=content)
return self._deserialize('TeamSettingsDaysOff', response)
|
[
"def",
"update_team_days_off",
"(",
"self",
",",
"days_off_patch",
",",
"team_context",
",",
"iteration_id",
")",
":",
"project",
"=",
"None",
"team",
"=",
"None",
"if",
"team_context",
"is",
"not",
"None",
":",
"if",
"team_context",
".",
"project_id",
":",
"project",
"=",
"team_context",
".",
"project_id",
"else",
":",
"project",
"=",
"team_context",
".",
"project",
"if",
"team_context",
".",
"team_id",
":",
"team",
"=",
"team_context",
".",
"team_id",
"else",
":",
"team",
"=",
"team_context",
".",
"team",
"route_values",
"=",
"{",
"}",
"if",
"project",
"is",
"not",
"None",
":",
"route_values",
"[",
"'project'",
"]",
"=",
"self",
".",
"_serialize",
".",
"url",
"(",
"'project'",
",",
"project",
",",
"'string'",
")",
"if",
"team",
"is",
"not",
"None",
":",
"route_values",
"[",
"'team'",
"]",
"=",
"self",
".",
"_serialize",
".",
"url",
"(",
"'team'",
",",
"team",
",",
"'string'",
")",
"if",
"iteration_id",
"is",
"not",
"None",
":",
"route_values",
"[",
"'iterationId'",
"]",
"=",
"self",
".",
"_serialize",
".",
"url",
"(",
"'iteration_id'",
",",
"iteration_id",
",",
"'str'",
")",
"content",
"=",
"self",
".",
"_serialize",
".",
"body",
"(",
"days_off_patch",
",",
"'TeamSettingsDaysOffPatch'",
")",
"response",
"=",
"self",
".",
"_send",
"(",
"http_method",
"=",
"'PATCH'",
",",
"location_id",
"=",
"'2d4faa2e-9150-4cbf-a47a-932b1b4a0773'",
",",
"version",
"=",
"'5.0'",
",",
"route_values",
"=",
"route_values",
",",
"content",
"=",
"content",
")",
"return",
"self",
".",
"_deserialize",
"(",
"'TeamSettingsDaysOff'",
",",
"response",
")"
] |
UpdateTeamDaysOff.
Set a team's days off for an iteration
:param :class:`<TeamSettingsDaysOffPatch> <azure.devops.v5_0.work.models.TeamSettingsDaysOffPatch>` days_off_patch: Team's days off patch containing a list of start and end dates
:param :class:`<TeamContext> <azure.devops.v5_0.work.models.TeamContext>` team_context: The team context for the operation
:param str iteration_id: ID of the iteration
:rtype: :class:`<TeamSettingsDaysOff> <azure.devops.v5_0.work.models.TeamSettingsDaysOff>`
|
[
"UpdateTeamDaysOff",
".",
"Set",
"a",
"team",
"s",
"days",
"off",
"for",
"an",
"iteration",
":",
"param",
":",
"class",
":",
"<TeamSettingsDaysOffPatch",
">",
"<azure",
".",
"devops",
".",
"v5_0",
".",
"work",
".",
"models",
".",
"TeamSettingsDaysOffPatch",
">",
"days_off_patch",
":",
"Team",
"s",
"days",
"off",
"patch",
"containting",
"a",
"list",
"of",
"start",
"and",
"end",
"dates",
":",
"param",
":",
"class",
":",
"<TeamContext",
">",
"<azure",
".",
"devops",
".",
"v5_0",
".",
"work",
".",
"models",
".",
"TeamContext",
">",
"team_context",
":",
"The",
"team",
"context",
"for",
"the",
"operation",
":",
"param",
"str",
"iteration_id",
":",
"ID",
"of",
"the",
"iteration",
":",
"rtype",
":",
":",
"class",
":",
"<TeamSettingsDaysOff",
">",
"<azure",
".",
"devops",
".",
"v5_0",
".",
"work",
".",
"models",
".",
"TeamSettingsDaysOff",
">"
] |
python
|
train
| 52.911765 |
jrspruitt/ubi_reader
|
ubireader/ubi_io.py
|
https://github.com/jrspruitt/ubi_reader/blob/7079dd380c1c9896bced30d6d34e8780b9181597/ubireader/ubi_io.py#L159-L167
|
def read_block_data(self, block):
"""Read LEB data from file
Argument:
Obj:block -- Block data is desired for.
"""
self.seek(block.file_offset + block.ec_hdr.data_offset)
buf = self._fhandle.read(block.size - block.ec_hdr.data_offset - block.vid_hdr.data_pad)
return buf
|
[
"def",
"read_block_data",
"(",
"self",
",",
"block",
")",
":",
"self",
".",
"seek",
"(",
"block",
".",
"file_offset",
"+",
"block",
".",
"ec_hdr",
".",
"data_offset",
")",
"buf",
"=",
"self",
".",
"_fhandle",
".",
"read",
"(",
"block",
".",
"size",
"-",
"block",
".",
"ec_hdr",
".",
"data_offset",
"-",
"block",
".",
"vid_hdr",
".",
"data_pad",
")",
"return",
"buf"
] |
Read LEB data from file
Argument:
Obj:block -- Block data is desired for.
|
[
"Read",
"LEB",
"data",
"from",
"file",
"Argument",
":",
"Obj",
":",
"block",
"--",
"Block",
"data",
"is",
"desired",
"for",
"."
] |
python
|
train
| 36.333333 |
solocompt/plugs-mail
|
plugs_mail/utils.py
|
https://github.com/solocompt/plugs-mail/blob/6139fa79ddb437562db1769d03bd3098c25a06fa/plugs_mail/utils.py#L27-L35
|
def to_staff(email_class, **data):
"""
Email staff users
"""
for user in get_user_model().objects.filter(is_staff=True):
try:
email_class().send([user.email], user.language, **data)
except AttributeError:
email_class().send([user.email], translation.get_language(), **data)
|
[
"def",
"to_staff",
"(",
"email_class",
",",
"*",
"*",
"data",
")",
":",
"for",
"user",
"in",
"get_user_model",
"(",
")",
".",
"objects",
".",
"filter",
"(",
"is_staff",
"=",
"True",
")",
":",
"try",
":",
"email_class",
"(",
")",
".",
"send",
"(",
"[",
"user",
".",
"email",
"]",
",",
"user",
".",
"language",
",",
"*",
"*",
"data",
")",
"except",
"AttributeError",
":",
"email_class",
"(",
")",
".",
"send",
"(",
"[",
"user",
".",
"email",
"]",
",",
"translation",
".",
"get_language",
"(",
")",
",",
"*",
"*",
"data",
")"
] |
Email staff users
|
[
"Email",
"staff",
"users"
] |
python
|
train
| 35.666667 |
google/grr
|
grr/client/grr_response_client/client_actions/osx/osx.py
|
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/client/grr_response_client/client_actions/osx/osx.py#L393-L419
|
def OSXEnumerateRunningServicesFromClient(args):
"""Get running launchd jobs.
Args:
args: Unused.
Yields:
`rdf_client.OSXServiceInformation` instances.
Raises:
UnsupportedOSVersionError: for OS X earlier than 10.6.
"""
del args # Unused.
osx_version = client_utils_osx.OSXVersion()
version_array = osx_version.VersionAsMajorMinor()
if version_array[:2] < [10, 6]:
raise UnsupportedOSVersionError(
"ServiceManagement API unsupported on < 10.6. This client is %s" %
osx_version.VersionString())
launchd_list = GetRunningLaunchDaemons()
parser = osx_launchd.OSXLaunchdJobDict(launchd_list)
for job in parser.Parse():
response = CreateServiceProto(job)
yield response
|
[
"def",
"OSXEnumerateRunningServicesFromClient",
"(",
"args",
")",
":",
"del",
"args",
"# Unused.",
"osx_version",
"=",
"client_utils_osx",
".",
"OSXVersion",
"(",
")",
"version_array",
"=",
"osx_version",
".",
"VersionAsMajorMinor",
"(",
")",
"if",
"version_array",
"[",
":",
"2",
"]",
"<",
"[",
"10",
",",
"6",
"]",
":",
"raise",
"UnsupportedOSVersionError",
"(",
"\"ServiceManagement API unsupported on < 10.6. This client is %s\"",
"%",
"osx_version",
".",
"VersionString",
"(",
")",
")",
"launchd_list",
"=",
"GetRunningLaunchDaemons",
"(",
")",
"parser",
"=",
"osx_launchd",
".",
"OSXLaunchdJobDict",
"(",
"launchd_list",
")",
"for",
"job",
"in",
"parser",
".",
"Parse",
"(",
")",
":",
"response",
"=",
"CreateServiceProto",
"(",
"job",
")",
"yield",
"response"
] |
Get running launchd jobs.
Args:
args: Unused.
Yields:
`rdf_client.OSXServiceInformation` instances.
Raises:
UnsupportedOSVersionError: for OS X earlier than 10.6.
|
[
"Get",
"running",
"launchd",
"jobs",
"."
] |
python
|
train
| 26.296296 |
tensorflow/tensor2tensor
|
tensor2tensor/data_generators/text_encoder.py
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/text_encoder.py#L559-L574
|
def _token_to_subtoken_ids(self, token):
"""Converts token to a list of subtoken ids.
Args:
token: a string.
Returns:
a list of integers in the range [0, vocab_size)
"""
cache_location = hash(token) % self._cache_size
cache_key, cache_value = self._cache[cache_location]
if cache_key == token:
return cache_value
ret = self._escaped_token_to_subtoken_ids(
_escape_token(token, self._alphabet))
self._cache[cache_location] = (token, ret)
return ret
|
[
"def",
"_token_to_subtoken_ids",
"(",
"self",
",",
"token",
")",
":",
"cache_location",
"=",
"hash",
"(",
"token",
")",
"%",
"self",
".",
"_cache_size",
"cache_key",
",",
"cache_value",
"=",
"self",
".",
"_cache",
"[",
"cache_location",
"]",
"if",
"cache_key",
"==",
"token",
":",
"return",
"cache_value",
"ret",
"=",
"self",
".",
"_escaped_token_to_subtoken_ids",
"(",
"_escape_token",
"(",
"token",
",",
"self",
".",
"_alphabet",
")",
")",
"self",
".",
"_cache",
"[",
"cache_location",
"]",
"=",
"(",
"token",
",",
"ret",
")",
"return",
"ret"
] |
Converts token to a list of subtoken ids.
Args:
token: a string.
Returns:
a list of integers in the range [0, vocab_size)
|
[
"Converts",
"token",
"to",
"a",
"list",
"of",
"subtoken",
"ids",
"."
] |
python
|
train
| 31.1875 |
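A standalone sketch of the fixed-size hash-slot cache pattern used above (sizes, token, and ids are illustrative; collisions simply overwrite the slot):

cache_size = 2 ** 10
cache = [(None, None)] * cache_size

def cached_lookup(token, compute):
    slot = hash(token) % cache_size
    key, value = cache[slot]
    if key == token:
        return value            # hit: reuse the stored subtoken ids
    value = compute(token)      # miss or collision: recompute and store
    cache[slot] = (token, value)
    return value

print(cached_lookup('hello', lambda t: [len(t)]))  # [5]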
cloudmesh/cloudmesh-common
|
cloudmesh/common/logger.py
|
https://github.com/cloudmesh/cloudmesh-common/blob/ae4fae09cd78205d179ea692dc58f0b0c8fea2b8/cloudmesh/common/logger.py#L12-L61
|
def LOGGER(filename):
"""creates a logger with the given name.
You can use it as follows::
log = cloudmesh.common.LOGGER(__file__)
log.error("this is an error")
log.info("this is an info")
log.warning("this is a warning")
"""
pwd = os.getcwd()
name = filename.replace(pwd, "$PWD")
try:
(first, name) = name.split("site-packages")
name += "... site"
except:
pass
loglevel = logging.CRITICAL
try:
level = grep("loglevel:", config_file(
"/cloudmesh_debug.yaml")).strip().split(":")[1].strip().lower()
if level.upper() == "DEBUG":
loglevel = logging.DEBUG
elif level.upper() == "INFO":
loglevel = logging.INFO
elif level.upper() == "WARNING":
loglevel = logging.WARNING
elif level.upper() == "ERROR":
loglevel = logging.ERROR
else:
level = logging.CRITICAL
except:
# print "LOGLEVEL NOT FOUND"
loglevel = logging.DEBUG
log = logging.getLogger(name)
log.setLevel(loglevel)
formatter = logging.Formatter(
'CM {0:>50}:%(lineno)s: %(levelname)6s - %(message)s'.format(name))
# formatter = logging.Formatter(
# 'CM {0:>50}: %(levelname)6s - %(module)s:%(lineno)s %funcName)s: %(message)s'.format(name))
handler = logging.StreamHandler()
handler.setFormatter(formatter)
log.addHandler(handler)
return log
|
[
"def",
"LOGGER",
"(",
"filename",
")",
":",
"pwd",
"=",
"os",
".",
"getcwd",
"(",
")",
"name",
"=",
"filename",
".",
"replace",
"(",
"pwd",
",",
"\"$PWD\"",
")",
"try",
":",
"(",
"first",
",",
"name",
")",
"=",
"name",
".",
"split",
"(",
"\"site-packages\"",
")",
"name",
"+=",
"\"... site\"",
"except",
":",
"pass",
"loglevel",
"=",
"logging",
".",
"CRITICAL",
"try",
":",
"level",
"=",
"grep",
"(",
"\"loglevel:\"",
",",
"config_file",
"(",
"\"/cloudmesh_debug.yaml\"",
")",
")",
".",
"strip",
"(",
")",
".",
"split",
"(",
"\":\"",
")",
"[",
"1",
"]",
".",
"strip",
"(",
")",
".",
"lower",
"(",
")",
"if",
"level",
".",
"upper",
"(",
")",
"==",
"\"DEBUG\"",
":",
"loglevel",
"=",
"logging",
".",
"DEBUG",
"elif",
"level",
".",
"upper",
"(",
")",
"==",
"\"INFO\"",
":",
"loglevel",
"=",
"logging",
".",
"INFO",
"elif",
"level",
".",
"upper",
"(",
")",
"==",
"\"WARNING\"",
":",
"loglevel",
"=",
"logging",
".",
"WARNING",
"elif",
"level",
".",
"upper",
"(",
")",
"==",
"\"ERROR\"",
":",
"loglevel",
"=",
"logging",
".",
"ERROR",
"else",
":",
"level",
"=",
"logging",
".",
"CRITICAL",
"except",
":",
"# print \"LOGLEVEL NOT FOUND\"",
"loglevel",
"=",
"logging",
".",
"DEBUG",
"log",
"=",
"logging",
".",
"getLogger",
"(",
"name",
")",
"log",
".",
"setLevel",
"(",
"loglevel",
")",
"formatter",
"=",
"logging",
".",
"Formatter",
"(",
"'CM {0:>50}:%(lineno)s: %(levelname)6s - %(message)s'",
".",
"format",
"(",
"name",
")",
")",
"# formatter = logging.Formatter(",
"# 'CM {0:>50}: %(levelname)6s - %(module)s:%(lineno)s %funcName)s: %(message)s'.format(name))",
"handler",
"=",
"logging",
".",
"StreamHandler",
"(",
")",
"handler",
".",
"setFormatter",
"(",
"formatter",
")",
"log",
".",
"addHandler",
"(",
"handler",
")",
"return",
"log"
] |
creates a logger with the given name.
You can use it as follows::
log = cloudmesh.common.LOGGER(__file__)
log.error("this is an error")
log.info("this is an info")
log.warning("this is a warning")
|
[
"creates",
"a",
"logger",
"with",
"the",
"given",
"name",
"."
] |
python
|
train
| 28.56 |
Shoobx/xmldiff
|
xmldiff/_diff_match_patch_py3.py
|
https://github.com/Shoobx/xmldiff/blob/ec7835bce9ba69ff4ce03ab6c11397183b6f8411/xmldiff/_diff_match_patch_py3.py#L138-L195
|
def diff_compute(self, text1, text2, checklines, deadline):
"""Find the differences between two texts. Assumes that the texts do not
have any common prefix or suffix.
Args:
text1: Old string to be diffed.
text2: New string to be diffed.
checklines: Speedup flag. If false, then don't run a line-level diff
first to identify the changed areas.
If true, then run a faster, slightly less optimal diff.
deadline: Time when the diff should be complete by.
Returns:
Array of changes.
"""
if not text1:
# Just add some text (speedup).
return [(self.DIFF_INSERT, text2)]
if not text2:
# Just delete some text (speedup).
return [(self.DIFF_DELETE, text1)]
if len(text1) > len(text2):
(longtext, shorttext) = (text1, text2)
else:
(shorttext, longtext) = (text1, text2)
i = longtext.find(shorttext)
if i != -1:
# Shorter text is inside the longer text (speedup).
diffs = [(self.DIFF_INSERT, longtext[:i]), (self.DIFF_EQUAL, shorttext),
(self.DIFF_INSERT, longtext[i + len(shorttext):])]
# Swap insertions for deletions if diff is reversed.
if len(text1) > len(text2):
diffs[0] = (self.DIFF_DELETE, diffs[0][1])
diffs[2] = (self.DIFF_DELETE, diffs[2][1])
return diffs
if len(shorttext) == 1:
# Single character string.
# After the previous speedup, the character can't be an equality.
return [(self.DIFF_DELETE, text1), (self.DIFF_INSERT, text2)]
# Check to see if the problem can be split in two.
hm = self.diff_halfMatch(text1, text2)
if hm:
# A half-match was found, sort out the return data.
(text1_a, text1_b, text2_a, text2_b, mid_common) = hm
# Send both pairs off for separate processing.
diffs_a = self.diff_main(text1_a, text2_a, checklines, deadline)
diffs_b = self.diff_main(text1_b, text2_b, checklines, deadline)
# Merge the results.
return diffs_a + [(self.DIFF_EQUAL, mid_common)] + diffs_b
if checklines and len(text1) > 100 and len(text2) > 100:
return self.diff_lineMode(text1, text2, deadline)
return self.diff_bisect(text1, text2, deadline)
|
[
"def",
"diff_compute",
"(",
"self",
",",
"text1",
",",
"text2",
",",
"checklines",
",",
"deadline",
")",
":",
"if",
"not",
"text1",
":",
"# Just add some text (speedup).",
"return",
"[",
"(",
"self",
".",
"DIFF_INSERT",
",",
"text2",
")",
"]",
"if",
"not",
"text2",
":",
"# Just delete some text (speedup).",
"return",
"[",
"(",
"self",
".",
"DIFF_DELETE",
",",
"text1",
")",
"]",
"if",
"len",
"(",
"text1",
")",
">",
"len",
"(",
"text2",
")",
":",
"(",
"longtext",
",",
"shorttext",
")",
"=",
"(",
"text1",
",",
"text2",
")",
"else",
":",
"(",
"shorttext",
",",
"longtext",
")",
"=",
"(",
"text1",
",",
"text2",
")",
"i",
"=",
"longtext",
".",
"find",
"(",
"shorttext",
")",
"if",
"i",
"!=",
"-",
"1",
":",
"# Shorter text is inside the longer text (speedup).",
"diffs",
"=",
"[",
"(",
"self",
".",
"DIFF_INSERT",
",",
"longtext",
"[",
":",
"i",
"]",
")",
",",
"(",
"self",
".",
"DIFF_EQUAL",
",",
"shorttext",
")",
",",
"(",
"self",
".",
"DIFF_INSERT",
",",
"longtext",
"[",
"i",
"+",
"len",
"(",
"shorttext",
")",
":",
"]",
")",
"]",
"# Swap insertions for deletions if diff is reversed.",
"if",
"len",
"(",
"text1",
")",
">",
"len",
"(",
"text2",
")",
":",
"diffs",
"[",
"0",
"]",
"=",
"(",
"self",
".",
"DIFF_DELETE",
",",
"diffs",
"[",
"0",
"]",
"[",
"1",
"]",
")",
"diffs",
"[",
"2",
"]",
"=",
"(",
"self",
".",
"DIFF_DELETE",
",",
"diffs",
"[",
"2",
"]",
"[",
"1",
"]",
")",
"return",
"diffs",
"if",
"len",
"(",
"shorttext",
")",
"==",
"1",
":",
"# Single character string.",
"# After the previous speedup, the character can't be an equality.",
"return",
"[",
"(",
"self",
".",
"DIFF_DELETE",
",",
"text1",
")",
",",
"(",
"self",
".",
"DIFF_INSERT",
",",
"text2",
")",
"]",
"# Check to see if the problem can be split in two.",
"hm",
"=",
"self",
".",
"diff_halfMatch",
"(",
"text1",
",",
"text2",
")",
"if",
"hm",
":",
"# A half-match was found, sort out the return data.",
"(",
"text1_a",
",",
"text1_b",
",",
"text2_a",
",",
"text2_b",
",",
"mid_common",
")",
"=",
"hm",
"# Send both pairs off for separate processing.",
"diffs_a",
"=",
"self",
".",
"diff_main",
"(",
"text1_a",
",",
"text2_a",
",",
"checklines",
",",
"deadline",
")",
"diffs_b",
"=",
"self",
".",
"diff_main",
"(",
"text1_b",
",",
"text2_b",
",",
"checklines",
",",
"deadline",
")",
"# Merge the results.",
"return",
"diffs_a",
"+",
"[",
"(",
"self",
".",
"DIFF_EQUAL",
",",
"mid_common",
")",
"]",
"+",
"diffs_b",
"if",
"checklines",
"and",
"len",
"(",
"text1",
")",
">",
"100",
"and",
"len",
"(",
"text2",
")",
">",
"100",
":",
"return",
"self",
".",
"diff_lineMode",
"(",
"text1",
",",
"text2",
",",
"deadline",
")",
"return",
"self",
".",
"diff_bisect",
"(",
"text1",
",",
"text2",
",",
"deadline",
")"
] |
Find the differences between two texts. Assumes that the texts do not
have any common prefix or suffix.
Args:
text1: Old string to be diffed.
text2: New string to be diffed.
checklines: Speedup flag. If false, then don't run a line-level diff
first to identify the changed areas.
If true, then run a faster, slightly less optimal diff.
deadline: Time when the diff should be complete by.
Returns:
Array of changes.
|
[
"Find",
"the",
"differences",
"between",
"two",
"texts",
".",
"Assumes",
"that",
"the",
"texts",
"do",
"not",
"have",
"any",
"common",
"prefix",
"or",
"suffix",
"."
] |
python
|
train
| 37.603448 |
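The containment speedup in diff_compute is easy to observe directly. A quick check using the standalone diff-match-patch package (the module above is a vendored copy of it; the diff tuples use 1/0/-1 for insert/equal/delete):

from diff_match_patch import diff_match_patch

dmp = diff_match_patch()
# "abc" is contained in "xabcy", so the shorter-inside-longer branch fires:
print(dmp.diff_main("abc", "xabcy"))   # expect [(1, 'x'), (0, 'abc'), (1, 'y')]
# With the arguments reversed, the insertions become deletions:
print(dmp.diff_main("xabcy", "abc"))   # expect [(-1, 'x'), (0, 'abc'), (-1, 'y')]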
cloud9ers/gurumate
|
environment/lib/python2.7/site-packages/IPython/config/configurable.py
|
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/config/configurable.py#L152-L166
|
def class_get_help(cls, inst=None):
"""Get the help string for this class in ReST format.
If `inst` is given, its current trait values will be used in place of
class defaults.
"""
assert inst is None or isinstance(inst, cls)
cls_traits = cls.class_traits(config=True)
final_help = []
final_help.append(u'%s options' % cls.__name__)
final_help.append(len(final_help[0])*u'-')
for k,v in sorted(cls.class_traits(config=True).iteritems()):
help = cls.class_get_trait_help(v, inst)
final_help.append(help)
return '\n'.join(final_help)
|
[
"def",
"class_get_help",
"(",
"cls",
",",
"inst",
"=",
"None",
")",
":",
"assert",
"inst",
"is",
"None",
"or",
"isinstance",
"(",
"inst",
",",
"cls",
")",
"cls_traits",
"=",
"cls",
".",
"class_traits",
"(",
"config",
"=",
"True",
")",
"final_help",
"=",
"[",
"]",
"final_help",
".",
"append",
"(",
"u'%s options'",
"%",
"cls",
".",
"__name__",
")",
"final_help",
".",
"append",
"(",
"len",
"(",
"final_help",
"[",
"0",
"]",
")",
"*",
"u'-'",
")",
"for",
"k",
",",
"v",
"in",
"sorted",
"(",
"cls",
".",
"class_traits",
"(",
"config",
"=",
"True",
")",
".",
"iteritems",
"(",
")",
")",
":",
"help",
"=",
"cls",
".",
"class_get_trait_help",
"(",
"v",
",",
"inst",
")",
"final_help",
".",
"append",
"(",
"help",
")",
"return",
"'\\n'",
".",
"join",
"(",
"final_help",
")"
] |
Get the help string for this class in ReST format.
If `inst` is given, its current trait values will be used in place of
class defaults.
|
[
"Get",
"the",
"help",
"string",
"for",
"this",
"class",
"in",
"ReST",
"format",
".",
"If",
"inst",
"is",
"given",
"it",
"s",
"current",
"trait",
"values",
"will",
"be",
"used",
"in",
"place",
"of",
"class",
"defaults",
"."
] |
python
|
test
| 42.533333 |
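A sketch of how class_get_help is typically called, assuming the old IPython Configurable/traitlets API that this vendored module belongs to (the Worker class and its trait are illustrative):

from IPython.config.configurable import Configurable
from IPython.utils.traitlets import Int

class Worker(Configurable):
    retries = Int(3, config=True, help="Number of retries before giving up.")

# Prints a ReST-style section roughly like:
#   Worker options
#   --------------
#   ... one entry per config=True trait, rendered by class_get_trait_help ...
print(Worker.class_get_help())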
tanghaibao/goatools
|
goatools/anno/annoreader_base.py
|
https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/anno/annoreader_base.py#L131-L136
|
def _get_goid2dbids(associations):
"""Return gene2go data for user-specified taxids."""
go2ids = cx.defaultdict(set)
for ntd in associations:
go2ids[ntd.GO_ID].add(ntd.DB_ID)
return dict(go2ids)
|
[
"def",
"_get_goid2dbids",
"(",
"associations",
")",
":",
"go2ids",
"=",
"cx",
".",
"defaultdict",
"(",
"set",
")",
"for",
"ntd",
"in",
"associations",
":",
"go2ids",
"[",
"ntd",
".",
"GO_ID",
"]",
".",
"add",
"(",
"ntd",
".",
"DB_ID",
")",
"return",
"dict",
"(",
"go2ids",
")"
] |
Return gene2go data for user-specified taxids.
|
[
"Return",
"gene2go",
"data",
"for",
"user",
"-",
"specified",
"taxids",
"."
] |
python
|
train
| 38.833333 |
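The many-to-many inversion above (pairs folded into a dict of sets) works standalone; here the association records are mocked with a namedtuple:

import collections as cx
from collections import namedtuple

Assoc = namedtuple("Assoc", "GO_ID DB_ID")
associations = [
    Assoc("GO:0008150", "gene1"),
    Assoc("GO:0008150", "gene2"),
    Assoc("GO:0003674", "gene1"),
]

go2ids = cx.defaultdict(set)
for ntd in associations:
    go2ids[ntd.GO_ID].add(ntd.DB_ID)

print(dict(go2ids))  # {'GO:0008150': {'gene1', 'gene2'}, 'GO:0003674': {'gene1'}}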
happyleavesaoc/aoc-mgz
|
mgz/enums.py
|
https://github.com/happyleavesaoc/aoc-mgz/blob/13fc379cc062d7640bfa028eed9c0d45d37a7b2b/mgz/enums.py#L8-L81
|
def ObjectEnum(ctx):
"""Object Enumeration.
Should export the whole list from the game for the best accuracy.
"""
return Enum(
ctx,
villager_male=83,
villager_female=293,
scout_cavalry=448,
eagle_warrior=751,
king=434,
flare=332,
relic=285,
turkey=833,
sheep=594,
deer=65,
boar=48,
iron_boar=810,
ostrich=1026,
javelina=822,
crocodile=1031,
rhinoceros=1139,
wolf=126,
jaguar=812,
hawk=96,
macaw=816,
shore_fish=69,
fish_1=455,
fish_2=456,
fish_4=458,
fish_3=457,
marlin_1=450,
marlin_2=451,
dolphin=452,
cactus=709,
berry_bush=59,
stone_pile=102,
gold_pile=66,
forest_tree=350,
forest_tree_2=411,
snow_pine_tree=413,
straggler_tree=349,
tc_1=109,
tc_2=618,
tc_3=619,
tc_4=620,
castle=70,
palisade_wall=72,
stone_wall=117,
stone_gate_1=64,
stone_gate_2=81,
stone_gate_3=88,
stone_gate_4=95,
palisade_gate_1=662,
palisade_gate_2=666,
palisade_gate_3=670,
palisade_gate_4=674,
fortified_wall=155,
cliff_1=264,
cliff_2=265,
cliff_3=266,
cliff_4=267,
cliff_5=268,
cliff_6=269,
cliff_7=270,
cliff_8=271,
cliff_9=272,
cliff_10=273,
outpost=598,
shipwreck=722,
map_revealer=837,
default=Pass
)
|
[
"def",
"ObjectEnum",
"(",
"ctx",
")",
":",
"return",
"Enum",
"(",
"ctx",
",",
"villager_male",
"=",
"83",
",",
"villager_female",
"=",
"293",
",",
"scout_cavalry",
"=",
"448",
",",
"eagle_warrior",
"=",
"751",
",",
"king",
"=",
"434",
",",
"flare",
"=",
"332",
",",
"relic",
"=",
"285",
",",
"turkey",
"=",
"833",
",",
"sheep",
"=",
"594",
",",
"deer",
"=",
"65",
",",
"boar",
"=",
"48",
",",
"iron_boar",
"=",
"810",
",",
"ostrich",
"=",
"1026",
",",
"javelina",
"=",
"822",
",",
"crocodile",
"=",
"1031",
",",
"rhinoceros",
"=",
"1139",
",",
"wolf",
"=",
"126",
",",
"jaguar",
"=",
"812",
",",
"hawk",
"=",
"96",
",",
"macaw",
"=",
"816",
",",
"shore_fish",
"=",
"69",
",",
"fish_1",
"=",
"455",
",",
"fish_2",
"=",
"456",
",",
"fish_4",
"=",
"458",
",",
"fish_3",
"=",
"457",
",",
"marlin_1",
"=",
"450",
",",
"marlin_2",
"=",
"451",
",",
"dolphin",
"=",
"452",
",",
"cactus",
"=",
"709",
",",
"berry_bush",
"=",
"59",
",",
"stone_pile",
"=",
"102",
",",
"gold_pile",
"=",
"66",
",",
"forest_tree",
"=",
"350",
",",
"forest_tree_2",
"=",
"411",
",",
"snow_pine_tree",
"=",
"413",
",",
"straggler_tree",
"=",
"349",
",",
"tc_1",
"=",
"109",
",",
"tc_2",
"=",
"618",
",",
"tc_3",
"=",
"619",
",",
"tc_4",
"=",
"620",
",",
"castle",
"=",
"70",
",",
"palisade_wall",
"=",
"72",
",",
"stone_wall",
"=",
"117",
",",
"stone_gate_1",
"=",
"64",
",",
"stone_gate_2",
"=",
"81",
",",
"stone_gate_3",
"=",
"88",
",",
"stone_gate_4",
"=",
"95",
",",
"palisade_gate_1",
"=",
"662",
",",
"palisade_gate_2",
"=",
"666",
",",
"palisade_gate_3",
"=",
"670",
",",
"palisade_gate_4",
"=",
"674",
",",
"fortified_wall",
"=",
"155",
",",
"cliff_1",
"=",
"264",
",",
"cliff_2",
"=",
"265",
",",
"cliff_3",
"=",
"266",
",",
"cliff_4",
"=",
"267",
",",
"cliff_5",
"=",
"268",
",",
"cliff_6",
"=",
"269",
",",
"cliff_7",
"=",
"270",
",",
"cliff_8",
"=",
"271",
",",
"cliff_9",
"=",
"272",
",",
"cliff_10",
"=",
"273",
",",
"outpost",
"=",
"598",
",",
"shipwreck",
"=",
"722",
",",
"map_revealer",
"=",
"837",
",",
"default",
"=",
"Pass",
")"
] |
Object Enumeration.
Should export the whole list from the game for the best accuracy.
|
[
"Object",
"Enumeration",
"."
] |
python
|
train
| 21.202703 |
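ObjectEnum wraps construct's Enum around whatever integer subcon ctx supplies, so parsing a raw object-type id yields a readable name. A minimal sketch with a plain construct Enum (the 16-bit little-endian width is an assumption for illustration):

from construct import Enum, Int16ul

ObjectType = Enum(Int16ul, sheep=594, deer=65, boar=48)
print(ObjectType.parse(b"\x52\x02"))  # 594 little-endian -> 'sheep'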
elastic/elasticsearch-dsl-py
|
elasticsearch_dsl/mapping.py
|
https://github.com/elastic/elasticsearch-dsl-py/blob/874b52472fc47b601de0e5fa0e4300e21aff0085/elasticsearch_dsl/mapping.py#L40-L51
|
def _collect_fields(self):
""" Iterate over all Field objects within, including multi fields. """
for f in itervalues(self.properties.to_dict()):
yield f
# multi fields
if hasattr(f, 'fields'):
for inner_f in itervalues(f.fields.to_dict()):
yield inner_f
# nested and inner objects
if hasattr(f, '_collect_fields'):
for inner_f in f._collect_fields():
yield inner_f
|
[
"def",
"_collect_fields",
"(",
"self",
")",
":",
"for",
"f",
"in",
"itervalues",
"(",
"self",
".",
"properties",
".",
"to_dict",
"(",
")",
")",
":",
"yield",
"f",
"# multi fields",
"if",
"hasattr",
"(",
"f",
",",
"'fields'",
")",
":",
"for",
"inner_f",
"in",
"itervalues",
"(",
"f",
".",
"fields",
".",
"to_dict",
"(",
")",
")",
":",
"yield",
"inner_f",
"# nested and inner objects",
"if",
"hasattr",
"(",
"f",
",",
"'_collect_fields'",
")",
":",
"for",
"inner_f",
"in",
"f",
".",
"_collect_fields",
"(",
")",
":",
"yield",
"inner_f"
] |
Iterate over all Field objects within, including multi fields.
|
[
"Iterate",
"over",
"all",
"Field",
"objects",
"within",
"including",
"multi",
"fields",
"."
] |
python
|
train
| 41.833333 |
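The recursive-generator traversal above generalizes beyond elasticsearch-dsl; a dependency-free sketch of the same walk over plain mapping dicts:

def collect_fields(properties):
    # Yield every field, recursing into multi fields ("fields")
    # and inner objects ("properties").
    for f in properties.values():
        yield f
        for inner in f.get("fields", {}).values():
            yield inner
        if "properties" in f:
            yield from collect_fields(f["properties"])

mapping = {
    "title": {"type": "text", "fields": {"raw": {"type": "keyword"}}},
    "author": {"type": "object", "properties": {"name": {"type": "text"}}},
}
print([f["type"] for f in collect_fields(mapping)])  # ['text', 'keyword', 'object', 'text']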
tensorflow/mesh
|
mesh_tensorflow/transformer/dataset.py
|
https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/transformer/dataset.py#L213-L250
|
def packed_parallel_tsv_dataset(filenames=gin.REQUIRED,
dataset_split=gin.REQUIRED,
batch_size=gin.REQUIRED,
sequence_length=gin.REQUIRED,
vocabulary=gin.REQUIRED,
append_eos=True,
shuffle_buffer_size=10000,
eos_id=1):
"""Reads parallel tab-separated text file. One example per line."""
dataset = tf.data.TextLineDataset(filenames)
if dataset_split == "train":
dataset = dataset.repeat()
dataset = dataset.shuffle(shuffle_buffer_size)
def _parse_fn(record): # pylint: disable=missing-docstring
tokens = tf.decode_csv(
record,
record_defaults=[""] * 2,
field_delim="\t",
use_quote_delim=False)
return {"inputs": tokens[0], "targets": tokens[1]}
def _encode_fn(features): # pylint: disable=missing-docstring
inputs_vocabulary = vocabulary[0] if isinstance(vocabulary,
tuple) else vocabulary
targets_vocabulary = vocabulary[1] if isinstance(vocabulary,
tuple) else vocabulary
inputs_enc = inputs_vocabulary.encode_tf(features["inputs"])
targets_enc = targets_vocabulary.encode_tf(features["targets"])
if append_eos:
inputs_enc = tf.concat([tf.to_int64(inputs_enc), [eos_id]], 0)
targets_enc = tf.concat([tf.to_int64(targets_enc), [eos_id]], 0)
return {"inputs": inputs_enc, "targets": targets_enc}
dataset = dataset.map(_parse_fn)
dataset = dataset.map(_encode_fn)
return pack_and_batch(dataset, batch_size, sequence_length)
|
[
"def",
"packed_parallel_tsv_dataset",
"(",
"filenames",
"=",
"gin",
".",
"REQUIRED",
",",
"dataset_split",
"=",
"gin",
".",
"REQUIRED",
",",
"batch_size",
"=",
"gin",
".",
"REQUIRED",
",",
"sequence_length",
"=",
"gin",
".",
"REQUIRED",
",",
"vocabulary",
"=",
"gin",
".",
"REQUIRED",
",",
"append_eos",
"=",
"True",
",",
"shuffle_buffer_size",
"=",
"10000",
",",
"eos_id",
"=",
"1",
")",
":",
"dataset",
"=",
"tf",
".",
"data",
".",
"TextLineDataset",
"(",
"filenames",
")",
"if",
"dataset_split",
"==",
"\"train\"",
":",
"dataset",
"=",
"dataset",
".",
"repeat",
"(",
")",
"dataset",
"=",
"dataset",
".",
"shuffle",
"(",
"shuffle_buffer_size",
")",
"def",
"_parse_fn",
"(",
"record",
")",
":",
"# pylint: disable=missing-docstring",
"tokens",
"=",
"tf",
".",
"decode_csv",
"(",
"record",
",",
"record_defaults",
"=",
"[",
"\"\"",
"]",
"*",
"2",
",",
"field_delim",
"=",
"\"\\t\"",
",",
"use_quote_delim",
"=",
"False",
")",
"return",
"{",
"\"inputs\"",
":",
"tokens",
"[",
"0",
"]",
",",
"\"targets\"",
":",
"tokens",
"[",
"1",
"]",
"}",
"def",
"_encode_fn",
"(",
"features",
")",
":",
"# pylint: disable=missing-docstring",
"inputs_vocabulary",
"=",
"vocabulary",
"[",
"0",
"]",
"if",
"isinstance",
"(",
"vocabulary",
",",
"tuple",
")",
"else",
"vocabulary",
"targets_vocabulary",
"=",
"vocabulary",
"[",
"1",
"]",
"if",
"isinstance",
"(",
"vocabulary",
",",
"tuple",
")",
"else",
"vocabulary",
"inputs_enc",
"=",
"inputs_vocabulary",
".",
"encode_tf",
"(",
"features",
"[",
"\"inputs\"",
"]",
")",
"targets_enc",
"=",
"targets_vocabulary",
".",
"encode_tf",
"(",
"features",
"[",
"\"targets\"",
"]",
")",
"if",
"append_eos",
":",
"inputs_enc",
"=",
"tf",
".",
"concat",
"(",
"[",
"tf",
".",
"to_int64",
"(",
"inputs_enc",
")",
",",
"[",
"eos_id",
"]",
"]",
",",
"0",
")",
"targets_enc",
"=",
"tf",
".",
"concat",
"(",
"[",
"tf",
".",
"to_int64",
"(",
"targets_enc",
")",
",",
"[",
"eos_id",
"]",
"]",
",",
"0",
")",
"return",
"{",
"\"inputs\"",
":",
"inputs_enc",
",",
"\"targets\"",
":",
"targets_enc",
"}",
"dataset",
"=",
"dataset",
".",
"map",
"(",
"_parse_fn",
")",
"dataset",
"=",
"dataset",
".",
"map",
"(",
"_encode_fn",
")",
"return",
"pack_and_batch",
"(",
"dataset",
",",
"batch_size",
",",
"sequence_length",
")"
] |
Reads parallel tab-separated text file. One example per line.
|
[
"Reads",
"parallel",
"tab",
"-",
"separated",
"text",
"file",
".",
"One",
"example",
"per",
"line",
"."
] |
python
|
train
| 45.236842 |
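Per line, _parse_fn and _encode_fn amount to a tab split plus token encoding with an optional EOS id; a TensorFlow-free sketch with a toy character vocabulary (the real code uses vocabulary.encode_tf):

EOS_ID = 1

def encode(text):
    # toy stand-in for vocabulary.encode_tf: one id per character
    return [ord(c) for c in text]

line = "hello\tworld"
inputs, targets = line.split("\t")            # _parse_fn
features = {
    "inputs": encode(inputs) + [EOS_ID],      # _encode_fn with append_eos=True
    "targets": encode(targets) + [EOS_ID],
}
print(features)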
pysathq/pysat
|
pysat/solvers.py
|
https://github.com/pysathq/pysat/blob/522742e8f2d4c6ac50ecd9087f7a346206774c67/pysat/solvers.py#L1660-L1667
|
def get_core(self):
"""
Get an unsatisfiable core if the formula was previously
unsatisfied.
"""
if self.lingeling and self.status == False:
return pysolvers.lingeling_core(self.lingeling, self.prev_assumps)
|
[
"def",
"get_core",
"(",
"self",
")",
":",
"if",
"self",
".",
"lingeling",
"and",
"self",
".",
"status",
"==",
"False",
":",
"return",
"pysolvers",
".",
"lingeling_core",
"(",
"self",
".",
"lingeling",
",",
"self",
".",
"prev_assumps",
")"
] |
Get an unsatisfiable core if the formula was previously
unsatisfied.
|
[
"Get",
"an",
"unsatisfiable",
"core",
"if",
"the",
"formula",
"was",
"previously",
"unsatisfied",
"."
] |
python
|
train
| 32.625 |
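Typical usage, assuming the python-sat package with a Lingeling backend: solve under assumptions, and on UNSAT ask for a core (a subset of the failed assumptions):

from pysat.solvers import Lingeling

# (-1 or 2) and (not 2): assuming literal 1 makes the formula unsatisfiable.
with Lingeling(bootstrap_with=[[-1, 2], [-2]]) as solver:
    if not solver.solve(assumptions=[1]):
        print(solver.get_core())  # e.g. [1]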
konomae/lastpass-python
|
lastpass/parser.py
|
https://github.com/konomae/lastpass-python/blob/5063911b789868a1fd9db9922db82cdf156b938a/lastpass/parser.py#L216-L226
|
def decode_aes256_base64_auto(data, encryption_key):
"""Guesses AES cipher (EBC or CBD) from the length of the base64 encoded data."""
assert isinstance(data, bytes)
length = len(data)
if length == 0:
return b''
elif data[0] == b'!'[0]:
return decode_aes256_cbc_base64(data, encryption_key)
else:
return decode_aes256_ecb_base64(data, encryption_key)
|
[
"def",
"decode_aes256_base64_auto",
"(",
"data",
",",
"encryption_key",
")",
":",
"assert",
"isinstance",
"(",
"data",
",",
"bytes",
")",
"length",
"=",
"len",
"(",
"data",
")",
"if",
"length",
"==",
"0",
":",
"return",
"b''",
"elif",
"data",
"[",
"0",
"]",
"==",
"b'!'",
"[",
"0",
"]",
":",
"return",
"decode_aes256_cbc_base64",
"(",
"data",
",",
"encryption_key",
")",
"else",
":",
"return",
"decode_aes256_ecb_base64",
"(",
"data",
",",
"encryption_key",
")"
] |
Guesses AES cipher (ECB or CBC) from the format of the base64 encoded data.
|
[
"Guesses",
"AES",
"cipher",
"(",
"EBC",
"or",
"CBD",
")",
"from",
"the",
"length",
"of",
"the",
"base64",
"encoded",
"data",
"."
] |
python
|
train
| 35.363636 |
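The "format" check is just the first byte: LastPass marks CBC blobs with a leading '!' (IV and ciphertext follow), and everything else falls through to ECB. A sketch of only that predicate:

def looks_like_cbc(data: bytes) -> bool:
    # the same first-byte test used above
    return len(data) > 0 and data[0] == b"!"[0]

assert looks_like_cbc(b"!iv-and-ciphertext")
assert not looks_like_cbc(b"ZWNiIGJsb2I=")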
raymondEhlers/pachyderm
|
pachyderm/generic_config.py
|
https://github.com/raymondEhlers/pachyderm/blob/aaa1d8374fd871246290ce76f1796f2f7582b01d/pachyderm/generic_config.py#L193-L234
|
def determine_selection_of_iterable_values_from_config(config: DictLike, possible_iterables: Mapping[str, Type[enum.Enum]]) -> Dict[str, List[Any]]:
""" Determine iterable values to use to create objects for a given configuration.
    All values of an iterable can be included by setting the value to ``True`` (not as a single-value list,
    but as the only value). Alternatively, an iterable can be disabled by setting the value to ``False``.
Args:
config: The dict-like configuration from ruamel.yaml which should be overridden.
possible_iterables: Key value pairs of names of enumerations and their values.
Returns:
dict: Iterables values that were requested in the config.
"""
iterables = {}
requested_iterables = config["iterables"]
for k, v in requested_iterables.items():
if k not in possible_iterables:
raise KeyError(k, f"Cannot find requested iterable in possible_iterables: {possible_iterables}")
logger.debug(f"k: {k}, v: {v}")
additional_iterable: List[Any] = []
enum_values = possible_iterables[k]
# Check for a string. This is wrong, and the user should be notified.
if isinstance(v, str):
raise TypeError(type(v), f"Passed string {v} when must be either bool or list")
# Allow the possibility to skip
if v is False:
continue
# Allow the possibility to including all possible values in the enum.
elif v is True:
additional_iterable = list(enum_values)
else:
if enum_values is None:
# The enumeration values are none, which means that we want to take
# all of the values defined in the config.
additional_iterable = list(v)
else:
# Otherwise, only take the requested values.
for el in v:
additional_iterable.append(enum_values[el])
# Store for later
iterables[k] = additional_iterable
return iterables
|
[
"def",
"determine_selection_of_iterable_values_from_config",
"(",
"config",
":",
"DictLike",
",",
"possible_iterables",
":",
"Mapping",
"[",
"str",
",",
"Type",
"[",
"enum",
".",
"Enum",
"]",
"]",
")",
"->",
"Dict",
"[",
"str",
",",
"List",
"[",
"Any",
"]",
"]",
":",
"iterables",
"=",
"{",
"}",
"requested_iterables",
"=",
"config",
"[",
"\"iterables\"",
"]",
"for",
"k",
",",
"v",
"in",
"requested_iterables",
".",
"items",
"(",
")",
":",
"if",
"k",
"not",
"in",
"possible_iterables",
":",
"raise",
"KeyError",
"(",
"k",
",",
"f\"Cannot find requested iterable in possible_iterables: {possible_iterables}\"",
")",
"logger",
".",
"debug",
"(",
"f\"k: {k}, v: {v}\"",
")",
"additional_iterable",
":",
"List",
"[",
"Any",
"]",
"=",
"[",
"]",
"enum_values",
"=",
"possible_iterables",
"[",
"k",
"]",
"# Check for a string. This is wrong, and the user should be notified.",
"if",
"isinstance",
"(",
"v",
",",
"str",
")",
":",
"raise",
"TypeError",
"(",
"type",
"(",
"v",
")",
",",
"f\"Passed string {v} when must be either bool or list\"",
")",
"# Allow the possibility to skip",
"if",
"v",
"is",
"False",
":",
"continue",
"# Allow the possibility to including all possible values in the enum.",
"elif",
"v",
"is",
"True",
":",
"additional_iterable",
"=",
"list",
"(",
"enum_values",
")",
"else",
":",
"if",
"enum_values",
"is",
"None",
":",
"# The enumeration values are none, which means that we want to take",
"# all of the values defined in the config.",
"additional_iterable",
"=",
"list",
"(",
"v",
")",
"else",
":",
"# Otherwise, only take the requested values.",
"for",
"el",
"in",
"v",
":",
"additional_iterable",
".",
"append",
"(",
"enum_values",
"[",
"el",
"]",
")",
"# Store for later",
"iterables",
"[",
"k",
"]",
"=",
"additional_iterable",
"return",
"iterables"
] |
Determine iterable values to use to create objects for a given configuration.
All values of an iterable can be included by setting the value to ``True`` (not as a single-value list,
but as the only value). Alternatively, an iterable can be disabled by setting the value to ``False``.
Args:
config: The dict-like configuration from ruamel.yaml which should be overridden.
possible_iterables: Key value pairs of names of enumerations and their values.
Returns:
dict: Iterables values that were requested in the config.
|
[
"Determine",
"iterable",
"values",
"to",
"use",
"to",
"create",
"objects",
"for",
"a",
"given",
"configuration",
"."
] |
python
|
train
| 47.880952 |
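A small worked example of the selection rules: True expands to every enum member, False drops the key, and a list picks members by name (the enum and config are illustrative):

import enum
from pachyderm.generic_config import determine_selection_of_iterable_values_from_config

class Color(enum.Enum):
    red = 1
    blue = 2

config = {"iterables": {"color": ["red"], "shade": True, "alpha": False}}
possible = {"color": Color, "shade": Color, "alpha": Color}
print(determine_selection_of_iterable_values_from_config(config, possible))
# {'color': [<Color.red: 1>], 'shade': [<Color.red: 1>, <Color.blue: 2>]}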
dslackw/slpkg
|
slpkg/slack/patches.py
|
https://github.com/dslackw/slpkg/blob/dd2e08a80e944d337d157b992167ba631a4343de/slpkg/slack/patches.py#L135-L157
|
def store(self):
"""
Store and return packages for upgrading
"""
data = repo_data(self.PACKAGES_TXT, "slack", self.flag)
black = BlackList().packages(pkgs=data[0], repo="slack")
for name, loc, comp, uncomp in zip(data[0], data[1], data[2], data[3]):
status(0.0003)
repo_pkg_name = split_package(name)[0]
if (not os.path.isfile(self.meta.pkg_path + name[:-4]) and
repo_pkg_name not in black and
repo_pkg_name not in self.skip):
self.dwn_links.append("{0}{1}/{2}".format(mirrors("", ""),
loc, name))
self.comp_sum.append(comp)
self.uncomp_sum.append(uncomp)
self.upgrade_all.append(name)
self.count_upg += 1
if not find_package(repo_pkg_name + self.meta.sp,
self.meta.pkg_path):
self.count_added += 1
self.count_upg -= 1
return self.count_upg
|
[
"def",
"store",
"(",
"self",
")",
":",
"data",
"=",
"repo_data",
"(",
"self",
".",
"PACKAGES_TXT",
",",
"\"slack\"",
",",
"self",
".",
"flag",
")",
"black",
"=",
"BlackList",
"(",
")",
".",
"packages",
"(",
"pkgs",
"=",
"data",
"[",
"0",
"]",
",",
"repo",
"=",
"\"slack\"",
")",
"for",
"name",
",",
"loc",
",",
"comp",
",",
"uncomp",
"in",
"zip",
"(",
"data",
"[",
"0",
"]",
",",
"data",
"[",
"1",
"]",
",",
"data",
"[",
"2",
"]",
",",
"data",
"[",
"3",
"]",
")",
":",
"status",
"(",
"0.0003",
")",
"repo_pkg_name",
"=",
"split_package",
"(",
"name",
")",
"[",
"0",
"]",
"if",
"(",
"not",
"os",
".",
"path",
".",
"isfile",
"(",
"self",
".",
"meta",
".",
"pkg_path",
"+",
"name",
"[",
":",
"-",
"4",
"]",
")",
"and",
"repo_pkg_name",
"not",
"in",
"black",
"and",
"repo_pkg_name",
"not",
"in",
"self",
".",
"skip",
")",
":",
"self",
".",
"dwn_links",
".",
"append",
"(",
"\"{0}{1}/{2}\"",
".",
"format",
"(",
"mirrors",
"(",
"\"\"",
",",
"\"\"",
")",
",",
"loc",
",",
"name",
")",
")",
"self",
".",
"comp_sum",
".",
"append",
"(",
"comp",
")",
"self",
".",
"uncomp_sum",
".",
"append",
"(",
"uncomp",
")",
"self",
".",
"upgrade_all",
".",
"append",
"(",
"name",
")",
"self",
".",
"count_upg",
"+=",
"1",
"if",
"not",
"find_package",
"(",
"repo_pkg_name",
"+",
"self",
".",
"meta",
".",
"sp",
",",
"self",
".",
"meta",
".",
"pkg_path",
")",
":",
"self",
".",
"count_added",
"+=",
"1",
"self",
".",
"count_upg",
"-=",
"1",
"return",
"self",
".",
"count_upg"
] |
Store and return packages for upgrading
|
[
"Store",
"and",
"return",
"packages",
"for",
"upgrading"
] |
python
|
train
| 46.956522 |
dls-controls/annotypes
|
annotypes/_fake_typing.py
|
https://github.com/dls-controls/annotypes/blob/31ab68a0367bb70ebd9898e8b9fa9405423465bd/annotypes/_fake_typing.py#L135-L140
|
def _gorg(a):
"""Return the farthest origin of a generic class (internal helper)."""
assert isinstance(a, GenericMeta)
while a.__origin__ is not None:
a = a.__origin__
return a
|
[
"def",
"_gorg",
"(",
"a",
")",
":",
"assert",
"isinstance",
"(",
"a",
",",
"GenericMeta",
")",
"while",
"a",
".",
"__origin__",
"is",
"not",
"None",
":",
"a",
"=",
"a",
".",
"__origin__",
"return",
"a"
] |
Return the farthest origin of a generic class (internal helper).
|
[
"Return",
"the",
"farthest",
"origin",
"of",
"a",
"generic",
"class",
"(",
"internal",
"helper",
")",
"."
] |
python
|
train
| 32.5 |
jut-io/jut-python-tools
|
jut/api/data_engine.py
|
https://github.com/jut-io/jut-python-tools/blob/65574d23f51a7bbced9bb25010d02da5ca5d906f/jut/api/data_engine.py#L92-L102
|
def get_import_data_url(deployment_name,
token_manager=None,
app_url=defaults.APP_URL):
"""
return the import data url
"""
return get_data_url(deployment_name,
endpoint_type='http-import',
app_url=app_url,
token_manager=token_manager)
|
[
"def",
"get_import_data_url",
"(",
"deployment_name",
",",
"token_manager",
"=",
"None",
",",
"app_url",
"=",
"defaults",
".",
"APP_URL",
")",
":",
"return",
"get_data_url",
"(",
"deployment_name",
",",
"endpoint_type",
"=",
"'http-import'",
",",
"app_url",
"=",
"app_url",
",",
"token_manager",
"=",
"token_manager",
")"
] |
return the import data url
|
[
"return",
"the",
"import",
"data",
"url"
] |
python
|
train
| 32.818182 |
Riminder/python-riminder-api
|
riminder/riminder.py
|
https://github.com/Riminder/python-riminder-api/blob/01279f0ece08cf3d1dd45f76de6d9edf7fafec90/riminder/riminder.py#L61-L64
|
def patch(self, resource_endpoint, data={}):
"""Don't use it."""
url = self._create_request_url(resource_endpoint)
return req.patch(url, headers=self.auth_header, json=data)
|
[
"def",
"patch",
"(",
"self",
",",
"resource_endpoint",
",",
"data",
"=",
"{",
"}",
")",
":",
"url",
"=",
"self",
".",
"_create_request_url",
"(",
"resource_endpoint",
")",
"return",
"req",
".",
"patch",
"(",
"url",
",",
"headers",
"=",
"self",
".",
"auth_header",
",",
"json",
"=",
"data",
")"
] |
Don't use it.
|
[
"Don",
"t",
"use",
"it",
"."
] |
python
|
train
| 48.5 |
jxtech/wechatpy
|
wechatpy/client/api/customservice.py
|
https://github.com/jxtech/wechatpy/blob/4df0da795618c0895a10f1c2cde9e9d5c0a93aaa/wechatpy/client/api/customservice.py#L182-L196
|
def get_session_list(self, account):
"""
Get the list of sessions for a customer service account.
For details, see
http://mp.weixin.qq.com/wiki/2/6c20f3e323bdf5986cfcb33cbd3b829a.html
:param account: full customer service account
:return: the account's session list
"""
res = self._get(
'https://api.weixin.qq.com/customservice/kfsession/getsessionlist',
params={'kf_account': account},
result_processor=lambda x: x['sessionlist']
)
return res
|
[
"def",
"get_session_list",
"(",
"self",
",",
"account",
")",
":",
"res",
"=",
"self",
".",
"_get",
"(",
"'https://api.weixin.qq.com/customservice/kfsession/getsessionlist'",
",",
"params",
"=",
"{",
"'kf_account'",
":",
"account",
"}",
",",
"result_processor",
"=",
"lambda",
"x",
":",
"x",
"[",
"'sessionlist'",
"]",
")",
"return",
"res"
] |
Get the list of sessions for a customer service account.
For details, see
http://mp.weixin.qq.com/wiki/2/6c20f3e323bdf5986cfcb33cbd3b829a.html
:param account: full customer service account
:return: the account's session list
|
[
"获取客服的会话列表",
"详情请参考",
"http",
":",
"//",
"mp",
".",
"weixin",
".",
"qq",
".",
"com",
"/",
"wiki",
"/",
"2",
"/",
"6c20f3e323bdf5986cfcb33cbd3b829a",
".",
"html"
] |
python
|
train
| 29.733333 |
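Typical call, assuming an authenticated WeChatClient (credentials and the account name are placeholders):

from wechatpy import WeChatClient

client = WeChatClient("appid", "secret")
for session in client.customservice.get_session_list("kf2001@test"):
    print(session)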
Opentrons/opentrons
|
api/src/opentrons/legacy_api/instruments/pipette.py
|
https://github.com/Opentrons/opentrons/blob/a7c15cc2636ecb64ab56c7edc1d8a57163aaeadf/api/src/opentrons/legacy_api/instruments/pipette.py#L1256-L1371
|
def transfer(self, volume, source, dest, **kwargs):
"""
Transfer will move a volume of liquid from a source location(s)
to a dest location(s). It is a higher-level command, incorporating
other :any:`Pipette` commands, like :any:`aspirate` and
:any:`dispense`, designed to make protocol writing easier at the
cost of specificity.
Parameters
----------
volumes : number, list, or tuple
The amount of volume to remove from each `sources` :any:`Placeable`
and add to each `targets` :any:`Placeable`. If `volumes` is a list,
each volume will be used for the sources/targets at the
matching index. If `volumes` is a tuple with two elements,
like `(20, 100)`, then a list of volumes will be generated with
a linear gradient between the two volumes in the tuple.
source : Placeable or list
Single :any:`Placeable` or list of :any:`Placeable`s, from where
liquid will be :any:`aspirate`ed from.
dest : Placeable or list
Single :any:`Placeable` or list of :any:`Placeable`s, where
liquid will be :any:`dispense`ed to.
new_tip : str
The number of clean tips this transfer command will use. If
'never', no tips will be picked up nor dropped. If 'once', a
single tip will be used for all commands. If 'always', a new tip
will be used for each transfer. Default is 'once'.
trash : boolean
If `False` (default behavior) tips will be returned to their
tip rack. If `True` and a trash container has been attached
to this `Pipette`, then the tip will be sent to the trash
container.
touch_tip : boolean
If `True`, a :any:`touch_tip` will occur following each
:any:`aspirate` and :any:`dispense`. If set to `False` (default),
no :any:`touch_tip` will occur.
blow_out : boolean
If `True`, a :any:`blow_out` will occur following each
:any:`dispense`, but only if the pipette has no liquid left in it.
If set to `False` (default), no :any:`blow_out` will occur.
mix_before : tuple
Specify the number of repetitions and volume to mix; a :any:`mix`
will precede each :any:`aspirate` during the transfer and dispense.
The tuple's values are interpreted as (repetitions, volume).
mix_after : tuple
Specify the number of repetitions and volume to mix; a :any:`mix`
will follow each :any:`dispense` during the transfer or
consolidate. The tuple's values are interpreted as
(repetitions, volume).
carryover : boolean
If `True` (default), any `volumes` that exceed the maximum volume
of this `Pipette` will be split into multiple smaller volumes.
repeat : boolean
(Only applicable to :any:`distribute` and :any:`consolidate`.) If
`True` (default), sequential :any:`aspirate` volumes will be
combined into one tip for the purpose of saving time. If `False`,
all volumes will be transferred separately.
gradient : lambda
Function for calculating the curve used for gradient volumes.
When `volumes` is a tuple of length 2, its values are used
to create a list of gradient volumes. The default curve for
this gradient is linear (lambda x: x), however a method can
be passed with the `gradient` keyword argument to create a
custom curve.
Returns
-------
This instance of :class:`Pipette`.
Examples
--------
...
>>> from opentrons import instruments, labware, robot # doctest: +SKIP
>>> robot.reset() # doctest: +SKIP
>>> plate = labware.load('96-flat', '5') # doctest: +SKIP
>>> p300 = instruments.P300_Single(mount='right') # doctest: +SKIP
>>> p300.transfer(50, plate[0], plate[1]) # doctest: +SKIP
"""
# Note: currently it varies whether the pipette should have a tip on
# or not depending on the parameters for this call, so we cannot
# create a very reliable assertion on tip status
kwargs['mode'] = kwargs.get('mode', 'transfer')
touch_tip = kwargs.get('touch_tip', False)
if touch_tip is True:
touch_tip = -1
kwargs['touch_tip'] = touch_tip
tip_options = {
'once': 1,
'never': 0,
'always': float('inf')
}
tip_option = kwargs.get('new_tip', 'once')
tips = tip_options.get(tip_option)
if tips is None:
raise ValueError('Unknown "new_tip" option: {}'.format(tip_option))
plan = self._create_transfer_plan(volume, source, dest, **kwargs)
self._run_transfer_plan(tips, plan, **kwargs)
return self
|
[
"def",
"transfer",
"(",
"self",
",",
"volume",
",",
"source",
",",
"dest",
",",
"*",
"*",
"kwargs",
")",
":",
"# Note: currently it varies whether the pipette should have a tip on",
"# or not depending on the parameters for this call, so we cannot",
"# create a very reliable assertion on tip status",
"kwargs",
"[",
"'mode'",
"]",
"=",
"kwargs",
".",
"get",
"(",
"'mode'",
",",
"'transfer'",
")",
"touch_tip",
"=",
"kwargs",
".",
"get",
"(",
"'touch_tip'",
",",
"False",
")",
"if",
"touch_tip",
"is",
"True",
":",
"touch_tip",
"=",
"-",
"1",
"kwargs",
"[",
"'touch_tip'",
"]",
"=",
"touch_tip",
"tip_options",
"=",
"{",
"'once'",
":",
"1",
",",
"'never'",
":",
"0",
",",
"'always'",
":",
"float",
"(",
"'inf'",
")",
"}",
"tip_option",
"=",
"kwargs",
".",
"get",
"(",
"'new_tip'",
",",
"'once'",
")",
"tips",
"=",
"tip_options",
".",
"get",
"(",
"tip_option",
")",
"if",
"tips",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"'Unknown \"new_tip\" option: {}'",
".",
"format",
"(",
"tip_option",
")",
")",
"plan",
"=",
"self",
".",
"_create_transfer_plan",
"(",
"volume",
",",
"source",
",",
"dest",
",",
"*",
"*",
"kwargs",
")",
"self",
".",
"_run_transfer_plan",
"(",
"tips",
",",
"plan",
",",
"*",
"*",
"kwargs",
")",
"return",
"self"
] |
Transfer will move a volume of liquid from a source location(s)
to a dest location(s). It is a higher-level command, incorporating
other :any:`Pipette` commands, like :any:`aspirate` and
:any:`dispense`, designed to make protocol writing easier at the
cost of specificity.
Parameters
----------
volumes : number, list, or tuple
The amount of volume to remove from each `sources` :any:`Placeable`
and add to each `targets` :any:`Placeable`. If `volumes` is a list,
each volume will be used for the sources/targets at the
matching index. If `volumes` is a tuple with two elements,
like `(20, 100)`, then a list of volumes will be generated with
a linear gradient between the two volumes in the tuple.
source : Placeable or list
Single :any:`Placeable` or list of :any:`Placeable`s, from where
liquid will be :any:`aspirate`ed from.
dest : Placeable or list
Single :any:`Placeable` or list of :any:`Placeable`s, where
liquid will be :any:`dispense`ed to.
new_tip : str
The number of clean tips this transfer command will use. If
'never', no tips will be picked up nor dropped. If 'once', a
single tip will be used for all commands. If 'always', a new tip
will be used for each transfer. Default is 'once'.
trash : boolean
If `False` (default behavior) tips will be returned to their
tip rack. If `True` and a trash container has been attached
to this `Pipette`, then the tip will be sent to the trash
container.
touch_tip : boolean
If `True`, a :any:`touch_tip` will occur following each
:any:`aspirate` and :any:`dispense`. If set to `False` (default),
no :any:`touch_tip` will occur.
blow_out : boolean
If `True`, a :any:`blow_out` will occur following each
:any:`dispense`, but only if the pipette has no liquid left in it.
If set to `False` (default), no :any:`blow_out` will occur.
mix_before : tuple
Specify the number of repetitions and volume to mix; a :any:`mix`
will precede each :any:`aspirate` during the transfer and dispense.
The tuple's values are interpreted as (repetitions, volume).
mix_after : tuple
Specify the number of repetitions and volume to mix; a :any:`mix`
will follow each :any:`dispense` during the transfer or
consolidate. The tuple's values are interpreted as
(repetitions, volume).
carryover : boolean
If `True` (default), any `volumes` that exceed the maximum volume
of this `Pipette` will be split into multiple smaller volumes.
repeat : boolean
(Only applicable to :any:`distribute` and :any:`consolidate`.) If
`True` (default), sequential :any:`aspirate` volumes will be
combined into one tip for the purpose of saving time. If `False`,
all volumes will be transferred separately.
gradient : lambda
Function for calculating the curve used for gradient volumes.
When `volumes` is a tuple of length 2, its values are used
to create a list of gradient volumes. The default curve for
this gradient is linear (lambda x: x), however a method can
be passed with the `gradient` keyword argument to create a
custom curve.
Returns
-------
This instance of :class:`Pipette`.
Examples
--------
...
>>> from opentrons import instruments, labware, robot # doctest: +SKIP
>>> robot.reset() # doctest: +SKIP
>>> plate = labware.load('96-flat', '5') # doctest: +SKIP
>>> p300 = instruments.P300_Single(mount='right') # doctest: +SKIP
>>> p300.transfer(50, plate[0], plate[1]) # doctest: +SKIP
|
[
"Transfer",
"will",
"move",
"a",
"volume",
"of",
"liquid",
"from",
"a",
"source",
"location",
"(",
"s",
")",
"to",
"a",
"dest",
"location",
"(",
"s",
")",
".",
"It",
"is",
"a",
"higher",
"-",
"level",
"command",
"incorporating",
"other",
":",
"any",
":",
"Pipette",
"commands",
"like",
":",
"any",
":",
"aspirate",
"and",
":",
"any",
":",
"dispense",
"designed",
"to",
"make",
"protocol",
"writing",
"easier",
"at",
"the",
"cost",
"of",
"specificity",
"."
] |
python
|
train
| 42.405172 |
NuGrid/NuGridPy
|
nugridpy/astronomy.py
|
https://github.com/NuGrid/NuGridPy/blob/eee8047446e398be77362d82c1d8b3310054fab0/nugridpy/astronomy.py#L384-L399
|
def Nasv(macs,T):
'''
Returns
-------
Na*<sigma v>
for MACS [mb] at T [K].
'''
Na = avogadro_constant
k = boltzmann_constant
vtherm=(2.*k*T/mass_H_atom)**0.5
s = macs*1.e-27
Nasv = s*vtherm*Na
return Nasv
|
[
"def",
"Nasv",
"(",
"macs",
",",
"T",
")",
":",
"Na",
"=",
"avogadro_constant",
"k",
"=",
"boltzmann_constant",
"vtherm",
"=",
"(",
"2.",
"*",
"k",
"*",
"T",
"/",
"mass_H_atom",
")",
"**",
"0.5",
"s",
"=",
"macs",
"*",
"1.e-27",
"Nasv",
"=",
"s",
"*",
"vtherm",
"*",
"Na",
"return",
"Nasv"
] |
Returns
-------
Na*<sigma v>
for MACS [mb] at T [K].
|
[
"Returns",
"-------",
"Na",
"*",
"<sigma",
"v",
">",
"for",
"MACS",
"[",
"mb",
"]",
"at",
"T",
"[",
"K",
"]",
"."
] |
python
|
train
| 15.9375 |
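In formula form, what Nasv computes, with the MACS converted from millibarns to cm^2 and a Maxwell-Boltzmann thermal velocity at hydrogen mass (LaTeX):

N_A \langle \sigma v \rangle = N_A \,\sigma\, v_T,
\qquad v_T = \sqrt{\frac{2 k_B T}{m_\mathrm{H}}},
\qquad \sigma = \mathrm{MACS} \times 10^{-27}\,\mathrm{cm^2}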
mitsei/dlkit
|
dlkit/json_/osid/objects.py
|
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/osid/objects.py#L859-L912
|
def _is_valid_input(self, inpt, metadata, array):
"""The _is_valid_input method takes three arguments:
the user input to be checked, the associated osid.Metadata object
containing validation requirements and a boolean value indicating
whether this is an array value.
"""
# pylint: disable=too-many-branches,no-self-use
# Please redesign, and move to utility module
syntax = metadata.get_syntax()
# First check if this is a required data element
if metadata.is_required and not inpt:
return False
valid = True # Innocent until proven guilty
# Recursively run through all the elements of an array
if array:
if len(inpt) < metadata.get_minimum_elements():
valid = False
elif len(inpt) > metadata.get_maximum_elements():
valid = False
else:
for element in inpt:
valid = (valid and self._is_valid_input(element, metadata, False))
# Run through all the possible syntax types
elif syntax == 'ID':
valid = self._is_valid_id(inpt)
elif syntax == 'TYPE':
valid = self._is_valid_type(inpt)
elif syntax == 'BOOLEAN':
valid = self._is_valid_boolean(inpt)
elif syntax == 'STRING':
valid = self._is_valid_string(inpt, metadata)
elif syntax == 'INTEGER':
valid = self._is_valid_integer(inpt, metadata)
elif syntax == 'DECIMAL':
valid = self._is_valid_decimal(inpt, metadata)
elif syntax == 'DATETIME':
valid = self._is_valid_date_time(inpt, metadata)
elif syntax == 'DURATION':
valid = self._is_valid_duration(inpt, metadata)
elif syntax == 'CARDINAL':
valid = self._is_valid_cardinal(inpt, metadata)
elif syntax == 'INTEGER':
valid = self._is_valid_integer(inpt, metadata)
elif syntax == 'DECIMAL':
valid = self._is_valid_decimal(inpt, metadata)
else:
raise errors.OperationFailed('no validation function available for ' + syntax)
return valid
|
[
"def",
"_is_valid_input",
"(",
"self",
",",
"inpt",
",",
"metadata",
",",
"array",
")",
":",
"# pylint: disable=too-many-branches,no-self-use",
"# Please redesign, and move to utility module",
"syntax",
"=",
"metadata",
".",
"get_syntax",
"(",
")",
"# First check if this is a required data element",
"if",
"metadata",
".",
"is_required",
"and",
"not",
"inpt",
":",
"return",
"False",
"valid",
"=",
"True",
"# Innocent until proven guilty",
"# Recursively run through all the elements of an array",
"if",
"array",
":",
"if",
"len",
"(",
"inpt",
")",
"<",
"metadata",
".",
"get_minimum_elements",
"(",
")",
":",
"valid",
"=",
"False",
"elif",
"len",
"(",
"inpt",
")",
">",
"metadata",
".",
"get_maximum_elements",
"(",
")",
":",
"valid",
"=",
"False",
"else",
":",
"for",
"element",
"in",
"inpt",
":",
"valid",
"=",
"(",
"valid",
"and",
"self",
".",
"_is_valid_input",
"(",
"element",
",",
"metadata",
",",
"False",
")",
")",
"# Run through all the possible syntax types",
"elif",
"syntax",
"==",
"'ID'",
":",
"valid",
"=",
"self",
".",
"_is_valid_id",
"(",
"inpt",
")",
"elif",
"syntax",
"==",
"'TYPE'",
":",
"valid",
"=",
"self",
".",
"_is_valid_type",
"(",
"inpt",
")",
"elif",
"syntax",
"==",
"'BOOLEAN'",
":",
"valid",
"=",
"self",
".",
"_is_valid_boolean",
"(",
"inpt",
")",
"elif",
"syntax",
"==",
"'STRING'",
":",
"valid",
"=",
"self",
".",
"_is_valid_string",
"(",
"inpt",
",",
"metadata",
")",
"elif",
"syntax",
"==",
"'INTEGER'",
":",
"valid",
"=",
"self",
".",
"_is_valid_integer",
"(",
"inpt",
",",
"metadata",
")",
"elif",
"syntax",
"==",
"'DECIMAL'",
":",
"valid",
"=",
"self",
".",
"_is_valid_decimal",
"(",
"inpt",
",",
"metadata",
")",
"elif",
"syntax",
"==",
"'DATETIME'",
":",
"valid",
"=",
"self",
".",
"_is_valid_date_time",
"(",
"inpt",
",",
"metadata",
")",
"elif",
"syntax",
"==",
"'DURATION'",
":",
"valid",
"=",
"self",
".",
"_is_valid_duration",
"(",
"inpt",
",",
"metadata",
")",
"elif",
"syntax",
"==",
"'CARDINAL'",
":",
"valid",
"=",
"self",
".",
"_is_valid_cardinal",
"(",
"inpt",
",",
"metadata",
")",
"elif",
"syntax",
"==",
"'INTEGER'",
":",
"valid",
"=",
"self",
".",
"_is_valid_integer",
"(",
"inpt",
",",
"metadata",
")",
"elif",
"syntax",
"==",
"'DECIMAL'",
":",
"valid",
"=",
"self",
".",
"_is_valid_decimal",
"(",
"inpt",
",",
"metadata",
")",
"else",
":",
"raise",
"errors",
".",
"OperationFailed",
"(",
"'no validation function available for '",
"+",
"syntax",
")",
"return",
"valid"
] |
The _is_valid_input method takes three arguments:
the user input to be checked, the associated osid.Metadata object
containing validation requirements and a boolean value indicating
whether this is an array value.
|
[
"The",
"_is_valid_input",
"method",
"takes",
"three",
"arguments",
":"
] |
python
|
train
| 39.981481 |
fermiPy/fermipy
|
fermipy/gtanalysis.py
|
https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/gtanalysis.py#L1786-L1798
|
def set_parameter_scale(self, name, par, scale):
"""Update the scale of a parameter while keeping its value constant."""
name = self.roi.get_source_by_name(name).name
idx = self.like.par_index(name, par)
current_bounds = list(self.like.model[idx].getBounds())
current_scale = self.like.model[idx].getScale()
current_value = self.like[idx].getValue()
self.like[idx].setScale(scale)
self.like[idx].setValue(current_value * current_scale / scale)
self.like[idx].setBounds(current_bounds[0] * current_scale / scale,
current_bounds[1] * current_scale / scale)
self._sync_params(name)
|
[
"def",
"set_parameter_scale",
"(",
"self",
",",
"name",
",",
"par",
",",
"scale",
")",
":",
"name",
"=",
"self",
".",
"roi",
".",
"get_source_by_name",
"(",
"name",
")",
".",
"name",
"idx",
"=",
"self",
".",
"like",
".",
"par_index",
"(",
"name",
",",
"par",
")",
"current_bounds",
"=",
"list",
"(",
"self",
".",
"like",
".",
"model",
"[",
"idx",
"]",
".",
"getBounds",
"(",
")",
")",
"current_scale",
"=",
"self",
".",
"like",
".",
"model",
"[",
"idx",
"]",
".",
"getScale",
"(",
")",
"current_value",
"=",
"self",
".",
"like",
"[",
"idx",
"]",
".",
"getValue",
"(",
")",
"self",
".",
"like",
"[",
"idx",
"]",
".",
"setScale",
"(",
"scale",
")",
"self",
".",
"like",
"[",
"idx",
"]",
".",
"setValue",
"(",
"current_value",
"*",
"current_scale",
"/",
"scale",
")",
"self",
".",
"like",
"[",
"idx",
"]",
".",
"setBounds",
"(",
"current_bounds",
"[",
"0",
"]",
"*",
"current_scale",
"/",
"scale",
",",
"current_bounds",
"[",
"1",
"]",
"*",
"current_scale",
"/",
"scale",
")",
"self",
".",
"_sync_params",
"(",
"name",
")"
] |
Update the scale of a parameter while keeping its value constant.
|
[
"Update",
"the",
"scale",
"of",
"a",
"parameter",
"while",
"keeping",
"its",
"value",
"constant",
"."
] |
python
|
train
| 52.307692 |
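The bookkeeping preserves the physical quantity, which is value x scale (the same identity is applied to the bounds). A two-line check of the invariant:

import math

current_value, current_scale, new_scale = 2.0, 1e-9, 1e-10
new_value = current_value * current_scale / new_scale
assert math.isclose(new_value * new_scale, current_value * current_scale)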
CalebBell/thermo
|
thermo/utils.py
|
https://github.com/CalebBell/thermo/blob/3857ed023a3e64fd3039a32d53576c24990ef1c3/thermo/utils.py#L1263-L1309
|
def allclose_variable(a, b, limits, rtols=None, atols=None):
'''Returns True if two arrays are element-wise equal within several
different tolerances. Tolerance values are always positive, usually
very small. Based on numpy's allclose function.
Only atols or rtols needs to be specified; both are used if given.
Parameters
----------
a, b : array_like
Input arrays to compare.
limits : array_like
Fractions of elements allowed to not match to within each tolerance.
rtols : array_like
The relative tolerance parameters.
atols : float
The absolute tolerance parameters.
Returns
-------
allclose : bool
Returns True if the two arrays are equal within the given
tolerances; False otherwise.
Examples
--------
10 random similar variables, all of them matching to within 1E-5, allowing
up to half to match up to 1E-6.
>>> x = [2.7244322249597719e-08, 3.0105683900110473e-10, 2.7244124924802327e-08, 3.0105259397637556e-10, 2.7243929226310193e-08, 3.0104990272770901e-10, 2.7243666849384451e-08, 3.0104101821236015e-10, 2.7243433745917367e-08, 3.0103707421519949e-10]
>>> y = [2.7244328304561904e-08, 3.0105753470546008e-10, 2.724412872417824e-08, 3.0105303055834564e-10, 2.7243914341030203e-08, 3.0104819238021998e-10, 2.7243684057561379e-08, 3.0104299541023674e-10, 2.7243436694839306e-08, 3.010374130526363e-10]
>>> allclose_variable(x, y, limits=[.0, .5], rtols=[1E-5, 1E-6])
True
'''
l = float(len(a))
if rtols is None and atols is None:
raise Exception('Either absolute errors or relative errors must be supplied.')
elif rtols is None:
rtols = [0 for i in atols]
elif atols is None:
atols = [0 for i in rtols]
for atol, rtol, lim in zip(atols, rtols, limits):
matches = np.count_nonzero(np.isclose(a, b, rtol=rtol, atol=atol))
if 1-matches/l > lim:
return False
return True
|
[
"def",
"allclose_variable",
"(",
"a",
",",
"b",
",",
"limits",
",",
"rtols",
"=",
"None",
",",
"atols",
"=",
"None",
")",
":",
"l",
"=",
"float",
"(",
"len",
"(",
"a",
")",
")",
"if",
"rtols",
"is",
"None",
"and",
"atols",
"is",
"None",
":",
"raise",
"Exception",
"(",
"'Either absolute errors or relative errors must be supplied.'",
")",
"elif",
"rtols",
"is",
"None",
":",
"rtols",
"=",
"[",
"0",
"for",
"i",
"in",
"atols",
"]",
"elif",
"atols",
"is",
"None",
":",
"atols",
"=",
"[",
"0",
"for",
"i",
"in",
"rtols",
"]",
"for",
"atol",
",",
"rtol",
",",
"lim",
"in",
"zip",
"(",
"atols",
",",
"rtols",
",",
"limits",
")",
":",
"matches",
"=",
"np",
".",
"count_nonzero",
"(",
"np",
".",
"isclose",
"(",
"a",
",",
"b",
",",
"rtol",
"=",
"rtol",
",",
"atol",
"=",
"atol",
")",
")",
"if",
"1",
"-",
"matches",
"/",
"l",
">",
"lim",
":",
"return",
"False",
"return",
"True"
] |
Returns True if two arrays are element-wise equal within several
different tolerances. Tolerance values are always positive, usually
very small. Based on numpy's allclose function.
Only atols or rtols needs to be specified; both are used if given.
Parameters
----------
a, b : array_like
Input arrays to compare.
limits : array_like
Fractions of elements allowed to not match to within each tolerance.
rtols : array_like
The relative tolerance parameters.
atols : float
The absolute tolerance parameters.
Returns
-------
allclose : bool
Returns True if the two arrays are equal within the given
tolerances; False otherwise.
Examples
--------
10 random similar variables, all of them matching to within 1E-5, allowing
up to half to match up to 1E-6.
>>> x = [2.7244322249597719e-08, 3.0105683900110473e-10, 2.7244124924802327e-08, 3.0105259397637556e-10, 2.7243929226310193e-08, 3.0104990272770901e-10, 2.7243666849384451e-08, 3.0104101821236015e-10, 2.7243433745917367e-08, 3.0103707421519949e-10]
>>> y = [2.7244328304561904e-08, 3.0105753470546008e-10, 2.724412872417824e-08, 3.0105303055834564e-10, 2.7243914341030203e-08, 3.0104819238021998e-10, 2.7243684057561379e-08, 3.0104299541023674e-10, 2.7243436694839306e-08, 3.010374130526363e-10]
>>> allclose_variable(x, y, limits=[.0, .5], rtols=[1E-5, 1E-6])
True
|
[
"Returns",
"True",
"if",
"two",
"arrays",
"are",
"element",
"-",
"wise",
"equal",
"within",
"several",
"different",
"tolerances",
".",
"Tolerance",
"values",
"are",
"always",
"positive",
"usually",
"very",
"small",
".",
"Based",
"on",
"numpy",
"s",
"allclose",
"function",
".",
"Only",
"atols",
"or",
"rtols",
"needs",
"to",
"be",
"specified",
";",
"both",
"are",
"used",
"if",
"given",
".",
"Parameters",
"----------",
"a",
"b",
":",
"array_like",
"Input",
"arrays",
"to",
"compare",
".",
"limits",
":",
"array_like",
"Fractions",
"of",
"elements",
"allowed",
"to",
"not",
"match",
"to",
"within",
"each",
"tolerance",
".",
"rtols",
":",
"array_like",
"The",
"relative",
"tolerance",
"parameters",
".",
"atols",
":",
"float",
"The",
"absolute",
"tolerance",
"parameters",
"."
] |
python
|
valid
| 42.042553 |
assamite/creamas
|
creamas/core/environment.py
|
https://github.com/assamite/creamas/blob/54dc3e31c97a3f938e58272f8ab80b6bcafeff58/creamas/core/environment.py#L198-L215
|
def create_connections(self, connection_map):
'''Create agent connections from a given connection map.
:param dict connection_map:
A map of connections to be created. Dictionary where keys are
agent addresses and values are lists of (addr, attitude)-tuples
suitable for
:meth:`~creamas.core.agent.CreativeAgent.add_connections`.
Only connections for agents in this environment are made.
'''
agents = self.get_agents(addr=False)
rets = []
for a in agents:
if a.addr in connection_map:
r = a.add_connections(connection_map[a.addr])
rets.append(r)
return rets
|
[
"def",
"create_connections",
"(",
"self",
",",
"connection_map",
")",
":",
"agents",
"=",
"self",
".",
"get_agents",
"(",
"addr",
"=",
"False",
")",
"rets",
"=",
"[",
"]",
"for",
"a",
"in",
"agents",
":",
"if",
"a",
".",
"addr",
"in",
"connection_map",
":",
"r",
"=",
"a",
".",
"add_connections",
"(",
"connection_map",
"[",
"a",
".",
"addr",
"]",
")",
"rets",
".",
"append",
"(",
"r",
")",
"return",
"rets"
] |
Create agent connections from a given connection map.
:param dict connection_map:
A map of connections to be created. Dictionary where keys are
agent addresses and values are lists of (addr, attitude)-tuples
suitable for
:meth:`~creamas.core.agent.CreativeAgent.add_connections`.
Only connections for agents in this environment are made.
|
[
"Create",
"agent",
"connections",
"from",
"a",
"given",
"connection",
"map",
"."
] |
python
|
train
| 38.722222 |
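The expected shape of connection_map, with illustrative agent addresses and attitude weights:

connection_map = {
    "tcp://localhost:5555/0": [("tcp://localhost:5555/1", 0.5),
                               ("tcp://localhost:5555/2", -0.3)],
    "tcp://localhost:5555/1": [("tcp://localhost:5555/0", 0.1)],
}
# env.create_connections(connection_map) returns one result per matching local agent.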
CartoDB/cartoframes
|
cartoframes/context.py
|
https://github.com/CartoDB/cartoframes/blob/c94238a545f3dec45963dac3892540942b6f0df8/cartoframes/context.py#L368-L383
|
def delete(self, table_name):
"""Delete a table in user's CARTO account.
Args:
table_name (str): Name of table to delete
Returns:
bool: `True` if table is removed
"""
dataset = Dataset(self, table_name)
deleted = dataset.delete()
if deleted:
return deleted
raise CartoException('''The table `{}` doesn't exist'''.format(table_name))
|
[
"def",
"delete",
"(",
"self",
",",
"table_name",
")",
":",
"dataset",
"=",
"Dataset",
"(",
"self",
",",
"table_name",
")",
"deleted",
"=",
"dataset",
".",
"delete",
"(",
")",
"if",
"deleted",
":",
"return",
"deleted",
"raise",
"CartoException",
"(",
"'''The table `{}` doesn't exist'''",
".",
"format",
"(",
"table_name",
")",
")"
] |
Delete a table in user's CARTO account.
Args:
table_name (str): Name of table to delete
Returns:
bool: `True` if table is removed
|
[
"Delete",
"a",
"table",
"in",
"user",
"s",
"CARTO",
"account",
"."
] |
python
|
train
| 26.3125 |
MaT1g3R/option
|
option/result.py
|
https://github.com/MaT1g3R/option/blob/37c954e6e74273d48649b3236bc881a1286107d6/option/result.py#L278-L296
|
def unwrap_or_else(self, op: Callable[[E], U]) -> Union[T, U]:
"""
Returns the success value in the :class:`Result` or computes a default
from the error value.
Args:
op: The function to compute the default with.
Returns:
The success value in the :class:`Result` if it is
a :meth:`Result.Ok` value, otherwise ``op(E)``.
Examples:
>>> Ok(1).unwrap_or_else(lambda e: e * 10)
1
>>> Err(1).unwrap_or_else(lambda e: e * 10)
10
"""
return cast(T, self._val) if self._is_ok else op(cast(E, self._val))
|
[
"def",
"unwrap_or_else",
"(",
"self",
",",
"op",
":",
"Callable",
"[",
"[",
"E",
"]",
",",
"U",
"]",
")",
"->",
"Union",
"[",
"T",
",",
"U",
"]",
":",
"return",
"cast",
"(",
"T",
",",
"self",
".",
"_val",
")",
"if",
"self",
".",
"_is_ok",
"else",
"op",
"(",
"cast",
"(",
"E",
",",
"self",
".",
"_val",
")",
")"
] |
Returns the success value in the :class:`Result` or computes a default
from the error value.
Args:
op: The function to compute the default with.
Returns:
The success value in the :class:`Result` if it is
a :meth:`Result.Ok` value, otherwise ``op(E)``.
Examples:
>>> Ok(1).unwrap_or_else(lambda e: e * 10)
1
>>> Err(1).unwrap_or_else(lambda e: e * 10)
10
|
[
"Returns",
"the",
"sucess",
"value",
"in",
"the",
":",
"class",
":",
"Result",
"or",
"computes",
"a",
"default",
"from",
"the",
"error",
"value",
"."
] |
python
|
train
| 32.789474 |
bcbio/bcbio-nextgen
|
bcbio/variation/vcfutils.py
|
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/vcfutils.py#L680-L690
|
def cyvcf_add_filter(rec, name):
"""Add a FILTER value to a cyvcf2 record
"""
if rec.FILTER:
filters = rec.FILTER.split(";")
else:
filters = []
if name not in filters:
filters.append(name)
rec.FILTER = filters
return rec
|
[
"def",
"cyvcf_add_filter",
"(",
"rec",
",",
"name",
")",
":",
"if",
"rec",
".",
"FILTER",
":",
"filters",
"=",
"rec",
".",
"FILTER",
".",
"split",
"(",
"\";\"",
")",
"else",
":",
"filters",
"=",
"[",
"]",
"if",
"name",
"not",
"in",
"filters",
":",
"filters",
".",
"append",
"(",
"name",
")",
"rec",
".",
"FILTER",
"=",
"filters",
"return",
"rec"
] |
Add a FILTER value to a cyvcf2 record
|
[
"Add",
"a",
"FILTER",
"value",
"to",
"a",
"cyvcf2",
"record"
] |
python
|
train
| 24.181818 |
schlamar/cov-core
|
cov_core.py
|
https://github.com/schlamar/cov-core/blob/791b1f6890456ee9e3beec33c89a7c573a382b7b/cov_core.py#L167-L172
|
def configure_node(self, node):
"""Slaves need to know if they are collocated and what files have moved."""
node.slaveinput['cov_master_host'] = socket.gethostname()
node.slaveinput['cov_master_topdir'] = self.topdir
node.slaveinput['cov_master_rsync_roots'] = [str(root) for root in node.nodemanager.roots]
|
[
"def",
"configure_node",
"(",
"self",
",",
"node",
")",
":",
"node",
".",
"slaveinput",
"[",
"'cov_master_host'",
"]",
"=",
"socket",
".",
"gethostname",
"(",
")",
"node",
".",
"slaveinput",
"[",
"'cov_master_topdir'",
"]",
"=",
"self",
".",
"topdir",
"node",
".",
"slaveinput",
"[",
"'cov_master_rsync_roots'",
"]",
"=",
"[",
"str",
"(",
"root",
")",
"for",
"root",
"in",
"node",
".",
"nodemanager",
".",
"roots",
"]"
] |
Slaves need to know if they are collocated and what files have moved.
|
[
"Slaves",
"need",
"to",
"know",
"if",
"they",
"are",
"collocated",
"and",
"what",
"files",
"have",
"moved",
"."
] |
python
|
train
| 55.833333 |
librosa/librosa
|
librosa/core/time_frequency.py
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/core/time_frequency.py#L1014-L1067
|
def times_like(X, sr=22050, hop_length=512, n_fft=None, axis=-1):
"""Return an array of time values to match the time axis from a feature matrix.
Parameters
----------
X : np.ndarray or scalar
- If ndarray, X is a feature matrix, e.g. STFT, chromagram, or mel spectrogram.
- If scalar, X represents the number of frames.
sr : number > 0 [scalar]
audio sampling rate
hop_length : int > 0 [scalar]
number of samples between successive frames
n_fft : None or int > 0 [scalar]
Optional: length of the FFT window.
If given, time conversion will include an offset of `n_fft / 2`
to counteract windowing effects when using a non-centered STFT.
axis : int [scalar]
The axis representing the time axis of X.
By default, the last axis (-1) is taken.
Returns
-------
times : np.ndarray [shape=(n,)]
ndarray of times (in seconds) corresponding to each frame of X.
See Also
--------
samples_like : Return an array of sample indices to match the time axis from a feature matrix.
Examples
--------
Provide a feature matrix input:
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> X = librosa.stft(y)
>>> times = librosa.times_like(X)
>>> times
array([ 0.00000000e+00, 2.32199546e-02, 4.64399093e-02, ...,
6.13935601e+01, 6.14167800e+01, 6.14400000e+01])
Provide a scalar input:
>>> n_frames = 2647
>>> times = librosa.times_like(n_frames)
>>> times
array([ 0.00000000e+00, 2.32199546e-02, 4.64399093e-02, ...,
6.13935601e+01, 6.14167800e+01, 6.14400000e+01])
"""
samples = samples_like(X, hop_length=hop_length, n_fft=n_fft, axis=axis)
return samples_to_time(samples, sr=sr)
|
[
"def",
"times_like",
"(",
"X",
",",
"sr",
"=",
"22050",
",",
"hop_length",
"=",
"512",
",",
"n_fft",
"=",
"None",
",",
"axis",
"=",
"-",
"1",
")",
":",
"samples",
"=",
"samples_like",
"(",
"X",
",",
"hop_length",
"=",
"hop_length",
",",
"n_fft",
"=",
"n_fft",
",",
"axis",
"=",
"axis",
")",
"return",
"samples_to_time",
"(",
"samples",
",",
"sr",
"=",
"sr",
")"
] |
Return an array of time values to match the time axis from a feature matrix.
Parameters
----------
X : np.ndarray or scalar
- If ndarray, X is a feature matrix, e.g. STFT, chromagram, or mel spectrogram.
- If scalar, X represents the number of frames.
sr : number > 0 [scalar]
audio sampling rate
hop_length : int > 0 [scalar]
number of samples between successive frames
n_fft : None or int > 0 [scalar]
Optional: length of the FFT window.
If given, time conversion will include an offset of `n_fft / 2`
to counteract windowing effects when using a non-centered STFT.
axis : int [scalar]
The axis representing the time axis of X.
By default, the last axis (-1) is taken.
Returns
-------
times : np.ndarray [shape=(n,)]
ndarray of times (in seconds) corresponding to each frame of X.
See Also
--------
samples_like : Return an array of sample indices to match the time axis from a feature matrix.
Examples
--------
Provide a feature matrix input:
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> X = librosa.stft(y)
>>> times = librosa.times_like(X)
>>> times
array([ 0.00000000e+00, 2.32199546e-02, 4.64399093e-02, ...,
6.13935601e+01, 6.14167800e+01, 6.14400000e+01])
Provide a scalar input:
>>> n_frames = 2647
>>> times = librosa.times_like(n_frames)
>>> times
array([ 0.00000000e+00, 2.32199546e-02, 4.64399093e-02, ...,
6.13935601e+01, 6.14167800e+01, 6.14400000e+01])
|
[
"Return",
"an",
"array",
"of",
"time",
"values",
"to",
"match",
"the",
"time",
"axis",
"from",
"a",
"feature",
"matrix",
"."
] |
python
|
test
| 32.87037 |
Julian/Ivoire
|
ivoire/run.py
|
https://github.com/Julian/Ivoire/blob/5b8218cffa409ed733cf850a6fde16fafb8fc2af/ivoire/run.py#L45-L59
|
def parse(argv=None):
"""
Parse some arguments using the parser.
"""
if argv is None:
argv = sys.argv[1:]
# Evade http://bugs.python.org/issue9253
if not argv or argv[0] not in {"run", "transform"}:
argv = ["run"] + argv
arguments = _clean(_parser.parse_args(argv))
return arguments
|
[
"def",
"parse",
"(",
"argv",
"=",
"None",
")",
":",
"if",
"argv",
"is",
"None",
":",
"argv",
"=",
"sys",
".",
"argv",
"[",
"1",
":",
"]",
"# Evade http://bugs.python.org/issue9253",
"if",
"not",
"argv",
"or",
"argv",
"[",
"0",
"]",
"not",
"in",
"{",
"\"run\"",
",",
"\"transform\"",
"}",
":",
"argv",
"=",
"[",
"\"run\"",
"]",
"+",
"argv",
"arguments",
"=",
"_clean",
"(",
"_parser",
".",
"parse_args",
"(",
"argv",
")",
")",
"return",
"arguments"
] |
Parse some arguments using the parser.
|
[
"Parse",
"some",
"arguments",
"using",
"the",
"parser",
"."
] |
python
|
test
| 21.333333 |
mgedmin/check-manifest
|
check_manifest.py
|
https://github.com/mgedmin/check-manifest/blob/7f787e8272f56c5750670bfb3223509e0df72708/check_manifest.py#L701-L771
|
def _get_ignore_from_manifest_lines(lines):
"""Gather the various ignore patterns from a MANIFEST.in.
'lines' should be a list of strings with comments removed
and continuation lines joined.
Returns a list of standard ignore patterns and a list of regular
expressions to ignore.
"""
ignore = []
ignore_regexps = []
for line in lines:
try:
cmd, rest = line.split(None, 1)
except ValueError:
# no whitespace, so not interesting
continue
for part in rest.split():
# distutils enforces these warnings on Windows only
if part.startswith('/'):
warning("ERROR: Leading slashes are not allowed in MANIFEST.in on Windows: %s" % part)
if part.endswith('/'):
warning("ERROR: Trailing slashes are not allowed in MANIFEST.in on Windows: %s" % part)
if cmd == 'exclude':
# An exclude of 'dirname/*css' can match 'dirname/foo.css'
# but not 'dirname/subdir/bar.css'. We need a regular
# expression for that, since fnmatch doesn't pay attention to
# directory separators.
for pat in rest.split():
if '*' in pat or '?' in pat or '[!' in pat:
ignore_regexps.append(_glob_to_regexp(pat))
else:
# No need for special handling.
ignore.append(pat)
elif cmd == 'global-exclude':
ignore.extend(rest.split())
elif cmd == 'recursive-exclude':
try:
dirname, patterns = rest.split(None, 1)
except ValueError:
# Wrong MANIFEST.in line.
warning("You have a wrong line in MANIFEST.in: %r\n"
"'recursive-exclude' expects <dir> <pattern1> "
"<pattern2> ..." % line)
continue
# Strip path separator for clarity.
dirname = dirname.rstrip(os.path.sep)
for pattern in patterns.split():
if pattern.startswith('*'):
ignore.append(dirname + os.path.sep + pattern)
else:
# 'recursive-exclude plone metadata.xml' should
# exclude plone/metadata.xml and
# plone/*/metadata.xml, where * can be any number
# of sub directories. We could use a regexp, but
# two ignores seems easier.
ignore.append(dirname + os.path.sep + pattern)
ignore.append(
dirname + os.path.sep + '*' + os.path.sep + pattern)
elif cmd == 'prune':
# rest is considered to be a directory name. It should
# not contain a path separator, as it actually has no
# effect in that case, but that could differ per python
# version. We strip it here to avoid double separators.
# XXX: mg: I'm not 100% sure the above is correct, AFAICS
# all pythons from 2.6 complain if the path has a leading or
# trailing slash -- on Windows, that is.
rest = rest.rstrip('/\\')
ignore.append(rest)
ignore.append(rest + os.path.sep + '*')
return ignore, ignore_regexps
|
[
"def",
"_get_ignore_from_manifest_lines",
"(",
"lines",
")",
":",
"ignore",
"=",
"[",
"]",
"ignore_regexps",
"=",
"[",
"]",
"for",
"line",
"in",
"lines",
":",
"try",
":",
"cmd",
",",
"rest",
"=",
"line",
".",
"split",
"(",
"None",
",",
"1",
")",
"except",
"ValueError",
":",
"# no whitespace, so not interesting",
"continue",
"for",
"part",
"in",
"rest",
".",
"split",
"(",
")",
":",
"# distutils enforces these warnings on Windows only",
"if",
"part",
".",
"startswith",
"(",
"'/'",
")",
":",
"warning",
"(",
"\"ERROR: Leading slashes are not allowed in MANIFEST.in on Windows: %s\"",
"%",
"part",
")",
"if",
"part",
".",
"endswith",
"(",
"'/'",
")",
":",
"warning",
"(",
"\"ERROR: Trailing slashes are not allowed in MANIFEST.in on Windows: %s\"",
"%",
"part",
")",
"if",
"cmd",
"==",
"'exclude'",
":",
"# An exclude of 'dirname/*css' can match 'dirname/foo.css'",
"# but not 'dirname/subdir/bar.css'. We need a regular",
"# expression for that, since fnmatch doesn't pay attention to",
"# directory separators.",
"for",
"pat",
"in",
"rest",
".",
"split",
"(",
")",
":",
"if",
"'*'",
"in",
"pat",
"or",
"'?'",
"in",
"pat",
"or",
"'[!'",
"in",
"pat",
":",
"ignore_regexps",
".",
"append",
"(",
"_glob_to_regexp",
"(",
"pat",
")",
")",
"else",
":",
"# No need for special handling.",
"ignore",
".",
"append",
"(",
"pat",
")",
"elif",
"cmd",
"==",
"'global-exclude'",
":",
"ignore",
".",
"extend",
"(",
"rest",
".",
"split",
"(",
")",
")",
"elif",
"cmd",
"==",
"'recursive-exclude'",
":",
"try",
":",
"dirname",
",",
"patterns",
"=",
"rest",
".",
"split",
"(",
"None",
",",
"1",
")",
"except",
"ValueError",
":",
"# Wrong MANIFEST.in line.",
"warning",
"(",
"\"You have a wrong line in MANIFEST.in: %r\\n\"",
"\"'recursive-exclude' expects <dir> <pattern1> \"",
"\"<pattern2> ...\"",
"%",
"line",
")",
"continue",
"# Strip path separator for clarity.",
"dirname",
"=",
"dirname",
".",
"rstrip",
"(",
"os",
".",
"path",
".",
"sep",
")",
"for",
"pattern",
"in",
"patterns",
".",
"split",
"(",
")",
":",
"if",
"pattern",
".",
"startswith",
"(",
"'*'",
")",
":",
"ignore",
".",
"append",
"(",
"dirname",
"+",
"os",
".",
"path",
".",
"sep",
"+",
"pattern",
")",
"else",
":",
"# 'recursive-exclude plone metadata.xml' should",
"# exclude plone/metadata.xml and",
"# plone/*/metadata.xml, where * can be any number",
"# of sub directories. We could use a regexp, but",
"# two ignores seems easier.",
"ignore",
".",
"append",
"(",
"dirname",
"+",
"os",
".",
"path",
".",
"sep",
"+",
"pattern",
")",
"ignore",
".",
"append",
"(",
"dirname",
"+",
"os",
".",
"path",
".",
"sep",
"+",
"'*'",
"+",
"os",
".",
"path",
".",
"sep",
"+",
"pattern",
")",
"elif",
"cmd",
"==",
"'prune'",
":",
"# rest is considered to be a directory name. It should",
"# not contain a path separator, as it actually has no",
"# effect in that case, but that could differ per python",
"# version. We strip it here to avoid double separators.",
"# XXX: mg: I'm not 100% sure the above is correct, AFAICS",
"# all pythons from 2.6 complain if the path has a leading or",
"# trailing slash -- on Windows, that is.",
"rest",
"=",
"rest",
".",
"rstrip",
"(",
"'/\\\\'",
")",
"ignore",
".",
"append",
"(",
"rest",
")",
"ignore",
".",
"append",
"(",
"rest",
"+",
"os",
".",
"path",
".",
"sep",
"+",
"'*'",
")",
"return",
"ignore",
",",
"ignore_regexps"
] |
Gather the various ignore patterns from a MANIFEST.in.
'lines' should be a list of strings with comments removed
and continuation lines joined.
Returns a list of standard ignore patterns and a list of regular
expressions to ignore.
|
[
"Gather",
"the",
"various",
"ignore",
"patterns",
"from",
"a",
"MANIFEST",
".",
"in",
"."
] |
python
|
train
| 46.169014 |
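A sketch of what _get_ignore_from_manifest_lines returns for a small input, assuming a POSIX os.path.sep of '/':

lines = [
    "global-exclude *.pyc",
    "recursive-exclude docs conf.py",
    "prune build",
]
ignore, ignore_regexps = _get_ignore_from_manifest_lines(lines)
# ignore == ['*.pyc', 'docs/conf.py', 'docs/*/conf.py', 'build', 'build/*']
# ignore_regexps stays empty: only 'exclude' lines whose patterns contain
# *, ? or [! produce regular expressions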
brocade/pynos
|
pynos/versions/ver_6/ver_6_0_1/yang/ietf_netconf_monitoring.py
|
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/ietf_netconf_monitoring.py#L509-L520
|
def get_schema_input_version(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_schema = ET.Element("get_schema")
config = get_schema
input = ET.SubElement(get_schema, "input")
version = ET.SubElement(input, "version")
version.text = kwargs.pop('version')
callback = kwargs.pop('callback', self._callback)
return callback(config)
|
[
"def",
"get_schema_input_version",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"config",
"=",
"ET",
".",
"Element",
"(",
"\"config\"",
")",
"get_schema",
"=",
"ET",
".",
"Element",
"(",
"\"get_schema\"",
")",
"config",
"=",
"get_schema",
"input",
"=",
"ET",
".",
"SubElement",
"(",
"get_schema",
",",
"\"input\"",
")",
"version",
"=",
"ET",
".",
"SubElement",
"(",
"input",
",",
"\"version\"",
")",
"version",
".",
"text",
"=",
"kwargs",
".",
"pop",
"(",
"'version'",
")",
"callback",
"=",
"kwargs",
".",
"pop",
"(",
"'callback'",
",",
"self",
".",
"_callback",
")",
"return",
"callback",
"(",
"config",
")"
] |
Auto Generated Code
|
[
"Auto",
"Generated",
"Code"
] |
python
|
train
| 35.5 |
luckydonald/pytgbot
|
code_generation/output/pytgbot/api_types/receivable/peer.py
|
https://github.com/luckydonald/pytgbot/blob/67f4b5a1510d4583d40b5477e876b1ef0eb8971b/code_generation/output/pytgbot/api_types/receivable/peer.py#L373-L403
|
def from_array(array):
"""
Deserialize a new Chat from a given dictionary.
:return: new Chat instance.
:rtype: Chat
"""
if array is None or not array:
return None
# end if
assert_type_or_raise(array, dict, parameter_name="array")
from pytgbot.api_types.receivable.media import ChatPhoto
from pytgbot.api_types.receivable.updates import Message
data = {}
data['id'] = int(array.get('id'))
data['type'] = u(array.get('type'))
data['title'] = u(array.get('title')) if array.get('title') is not None else None
data['username'] = u(array.get('username')) if array.get('username') is not None else None
data['first_name'] = u(array.get('first_name')) if array.get('first_name') is not None else None
data['last_name'] = u(array.get('last_name')) if array.get('last_name') is not None else None
data['all_members_are_administrators'] = bool(array.get('all_members_are_administrators')) if array.get('all_members_are_administrators') is not None else None
data['photo'] = ChatPhoto.from_array(array.get('photo')) if array.get('photo') is not None else None
data['description'] = u(array.get('description')) if array.get('description') is not None else None
data['invite_link'] = u(array.get('invite_link')) if array.get('invite_link') is not None else None
data['pinned_message'] = Message.from_array(array.get('pinned_message')) if array.get('pinned_message') is not None else None
data['sticker_set_name'] = u(array.get('sticker_set_name')) if array.get('sticker_set_name') is not None else None
data['can_set_sticker_set'] = bool(array.get('can_set_sticker_set')) if array.get('can_set_sticker_set') is not None else None
data['_raw'] = array
return Chat(**data)
|
[
"def",
"from_array",
"(",
"array",
")",
":",
"if",
"array",
"is",
"None",
"or",
"not",
"array",
":",
"return",
"None",
"# end if",
"assert_type_or_raise",
"(",
"array",
",",
"dict",
",",
"parameter_name",
"=",
"\"array\"",
")",
"from",
"pytgbot",
".",
"api_types",
".",
"receivable",
".",
"media",
"import",
"ChatPhoto",
"from",
"pytgbot",
".",
"api_types",
".",
"receivable",
".",
"updates",
"import",
"Message",
"data",
"=",
"{",
"}",
"data",
"[",
"'id'",
"]",
"=",
"int",
"(",
"array",
".",
"get",
"(",
"'id'",
")",
")",
"data",
"[",
"'type'",
"]",
"=",
"u",
"(",
"array",
".",
"get",
"(",
"'type'",
")",
")",
"data",
"[",
"'title'",
"]",
"=",
"u",
"(",
"array",
".",
"get",
"(",
"'title'",
")",
")",
"if",
"array",
".",
"get",
"(",
"'title'",
")",
"is",
"not",
"None",
"else",
"None",
"data",
"[",
"'username'",
"]",
"=",
"u",
"(",
"array",
".",
"get",
"(",
"'username'",
")",
")",
"if",
"array",
".",
"get",
"(",
"'username'",
")",
"is",
"not",
"None",
"else",
"None",
"data",
"[",
"'first_name'",
"]",
"=",
"u",
"(",
"array",
".",
"get",
"(",
"'first_name'",
")",
")",
"if",
"array",
".",
"get",
"(",
"'first_name'",
")",
"is",
"not",
"None",
"else",
"None",
"data",
"[",
"'last_name'",
"]",
"=",
"u",
"(",
"array",
".",
"get",
"(",
"'last_name'",
")",
")",
"if",
"array",
".",
"get",
"(",
"'last_name'",
")",
"is",
"not",
"None",
"else",
"None",
"data",
"[",
"'all_members_are_administrators'",
"]",
"=",
"bool",
"(",
"array",
".",
"get",
"(",
"'all_members_are_administrators'",
")",
")",
"if",
"array",
".",
"get",
"(",
"'all_members_are_administrators'",
")",
"is",
"not",
"None",
"else",
"None",
"data",
"[",
"'photo'",
"]",
"=",
"ChatPhoto",
".",
"from_array",
"(",
"array",
".",
"get",
"(",
"'photo'",
")",
")",
"if",
"array",
".",
"get",
"(",
"'photo'",
")",
"is",
"not",
"None",
"else",
"None",
"data",
"[",
"'description'",
"]",
"=",
"u",
"(",
"array",
".",
"get",
"(",
"'description'",
")",
")",
"if",
"array",
".",
"get",
"(",
"'description'",
")",
"is",
"not",
"None",
"else",
"None",
"data",
"[",
"'invite_link'",
"]",
"=",
"u",
"(",
"array",
".",
"get",
"(",
"'invite_link'",
")",
")",
"if",
"array",
".",
"get",
"(",
"'invite_link'",
")",
"is",
"not",
"None",
"else",
"None",
"data",
"[",
"'pinned_message'",
"]",
"=",
"Message",
".",
"from_array",
"(",
"array",
".",
"get",
"(",
"'pinned_message'",
")",
")",
"if",
"array",
".",
"get",
"(",
"'pinned_message'",
")",
"is",
"not",
"None",
"else",
"None",
"data",
"[",
"'sticker_set_name'",
"]",
"=",
"u",
"(",
"array",
".",
"get",
"(",
"'sticker_set_name'",
")",
")",
"if",
"array",
".",
"get",
"(",
"'sticker_set_name'",
")",
"is",
"not",
"None",
"else",
"None",
"data",
"[",
"'can_set_sticker_set'",
"]",
"=",
"bool",
"(",
"array",
".",
"get",
"(",
"'can_set_sticker_set'",
")",
")",
"if",
"array",
".",
"get",
"(",
"'can_set_sticker_set'",
")",
"is",
"not",
"None",
"else",
"None",
"data",
"[",
"'_raw'",
"]",
"=",
"array",
"return",
"Chat",
"(",
"*",
"*",
"data",
")"
] |
Deserialize a new Chat from a given dictionary.
:return: new Chat instance.
:rtype: Chat
|
[
"Deserialize",
"a",
"new",
"Chat",
"from",
"a",
"given",
"dictionary",
"."
] |
python
|
train
| 59.935484 |
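A usage sketch under the assumption that the Chat constructor stores each keyword as an attribute; the payload values are hypothetical:

array = {"id": 123456, "type": "private", "first_name": "Ada"}  # minimal Bot API-style dict
chat = Chat.from_array(array)
# chat.id == 123456, chat.first_name == 'Ada'; absent optional keys are parsed as None
# Chat.from_array(None) and Chat.from_array({}) both return None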
asphalt-framework/asphalt
|
asphalt/core/context.py
|
https://github.com/asphalt-framework/asphalt/blob/4114b3ac9743cbd9facb374a3f53e19d3afef22d/asphalt/core/context.py#L487-L502
|
def call_async(self, func: Callable, *args, **kwargs):
"""
Call the given callable in the event loop thread.
This method lets you call asynchronous code from a worker thread.
Do not use it from within the event loop thread.
If the callable returns an awaitable, it is resolved before returning to the caller.
:param func: a regular function or a coroutine function
:param args: positional arguments to call the callable with
:param kwargs: keyword arguments to call the callable with
:return: the return value of the call
"""
return asyncio_extras.call_async(self.loop, func, *args, **kwargs)
|
[
"def",
"call_async",
"(",
"self",
",",
"func",
":",
"Callable",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"asyncio_extras",
".",
"call_async",
"(",
"self",
".",
"loop",
",",
"func",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] |
Call the given callable in the event loop thread.
This method lets you call asynchronous code from a worker thread.
Do not use it from within the event loop thread.
If the callable returns an awaitable, it is resolved before returning to the caller.
:param func: a regular function or a coroutine function
:param args: positional arguments to call the callable with
:param kwargs: keyword arguments to call the callable with
:return: the return value of the call
|
[
"Call",
"the",
"given",
"callable",
"in",
"the",
"event",
"loop",
"thread",
"."
] |
python
|
train
| 41.8125 |
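A minimal sketch of calling back into the event loop from a worker thread; ctx is an existing asphalt Context and fetch_value is a hypothetical coroutine function:

import threading

def worker(ctx):
    # Blocks this worker thread until the coroutine resolves on the event loop.
    result = ctx.call_async(fetch_value)
    print(result)

threading.Thread(target=worker, args=(ctx,)).start()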
adamcharnock/swiftwind
|
swiftwind/costs/models.py
|
https://github.com/adamcharnock/swiftwind/blob/72c715800841c3b2feabded3f3b65b76388b4cea/swiftwind/costs/models.py#L236-L248
|
def _is_ready(self, as_of):
"""Is the RecurringCost ready to be enacted as of the date `as_of`
This determines if `as_of` precedes the start of `initial_billing_cycle`. If so,
we should not be enacting this RecurringCost yet.
Args:
as_of (Date):
"""
if self.is_one_off():
return self.initial_billing_cycle.date_range.lower <= as_of
else:
return True
|
[
"def",
"_is_ready",
"(",
"self",
",",
"as_of",
")",
":",
"if",
"self",
".",
"is_one_off",
"(",
")",
":",
"return",
"self",
".",
"initial_billing_cycle",
".",
"date_range",
".",
"lower",
"<=",
"as_of",
"else",
":",
"return",
"True"
] |
Is the RecurringCost ready to be enacted as of the date `as_of`
This determines if `as_of` precedes the start of `initial_billing_cycle`. If so,
we should not be enacting this RecurringCost yet.
Args:
as_of (Date):
|
[
"Is",
"the",
"RecurringCost",
"ready",
"to",
"be",
"enacted",
"as",
"of",
"the",
"date",
"as_of"
] |
python
|
train
| 33.153846 |
mstuttgart/pycep-correios
|
pycep_correios/cliente.py
|
https://github.com/mstuttgart/pycep-correios/blob/99f98b08f9a78a11373804bffe618f26f893518d/pycep_correios/cliente.py#L34-L74
|
def consultar_cep(cep, ambiente=PRODUCAO):
"""Retorna o endereço correspondente ao número de CEP informado.
Arguments:
cep {str} -- CEP a ser consultado.
Keyword Arguments:
ambiente {int} -- Indica qual será o webservice utilizado na consulta de CEP. Valor default é PRODUCAO (default: {PRODUCAO})
Raises:
KeyError -- Quando ambiente selecionado não existe (esperado: PRODUCAO ou HOMOLOGACAO)
ExcecaoPyCEPCorreios -- Quando ocorre qualquer erro na consulta do CEP.
Returns:
dict -- Dados do endereço do CEP consultado.
"""
if ambiente not in URL:
raise KeyError('Ambiente inválido! Valor deve ser 1 para produção e 2 '
'para homologação')
try:
with warnings.catch_warnings():
# Disable the warnings
warnings.simplefilter('ignore', InsecureRequestWarning)
warnings.simplefilter('ignore', ImportWarning)
client = zeep.Client(URL[ambiente])
endereco = client.service.consultaCEP(formatar_cep(cep))
return {
'bairro': endereco.bairro,
'cep': endereco.cep,
'cidade': endereco.cidade,
'end': endereco.end,
'uf': endereco.uf,
'complemento2': endereco.complemento2,
'unidadesPostagem': endereco.unidadesPostagem,
}
except zeep.exceptions.Fault as e:
raise excecoes.ExcecaoPyCEPCorreios(message=e.message)
|
[
"def",
"consultar_cep",
"(",
"cep",
",",
"ambiente",
"=",
"PRODUCAO",
")",
":",
"if",
"ambiente",
"not",
"in",
"URL",
":",
"raise",
"KeyError",
"(",
"'Ambiente inválido! Valor deve ser 1 para produção e 2 '",
"'para homologação')",
"",
"try",
":",
"with",
"warnings",
".",
"catch_warnings",
"(",
")",
":",
"# Desabilitamos o warning",
"warnings",
".",
"simplefilter",
"(",
"'ignore'",
",",
"InsecureRequestWarning",
")",
"warnings",
".",
"simplefilter",
"(",
"'ignore'",
",",
"ImportWarning",
")",
"client",
"=",
"zeep",
".",
"Client",
"(",
"URL",
"[",
"ambiente",
"]",
")",
"endereco",
"=",
"client",
".",
"service",
".",
"consultaCEP",
"(",
"formatar_cep",
"(",
"cep",
")",
")",
"return",
"{",
"'bairro'",
":",
"endereco",
".",
"bairro",
",",
"'cep'",
":",
"endereco",
".",
"cep",
",",
"'cidade'",
":",
"endereco",
".",
"cidade",
",",
"'end'",
":",
"endereco",
".",
"end",
",",
"'uf'",
":",
"endereco",
".",
"uf",
",",
"'complemento2'",
":",
"endereco",
".",
"complemento2",
",",
"'unidadesPostagem'",
":",
"endereco",
".",
"unidadesPostagem",
",",
"}",
"except",
"zeep",
".",
"exceptions",
".",
"Fault",
"as",
"e",
":",
"raise",
"excecoes",
".",
"ExcecaoPyCEPCorreios",
"(",
"message",
"=",
"e",
".",
"message",
")"
] |
Returns the address corresponding to the given CEP (postal code).
Arguments:
cep {str} -- CEP to look up.
Keyword Arguments:
ambiente {int} -- Selects which webservice is used for the CEP lookup. Default value is PRODUCAO (default: {PRODUCAO})
Raises:
KeyError -- When the selected environment does not exist (expected: PRODUCAO or HOMOLOGACAO)
ExcecaoPyCEPCorreios -- When any error occurs during the CEP lookup.
Returns:
dict -- Address data for the queried CEP.
|
[
"Retorna",
"o",
"endereço",
"correspondente",
"ao",
"número",
"de",
"CEP",
"informado",
"."
] |
python
|
train
| 36.243902 |
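A usage sketch; '37503130' is just an example CEP, and the call hits the production webservice by default:

endereco = consultar_cep('37503130')
print(endereco['end'], endereco['cidade'], endereco['uf'])
# An unknown `ambiente` value raises KeyError;
# webservice faults are re-raised as ExcecaoPyCEPCorreios.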
secdev/scapy
|
scapy/arch/common.py
|
https://github.com/secdev/scapy/blob/3ffe757c184017dd46464593a8f80f85abc1e79a/scapy/arch/common.py#L128-L150
|
def get_bpf_pointer(tcpdump_lines):
"""Create a BPF Pointer for TCPDump filter"""
if conf.use_pypy:
return _legacy_bpf_pointer(tcpdump_lines)
# Allocate BPF instructions
size = int(tcpdump_lines[0])
bpf_insn_a = bpf_insn * size
bip = bpf_insn_a()
# Fill the BPF instruction structures with the byte code
tcpdump_lines = tcpdump_lines[1:]
i = 0
for line in tcpdump_lines:
values = [int(v) for v in line.split()]
bip[i].code = c_ushort(values[0])
bip[i].jt = c_ubyte(values[1])
bip[i].jf = c_ubyte(values[2])
bip[i].k = c_uint(values[3])
i += 1
# Create the BPF program
return bpf_program(size, bip)
|
[
"def",
"get_bpf_pointer",
"(",
"tcpdump_lines",
")",
":",
"if",
"conf",
".",
"use_pypy",
":",
"return",
"_legacy_bpf_pointer",
"(",
"tcpdump_lines",
")",
"# Allocate BPF instructions",
"size",
"=",
"int",
"(",
"tcpdump_lines",
"[",
"0",
"]",
")",
"bpf_insn_a",
"=",
"bpf_insn",
"*",
"size",
"bip",
"=",
"bpf_insn_a",
"(",
")",
"# Fill the BPF instruction structures with the byte code",
"tcpdump_lines",
"=",
"tcpdump_lines",
"[",
"1",
":",
"]",
"i",
"=",
"0",
"for",
"line",
"in",
"tcpdump_lines",
":",
"values",
"=",
"[",
"int",
"(",
"v",
")",
"for",
"v",
"in",
"line",
".",
"split",
"(",
")",
"]",
"bip",
"[",
"i",
"]",
".",
"code",
"=",
"c_ushort",
"(",
"values",
"[",
"0",
"]",
")",
"bip",
"[",
"i",
"]",
".",
"jt",
"=",
"c_ubyte",
"(",
"values",
"[",
"1",
"]",
")",
"bip",
"[",
"i",
"]",
".",
"jf",
"=",
"c_ubyte",
"(",
"values",
"[",
"2",
"]",
")",
"bip",
"[",
"i",
"]",
".",
"k",
"=",
"c_uint",
"(",
"values",
"[",
"3",
"]",
")",
"i",
"+=",
"1",
"# Create the BPF program",
"return",
"bpf_program",
"(",
"size",
",",
"bip",
")"
] |
Create a BPF Pointer for TCPDump filter
|
[
"Create",
"a",
"BPF",
"Pointer",
"for",
"TCPDump",
"filter"
] |
python
|
train
| 29.652174 |
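The expected input is the output of `tcpdump -ddd`, which prints an instruction count followed by one decimal-encoded BPF instruction per line; a sketch assuming tcpdump is on PATH:

import subprocess

out = subprocess.check_output(["tcpdump", "-ddd", "ip"]).decode()
prog = get_bpf_pointer(out.strip().splitlines())
# 'prog' is a bpf_program struct ready to attach to a capture socket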
apriha/lineage
|
src/lineage/snps.py
|
https://github.com/apriha/lineage/blob/13106a62a959a80ac26c68d1566422de08aa877b/src/lineage/snps.py#L272-L306
|
def _read_ftdna_famfinder(file):
""" Read and parse Family Tree DNA (FTDNA) "famfinder" file.
https://www.familytreedna.com
Parameters
----------
file : str
path to file
Returns
-------
pandas.DataFrame
individual's genetic data normalized for use with `lineage`
str
name of data source
"""
df = pd.read_csv(
file,
comment="#",
na_values="-",
names=["rsid", "chrom", "pos", "allele1", "allele2"],
index_col=0,
dtype={"chrom": object},
)
# create genotype column from allele columns
df["genotype"] = df["allele1"] + df["allele2"]
# delete allele columns
# http://stackoverflow.com/a/13485766
del df["allele1"]
del df["allele2"]
return sort_snps(df), "FTDNA"
|
[
"def",
"_read_ftdna_famfinder",
"(",
"file",
")",
":",
"df",
"=",
"pd",
".",
"read_csv",
"(",
"file",
",",
"comment",
"=",
"\"#\"",
",",
"na_values",
"=",
"\"-\"",
",",
"names",
"=",
"[",
"\"rsid\"",
",",
"\"chrom\"",
",",
"\"pos\"",
",",
"\"allele1\"",
",",
"\"allele2\"",
"]",
",",
"index_col",
"=",
"0",
",",
"dtype",
"=",
"{",
"\"chrom\"",
":",
"object",
"}",
",",
")",
"# create genotype column from allele columns",
"df",
"[",
"\"genotype\"",
"]",
"=",
"df",
"[",
"\"allele1\"",
"]",
"+",
"df",
"[",
"\"allele2\"",
"]",
"# delete allele columns",
"# http://stackoverflow.com/a/13485766",
"del",
"df",
"[",
"\"allele1\"",
"]",
"del",
"df",
"[",
"\"allele2\"",
"]",
"return",
"sort_snps",
"(",
"df",
")",
",",
"\"FTDNA\""
] |
Read and parse Family Tree DNA (FTDNA) "famfinder" file.
https://www.familytreedna.com
Parameters
----------
file : str
path to file
Returns
-------
pandas.DataFrame
individual's genetic data normalized for use with `lineage`
str
name of data source
|
[
"Read",
"and",
"parse",
"Family",
"Tree",
"DNA",
"(",
"FTDNA",
")",
"famfinder",
"file",
"."
] |
python
|
train
| 25.371429 |
python-visualization/folium
|
folium/features.py
|
https://github.com/python-visualization/folium/blob/8595240517135d1637ca4cf7cc624045f1d911b3/folium/features.py#L469-L494
|
def process_data(self, data):
"""Convert an unknown data input into a geojson dictionary."""
if isinstance(data, dict):
self.embed = True
return data
elif isinstance(data, str):
if data.lower().startswith(('http:', 'ftp:', 'https:')):
if not self.embed:
self.embed_link = data
return requests.get(data).json()
elif data.lstrip()[0] in '[{': # This is a GeoJSON inline string
self.embed = True
return json.loads(data)
else: # This is a filename
if not self.embed:
self.embed_link = data
with open(data) as f:
return json.loads(f.read())
elif hasattr(data, '__geo_interface__'):
self.embed = True
if hasattr(data, 'to_crs'):
data = data.to_crs(epsg='4326')
return json.loads(json.dumps(data.__geo_interface__))
else:
raise ValueError('Cannot render objects with any missing geometries'
': {!r}'.format(data))
|
[
"def",
"process_data",
"(",
"self",
",",
"data",
")",
":",
"if",
"isinstance",
"(",
"data",
",",
"dict",
")",
":",
"self",
".",
"embed",
"=",
"True",
"return",
"data",
"elif",
"isinstance",
"(",
"data",
",",
"str",
")",
":",
"if",
"data",
".",
"lower",
"(",
")",
".",
"startswith",
"(",
"(",
"'http:'",
",",
"'ftp:'",
",",
"'https:'",
")",
")",
":",
"if",
"not",
"self",
".",
"embed",
":",
"self",
".",
"embed_link",
"=",
"data",
"return",
"requests",
".",
"get",
"(",
"data",
")",
".",
"json",
"(",
")",
"elif",
"data",
".",
"lstrip",
"(",
")",
"[",
"0",
"]",
"in",
"'[{'",
":",
"# This is a GeoJSON inline string",
"self",
".",
"embed",
"=",
"True",
"return",
"json",
".",
"loads",
"(",
"data",
")",
"else",
":",
"# This is a filename",
"if",
"not",
"self",
".",
"embed",
":",
"self",
".",
"embed_link",
"=",
"data",
"with",
"open",
"(",
"data",
")",
"as",
"f",
":",
"return",
"json",
".",
"loads",
"(",
"f",
".",
"read",
"(",
")",
")",
"elif",
"hasattr",
"(",
"data",
",",
"'__geo_interface__'",
")",
":",
"self",
".",
"embed",
"=",
"True",
"if",
"hasattr",
"(",
"data",
",",
"'to_crs'",
")",
":",
"data",
"=",
"data",
".",
"to_crs",
"(",
"epsg",
"=",
"'4326'",
")",
"return",
"json",
".",
"loads",
"(",
"json",
".",
"dumps",
"(",
"data",
".",
"__geo_interface__",
")",
")",
"else",
":",
"raise",
"ValueError",
"(",
"'Cannot render objects with any missing geometries'",
"': {!r}'",
".",
"format",
"(",
"data",
")",
")"
] |
Convert an unknown data input into a geojson dictionary.
|
[
"Convert",
"an",
"unknown",
"data",
"input",
"into",
"a",
"geojson",
"dictionary",
"."
] |
python
|
train
| 43.538462 |
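process_data is called by the GeoJson constructor; the calls below sketch the accepted input shapes (dict, inline string, filename, URL), with hypothetical paths:

GeoJson({"type": "FeatureCollection", "features": []})    # dict: embedded directly
GeoJson('{"type": "FeatureCollection", "features": []}')  # inline string, detected by '[' or '{'
GeoJson("data.geojson")                                    # local file, read and embedded
GeoJson("https://example.com/data.geojson")                # URL, fetched with requests
# An object with __geo_interface__ (e.g. a GeoDataFrame) is also accepted and is
# reprojected to EPSG:4326 first when it has a to_crs method.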
lightning-viz/lightning-python
|
lightning/types/base.py
|
https://github.com/lightning-viz/lightning-python/blob/68563e1da82d162d204069d7586f7c695b8bd4a6/lightning/types/base.py#L141-L179
|
def _baseplot(cls, session, type, *args, **kwargs):
"""
Base method for plotting data and images.
Applies a plot-type specific cleaning operation to generate
a dictionary with the data, then creates a visualization with the data.
Expects a session and a type, followed by all plot-type specific
positional and keyword arguments, which will be handled by the clean
method of the given plot type.
If the dictionary contains only images, or only non-image data,
they will be passed on their own. If the dictionary contains
both images and non-image data, the images will be appended
to the visualization.
"""
if not type:
raise Exception("Must provide a plot type")
options, description = cls._clean_options(**kwargs)
data = cls._clean_data(*args)
if 'images' in data and len(data) > 1:
images = data['images']
del data['images']
viz = cls._create(session, data=data, type=type, options=options, description=description)
first_image, remaining_images = images[0], images[1:]
viz._append_image(first_image)
for image in remaining_images:
viz._append_image(image)
elif 'images' in data:
images = data['images']
viz = cls._create(session, images=images, type=type, options=options, description=description)
else:
viz = cls._create(session, data=data, type=type, options=options, description=description)
return viz
|
[
"def",
"_baseplot",
"(",
"cls",
",",
"session",
",",
"type",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"not",
"type",
":",
"raise",
"Exception",
"(",
"\"Must provide a plot type\"",
")",
"options",
",",
"description",
"=",
"cls",
".",
"_clean_options",
"(",
"*",
"*",
"kwargs",
")",
"data",
"=",
"cls",
".",
"_clean_data",
"(",
"*",
"args",
")",
"if",
"'images'",
"in",
"data",
"and",
"len",
"(",
"data",
")",
">",
"1",
":",
"images",
"=",
"data",
"[",
"'images'",
"]",
"del",
"data",
"[",
"'images'",
"]",
"viz",
"=",
"cls",
".",
"_create",
"(",
"session",
",",
"data",
"=",
"data",
",",
"type",
"=",
"type",
",",
"options",
"=",
"options",
",",
"description",
"=",
"description",
")",
"first_image",
",",
"remaining_images",
"=",
"images",
"[",
"0",
"]",
",",
"images",
"[",
"1",
":",
"]",
"viz",
".",
"_append_image",
"(",
"first_image",
")",
"for",
"image",
"in",
"remaining_images",
":",
"viz",
".",
"_append_image",
"(",
"image",
")",
"elif",
"'images'",
"in",
"data",
":",
"images",
"=",
"data",
"[",
"'images'",
"]",
"viz",
"=",
"cls",
".",
"_create",
"(",
"session",
",",
"images",
"=",
"images",
",",
"type",
"=",
"type",
",",
"options",
"=",
"options",
",",
"description",
"=",
"description",
")",
"else",
":",
"viz",
"=",
"cls",
".",
"_create",
"(",
"session",
",",
"data",
"=",
"data",
",",
"type",
"=",
"type",
",",
"options",
"=",
"options",
",",
"description",
"=",
"description",
")",
"return",
"viz"
] |
Base method for plotting data and images.
Applies a plot-type specific cleaning operation to generate
a dictionary with the data, then creates a visualization with the data.
Expects a session and a type, followed by all plot-type specific
positional and keyword arguments, which will be handled by the clean
method of the given plot type.
If the dictionary contains only images, or only non-image data,
they will be passed on their own. If the dictionary contains
both images and non-image data, the images will be appended
to the visualization.
|
[
"Base",
"method",
"for",
"plotting",
"data",
"and",
"images",
"."
] |
python
|
train
| 40.205128 |
opentok/Opentok-Python-SDK
|
opentok/opentok.py
|
https://github.com/opentok/Opentok-Python-SDK/blob/ffc6714e76be0d29e6b56aff8cbf7509b71a8b2c/opentok/opentok.py#L415-L434
|
def delete_archive(self, archive_id):
"""
Deletes an OpenTok archive.
You can only delete an archive which has a status of "available" or "uploaded". Deleting an
archive removes its record from the list of archives. For an "available" archive, it also
removes the archive file, making it unavailable for download.
:param String archive_id: The archive ID of the archive to be deleted.
"""
response = requests.delete(self.endpoints.archive_url(archive_id), headers=self.json_headers(), proxies=self.proxies, timeout=self.timeout)
if response.status_code < 300:
pass
elif response.status_code == 403:
raise AuthError()
elif response.status_code == 404:
raise NotFoundError("Archive not found")
else:
raise RequestError("An unexpected error occurred", response.status_code)
|
[
"def",
"delete_archive",
"(",
"self",
",",
"archive_id",
")",
":",
"response",
"=",
"requests",
".",
"delete",
"(",
"self",
".",
"endpoints",
".",
"archive_url",
"(",
"archive_id",
")",
",",
"headers",
"=",
"self",
".",
"json_headers",
"(",
")",
",",
"proxies",
"=",
"self",
".",
"proxies",
",",
"timeout",
"=",
"self",
".",
"timeout",
")",
"if",
"response",
".",
"status_code",
"<",
"300",
":",
"pass",
"elif",
"response",
".",
"status_code",
"==",
"403",
":",
"raise",
"AuthError",
"(",
")",
"elif",
"response",
".",
"status_code",
"==",
"404",
":",
"raise",
"NotFoundError",
"(",
"\"Archive not found\"",
")",
"else",
":",
"raise",
"RequestError",
"(",
"\"An unexpected error occurred\"",
",",
"response",
".",
"status_code",
")"
] |
Deletes an OpenTok archive.
You can only delete an archive which has a status of "available" or "uploaded". Deleting an
archive removes its record from the list of archives. For an "available" archive, it also
removes the archive file, making it unavailable for download.
:param String archive_id: The archive ID of the archive to be deleted.
|
[
"Deletes",
"an",
"OpenTok",
"archive",
"."
] |
python
|
train
| 44.9 |
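A usage sketch with placeholder credentials and archive ID; NotFoundError is the exception raised above, assumed importable from the same package:

from opentok import OpenTok, NotFoundError  # NotFoundError assumed exported here

opentok = OpenTok("API_KEY", "API_SECRET")  # placeholder credentials
try:
    opentok.delete_archive("b40ef09b-3811-4726-b508-e41a0f96c68f")  # placeholder ID
except NotFoundError:
    pass  # already deleted or never existed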
codelv/enaml-native
|
examples/playground/main.py
|
https://github.com/codelv/enaml-native/blob/c33986e9eda468c508806e0a3e73c771401e5718/examples/playground/main.py#L25-L41
|
def main():
""" Called by PyBridge.start()
"""
#: If we set the TMP env variable the dev reloader will save file
#: and load changes in this directory instead of overwriting the
#: ones installed with the app.
os.environ['TMP'] = os.path.join(sys.path[0], '../tmp')
from enamlnative.android.app import AndroidApplication
app = AndroidApplication(
debug=True, #: Makes a lot of lag!
dev='server',
load_view=load_view,
)
app.start()
|
[
"def",
"main",
"(",
")",
":",
"#: If we set the TMP env variable the dev reloader will save file",
"#: and load changes in this directory instead of overwriting the",
"#: ones installed with the app.",
"os",
".",
"environ",
"[",
"'TMP'",
"]",
"=",
"os",
".",
"path",
".",
"join",
"(",
"sys",
".",
"path",
"[",
"0",
"]",
",",
"'../tmp'",
")",
"from",
"enamlnative",
".",
"android",
".",
"app",
"import",
"AndroidApplication",
"app",
"=",
"AndroidApplication",
"(",
"debug",
"=",
"True",
",",
"#: Makes a lot of lag!",
"dev",
"=",
"'server'",
",",
"load_view",
"=",
"load_view",
",",
")",
"app",
".",
"start",
"(",
")"
] |
Called by PyBridge.start()
|
[
"Called",
"by",
"PyBridge",
".",
"start",
"()"
] |
python
|
train
| 28.352941 |
mdgoldberg/sportsref
|
sportsref/nfl/teams.py
|
https://github.com/mdgoldberg/sportsref/blob/09f11ac856a23c96d666d1d510bb35d6f050b5c3/sportsref/nfl/teams.py#L265-L280
|
def sos(self, year):
"""Returns the SOS (Strength of Schedule) for a team in a year, based
on SRS.
:year: The year for the season in question.
:returns: A float of SOS.
"""
try:
sos_text = self._year_info_pq(year, 'SOS').text()
except ValueError:
return None
m = re.search(r'SOS\s*:\s*(\S+)', sos_text)
if m:
return float(m.group(1))
else:
return None
|
[
"def",
"sos",
"(",
"self",
",",
"year",
")",
":",
"try",
":",
"sos_text",
"=",
"self",
".",
"_year_info_pq",
"(",
"year",
",",
"'SOS'",
")",
".",
"text",
"(",
")",
"except",
"ValueError",
":",
"return",
"None",
"m",
"=",
"re",
".",
"search",
"(",
"r'SOS\\s*:\\s*(\\S+)'",
",",
"sos_text",
")",
"if",
"m",
":",
"return",
"float",
"(",
"m",
".",
"group",
"(",
"1",
")",
")",
"else",
":",
"return",
"None"
] |
Returns the SOS (Strength of Schedule) for a team in a year, based
on SRS.
:year: The year for the season in question.
:returns: A float of SOS.
|
[
"Returns",
"the",
"SOS",
"(",
"Strength",
"of",
"Schedule",
")",
"for",
"a",
"team",
"in",
"a",
"year",
"based",
"on",
"SRS",
"."
] |
python
|
test
| 29.0625 |
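A usage sketch; the team ID and year are illustrative, and the return shape follows the code above:

team = Team('nwe')     # assuming this module's Team class takes a team ID
print(team.sos(2015))  # float SOS, or None if the year page has no parsable entry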
brocade/pynos
|
pynos/versions/ver_6/ver_6_0_1/yang/brocade_trilloam.py
|
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_trilloam.py#L238-L252
|
def l2traceroute_result_output_l2_hop_results_l2_hop_egress_interface_type(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
l2traceroute_result = ET.Element("l2traceroute_result")
config = l2traceroute_result
output = ET.SubElement(l2traceroute_result, "output")
l2_hop_results = ET.SubElement(output, "l2-hop-results")
l2_hop = ET.SubElement(l2_hop_results, "l2-hop")
egress = ET.SubElement(l2_hop, "egress")
interface_type = ET.SubElement(egress, "interface-type")
interface_type.text = kwargs.pop('interface_type')
callback = kwargs.pop('callback', self._callback)
return callback(config)
|
[
"def",
"l2traceroute_result_output_l2_hop_results_l2_hop_egress_interface_type",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"config",
"=",
"ET",
".",
"Element",
"(",
"\"config\"",
")",
"l2traceroute_result",
"=",
"ET",
".",
"Element",
"(",
"\"l2traceroute_result\"",
")",
"config",
"=",
"l2traceroute_result",
"output",
"=",
"ET",
".",
"SubElement",
"(",
"l2traceroute_result",
",",
"\"output\"",
")",
"l2_hop_results",
"=",
"ET",
".",
"SubElement",
"(",
"output",
",",
"\"l2-hop-results\"",
")",
"l2_hop",
"=",
"ET",
".",
"SubElement",
"(",
"l2_hop_results",
",",
"\"l2-hop\"",
")",
"egress",
"=",
"ET",
".",
"SubElement",
"(",
"l2_hop",
",",
"\"egress\"",
")",
"interface_type",
"=",
"ET",
".",
"SubElement",
"(",
"egress",
",",
"\"interface-type\"",
")",
"interface_type",
".",
"text",
"=",
"kwargs",
".",
"pop",
"(",
"'interface_type'",
")",
"callback",
"=",
"kwargs",
".",
"pop",
"(",
"'callback'",
",",
"self",
".",
"_callback",
")",
"return",
"callback",
"(",
"config",
")"
] |
Auto Generated Code
|
[
"Auto",
"Generated",
"Code"
] |
python
|
train
| 47.133333 |
LuqueDaniel/pybooru
|
pybooru/moebooru.py
|
https://github.com/LuqueDaniel/pybooru/blob/60cd5254684d293b308f0b11b8f4ac2dce101479/pybooru/moebooru.py#L90-L102
|
def _build_url(self, api_call):
"""Build request url.
Parameters:
api_call (str): Base API Call.
Returns:
Complete url (str).
"""
if self.api_version in ('1.13.0', '1.13.0+update.1', '1.13.0+update.2'):
if '/' not in api_call:
return "{0}/{1}/index.json".format(self.site_url, api_call)
return "{0}/{1}.json".format(self.site_url, api_call)
|
[
"def",
"_build_url",
"(",
"self",
",",
"api_call",
")",
":",
"if",
"self",
".",
"api_version",
"in",
"(",
"'1.13.0'",
",",
"'1.13.0+update.1'",
",",
"'1.13.0+update.2'",
")",
":",
"if",
"'/'",
"not",
"in",
"api_call",
":",
"return",
"\"{0}/{1}/index.json\"",
".",
"format",
"(",
"self",
".",
"site_url",
",",
"api_call",
")",
"return",
"\"{0}/{1}.json\"",
".",
"format",
"(",
"self",
".",
"site_url",
",",
"api_call",
")"
] |
Build request url.
Parameters:
api_call (str): Base API Call.
Returns:
Complete url (str).
|
[
"Build",
"request",
"url",
"."
] |
python
|
train
| 33.076923 |
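A sketch of the two branches, with a hypothetical Moebooru client whose site_url is 'https://yande.re' and api_version '1.13.0':

client._build_url('post')       # -> 'https://yande.re/post/index.json'
client._build_url('pool/show')  # -> 'https://yande.re/pool/show.json'
# For api_version values outside that tuple, every call takes the plain '.json' form.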
e7dal/bubble3
|
behave4cmd0/textutil.py
|
https://github.com/e7dal/bubble3/blob/59c735281a95b44f6263a25f4d6ce24fca520082/behave4cmd0/textutil.py#L164-L172
|
def text_remove_empty_lines(text):
"""
Whitespace normalization:
- Strip empty lines
- Strip trailing whitespace
"""
lines = [ line.rstrip() for line in text.splitlines() if line.strip() ]
return "\n".join(lines)
|
[
"def",
"text_remove_empty_lines",
"(",
"text",
")",
":",
"lines",
"=",
"[",
"line",
".",
"rstrip",
"(",
")",
"for",
"line",
"in",
"text",
".",
"splitlines",
"(",
")",
"if",
"line",
".",
"strip",
"(",
")",
"]",
"return",
"\"\\n\"",
".",
"join",
"(",
"lines",
")"
] |
Whitespace normalization:
- Strip empty lines
- Strip trailing whitespace
|
[
"Whitespace",
"normalization",
":"
] |
python
|
train
| 26.555556 |
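A quick example of the normalization:

text_remove_empty_lines("first \n\n   \nsecond  ")
# -> 'first\nsecond'  (blank lines dropped, trailing whitespace stripped)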
materialsproject/pymatgen
|
pymatgen/io/xr.py
|
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/io/xr.py#L142-L162
|
def from_file(filename, use_cores=True, thresh=1.e-4):
"""
Reads an xr-formatted file to create an Xr object.
Args:
filename (str): name of file to read from.
use_cores (bool): use core positions and discard shell
positions if set to True (default). Otherwise,
use shell positions and discard core positions.
thresh (float): relative threshold for consistency check
between cell parameters (lengths and angles) from
header information and cell vectors, respectively.
Returns:
xr (Xr): Xr object corresponding to the input
file.
"""
with zopen(filename, "rt") as f:
return Xr.from_string(
f.read(), use_cores=use_cores,
thresh=thresh)
|
[
"def",
"from_file",
"(",
"filename",
",",
"use_cores",
"=",
"True",
",",
"thresh",
"=",
"1.e-4",
")",
":",
"with",
"zopen",
"(",
"filename",
",",
"\"rt\"",
")",
"as",
"f",
":",
"return",
"Xr",
".",
"from_string",
"(",
"f",
".",
"read",
"(",
")",
",",
"use_cores",
"=",
"use_cores",
",",
"thresh",
"=",
"thresh",
")"
] |
Reads an xr-formatted file to create an Xr object.
Args:
filename (str): name of file to read from.
use_cores (bool): use core positions and discard shell
positions if set to True (default). Otherwise,
use shell positions and discard core positions.
thresh (float): relative threshold for consistency check
between cell parameters (lengths and angles) from
header information and cell vectors, respectively.
Returns:
xr (Xr): Xr object corresponding to the input
file.
|
[
"Reads",
"an",
"xr",
"-",
"formatted",
"file",
"to",
"create",
"an",
"Xr",
"object",
"."
] |
python
|
train
| 41.142857 |
aholkner/bacon
|
native/Vendor/FreeType/src/tools/glnames.py
|
https://github.com/aholkner/bacon/blob/edf3810dcb211942d392a8637945871399b0650d/native/Vendor/FreeType/src/tools/glnames.py#L5238-L5479
|
def main():
"""main program body"""
if len( sys.argv ) != 2:
print __doc__ % sys.argv[0]
sys.exit( 1 )
file = open( sys.argv[1], "w\n" )
write = file.write
count_sid = len( sid_standard_names )
# `mac_extras' contains the list of glyph names in the Macintosh standard
# encoding which are not in the SID Standard Names.
#
mac_extras = filter_glyph_names( mac_standard_names, sid_standard_names )
# `base_list' contains the names of our final glyph names table.
# It consists of the `mac_extras' glyph names, followed by the SID
# standard names.
#
mac_extras_count = len( mac_extras )
base_list = mac_extras + sid_standard_names
write( "/***************************************************************************/\n" )
write( "/* */\n" )
write( "/* %-71s*/\n" % os.path.basename( sys.argv[1] ) )
write( "/* */\n" )
write( "/* PostScript glyph names. */\n" )
write( "/* */\n" )
write( "/* Copyright 2005, 2008, 2011 by */\n" )
write( "/* David Turner, Robert Wilhelm, and Werner Lemberg. */\n" )
write( "/* */\n" )
write( "/* This file is part of the FreeType project, and may only be used, */\n" )
write( "/* modified, and distributed under the terms of the FreeType project */\n" )
write( "/* license, LICENSE.TXT. By continuing to use, modify, or distribute */\n" )
write( "/* this file you indicate that you have read the license and */\n" )
write( "/* understand and accept it fully. */\n" )
write( "/* */\n" )
write( "/***************************************************************************/\n" )
write( "\n" )
write( "\n" )
write( " /* This file has been generated automatically -- do not edit! */\n" )
write( "\n" )
write( "\n" )
# dump final glyph list (mac extras + sid standard names)
#
st = StringTable( base_list, "ft_standard_glyph_names" )
st.dump( file )
st.dump_sublist( file, "ft_mac_names",
"FT_NUM_MAC_NAMES", mac_standard_names )
st.dump_sublist( file, "ft_sid_names",
"FT_NUM_SID_NAMES", sid_standard_names )
dump_encoding( file, "t1_standard_encoding", t1_standard_encoding )
dump_encoding( file, "t1_expert_encoding", t1_expert_encoding )
# dump the AGL in its compressed form
#
agl_glyphs, agl_values = adobe_glyph_values()
dict = StringNode( "", 0 )
for g in range( len( agl_glyphs ) ):
dict.add( agl_glyphs[g], eval( "0x" + agl_values[g] ) )
dict = dict.optimize()
dict_len = dict.locate( 0 )
dict_array = dict.store( "" )
write( """\
/*
* This table is a compressed version of the Adobe Glyph List (AGL),
* optimized for efficient searching. It has been generated by the
* `glnames.py' python script located in the `src/tools' directory.
*
* The lookup function to get the Unicode value for a given string
* is defined below the table.
*/
#ifdef FT_CONFIG_OPTION_ADOBE_GLYPH_LIST
""" )
dump_array( dict_array, write, "ft_adobe_glyph_list" )
# write the lookup routine now
#
write( """\
/*
* This function searches the compressed table efficiently.
*/
static unsigned long
ft_get_adobe_glyph_index( const char* name,
const char* limit )
{
int c = 0;
int count, min, max;
const unsigned char* p = ft_adobe_glyph_list;
if ( name == 0 || name >= limit )
goto NotFound;
c = *name++;
count = p[1];
p += 2;
min = 0;
max = count;
while ( min < max )
{
int mid = ( min + max ) >> 1;
const unsigned char* q = p + mid * 2;
int c2;
q = ft_adobe_glyph_list + ( ( (int)q[0] << 8 ) | q[1] );
c2 = q[0] & 127;
if ( c2 == c )
{
p = q;
goto Found;
}
if ( c2 < c )
min = mid + 1;
else
max = mid;
}
goto NotFound;
Found:
for (;;)
{
/* assert (*p & 127) == c */
if ( name >= limit )
{
if ( (p[0] & 128) == 0 &&
(p[1] & 128) != 0 )
return (unsigned long)( ( (int)p[2] << 8 ) | p[3] );
goto NotFound;
}
c = *name++;
if ( p[0] & 128 )
{
p++;
if ( c != (p[0] & 127) )
goto NotFound;
continue;
}
p++;
count = p[0] & 127;
if ( p[0] & 128 )
p += 2;
p++;
for ( ; count > 0; count--, p += 2 )
{
int offset = ( (int)p[0] << 8 ) | p[1];
const unsigned char* q = ft_adobe_glyph_list + offset;
if ( c == ( q[0] & 127 ) )
{
p = q;
goto NextIter;
}
}
goto NotFound;
NextIter:
;
}
NotFound:
return 0;
}
#endif /* FT_CONFIG_OPTION_ADOBE_GLYPH_LIST */
""" )
if 0: # generate unit test, or don't
#
# now write the unit test to check that everything works OK
#
write( "#ifdef TEST\n\n" )
write( "static const char* const the_names[] = {\n" )
for name in agl_glyphs:
write( ' "' + name + '",\n' )
write( " 0\n};\n" )
write( "static const unsigned long the_values[] = {\n" )
for val in agl_values:
write( ' 0x' + val + ',\n' )
write( " 0\n};\n" )
write( """
#include <stdlib.h>
#include <stdio.h>
int
main( void )
{
int result = 0;
const char* const* names = the_names;
const unsigned long* values = the_values;
for ( ; *names; names++, values++ )
{
const char* name = *names;
unsigned long reference = *values;
unsigned long value;
value = ft_get_adobe_glyph_index( name, name + strlen( name ) );
if ( value != reference )
{
result = 1;
fprintf( stderr, "name '%s' => %04x instead of %04x\\n",
name, value, reference );
}
}
return result;
}
""" )
write( "#endif /* TEST */\n" )
write("\n/* END */\n")
|
[
"def",
"main",
"(",
")",
":",
"if",
"len",
"(",
"sys",
".",
"argv",
")",
"!=",
"2",
":",
"print",
"__doc__",
"%",
"sys",
".",
"argv",
"[",
"0",
"]",
"sys",
".",
"exit",
"(",
"1",
")",
"file",
"=",
"open",
"(",
"sys",
".",
"argv",
"[",
"1",
"]",
",",
"\"w\\n\"",
")",
"write",
"=",
"file",
".",
"write",
"count_sid",
"=",
"len",
"(",
"sid_standard_names",
")",
"# `mac_extras' contains the list of glyph names in the Macintosh standard",
"# encoding which are not in the SID Standard Names.",
"#",
"mac_extras",
"=",
"filter_glyph_names",
"(",
"mac_standard_names",
",",
"sid_standard_names",
")",
"# `base_list' contains the names of our final glyph names table.",
"# It consists of the `mac_extras' glyph names, followed by the SID",
"# standard names.",
"#",
"mac_extras_count",
"=",
"len",
"(",
"mac_extras",
")",
"base_list",
"=",
"mac_extras",
"+",
"sid_standard_names",
"write",
"(",
"\"/***************************************************************************/\\n\"",
")",
"write",
"(",
"\"/* */\\n\"",
")",
"write",
"(",
"\"/* %-71s*/\\n\"",
"%",
"os",
".",
"path",
".",
"basename",
"(",
"sys",
".",
"argv",
"[",
"1",
"]",
")",
")",
"write",
"(",
"\"/* */\\n\"",
")",
"write",
"(",
"\"/* PostScript glyph names. */\\n\"",
")",
"write",
"(",
"\"/* */\\n\"",
")",
"write",
"(",
"\"/* Copyright 2005, 2008, 2011 by */\\n\"",
")",
"write",
"(",
"\"/* David Turner, Robert Wilhelm, and Werner Lemberg. */\\n\"",
")",
"write",
"(",
"\"/* */\\n\"",
")",
"write",
"(",
"\"/* This file is part of the FreeType project, and may only be used, */\\n\"",
")",
"write",
"(",
"\"/* modified, and distributed under the terms of the FreeType project */\\n\"",
")",
"write",
"(",
"\"/* license, LICENSE.TXT. By continuing to use, modify, or distribute */\\n\"",
")",
"write",
"(",
"\"/* this file you indicate that you have read the license and */\\n\"",
")",
"write",
"(",
"\"/* understand and accept it fully. */\\n\"",
")",
"write",
"(",
"\"/* */\\n\"",
")",
"write",
"(",
"\"/***************************************************************************/\\n\"",
")",
"write",
"(",
"\"\\n\"",
")",
"write",
"(",
"\"\\n\"",
")",
"write",
"(",
"\" /* This file has been generated automatically -- do not edit! */\\n\"",
")",
"write",
"(",
"\"\\n\"",
")",
"write",
"(",
"\"\\n\"",
")",
"# dump final glyph list (mac extras + sid standard names)",
"#",
"st",
"=",
"StringTable",
"(",
"base_list",
",",
"\"ft_standard_glyph_names\"",
")",
"st",
".",
"dump",
"(",
"file",
")",
"st",
".",
"dump_sublist",
"(",
"file",
",",
"\"ft_mac_names\"",
",",
"\"FT_NUM_MAC_NAMES\"",
",",
"mac_standard_names",
")",
"st",
".",
"dump_sublist",
"(",
"file",
",",
"\"ft_sid_names\"",
",",
"\"FT_NUM_SID_NAMES\"",
",",
"sid_standard_names",
")",
"dump_encoding",
"(",
"file",
",",
"\"t1_standard_encoding\"",
",",
"t1_standard_encoding",
")",
"dump_encoding",
"(",
"file",
",",
"\"t1_expert_encoding\"",
",",
"t1_expert_encoding",
")",
"# dump the AGL in its compressed form",
"#",
"agl_glyphs",
",",
"agl_values",
"=",
"adobe_glyph_values",
"(",
")",
"dict",
"=",
"StringNode",
"(",
"\"\"",
",",
"0",
")",
"for",
"g",
"in",
"range",
"(",
"len",
"(",
"agl_glyphs",
")",
")",
":",
"dict",
".",
"add",
"(",
"agl_glyphs",
"[",
"g",
"]",
",",
"eval",
"(",
"\"0x\"",
"+",
"agl_values",
"[",
"g",
"]",
")",
")",
"dict",
"=",
"dict",
".",
"optimize",
"(",
")",
"dict_len",
"=",
"dict",
".",
"locate",
"(",
"0",
")",
"dict_array",
"=",
"dict",
".",
"store",
"(",
"\"\"",
")",
"write",
"(",
"\"\"\"\\\n /*\n * This table is a compressed version of the Adobe Glyph List (AGL),\n * optimized for efficient searching. It has been generated by the\n * `glnames.py' python script located in the `src/tools' directory.\n *\n * The lookup function to get the Unicode value for a given string\n * is defined below the table.\n */\n\n#ifdef FT_CONFIG_OPTION_ADOBE_GLYPH_LIST\n\n\"\"\"",
")",
"dump_array",
"(",
"dict_array",
",",
"write",
",",
"\"ft_adobe_glyph_list\"",
")",
"# write the lookup routine now",
"#",
"write",
"(",
"\"\"\"\\\n /*\n * This function searches the compressed table efficiently.\n */\n static unsigned long\n ft_get_adobe_glyph_index( const char* name,\n const char* limit )\n {\n int c = 0;\n int count, min, max;\n const unsigned char* p = ft_adobe_glyph_list;\n\n\n if ( name == 0 || name >= limit )\n goto NotFound;\n\n c = *name++;\n count = p[1];\n p += 2;\n\n min = 0;\n max = count;\n\n while ( min < max )\n {\n int mid = ( min + max ) >> 1;\n const unsigned char* q = p + mid * 2;\n int c2;\n\n\n q = ft_adobe_glyph_list + ( ( (int)q[0] << 8 ) | q[1] );\n\n c2 = q[0] & 127;\n if ( c2 == c )\n {\n p = q;\n goto Found;\n }\n if ( c2 < c )\n min = mid + 1;\n else\n max = mid;\n }\n goto NotFound;\n\n Found:\n for (;;)\n {\n /* assert (*p & 127) == c */\n\n if ( name >= limit )\n {\n if ( (p[0] & 128) == 0 &&\n (p[1] & 128) != 0 )\n return (unsigned long)( ( (int)p[2] << 8 ) | p[3] );\n\n goto NotFound;\n }\n c = *name++;\n if ( p[0] & 128 )\n {\n p++;\n if ( c != (p[0] & 127) )\n goto NotFound;\n\n continue;\n }\n\n p++;\n count = p[0] & 127;\n if ( p[0] & 128 )\n p += 2;\n\n p++;\n\n for ( ; count > 0; count--, p += 2 )\n {\n int offset = ( (int)p[0] << 8 ) | p[1];\n const unsigned char* q = ft_adobe_glyph_list + offset;\n\n if ( c == ( q[0] & 127 ) )\n {\n p = q;\n goto NextIter;\n }\n }\n goto NotFound;\n\n NextIter:\n ;\n }\n\n NotFound:\n return 0;\n }\n\n#endif /* FT_CONFIG_OPTION_ADOBE_GLYPH_LIST */\n\n\"\"\"",
")",
"if",
"0",
":",
"# generate unit test, or don't",
"#",
"# now write the unit test to check that everything works OK",
"#",
"write",
"(",
"\"#ifdef TEST\\n\\n\"",
")",
"write",
"(",
"\"static const char* const the_names[] = {\\n\"",
")",
"for",
"name",
"in",
"agl_glyphs",
":",
"write",
"(",
"' \"'",
"+",
"name",
"+",
"'\",\\n'",
")",
"write",
"(",
"\" 0\\n};\\n\"",
")",
"write",
"(",
"\"static const unsigned long the_values[] = {\\n\"",
")",
"for",
"val",
"in",
"agl_values",
":",
"write",
"(",
"' 0x'",
"+",
"val",
"+",
"',\\n'",
")",
"write",
"(",
"\" 0\\n};\\n\"",
")",
"write",
"(",
"\"\"\"\n#include <stdlib.h>\n#include <stdio.h>\n\n int\n main( void )\n {\n int result = 0;\n const char* const* names = the_names;\n const unsigned long* values = the_values;\n\n\n for ( ; *names; names++, values++ )\n {\n const char* name = *names;\n unsigned long reference = *values;\n unsigned long value;\n\n\n value = ft_get_adobe_glyph_index( name, name + strlen( name ) );\n if ( value != reference )\n {\n result = 1;\n fprintf( stderr, \"name '%s' => %04x instead of %04x\\\\n\",\n name, value, reference );\n }\n }\n\n return result;\n }\n\"\"\"",
")",
"write",
"(",
"\"#endif /* TEST */\\n\"",
")",
"write",
"(",
"\"\\n/* END */\\n\"",
")"
] |
main program body
|
[
"main",
"program",
"body"
] |
python
|
test
| 26.239669 |
ungarj/mapchete
|
mapchete/formats/default/geojson.py
|
https://github.com/ungarj/mapchete/blob/d482918d0e66a5b414dff6aa7cc854e01fc60ee4/mapchete/formats/default/geojson.py#L145-L165
|
def is_valid_with_config(self, config):
"""
Check if output format is valid with other process parameters.
Parameters
----------
config : dictionary
output configuration parameters
Returns
-------
is_valid : bool
"""
validate_values(config, [("schema", dict), ("path", str)])
validate_values(config["schema"], [("properties", dict), ("geometry", str)])
if config["schema"]["geometry"] not in [
"Geometry", "Point", "MultiPoint", "Line", "MultiLine",
"Polygon", "MultiPolygon"
]:
raise TypeError("invalid geometry type")
return True
|
[
"def",
"is_valid_with_config",
"(",
"self",
",",
"config",
")",
":",
"validate_values",
"(",
"config",
",",
"[",
"(",
"\"schema\"",
",",
"dict",
")",
",",
"(",
"\"path\"",
",",
"str",
")",
"]",
")",
"validate_values",
"(",
"config",
"[",
"\"schema\"",
"]",
",",
"[",
"(",
"\"properties\"",
",",
"dict",
")",
",",
"(",
"\"geometry\"",
",",
"str",
")",
"]",
")",
"if",
"config",
"[",
"\"schema\"",
"]",
"[",
"\"geometry\"",
"]",
"not",
"in",
"[",
"\"Geometry\"",
",",
"\"Point\"",
",",
"\"MultiPoint\"",
",",
"\"Line\"",
",",
"\"MultiLine\"",
",",
"\"Polygon\"",
",",
"\"MultiPolygon\"",
"]",
":",
"raise",
"TypeError",
"(",
"\"invalid geometry type\"",
")",
"return",
"True"
] |
Check if output format is valid with other process parameters.
Parameters
----------
config : dictionary
output configuration parameters
Returns
-------
is_valid : bool
|
[
"Check",
"if",
"output",
"format",
"is",
"valid",
"with",
"other",
"process",
"parameters",
"."
] |
python
|
valid
| 32.047619 |
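A sketch of a config that passes the check; the keys and values are illustrative, and 'output' stands for a GeoJSON output instance:

config = {
    "path": "output/areas.geojson",
    "schema": {"properties": {"name": "str"}, "geometry": "Polygon"},
}
assert output.is_valid_with_config(config)
# A geometry such as "Triangle" would raise TypeError('invalid geometry type').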
limodou/uliweb
|
uliweb/core/SimpleFrame.py
|
https://github.com/limodou/uliweb/blob/34472f25e4bc0b954a35346672f94e84ef18b076/uliweb/core/SimpleFrame.py#L222-L261
|
def CORS(func=None):
"""
CORS support
"""
def w(r=None):
from uliweb import request, response
if request.method == 'OPTIONS':
response = Response(status=204)
response.headers['Access-Control-Allow-Credentials'] = 'true'
response.headers['Access-Control-Allow-Origin'] = request.headers['Origin']
response.headers['Access-Control-Allow-Methods'] = 'GET, POST, PUT, DELETE, OPTIONS'
response.headers['Access-Control-Allow-Headers'] = 'DNT,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Range'
response.headers['Access-Control-Max-Age'] = 24*3600
response.headers['Content-Type'] = 'text/plain; charset=utf-8'
response.headers['Content-Length'] = 0
return response
elif request.method in ('GET', 'POST'):
if isinstance(r, Response):
response = r
response.headers['Access-Control-Allow-Credentials'] = 'true'
if 'Origin' in request.headers:
response.headers['Access-Control-Allow-Origin'] = request.headers['Origin']
response.headers['Access-Control-Allow-Methods'] = 'GET, POST, PUT, DELETE, OPTIONS'
response.headers['Access-Control-Allow-Headers'] = 'DNT,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Range'
response.headers['Access-Control-Expose-Headers'] = 'Content-Length,Content-Range'
if callable(func):
@wraps(func)
def f(*arg, **kwargs):
if request.method == 'OPTIONS':
return w()
ret = func(*arg, **kwargs)
w(ret)
return ret
return f
else:
w()
|
[
"def",
"CORS",
"(",
"func",
"=",
"None",
")",
":",
"def",
"w",
"(",
"r",
"=",
"None",
")",
":",
"from",
"uliweb",
"import",
"request",
",",
"response",
"if",
"request",
".",
"method",
"==",
"'OPTIONS'",
":",
"response",
"=",
"Response",
"(",
"status",
"=",
"204",
")",
"response",
".",
"headers",
"[",
"'Access-Control-Allow-Credentials'",
"]",
"=",
"'true'",
"response",
".",
"headers",
"[",
"'Access-Control-Allow-Origin'",
"]",
"=",
"request",
".",
"headers",
"[",
"'Origin'",
"]",
"response",
".",
"headers",
"[",
"'Access-Control-Allow-Methods'",
"]",
"=",
"'GET, POST, PUT, DELETE, OPTIONS'",
"response",
".",
"headers",
"[",
"'Access-Control-Allow-Headers'",
"]",
"=",
"'DNT,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Range'",
"response",
".",
"headers",
"[",
"'Access-Control-Max-Age'",
"]",
"=",
"24",
"*",
"3600",
"response",
".",
"headers",
"[",
"'Content-Type'",
"]",
"=",
"'text/plain; charset=utf-8'",
"response",
".",
"headers",
"[",
"'Content-Length'",
"]",
"=",
"0",
"return",
"response",
"elif",
"request",
".",
"method",
"in",
"(",
"'GET'",
",",
"'POST'",
")",
":",
"if",
"isinstance",
"(",
"r",
",",
"Response",
")",
":",
"response",
"=",
"r",
"response",
".",
"headers",
"[",
"'Access-Control-Allow-Credentials'",
"]",
"=",
"'true'",
"if",
"'Origin'",
"in",
"request",
".",
"headers",
":",
"response",
".",
"headers",
"[",
"'Access-Control-Allow-Origin'",
"]",
"=",
"request",
".",
"headers",
"[",
"'Origin'",
"]",
"response",
".",
"headers",
"[",
"'Access-Control-Allow-Methods'",
"]",
"=",
"'GET, POST, PUT, DELETE, OPTIONS'",
"response",
".",
"headers",
"[",
"'Access-Control-Allow-Headers'",
"]",
"=",
"'DNT,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Range'",
"response",
".",
"headers",
"[",
"'Access-Control-Expose-Headers'",
"]",
"=",
"'Content-Length,Content-Range'",
"if",
"callable",
"(",
"func",
")",
":",
"@",
"wraps",
"(",
"func",
")",
"def",
"f",
"(",
"*",
"arg",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"request",
".",
"method",
"==",
"'OPTIONS'",
":",
"return",
"w",
"(",
")",
"ret",
"=",
"func",
"(",
"*",
"arg",
",",
"*",
"*",
"kwargs",
")",
"w",
"(",
"ret",
")",
"return",
"ret",
"return",
"f",
"else",
":",
"w",
"(",
")"
] |
CORS support
|
[
"CORS",
"support"
] |
python
|
train
| 43.125 |
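In use, `CORS` works as a decorator on a view function; a hedged sketch assuming the usual uliweb `expose` routing decorator:

from uliweb import expose

@expose('/api/data')
@CORS
def api():
    # OPTIONS preflights are answered with a 204 inside the wrapper;
    # GET/POST results that are Response objects get the
    # Access-Control-* headers added on the way out.
    return 'ok'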
i3visio/osrframework
|
osrframework/thirdparties/pipl_com/lib/fields.py
|
https://github.com/i3visio/osrframework/blob/83437f4c14c9c08cb80a896bd9834c77f6567871/osrframework/thirdparties/pipl_com/lib/fields.py#L383-L391
|
def is_valid_email(self):
"""A bool value that indicates whether the address is a valid
email address.
Note that the check is done by matching to the regular expression
at Email.re_email which is very basic and far from covering end-cases...
"""
return bool(self.address and Email.re_email.match(self.address))
|
[
"def",
"is_valid_email",
"(",
"self",
")",
":",
"return",
"bool",
"(",
"self",
".",
"address",
"and",
"Email",
".",
"re_email",
".",
"match",
"(",
"self",
".",
"address",
")",
")"
] |
A bool value that indicates whether the address is a valid
email address.
Note that the check is done by matching to the regular expression
at Email.re_email which is very basic and far from covering end-cases...
|
[
"A",
"bool",
"value",
"that",
"indicates",
"whether",
"the",
"address",
"is",
"a",
"valid",
"email",
"address",
".",
"Note",
"that",
"the",
"check",
"is",
"done",
"be",
"matching",
"to",
"the",
"regular",
"expression",
"at",
"Email",
".",
"re_email",
"which",
"is",
"very",
"basic",
"and",
"far",
"from",
"covering",
"end",
"-",
"cases",
"..."
] |
python
|
train
| 42 |
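The check leans entirely on `Email.re_email`; a self-contained sketch of the pattern-plus-method pairing, where the regex is an assumed stand-in rather than pipl's actual expression:

import re

class Email(object):
    # Assumed, deliberately permissive pattern; the real re_email lives
    # elsewhere in the module.
    re_email = re.compile(r'^[^@\s]+@[^@\s]+\.[A-Za-z]{2,}$')

    def __init__(self, address=None):
        self.address = address

    def is_valid_email(self):
        return bool(self.address and Email.re_email.match(self.address))

print(Email('user@example.com').is_valid_email())  # True
print(Email('not-an-address').is_valid_email())    # False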
vtkiorg/vtki
|
vtki/examples/downloads.py
|
https://github.com/vtkiorg/vtki/blob/5ccad7ae6d64a03e9594c9c7474c8aab3eb22dd1/vtki/examples/downloads.py#L324-L358
|
def download_kitchen(split=False):
"""Download structured grid of kitchen with velocity field. Use the
``split`` argument to extract all of the furniture in the kitchen.
"""
mesh = _download_and_read('kitchen.vtk')
if not split:
return mesh
extents = {
'door' : (27, 27, 14, 18, 0, 11),
'window1' : (0, 0, 9, 18, 6, 12),
'window2' : (5, 12, 23, 23, 6, 12),
'klower1' : (17, 17, 0, 11, 0, 6),
'klower2' : (19, 19, 0, 11, 0, 6),
'klower3' : (17, 19, 0, 0, 0, 6),
'klower4' : (17, 19, 11, 11, 0, 6),
'klower5' : (17, 19, 0, 11, 0, 0),
'klower6' : (17, 19, 0, 7, 6, 6),
'klower7' : (17, 19, 9, 11, 6, 6),
'hood1' : (17, 17, 0, 11, 11, 16),
'hood2' : (19, 19, 0, 11, 11, 16),
'hood3' : (17, 19, 0, 0, 11, 16),
'hood4' : (17, 19, 11, 11, 11, 16),
'hood5' : (17, 19, 0, 11, 16, 16),
'cookingPlate' : (17, 19, 7, 9, 6, 6),
'furniture' : (17, 19, 7, 9, 11, 11),
}
kitchen = vtki.MultiBlock()
for key, extent in extents.items():
alg = vtk.vtkStructuredGridGeometryFilter()
alg.SetInputDataObject(mesh)
alg.SetExtent(extent)
alg.Update()
result = vtki.filters._get_output(alg)
kitchen[key] = result
return kitchen
|
[
"def",
"download_kitchen",
"(",
"split",
"=",
"False",
")",
":",
"mesh",
"=",
"_download_and_read",
"(",
"'kitchen.vtk'",
")",
"if",
"not",
"split",
":",
"return",
"mesh",
"extents",
"=",
"{",
"'door'",
":",
"(",
"27",
",",
"27",
",",
"14",
",",
"18",
",",
"0",
",",
"11",
")",
",",
"'window1'",
":",
"(",
"0",
",",
"0",
",",
"9",
",",
"18",
",",
"6",
",",
"12",
")",
",",
"'window2'",
":",
"(",
"5",
",",
"12",
",",
"23",
",",
"23",
",",
"6",
",",
"12",
")",
",",
"'klower1'",
":",
"(",
"17",
",",
"17",
",",
"0",
",",
"11",
",",
"0",
",",
"6",
")",
",",
"'klower2'",
":",
"(",
"19",
",",
"19",
",",
"0",
",",
"11",
",",
"0",
",",
"6",
")",
",",
"'klower3'",
":",
"(",
"17",
",",
"19",
",",
"0",
",",
"0",
",",
"0",
",",
"6",
")",
",",
"'klower4'",
":",
"(",
"17",
",",
"19",
",",
"11",
",",
"11",
",",
"0",
",",
"6",
")",
",",
"'klower5'",
":",
"(",
"17",
",",
"19",
",",
"0",
",",
"11",
",",
"0",
",",
"0",
")",
",",
"'klower6'",
":",
"(",
"17",
",",
"19",
",",
"0",
",",
"7",
",",
"6",
",",
"6",
")",
",",
"'klower7'",
":",
"(",
"17",
",",
"19",
",",
"9",
",",
"11",
",",
"6",
",",
"6",
")",
",",
"'hood1'",
":",
"(",
"17",
",",
"17",
",",
"0",
",",
"11",
",",
"11",
",",
"16",
")",
",",
"'hood2'",
":",
"(",
"19",
",",
"19",
",",
"0",
",",
"11",
",",
"11",
",",
"16",
")",
",",
"'hood3'",
":",
"(",
"17",
",",
"19",
",",
"0",
",",
"0",
",",
"11",
",",
"16",
")",
",",
"'hood4'",
":",
"(",
"17",
",",
"19",
",",
"11",
",",
"11",
",",
"11",
",",
"16",
")",
",",
"'hood5'",
":",
"(",
"17",
",",
"19",
",",
"0",
",",
"11",
",",
"16",
",",
"16",
")",
",",
"'cookingPlate'",
":",
"(",
"17",
",",
"19",
",",
"7",
",",
"9",
",",
"6",
",",
"6",
")",
",",
"'furniture'",
":",
"(",
"17",
",",
"19",
",",
"7",
",",
"9",
",",
"11",
",",
"11",
")",
",",
"}",
"kitchen",
"=",
"vtki",
".",
"MultiBlock",
"(",
")",
"for",
"key",
",",
"extent",
"in",
"extents",
".",
"items",
"(",
")",
":",
"alg",
"=",
"vtk",
".",
"vtkStructuredGridGeometryFilter",
"(",
")",
"alg",
".",
"SetInputDataObject",
"(",
"mesh",
")",
"alg",
".",
"SetExtent",
"(",
"extent",
")",
"alg",
".",
"Update",
"(",
")",
"result",
"=",
"vtki",
".",
"filters",
".",
"_get_output",
"(",
"alg",
")",
"kitchen",
"[",
"key",
"]",
"=",
"result",
"return",
"kitchen"
] |
Download structured grid of kitchen with velocity field. Use the
``split`` argument to extract all of the furniture in the kitchen.
|
[
"Download",
"structured",
"grid",
"of",
"kitchen",
"with",
"velocity",
"field",
".",
"Use",
"the",
"split",
"argument",
"to",
"extract",
"all",
"of",
"the",
"furniture",
"in",
"the",
"kitchen",
"."
] |
python
|
train
| 37.171429 |
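A hedged usage sketch (vtki was later renamed pyvista; the import path and MultiBlock API below are assumed, and the first call downloads the mesh):

from vtki import examples

kitchen = examples.download_kitchen(split=True)   # vtki.MultiBlock
for name in kitchen.keys():                       # one block per furniture piece
    print(name, kitchen[name].n_points)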
numenta/nupic
|
src/nupic/swarming/hypersearch/support.py
|
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/swarming/hypersearch/support.py#L117-L136
|
def makeDirectoryFromAbsolutePath(absDirPath):
""" Makes directory for the given directory path with default permissions.
If the directory already exists, it is treated as success.
absDirPath: absolute path of the directory to create.
Returns: absDirPath arg
Exceptions: OSError if directory creation fails
"""
assert os.path.isabs(absDirPath)
try:
os.makedirs(absDirPath)
except OSError, e:
if e.errno != os.errno.EEXIST:
raise
return absDirPath
|
[
"def",
"makeDirectoryFromAbsolutePath",
"(",
"absDirPath",
")",
":",
"assert",
"os",
".",
"path",
".",
"isabs",
"(",
"absDirPath",
")",
"try",
":",
"os",
".",
"makedirs",
"(",
"absDirPath",
")",
"except",
"OSError",
",",
"e",
":",
"if",
"e",
".",
"errno",
"!=",
"os",
".",
"errno",
".",
"EEXIST",
":",
"raise",
"return",
"absDirPath"
] |
Makes directory for the given directory path with default permissions.
If the directory already exists, it is treated as success.
absDirPath: absolute path of the directory to create.
Returns: absDirPath arg
Exceptions: OSError if directory creation fails
|
[
"Makes",
"directory",
"for",
"the",
"given",
"directory",
"path",
"with",
"default",
"permissions",
".",
"If",
"the",
"directory",
"already",
"exists",
"it",
"is",
"treated",
"as",
"success",
"."
] |
python
|
valid
| 24.1 |
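The `except OSError, e:` form above is Python 2 syntax; under Python 3, `exist_ok` removes the EEXIST handling entirely. A rewrite as a sketch, not the project's code:

import os

def make_directory_from_absolute_path(abs_dir_path):
    # Python 3 equivalent: exist_ok makes the errno check unnecessary.
    assert os.path.isabs(abs_dir_path)
    os.makedirs(abs_dir_path, exist_ok=True)
    return abs_dir_path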
glue-viz/glue-vispy-viewers
|
glue_vispy_viewers/extern/vispy/app/canvas.py
|
https://github.com/glue-viz/glue-vispy-viewers/blob/54a4351d98c1f90dfb1a557d1b447c1f57470eea/glue_vispy_viewers/extern/vispy/app/canvas.py#L302-L330
|
def connect(self, fun):
""" Connect a function to an event
The name of the function
should be on_X, with X the name of the event (e.g. 'on_draw').
This method is typically used as a decorator on a function
definition for an event handler.
Parameters
----------
fun : callable
The function.
"""
# Get and check name
name = fun.__name__
if not name.startswith('on_'):
raise ValueError('When connecting a function based on its name, '
'the name should start with "on_"')
eventname = name[3:]
# Get emitter
try:
emitter = self.events[eventname]
except KeyError:
raise ValueError(
'Event "%s" not available on this canvas.' %
eventname)
# Connect
emitter.connect(fun)
|
[
"def",
"connect",
"(",
"self",
",",
"fun",
")",
":",
"# Get and check name",
"name",
"=",
"fun",
".",
"__name__",
"if",
"not",
"name",
".",
"startswith",
"(",
"'on_'",
")",
":",
"raise",
"ValueError",
"(",
"'When connecting a function based on its name, '",
"'the name should start with \"on_\"'",
")",
"eventname",
"=",
"name",
"[",
"3",
":",
"]",
"# Get emitter",
"try",
":",
"emitter",
"=",
"self",
".",
"events",
"[",
"eventname",
"]",
"except",
"KeyError",
":",
"raise",
"ValueError",
"(",
"'Event \"%s\" not available on this canvas.'",
"%",
"eventname",
")",
"# Connect",
"emitter",
".",
"connect",
"(",
"fun",
")"
] |
Connect a function to an event
The name of the function
should be on_X, with X the name of the event (e.g. 'on_draw').
This method is typically used as a decorator on a function
definition for an event handler.
Parameters
----------
fun : callable
The function.
|
[
"Connect",
"a",
"function",
"to",
"an",
"event"
] |
python
|
train
| 30.689655 |
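Typical use is as a decorator, where the `on_` prefix selects the event; a minimal sketch assuming a freshly constructed vispy canvas:

from vispy import app

canvas = app.Canvas()

@canvas.connect
def on_draw(event):
    # 'on_draw' maps to canvas.events.draw; a name without the 'on_'
    # prefix, or an unknown event, makes connect() raise ValueError.
    pass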
openai/baselines
|
baselines/common/plot_util.py
|
https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/common/plot_util.py#L39-L109
|
def one_sided_ema(xolds, yolds, low=None, high=None, n=512, decay_steps=1., low_counts_threshold=1e-8):
'''
perform one-sided (causal) EMA (exponential moving average)
smoothing and resampling to an even grid with n points.
Does not do extrapolation, so we assume
xolds[0] <= low && high <= xolds[-1]
Arguments:
xolds: array or list - x values of data. Needs to be sorted in ascending order
yolds: array or list - y values of data. Has to have the same length as xolds
low: float - min value of the new x grid. By default equals to xolds[0]
high: float - max value of the new x grid. By default equals to xolds[-1]
n: int - number of points in new x grid
decay_steps: float - EMA decay factor, expressed in new x grid steps.
low_counts_threshold: float or int
- y values with counts less than this value will be set to NaN
Returns:
tuple xs, ys, count_ys where
xs - array with new x grid
ys - array of EMA of y at each point of the new x grid
count_ys - array of EMA of y counts at each point of the new x grid
'''
low = xolds[0] if low is None else low
high = xolds[-1] if high is None else high
assert xolds[0] <= low, 'low = {} < xolds[0] = {} - extrapolation not permitted!'.format(low, xolds[0])
assert xolds[-1] >= high, 'high = {} > xolds[-1] = {} - extrapolation not permitted!'.format(high, xolds[-1])
assert len(xolds) == len(yolds), 'length of xolds ({}) and yolds ({}) do not match!'.format(len(xolds), len(yolds))
xolds = xolds.astype('float64')
yolds = yolds.astype('float64')
luoi = 0 # last unused old index
sum_y = 0.
count_y = 0.
xnews = np.linspace(low, high, n)
decay_period = (high - low) / (n - 1) * decay_steps
interstep_decay = np.exp(- 1. / decay_steps)
sum_ys = np.zeros_like(xnews)
count_ys = np.zeros_like(xnews)
for i in range(n):
xnew = xnews[i]
sum_y *= interstep_decay
count_y *= interstep_decay
while True:
xold = xolds[luoi]
if xold <= xnew:
decay = np.exp(- (xnew - xold) / decay_period)
sum_y += decay * yolds[luoi]
count_y += decay
luoi += 1
else:
break
if luoi >= len(xolds):
break
sum_ys[i] = sum_y
count_ys[i] = count_y
ys = sum_ys / count_ys
ys[count_ys < low_counts_threshold] = np.nan
return xnews, ys, count_ys
|
[
"def",
"one_sided_ema",
"(",
"xolds",
",",
"yolds",
",",
"low",
"=",
"None",
",",
"high",
"=",
"None",
",",
"n",
"=",
"512",
",",
"decay_steps",
"=",
"1.",
",",
"low_counts_threshold",
"=",
"1e-8",
")",
":",
"low",
"=",
"xolds",
"[",
"0",
"]",
"if",
"low",
"is",
"None",
"else",
"low",
"high",
"=",
"xolds",
"[",
"-",
"1",
"]",
"if",
"high",
"is",
"None",
"else",
"high",
"assert",
"xolds",
"[",
"0",
"]",
"<=",
"low",
",",
"'low = {} < xolds[0] = {} - extrapolation not permitted!'",
".",
"format",
"(",
"low",
",",
"xolds",
"[",
"0",
"]",
")",
"assert",
"xolds",
"[",
"-",
"1",
"]",
">=",
"high",
",",
"'high = {} > xolds[-1] = {} - extrapolation not permitted!'",
".",
"format",
"(",
"high",
",",
"xolds",
"[",
"-",
"1",
"]",
")",
"assert",
"len",
"(",
"xolds",
")",
"==",
"len",
"(",
"yolds",
")",
",",
"'length of xolds ({}) and yolds ({}) do not match!'",
".",
"format",
"(",
"len",
"(",
"xolds",
")",
",",
"len",
"(",
"yolds",
")",
")",
"xolds",
"=",
"xolds",
".",
"astype",
"(",
"'float64'",
")",
"yolds",
"=",
"yolds",
".",
"astype",
"(",
"'float64'",
")",
"luoi",
"=",
"0",
"# last unused old index",
"sum_y",
"=",
"0.",
"count_y",
"=",
"0.",
"xnews",
"=",
"np",
".",
"linspace",
"(",
"low",
",",
"high",
",",
"n",
")",
"decay_period",
"=",
"(",
"high",
"-",
"low",
")",
"/",
"(",
"n",
"-",
"1",
")",
"*",
"decay_steps",
"interstep_decay",
"=",
"np",
".",
"exp",
"(",
"-",
"1.",
"/",
"decay_steps",
")",
"sum_ys",
"=",
"np",
".",
"zeros_like",
"(",
"xnews",
")",
"count_ys",
"=",
"np",
".",
"zeros_like",
"(",
"xnews",
")",
"for",
"i",
"in",
"range",
"(",
"n",
")",
":",
"xnew",
"=",
"xnews",
"[",
"i",
"]",
"sum_y",
"*=",
"interstep_decay",
"count_y",
"*=",
"interstep_decay",
"while",
"True",
":",
"xold",
"=",
"xolds",
"[",
"luoi",
"]",
"if",
"xold",
"<=",
"xnew",
":",
"decay",
"=",
"np",
".",
"exp",
"(",
"-",
"(",
"xnew",
"-",
"xold",
")",
"/",
"decay_period",
")",
"sum_y",
"+=",
"decay",
"*",
"yolds",
"[",
"luoi",
"]",
"count_y",
"+=",
"decay",
"luoi",
"+=",
"1",
"else",
":",
"break",
"if",
"luoi",
">=",
"len",
"(",
"xolds",
")",
":",
"break",
"sum_ys",
"[",
"i",
"]",
"=",
"sum_y",
"count_ys",
"[",
"i",
"]",
"=",
"count_y",
"ys",
"=",
"sum_ys",
"/",
"count_ys",
"ys",
"[",
"count_ys",
"<",
"low_counts_threshold",
"]",
"=",
"np",
".",
"nan",
"return",
"xnews",
",",
"ys",
",",
"count_ys"
] |
perform one-sided (causal) EMA (exponential moving average)
smoothing and resampling to an even grid with n points.
Does not do extrapolation, so we assume
xolds[0] <= low && high <= xolds[-1]
Arguments:
xolds: array or list - x values of data. Needs to be sorted in ascending order
yolds: array or list - y values of data. Has to have the same length as xolds
low: float - min value of the new x grid. By default equals to xolds[0]
high: float - max value of the new x grid. By default equals to xolds[-1]
n: int - number of points in new x grid
decay_steps: float - EMA decay factor, expressed in new x grid steps.
low_counts_threshold: float or int
- y values with counts less than this value will be set to NaN
Returns:
tuple xs, ys, count_ys where
xs - array with new x grid
ys - array of EMA of y at each point of the new x grid
count_ys - array of EMA of y counts at each point of the new x grid
|
[
"perform",
"one",
"-",
"sided",
"(",
"causal",
")",
"EMA",
"(",
"exponential",
"moving",
"average",
")",
"smoothing",
"and",
"resampling",
"to",
"an",
"even",
"grid",
"with",
"n",
"points",
".",
"Does",
"not",
"do",
"extrapolation",
"so",
"we",
"assume",
"xolds",
"[",
"0",
"]",
"<",
"=",
"low",
"&&",
"high",
"<",
"=",
"xolds",
"[",
"-",
"1",
"]"
] |
python
|
valid
| 35.971831 |
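A minimal numpy sketch of calling `one_sided_ema` on noisy data; the signal and parameters are illustrative only:

import numpy as np

xs = np.linspace(0.0, 10.0, 200)              # sorted ascending, as required
ys = np.sin(xs) + 0.3 * np.random.randn(200)  # noisy signal

grid, smoothed, counts = one_sided_ema(xs, ys, n=128, decay_steps=5.0)
# `smoothed` is NaN wherever the decayed sample count falls below
# low_counts_threshold; `counts` exposes that weight directly.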