repo (string, 7-54 chars) | path (string, 4-192 chars) | url (string, 87-284 chars) | code (string, 78-104k chars) | code_tokens (list) | docstring (string, 1-46.9k chars) | docstring_tokens (list) | language (string, 1 distinct value) | partition (string, 3 distinct values)
---|---|---|---|---|---|---|---|---|
praekeltfoundation/marathon-acme
|
marathon_acme/cli.py
|
https://github.com/praekeltfoundation/marathon-acme/blob/b1b71e3dde0ba30e575089280658bd32890e3325/marathon_acme/cli.py#L241-L268
|
def init_storage_dir(storage_dir):
    """
    Initialise the storage directory with the certificates directory and a
    default wildcard self-signed certificate for HAProxy.
    :return: the storage path and certs path
    """
    storage_path = FilePath(storage_dir)

    # Create the default wildcard certificate if it doesn't already exist
    default_cert_path = storage_path.child('default.pem')
    if not default_cert_path.exists():
        default_cert_path.setContent(generate_wildcard_pem_bytes())

    # Create a directory for unmanaged certs. We don't touch this again, but it
    # needs to be there and it makes sense to create it at the same time as
    # everything else.
    unmanaged_certs_path = storage_path.child('unmanaged-certs')
    if not unmanaged_certs_path.exists():
        unmanaged_certs_path.createDirectory()

    # Store certificates in a directory inside the storage directory, so
    # HAProxy will read just the certificates there.
    certs_path = storage_path.child('certs')
    if not certs_path.exists():
        certs_path.createDirectory()

    return storage_path, certs_path
|
[
"def",
"init_storage_dir",
"(",
"storage_dir",
")",
":",
"storage_path",
"=",
"FilePath",
"(",
"storage_dir",
")",
"# Create the default wildcard certificate if it doesn't already exist",
"default_cert_path",
"=",
"storage_path",
".",
"child",
"(",
"'default.pem'",
")",
"if",
"not",
"default_cert_path",
".",
"exists",
"(",
")",
":",
"default_cert_path",
".",
"setContent",
"(",
"generate_wildcard_pem_bytes",
"(",
")",
")",
"# Create a directory for unmanaged certs. We don't touch this again, but it",
"# needs to be there and it makes sense to create it at the same time as",
"# everything else.",
"unmanaged_certs_path",
"=",
"storage_path",
".",
"child",
"(",
"'unmanaged-certs'",
")",
"if",
"not",
"unmanaged_certs_path",
".",
"exists",
"(",
")",
":",
"unmanaged_certs_path",
".",
"createDirectory",
"(",
")",
"# Store certificates in a directory inside the storage directory, so",
"# HAProxy will read just the certificates there.",
"certs_path",
"=",
"storage_path",
".",
"child",
"(",
"'certs'",
")",
"if",
"not",
"certs_path",
".",
"exists",
"(",
")",
":",
"certs_path",
".",
"createDirectory",
"(",
")",
"return",
"storage_path",
",",
"certs_path"
] |
Initialise the storage directory with the certificates directory and a
default wildcard self-signed certificate for HAProxy.
:return: the storage path and certs path
|
[
"Initialise",
"the",
"storage",
"directory",
"with",
"the",
"certificates",
"directory",
"and",
"a",
"default",
"wildcard",
"self",
"-",
"signed",
"certificate",
"for",
"HAProxy",
"."
] |
python
|
valid
|
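The `init_storage_dir` record above returns the storage and certs paths as Twisted `FilePath` objects. A minimal usage sketch, assuming the `marathon_acme` package is importable and using a made-up directory:

```python
# Illustrative sketch only: call init_storage_dir from the record above.
# '/tmp/marathon-acme' is a placeholder; the function expects the directory to exist.
import os
from marathon_acme.cli import init_storage_dir

storage_dir = '/tmp/marathon-acme'
os.makedirs(storage_dir, exist_ok=True)
storage_path, certs_path = init_storage_dir(storage_dir)
print(storage_path.path)  # the storage directory itself
print(certs_path.path)    # the 'certs' subdirectory that HAProxy reads
```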
merll/docker-map
|
dockermap/map/config/main.py
|
https://github.com/merll/docker-map/blob/e14fe86a6ff5c33d121eb2f9157e9359cb80dd02/dockermap/map/config/main.py#L502-L521
|
def get_extended(self, config):
    """
    Generates a configuration that includes all inherited values.
    :param config: Container configuration.
    :type config: ContainerConfiguration
    :return: A merged (shallow) copy of all inherited configurations merged with the container configuration.
    :rtype: ContainerConfiguration
    """
    if not config.extends or self._extended:
        return config
    extended_config = ContainerConfiguration()
    for ext_name in config.extends:
        ext_cfg_base = self._containers.get(ext_name)
        if not ext_cfg_base:
            raise KeyError(ext_name)
        ext_cfg = self.get_extended(ext_cfg_base)
        extended_config.merge_from_obj(ext_cfg)
    extended_config.merge_from_obj(config)
    return extended_config
|
[
"def",
"get_extended",
"(",
"self",
",",
"config",
")",
":",
"if",
"not",
"config",
".",
"extends",
"or",
"self",
".",
"_extended",
":",
"return",
"config",
"extended_config",
"=",
"ContainerConfiguration",
"(",
")",
"for",
"ext_name",
"in",
"config",
".",
"extends",
":",
"ext_cfg_base",
"=",
"self",
".",
"_containers",
".",
"get",
"(",
"ext_name",
")",
"if",
"not",
"ext_cfg_base",
":",
"raise",
"KeyError",
"(",
"ext_name",
")",
"ext_cfg",
"=",
"self",
".",
"get_extended",
"(",
"ext_cfg_base",
")",
"extended_config",
".",
"merge_from_obj",
"(",
"ext_cfg",
")",
"extended_config",
".",
"merge_from_obj",
"(",
"config",
")",
"return",
"extended_config"
] |
Generates a configuration that includes all inherited values.
:param config: Container configuration.
:type config: ContainerConfiguration
:return: A merged (shallow) copy of all inherited configurations merged with the container configuration.
:rtype: ContainerConfiguration
|
[
"Generates",
"a",
"configuration",
"that",
"includes",
"all",
"inherited",
"values",
"."
] |
python
|
train
|
hustlzp/Flask-Boost
|
flask_boost/cli.py
|
https://github.com/hustlzp/Flask-Boost/blob/d0308408ebb248dd752b77123b845f8ec637fab2/flask_boost/cli.py#L49-L98
|
def generate_project(args):
    """New project."""
    # Project templates path
    src = os.path.join(dirname(abspath(__file__)), 'project')

    project_name = args.get('<project>')
    if not project_name:
        logger.warning('Project name cannot be empty.')
        return

    # Destination project path
    dst = os.path.join(os.getcwd(), project_name)
    if os.path.isdir(dst):
        logger.warning('Project directory already exists.')
        return

    logger.info('Start generating project files.')
    _mkdir_p(dst)

    for src_dir, sub_dirs, filenames in os.walk(src):
        # Build and create destination directory path
        relative_path = src_dir.split(src)[1].lstrip(os.path.sep)
        dst_dir = os.path.join(dst, relative_path)
        if src != src_dir:
            _mkdir_p(dst_dir)

        # Copy, rewrite and move project files
        for filename in filenames:
            if filename in ['development.py', 'production.py']:
                continue

            src_file = os.path.join(src_dir, filename)
            dst_file = os.path.join(dst_dir, filename)

            if filename.endswith(REWRITE_FILE_EXTS):
                _rewrite_and_copy(src_file, dst_file, project_name)
            else:
                shutil.copy(src_file, dst_file)

            logger.info("New: %s" % dst_file)

            if filename in ['development_sample.py', 'production_sample.py']:
                dst_file = os.path.join(dst_dir, "%s.py" % filename.split('_')[0])
                _rewrite_and_copy(src_file, dst_file, project_name)
                logger.info("New: %s" % dst_file)

    logger.info('Finish generating project files.')
|
[
"def",
"generate_project",
"(",
"args",
")",
":",
"# Project templates path",
"src",
"=",
"os",
".",
"path",
".",
"join",
"(",
"dirname",
"(",
"abspath",
"(",
"__file__",
")",
")",
",",
"'project'",
")",
"project_name",
"=",
"args",
".",
"get",
"(",
"'<project>'",
")",
"if",
"not",
"project_name",
":",
"logger",
".",
"warning",
"(",
"'Project name cannot be empty.'",
")",
"return",
"# Destination project path",
"dst",
"=",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"getcwd",
"(",
")",
",",
"project_name",
")",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"dst",
")",
":",
"logger",
".",
"warning",
"(",
"'Project directory already exists.'",
")",
"return",
"logger",
".",
"info",
"(",
"'Start generating project files.'",
")",
"_mkdir_p",
"(",
"dst",
")",
"for",
"src_dir",
",",
"sub_dirs",
",",
"filenames",
"in",
"os",
".",
"walk",
"(",
"src",
")",
":",
"# Build and create destination directory path",
"relative_path",
"=",
"src_dir",
".",
"split",
"(",
"src",
")",
"[",
"1",
"]",
".",
"lstrip",
"(",
"os",
".",
"path",
".",
"sep",
")",
"dst_dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"dst",
",",
"relative_path",
")",
"if",
"src",
"!=",
"src_dir",
":",
"_mkdir_p",
"(",
"dst_dir",
")",
"# Copy, rewrite and move project files",
"for",
"filename",
"in",
"filenames",
":",
"if",
"filename",
"in",
"[",
"'development.py'",
",",
"'production.py'",
"]",
":",
"continue",
"src_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"src_dir",
",",
"filename",
")",
"dst_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"dst_dir",
",",
"filename",
")",
"if",
"filename",
".",
"endswith",
"(",
"REWRITE_FILE_EXTS",
")",
":",
"_rewrite_and_copy",
"(",
"src_file",
",",
"dst_file",
",",
"project_name",
")",
"else",
":",
"shutil",
".",
"copy",
"(",
"src_file",
",",
"dst_file",
")",
"logger",
".",
"info",
"(",
"\"New: %s\"",
"%",
"dst_file",
")",
"if",
"filename",
"in",
"[",
"'development_sample.py'",
",",
"'production_sample.py'",
"]",
":",
"dst_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"dst_dir",
",",
"\"%s.py\"",
"%",
"filename",
".",
"split",
"(",
"'_'",
")",
"[",
"0",
"]",
")",
"_rewrite_and_copy",
"(",
"src_file",
",",
"dst_file",
",",
"project_name",
")",
"logger",
".",
"info",
"(",
"\"New: %s\"",
"%",
"dst_file",
")",
"logger",
".",
"info",
"(",
"'Finish generating project files.'",
")"
] |
New project.
|
[
"New",
"project",
"."
] |
python
|
test
|
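`generate_project` above takes a docopt-style arguments dict, walks the bundled `project` templates, and copies or rewrites them into a new directory. A minimal sketch of driving it directly, assuming flask_boost is installed and using a made-up project name:

```python
# Illustrative sketch only: call generate_project from the Flask-Boost record above.
# 'myapp' is a placeholder project name; a ./myapp directory is scaffolded from the
# templates shipped with the package.
from flask_boost.cli import generate_project

generate_project({'<project>': 'myapp'})
```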
bcbio/bcbio-nextgen
|
bcbio/structural/cnvkit.py
|
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/cnvkit.py#L490-L503
|
def _add_seg_to_output(out, data, enumerate_chroms=False):
    """Export outputs to 'seg' format compatible with IGV and GenePattern.
    """
    out_file = "%s.seg" % os.path.splitext(out["cns"])[0]
    if not utils.file_exists(out_file):
        with file_transaction(data, out_file) as tx_out_file:
            cmd = [os.path.join(os.path.dirname(sys.executable), "cnvkit.py"), "export",
                   "seg"]
            if enumerate_chroms:
                cmd += ["--enumerate-chroms"]
            cmd += ["-o", tx_out_file, out["cns"]]
            do.run(cmd, "CNVkit export seg")
    out["seg"] = out_file
    return out
|
[
"def",
"_add_seg_to_output",
"(",
"out",
",",
"data",
",",
"enumerate_chroms",
"=",
"False",
")",
":",
"out_file",
"=",
"\"%s.seg\"",
"%",
"os",
".",
"path",
".",
"splitext",
"(",
"out",
"[",
"\"cns\"",
"]",
")",
"[",
"0",
"]",
"if",
"not",
"utils",
".",
"file_exists",
"(",
"out_file",
")",
":",
"with",
"file_transaction",
"(",
"data",
",",
"out_file",
")",
"as",
"tx_out_file",
":",
"cmd",
"=",
"[",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"sys",
".",
"executable",
")",
",",
"\"cnvkit.py\"",
")",
",",
"\"export\"",
",",
"\"seg\"",
"]",
"if",
"enumerate_chroms",
":",
"cmd",
"+=",
"[",
"\"--enumerate-chroms\"",
"]",
"cmd",
"+=",
"[",
"\"-o\"",
",",
"tx_out_file",
",",
"out",
"[",
"\"cns\"",
"]",
"]",
"do",
".",
"run",
"(",
"cmd",
",",
"\"CNVkit export seg\"",
")",
"out",
"[",
"\"seg\"",
"]",
"=",
"out_file",
"return",
"out"
] |
Export outputs to 'seg' format compatible with IGV and GenePattern.
|
[
"Export",
"outputs",
"to",
"seg",
"format",
"compatible",
"with",
"IGV",
"and",
"GenePattern",
"."
] |
python
|
train
|
pebble/libpebble2
|
libpebble2/protocol/base/types.py
|
https://github.com/pebble/libpebble2/blob/23e2eb92cfc084e6f9e8c718711ac994ef606d18/libpebble2/protocol/base/types.py#L43-L71
|
def buffer_to_value(self, obj, buffer, offset, default_endianness=DEFAULT_ENDIANNESS):
    """
    Converts the bytes in ``buffer`` at ``offset`` to a native Python value. Returns that value and the number of
    bytes consumed to create it.
    :param obj: The parent :class:`.PebblePacket` of this field
    :type obj: .PebblePacket
    :param buffer: The buffer from which to extract a value.
    :type buffer: bytes
    :param offset: The offset in the buffer to start at.
    :type offset: int
    :param default_endianness: The default endianness of the value. Used if ``endianness`` was not passed to the
                               :class:`Field` constructor.
    :type default_endianness: str
    :return: (value, length)
    :rtype: (:class:`object`, :any:`int`)
    """
    try:
        value, length = struct.unpack_from(str(self.endianness or default_endianness)
                                           + self.struct_format, buffer, offset)[0], struct.calcsize(self.struct_format)
        if self._enum is not None:
            try:
                return self._enum(value), length
            except ValueError as e:
                raise PacketDecodeError("{}: {}".format(self.type, e))
        else:
            return value, length
    except struct.error as e:
        raise PacketDecodeError("{}: {}".format(self.type, e))
|
[
"def",
"buffer_to_value",
"(",
"self",
",",
"obj",
",",
"buffer",
",",
"offset",
",",
"default_endianness",
"=",
"DEFAULT_ENDIANNESS",
")",
":",
"try",
":",
"value",
",",
"length",
"=",
"struct",
".",
"unpack_from",
"(",
"str",
"(",
"self",
".",
"endianness",
"or",
"default_endianness",
")",
"+",
"self",
".",
"struct_format",
",",
"buffer",
",",
"offset",
")",
"[",
"0",
"]",
",",
"struct",
".",
"calcsize",
"(",
"self",
".",
"struct_format",
")",
"if",
"self",
".",
"_enum",
"is",
"not",
"None",
":",
"try",
":",
"return",
"self",
".",
"_enum",
"(",
"value",
")",
",",
"length",
"except",
"ValueError",
"as",
"e",
":",
"raise",
"PacketDecodeError",
"(",
"\"{}: {}\"",
".",
"format",
"(",
"self",
".",
"type",
",",
"e",
")",
")",
"else",
":",
"return",
"value",
",",
"length",
"except",
"struct",
".",
"error",
"as",
"e",
":",
"raise",
"PacketDecodeError",
"(",
"\"{}: {}\"",
".",
"format",
"(",
"self",
".",
"type",
",",
"e",
")",
")"
] |
Converts the bytes in ``buffer`` at ``offset`` to a native Python value. Returns that value and the number of
bytes consumed to create it.
:param obj: The parent :class:`.PebblePacket` of this field
:type obj: .PebblePacket
:param buffer: The buffer from which to extract a value.
:type buffer: bytes
:param offset: The offset in the buffer to start at.
:type offset: int
:param default_endianness: The default endianness of the value. Used if ``endianness`` was not passed to the
:class:`Field` constructor.
:type default_endianness: str
:return: (value, length)
:rtype: (:class:`object`, :any:`int`)
|
[
"Converts",
"the",
"bytes",
"in",
"buffer",
"at",
"offset",
"to",
"a",
"native",
"Python",
"value",
".",
"Returns",
"that",
"value",
"and",
"the",
"number",
"of",
"bytes",
"consumed",
"to",
"create",
"it",
"."
] |
python
|
train
|
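At its core, `buffer_to_value` above is `struct.unpack_from` with an endianness prefix plus `struct.calcsize` for the number of bytes consumed. A standalone sketch of that pattern using only the standard library; the format strings and sample buffer are made up:

```python
# Standalone sketch of the decode pattern in buffer_to_value above:
# unpack one value at an offset and report how many bytes it consumed.
import struct

def decode_field(buf, offset, struct_format, endianness='<'):
    value = struct.unpack_from(endianness + struct_format, buf, offset)[0]
    length = struct.calcsize(struct_format)
    return value, length

payload = struct.pack('<HI', 0x1234, 42)  # a little-endian uint16 followed by a uint32
print(decode_field(payload, 0, 'H'))      # (4660, 2)
print(decode_field(payload, 2, 'I'))      # (42, 4)
```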
StackStorm/pybind
|
pybind/nos/v6_0_2f/logging/__init__.py
|
https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/nos/v6_0_2f/logging/__init__.py#L232-L253
|
def _set_syslog_client(self, v, load=False):
    """
    Setter method for syslog_client, mapped from YANG variable /logging/syslog_client (container)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_syslog_client is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_syslog_client() directly.
    """
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(v,base=syslog_client.syslog_client, is_container='container', presence=False, yang_name="syslog-client", rest_name="syslog-client", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Syslog Client configurations', u'callpoint': u'RASSysFcCallPoint'}}, namespace='urn:brocade.com:mgmt:brocade-ras', defining_module='brocade-ras', yang_type='container', is_config=True)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """syslog_client must be of a type compatible with container""",
            'defined-type': "container",
            'generated-type': """YANGDynClass(base=syslog_client.syslog_client, is_container='container', presence=False, yang_name="syslog-client", rest_name="syslog-client", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Syslog Client configurations', u'callpoint': u'RASSysFcCallPoint'}}, namespace='urn:brocade.com:mgmt:brocade-ras', defining_module='brocade-ras', yang_type='container', is_config=True)""",
        })
    self.__syslog_client = t
    if hasattr(self, '_set'):
        self._set()
|
[
"def",
"_set_syslog_client",
"(",
"self",
",",
"v",
",",
"load",
"=",
"False",
")",
":",
"if",
"hasattr",
"(",
"v",
",",
"\"_utype\"",
")",
":",
"v",
"=",
"v",
".",
"_utype",
"(",
"v",
")",
"try",
":",
"t",
"=",
"YANGDynClass",
"(",
"v",
",",
"base",
"=",
"syslog_client",
".",
"syslog_client",
",",
"is_container",
"=",
"'container'",
",",
"presence",
"=",
"False",
",",
"yang_name",
"=",
"\"syslog-client\"",
",",
"rest_name",
"=",
"\"syslog-client\"",
",",
"parent",
"=",
"self",
",",
"path_helper",
"=",
"self",
".",
"_path_helper",
",",
"extmethods",
"=",
"self",
".",
"_extmethods",
",",
"register_paths",
"=",
"True",
",",
"extensions",
"=",
"{",
"u'tailf-common'",
":",
"{",
"u'info'",
":",
"u'Syslog Client configurations'",
",",
"u'callpoint'",
":",
"u'RASSysFcCallPoint'",
"}",
"}",
",",
"namespace",
"=",
"'urn:brocade.com:mgmt:brocade-ras'",
",",
"defining_module",
"=",
"'brocade-ras'",
",",
"yang_type",
"=",
"'container'",
",",
"is_config",
"=",
"True",
")",
"except",
"(",
"TypeError",
",",
"ValueError",
")",
":",
"raise",
"ValueError",
"(",
"{",
"'error-string'",
":",
"\"\"\"syslog_client must be of a type compatible with container\"\"\"",
",",
"'defined-type'",
":",
"\"container\"",
",",
"'generated-type'",
":",
"\"\"\"YANGDynClass(base=syslog_client.syslog_client, is_container='container', presence=False, yang_name=\"syslog-client\", rest_name=\"syslog-client\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Syslog Client configurations', u'callpoint': u'RASSysFcCallPoint'}}, namespace='urn:brocade.com:mgmt:brocade-ras', defining_module='brocade-ras', yang_type='container', is_config=True)\"\"\"",
",",
"}",
")",
"self",
".",
"__syslog_client",
"=",
"t",
"if",
"hasattr",
"(",
"self",
",",
"'_set'",
")",
":",
"self",
".",
"_set",
"(",
")"
] |
Setter method for syslog_client, mapped from YANG variable /logging/syslog_client (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_syslog_client is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_syslog_client() directly.
|
[
"Setter",
"method",
"for",
"syslog_client",
"mapped",
"from",
"YANG",
"variable",
"/",
"logging",
"/",
"syslog_client",
"(",
"container",
")",
"If",
"this",
"variable",
"is",
"read",
"-",
"only",
"(",
"config",
":",
"false",
")",
"in",
"the",
"source",
"YANG",
"file",
"then",
"_set_syslog_client",
"is",
"considered",
"as",
"a",
"private",
"method",
".",
"Backends",
"looking",
"to",
"populate",
"this",
"variable",
"should",
"do",
"so",
"via",
"calling",
"thisObj",
".",
"_set_syslog_client",
"()",
"directly",
"."
] |
python
|
train
|
Clinical-Genomics/scout
|
scout/server/blueprints/cases/controllers.py
|
https://github.com/Clinical-Genomics/scout/blob/90a551e2e1653a319e654c2405c2866f93d0ebb9/scout/server/blueprints/cases/controllers.py#L167-L258
|
def case_report_content(store, institute_obj, case_obj):
    """Gather contents to be visualized in a case report
    Args:
        store(adapter.MongoAdapter)
        institute_obj(models.Institute)
        case_obj(models.Case)
    Returns:
        data(dict)
    """
    variant_types = {
        'causatives_detailed': 'causatives',
        'suspects_detailed': 'suspects',
        'classified_detailed': 'acmg_classification',
        'tagged_detailed': 'manual_rank',
        'dismissed_detailed': 'dismiss_variant',
        'commented_detailed': 'is_commented',
    }
    data = case_obj

    for individual in data['individuals']:
        try:
            sex = int(individual.get('sex', 0))
        except ValueError as err:
            sex = 0
        individual['sex_human'] = SEX_MAP[sex]
        individual['phenotype_human'] = PHENOTYPE_MAP.get(individual['phenotype'])

    # Add the case comments
    data['comments'] = store.events(institute_obj, case=case_obj, comments=True)
    data['manual_rank_options'] = MANUAL_RANK_OPTIONS
    data['dismissed_options'] = DISMISS_VARIANT_OPTIONS
    data['genetic_models'] = dict(GENETIC_MODELS)
    data['report_created_at'] = datetime.datetime.now().strftime("%Y-%m-%d %H:%M")

    evaluated_variants = {}
    for vt in variant_types:
        evaluated_variants[vt] = []
    # We collect all causatives and suspected variants
    # These are handled separately since they are on the case level
    for var_type in ['causatives', 'suspects']:
        # These include references to variants
        vt = '_'.join([var_type, 'detailed'])
        for var_id in case_obj.get(var_type, []):
            variant_obj = store.variant(var_id)
            if not variant_obj:
                continue
            # If the variant exists we add it to the evaluated variants
            evaluated_variants[vt].append(variant_obj)

    ## get variants for this case that are either classified, commented, tagged or dismissed.
    for var_obj in store.evaluated_variants(case_id=case_obj['_id']):
        # Check which category it belongs to
        for vt in variant_types:
            keyword = variant_types[vt]
            # When found we add it to the category
            # Each variant can belong to multiple categories
            if keyword in var_obj:
                evaluated_variants[vt].append(var_obj)

    for var_type in evaluated_variants:
        decorated_variants = []
        for var_obj in evaluated_variants[var_type]:
            # We decorate the variant with some extra information
            if var_obj['category'] == 'snv':
                decorated_info = variant_decorator(
                    store=store,
                    institute_obj=institute_obj,
                    case_obj=case_obj,
                    variant_id=None,
                    variant_obj=var_obj,
                    add_case=False,
                    add_other=False,
                    get_overlapping=False
                )
            else:
                decorated_info = sv_variant(
                    store=store,
                    institute_id=institute_obj['_id'],
                    case_name=case_obj['display_name'],
                    variant_obj=var_obj,
                    add_case=False,
                    get_overlapping=False
                )
            decorated_variants.append(decorated_info['variant'])
        # Add the decorated variants to the case
        data[var_type] = decorated_variants

    return data
|
[
"def",
"case_report_content",
"(",
"store",
",",
"institute_obj",
",",
"case_obj",
")",
":",
"variant_types",
"=",
"{",
"'causatives_detailed'",
":",
"'causatives'",
",",
"'suspects_detailed'",
":",
"'suspects'",
",",
"'classified_detailed'",
":",
"'acmg_classification'",
",",
"'tagged_detailed'",
":",
"'manual_rank'",
",",
"'dismissed_detailed'",
":",
"'dismiss_variant'",
",",
"'commented_detailed'",
":",
"'is_commented'",
",",
"}",
"data",
"=",
"case_obj",
"for",
"individual",
"in",
"data",
"[",
"'individuals'",
"]",
":",
"try",
":",
"sex",
"=",
"int",
"(",
"individual",
".",
"get",
"(",
"'sex'",
",",
"0",
")",
")",
"except",
"ValueError",
"as",
"err",
":",
"sex",
"=",
"0",
"individual",
"[",
"'sex_human'",
"]",
"=",
"SEX_MAP",
"[",
"sex",
"]",
"individual",
"[",
"'phenotype_human'",
"]",
"=",
"PHENOTYPE_MAP",
".",
"get",
"(",
"individual",
"[",
"'phenotype'",
"]",
")",
"# Add the case comments",
"data",
"[",
"'comments'",
"]",
"=",
"store",
".",
"events",
"(",
"institute_obj",
",",
"case",
"=",
"case_obj",
",",
"comments",
"=",
"True",
")",
"data",
"[",
"'manual_rank_options'",
"]",
"=",
"MANUAL_RANK_OPTIONS",
"data",
"[",
"'dismissed_options'",
"]",
"=",
"DISMISS_VARIANT_OPTIONS",
"data",
"[",
"'genetic_models'",
"]",
"=",
"dict",
"(",
"GENETIC_MODELS",
")",
"data",
"[",
"'report_created_at'",
"]",
"=",
"datetime",
".",
"datetime",
".",
"now",
"(",
")",
".",
"strftime",
"(",
"\"%Y-%m-%d %H:%M\"",
")",
"evaluated_variants",
"=",
"{",
"}",
"for",
"vt",
"in",
"variant_types",
":",
"evaluated_variants",
"[",
"vt",
"]",
"=",
"[",
"]",
"# We collect all causatives and suspected variants",
"# These are handeled in separate since they are on case level",
"for",
"var_type",
"in",
"[",
"'causatives'",
",",
"'suspects'",
"]",
":",
"#These include references to variants",
"vt",
"=",
"'_'",
".",
"join",
"(",
"[",
"var_type",
",",
"'detailed'",
"]",
")",
"for",
"var_id",
"in",
"case_obj",
".",
"get",
"(",
"var_type",
",",
"[",
"]",
")",
":",
"variant_obj",
"=",
"store",
".",
"variant",
"(",
"var_id",
")",
"if",
"not",
"variant_obj",
":",
"continue",
"# If the variant exists we add it to the evaluated variants",
"evaluated_variants",
"[",
"vt",
"]",
".",
"append",
"(",
"variant_obj",
")",
"## get variants for this case that are either classified, commented, tagged or dismissed.",
"for",
"var_obj",
"in",
"store",
".",
"evaluated_variants",
"(",
"case_id",
"=",
"case_obj",
"[",
"'_id'",
"]",
")",
":",
"# Check which category it belongs to",
"for",
"vt",
"in",
"variant_types",
":",
"keyword",
"=",
"variant_types",
"[",
"vt",
"]",
"# When found we add it to the categpry",
"# Eac variant can belong to multiple categories",
"if",
"keyword",
"in",
"var_obj",
":",
"evaluated_variants",
"[",
"vt",
"]",
".",
"append",
"(",
"var_obj",
")",
"for",
"var_type",
"in",
"evaluated_variants",
":",
"decorated_variants",
"=",
"[",
"]",
"for",
"var_obj",
"in",
"evaluated_variants",
"[",
"var_type",
"]",
":",
"# We decorate the variant with some extra information",
"if",
"var_obj",
"[",
"'category'",
"]",
"==",
"'snv'",
":",
"decorated_info",
"=",
"variant_decorator",
"(",
"store",
"=",
"store",
",",
"institute_obj",
"=",
"institute_obj",
",",
"case_obj",
"=",
"case_obj",
",",
"variant_id",
"=",
"None",
",",
"variant_obj",
"=",
"var_obj",
",",
"add_case",
"=",
"False",
",",
"add_other",
"=",
"False",
",",
"get_overlapping",
"=",
"False",
")",
"else",
":",
"decorated_info",
"=",
"sv_variant",
"(",
"store",
"=",
"store",
",",
"institute_id",
"=",
"institute_obj",
"[",
"'_id'",
"]",
",",
"case_name",
"=",
"case_obj",
"[",
"'display_name'",
"]",
",",
"variant_obj",
"=",
"var_obj",
",",
"add_case",
"=",
"False",
",",
"get_overlapping",
"=",
"False",
")",
"decorated_variants",
".",
"append",
"(",
"decorated_info",
"[",
"'variant'",
"]",
")",
"# Add the decorated variants to the case",
"data",
"[",
"var_type",
"]",
"=",
"decorated_variants",
"return",
"data"
] |
Gather contents to be visualized in a case report
Args:
store(adapter.MongoAdapter)
institute_obj(models.Institute)
case_obj(models.Case)
Returns:
data(dict)
|
[
"Gather",
"contents",
"to",
"be",
"visualized",
"in",
"a",
"case",
"report"
] |
python
|
test
|
fprimex/zdesk
|
zdesk/zdesk_api.py
|
https://github.com/fprimex/zdesk/blob/851611c13b4d530e9df31390b3ec709baf0a0188/zdesk/zdesk_api.py#L473-L477
|
def brand_delete(self, id, **kwargs):
    "https://developer.zendesk.com/rest_api/docs/core/brands#delete-a-brand"
    api_path = "/api/v2/brands/{id}.json"
    api_path = api_path.format(id=id)
    return self.call(api_path, method="DELETE", **kwargs)
|
[
"def",
"brand_delete",
"(",
"self",
",",
"id",
",",
"*",
"*",
"kwargs",
")",
":",
"api_path",
"=",
"\"/api/v2/brands/{id}.json\"",
"api_path",
"=",
"api_path",
".",
"format",
"(",
"id",
"=",
"id",
")",
"return",
"self",
".",
"call",
"(",
"api_path",
",",
"method",
"=",
"\"DELETE\"",
",",
"*",
"*",
"kwargs",
")"
] |
https://developer.zendesk.com/rest_api/docs/core/brands#delete-a-brand
|
[
"https",
":",
"//",
"developer",
".",
"zendesk",
".",
"com",
"/",
"rest_api",
"/",
"docs",
"/",
"core",
"/",
"brands#delete",
"-",
"a",
"-",
"brand"
] |
python
|
train
|
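`brand_delete` above is a thin wrapper that formats the brand id into the API path and issues a DELETE. A hedged usage sketch; the client construction follows zdesk's token-auth convention and should be checked against its README, and every credential and id below is a placeholder:

```python
# Illustrative sketch only: call brand_delete from the zdesk record above.
# URL, e-mail, API token and brand id are placeholders.
from zdesk import Zendesk

client = Zendesk('https://example.zendesk.com', 'agent@example.com',
                 'api_token_here', True)  # True selects token authentication
client.brand_delete(360000123456)         # issues DELETE /api/v2/brands/{id}.json
```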
barrust/mediawiki
|
mediawiki/mediawiki.py
|
https://github.com/barrust/mediawiki/blob/292e0be6c752409062dceed325d74839caf16a9b/mediawiki/mediawiki.py#L371-L389
|
def random(self, pages=1):
    """ Request a random page title or list of random titles
    Args:
        pages (int): Number of random pages to return
    Returns:
        list or int: A list of random page titles or a random page \
        title if pages = 1 """
    if pages is None or pages < 1:
        raise ValueError("Number of pages must be greater than 0")

    query_params = {"list": "random", "rnnamespace": 0, "rnlimit": pages}
    request = self.wiki_request(query_params)
    titles = [page["title"] for page in request["query"]["random"]]
    if len(titles) == 1:
        return titles[0]
    return titles
|
[
"def",
"random",
"(",
"self",
",",
"pages",
"=",
"1",
")",
":",
"if",
"pages",
"is",
"None",
"or",
"pages",
"<",
"1",
":",
"raise",
"ValueError",
"(",
"\"Number of pages must be greater than 0\"",
")",
"query_params",
"=",
"{",
"\"list\"",
":",
"\"random\"",
",",
"\"rnnamespace\"",
":",
"0",
",",
"\"rnlimit\"",
":",
"pages",
"}",
"request",
"=",
"self",
".",
"wiki_request",
"(",
"query_params",
")",
"titles",
"=",
"[",
"page",
"[",
"\"title\"",
"]",
"for",
"page",
"in",
"request",
"[",
"\"query\"",
"]",
"[",
"\"random\"",
"]",
"]",
"if",
"len",
"(",
"titles",
")",
"==",
"1",
":",
"return",
"titles",
"[",
"0",
"]",
"return",
"titles"
] |
Request a random page title or list of random titles
Args:
pages (int): Number of random pages to return
Returns:
list or int: A list of random page titles or a random page \
title if pages = 1
|
[
"Request",
"a",
"random",
"page",
"title",
"or",
"list",
"of",
"random",
"titles"
] |
python
|
train
|
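The `random` method above wraps a single `list=random` API query and unwraps one-element results. A minimal sketch, assuming the pymediawiki package this file belongs to is installed; the returned titles will of course vary:

```python
# Illustrative sketch only: call random() from the barrust/mediawiki record above.
from mediawiki import MediaWiki

wiki = MediaWiki()
print(wiki.random())          # a single random page title (pages defaults to 1)
print(wiki.random(pages=3))   # a list of three random titles
```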
rootpy/rootpy
|
rootpy/plotting/graph.py
|
https://github.com/rootpy/rootpy/blob/3926935e1f2100d8ba68070c2ab44055d4800f73/rootpy/plotting/graph.py#L620-L638
|
def Append(self, other):
    """
    Append points from another graph
    """
    orig_len = len(self)
    self.Set(orig_len + len(other))
    ipoint = orig_len
    if hasattr(self, 'SetPointError'):
        for point in other:
            self.SetPoint(ipoint, point.x.value, point.y.value)
            self.SetPointError(
                ipoint,
                point.x.error_low, point.x.error_hi,
                point.y.error_low, point.y.error_hi)
            ipoint += 1
    else:
        for point in other:
            self.SetPoint(ipoint, point.x.value, point.y.value)
            ipoint += 1
|
[
"def",
"Append",
"(",
"self",
",",
"other",
")",
":",
"orig_len",
"=",
"len",
"(",
"self",
")",
"self",
".",
"Set",
"(",
"orig_len",
"+",
"len",
"(",
"other",
")",
")",
"ipoint",
"=",
"orig_len",
"if",
"hasattr",
"(",
"self",
",",
"'SetPointError'",
")",
":",
"for",
"point",
"in",
"other",
":",
"self",
".",
"SetPoint",
"(",
"ipoint",
",",
"point",
".",
"x",
".",
"value",
",",
"point",
".",
"y",
".",
"value",
")",
"self",
".",
"SetPointError",
"(",
"ipoint",
",",
"point",
".",
"x",
".",
"error_low",
",",
"point",
".",
"x",
".",
"error_hi",
",",
"point",
".",
"y",
".",
"error_low",
",",
"point",
".",
"y",
".",
"error_hi",
")",
"ipoint",
"+=",
"1",
"else",
":",
"for",
"point",
"in",
"other",
":",
"self",
".",
"SetPoint",
"(",
"ipoint",
",",
"point",
".",
"x",
".",
"value",
",",
"point",
".",
"y",
".",
"value",
")",
"ipoint",
"+=",
"1"
] |
Append points from another graph
|
[
"Append",
"points",
"from",
"another",
"graph"
] |
python
|
train
|
Erotemic/utool
|
utool/experimental/euler_tour_tree_avl.py
|
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/experimental/euler_tour_tree_avl.py#L834-L846
|
def avl_new_top(t1, t2, top, direction=0):
    """
    if direction == 0:
        (t1, t2) is (left, right)
    if direction == 1:
        (t1, t2) is (right, left)
    """
    top.parent = None
    assert top.parent is None, str(top.parent.value)
    top.set_child(direction, t1)
    top.set_child(1 - direction, t2)
    top.balance = max(height(t1), height(t2)) + 1
    return top
|
[
"def",
"avl_new_top",
"(",
"t1",
",",
"t2",
",",
"top",
",",
"direction",
"=",
"0",
")",
":",
"top",
".",
"parent",
"=",
"None",
"assert",
"top",
".",
"parent",
"is",
"None",
",",
"str",
"(",
"top",
".",
"parent",
".",
"value",
")",
"top",
".",
"set_child",
"(",
"direction",
",",
"t1",
")",
"top",
".",
"set_child",
"(",
"1",
"-",
"direction",
",",
"t2",
")",
"top",
".",
"balance",
"=",
"max",
"(",
"height",
"(",
"t1",
")",
",",
"height",
"(",
"t2",
")",
")",
"+",
"1",
"return",
"top"
] |
if direction == 0:
(t1, t2) is (left, right)
if direction == 1:
(t1, t2) is (right, left)
|
[
"if",
"direction",
"==",
"0",
":",
"(",
"t1",
"t2",
")",
"is",
"(",
"left",
"right",
")",
"if",
"direction",
"==",
"1",
":",
"(",
"t1",
"t2",
")",
"is",
"(",
"right",
"left",
")"
] |
python
|
train
|
secdev/scapy
|
scapy/modules/p0f.py
|
https://github.com/secdev/scapy/blob/3ffe757c184017dd46464593a8f80f85abc1e79a/scapy/modules/p0f.py#L363-L566
|
def p0f_impersonate(pkt, osgenre=None, osdetails=None, signature=None,
                    extrahops=0, mtu=1500, uptime=None):
    """Modifies pkt so that p0f will think it has been sent by a
    specific OS. If osdetails is None, then we randomly pick up a
    personality matching osgenre. If osgenre and signature are also None,
    we use a local signature (using p0f_getlocalsigs). If signature is
    specified (as a tuple), we use the signature.
    For now, only TCP Syn packets are supported.
    Some specifications of the p0f.fp file are not (yet) implemented."""
    pkt = pkt.copy()
    # pkt = pkt.__class__(raw(pkt))
    while pkt.haslayer(IP) and pkt.haslayer(TCP):
        pkt = pkt.getlayer(IP)
        if isinstance(pkt.payload, TCP):
            break
        pkt = pkt.payload

    if not isinstance(pkt, IP) or not isinstance(pkt.payload, TCP):
        raise TypeError("Not a TCP/IP packet")

    db = p0f_selectdb(pkt.payload.flags)

    if osgenre:
        pb = db.get_base()
        if pb is None:
            pb = []
        pb = [x for x in pb if x[6] == osgenre]
        if osdetails:
            pb = [x for x in pb if x[7] == osdetails]
    elif signature:
        pb = [signature]
    else:
        pb = p0f_getlocalsigs()[db]
    if db == p0fr_kdb:
        # 'K' quirk <=> RST+ACK
        if pkt.payload.flags & 0x4 == 0x4:
            pb = [x for x in pb if 'K' in x[5]]
        else:
            pb = [x for x in pb if 'K' not in x[5]]
    if not pb:
        raise Scapy_Exception("No match in the p0f database")
    pers = pb[random.randint(0, len(pb) - 1)]

    # options (we start with options because of MSS)
    # Take the options already set as "hints" to use in the new packet if we
    # can. MSS, WScale and Timestamp can all be wildcarded in a signature, so
    # we'll use the already-set values if they're valid integers.
    orig_opts = dict(pkt.payload.options)
    int_only = lambda val: val if isinstance(val, six.integer_types) else None
    mss_hint = int_only(orig_opts.get('MSS'))
    wscale_hint = int_only(orig_opts.get('WScale'))
    ts_hint = [int_only(o) for o in orig_opts.get('Timestamp', (None, None))]

    options = []
    if pers[4] != '.':
        for opt in pers[4].split(','):
            if opt[0] == 'M':
                # MSS might have a maximum size because of window size
                # specification
                if pers[0][0] == 'S':
                    maxmss = (2**16 - 1) // int(pers[0][1:])
                else:
                    maxmss = (2**16 - 1)
                # disregard hint if out of range
                if mss_hint and not 0 <= mss_hint <= maxmss:
                    mss_hint = None
                # If we have to randomly pick up a value, we cannot use
                # scapy RandXXX() functions, because the value has to be
                # set in case we need it for the window size value. That's
                # why we use random.randint()
                if opt[1:] == '*':
                    if mss_hint is not None:
                        options.append(('MSS', mss_hint))
                    else:
                        options.append(('MSS', random.randint(1, maxmss)))
                elif opt[1] == '%':
                    coef = int(opt[2:])
                    if mss_hint is not None and mss_hint % coef == 0:
                        options.append(('MSS', mss_hint))
                    else:
                        options.append((
                            'MSS', coef * random.randint(1, maxmss // coef)))
                else:
                    options.append(('MSS', int(opt[1:])))
            elif opt[0] == 'W':
                if wscale_hint and not 0 <= wscale_hint < 2**8:
                    wscale_hint = None
                if opt[1:] == '*':
                    if wscale_hint is not None:
                        options.append(('WScale', wscale_hint))
                    else:
                        options.append(('WScale', RandByte()))
                elif opt[1] == '%':
                    coef = int(opt[2:])
                    if wscale_hint is not None and wscale_hint % coef == 0:
                        options.append(('WScale', wscale_hint))
                    else:
                        options.append((
                            'WScale', coef * RandNum(min=1, max=(2**8 - 1) // coef)))  # noqa: E501
                else:
                    options.append(('WScale', int(opt[1:])))
            elif opt == 'T0':
                options.append(('Timestamp', (0, 0)))
            elif opt == 'T':
                # Determine first timestamp.
                if uptime is not None:
                    ts_a = uptime
                elif ts_hint[0] and 0 < ts_hint[0] < 2**32:
                    # Note: if first ts is 0, p0f registers it as "T0" not "T",
                    # hence we don't want to use the hint if it was 0.
                    ts_a = ts_hint[0]
                else:
                    ts_a = random.randint(120, 100 * 60 * 60 * 24 * 365)
                # Determine second timestamp.
                if 'T' not in pers[5]:
                    ts_b = 0
                elif ts_hint[1] and 0 < ts_hint[1] < 2**32:
                    ts_b = ts_hint[1]
                else:
                    # FIXME: RandInt() here does not work (bug (?) in
                    # TCPOptionsField.m2i often raises "OverflowError:
                    # long int too large to convert to int" in:
                    # oval = struct.pack(ofmt, *oval)"
                    # Actually, this is enough to often raise the error:
                    # struct.pack('I', RandInt())
                    ts_b = random.randint(1, 2**32 - 1)
                options.append(('Timestamp', (ts_a, ts_b)))
            elif opt == 'S':
                options.append(('SAckOK', ''))
            elif opt == 'N':
                options.append(('NOP', None))
            elif opt == 'E':
                options.append(('EOL', None))
            elif opt[0] == '?':
                if int(opt[1:]) in TCPOptions[0]:
                    optname = TCPOptions[0][int(opt[1:])][0]
                    optstruct = TCPOptions[0][int(opt[1:])][1]
                    options.append((optname,
                                    struct.unpack(optstruct,
                                                  RandString(struct.calcsize(optstruct))._fix())))  # noqa: E501
                else:
                    options.append((int(opt[1:]), ''))
                # FIXME: qqP not handled
            else:
                warning("unhandled TCP option " + opt)
        pkt.payload.options = options

    # window size
    if pers[0] == '*':
        pkt.payload.window = RandShort()
    elif pers[0].isdigit():
        pkt.payload.window = int(pers[0])
    elif pers[0][0] == '%':
        coef = int(pers[0][1:])
        pkt.payload.window = coef * RandNum(min=1, max=(2**16 - 1) // coef)
    elif pers[0][0] == 'T':
        pkt.payload.window = mtu * int(pers[0][1:])
    elif pers[0][0] == 'S':
        # needs MSS set
        mss = [x for x in options if x[0] == 'MSS']
        if not mss:
            raise Scapy_Exception("TCP window value requires MSS, and MSS option not set")  # noqa: E501
        pkt.payload.window = mss[0][1] * int(pers[0][1:])
    else:
        raise Scapy_Exception('Unhandled window size specification')

    # ttl
    pkt.ttl = pers[1] - extrahops
    # DF flag
    pkt.flags |= (2 * pers[2])
    # FIXME: ss (packet size) not handled (how ? may be with D quirk
    # if present)
    # Quirks
    if pers[5] != '.':
        for qq in pers[5]:
            # FIXME: not handled: P, I, X, !
            # T handled with the Timestamp option
            if qq == 'Z':
                pkt.id = 0
            elif qq == 'U':
                pkt.payload.urgptr = RandShort()
            elif qq == 'A':
                pkt.payload.ack = RandInt()
            elif qq == 'F':
                if db == p0fo_kdb:
                    pkt.payload.flags |= 0x20  # U
                else:
                    pkt.payload.flags |= random.choice([8, 32, 40])  # P/U/PU
            elif qq == 'D' and db != p0fo_kdb:
                pkt /= conf.raw_layer(load=RandString(random.randint(1, 10)))  # XXX p0fo.fp # noqa: E501
            elif qq == 'Q':
                pkt.payload.seq = pkt.payload.ack
            # elif qq == '0': pkt.payload.seq = 0
            # if db == p0fr_kdb:
    # '0' quirk is actually not only for p0fr.fp (see
    # packet2p0f())
    if '0' in pers[5]:
        pkt.payload.seq = 0
    elif pkt.payload.seq == 0:
        pkt.payload.seq = RandInt()

    while pkt.underlayer:
        pkt = pkt.underlayer
    return pkt
|
[
"def",
"p0f_impersonate",
"(",
"pkt",
",",
"osgenre",
"=",
"None",
",",
"osdetails",
"=",
"None",
",",
"signature",
"=",
"None",
",",
"extrahops",
"=",
"0",
",",
"mtu",
"=",
"1500",
",",
"uptime",
"=",
"None",
")",
":",
"pkt",
"=",
"pkt",
".",
"copy",
"(",
")",
"# pkt = pkt.__class__(raw(pkt))",
"while",
"pkt",
".",
"haslayer",
"(",
"IP",
")",
"and",
"pkt",
".",
"haslayer",
"(",
"TCP",
")",
":",
"pkt",
"=",
"pkt",
".",
"getlayer",
"(",
"IP",
")",
"if",
"isinstance",
"(",
"pkt",
".",
"payload",
",",
"TCP",
")",
":",
"break",
"pkt",
"=",
"pkt",
".",
"payload",
"if",
"not",
"isinstance",
"(",
"pkt",
",",
"IP",
")",
"or",
"not",
"isinstance",
"(",
"pkt",
".",
"payload",
",",
"TCP",
")",
":",
"raise",
"TypeError",
"(",
"\"Not a TCP/IP packet\"",
")",
"db",
"=",
"p0f_selectdb",
"(",
"pkt",
".",
"payload",
".",
"flags",
")",
"if",
"osgenre",
":",
"pb",
"=",
"db",
".",
"get_base",
"(",
")",
"if",
"pb",
"is",
"None",
":",
"pb",
"=",
"[",
"]",
"pb",
"=",
"[",
"x",
"for",
"x",
"in",
"pb",
"if",
"x",
"[",
"6",
"]",
"==",
"osgenre",
"]",
"if",
"osdetails",
":",
"pb",
"=",
"[",
"x",
"for",
"x",
"in",
"pb",
"if",
"x",
"[",
"7",
"]",
"==",
"osdetails",
"]",
"elif",
"signature",
":",
"pb",
"=",
"[",
"signature",
"]",
"else",
":",
"pb",
"=",
"p0f_getlocalsigs",
"(",
")",
"[",
"db",
"]",
"if",
"db",
"==",
"p0fr_kdb",
":",
"# 'K' quirk <=> RST+ACK",
"if",
"pkt",
".",
"payload",
".",
"flags",
"&",
"0x4",
"==",
"0x4",
":",
"pb",
"=",
"[",
"x",
"for",
"x",
"in",
"pb",
"if",
"'K'",
"in",
"x",
"[",
"5",
"]",
"]",
"else",
":",
"pb",
"=",
"[",
"x",
"for",
"x",
"in",
"pb",
"if",
"'K'",
"not",
"in",
"x",
"[",
"5",
"]",
"]",
"if",
"not",
"pb",
":",
"raise",
"Scapy_Exception",
"(",
"\"No match in the p0f database\"",
")",
"pers",
"=",
"pb",
"[",
"random",
".",
"randint",
"(",
"0",
",",
"len",
"(",
"pb",
")",
"-",
"1",
")",
"]",
"# options (we start with options because of MSS)",
"# Take the options already set as \"hints\" to use in the new packet if we",
"# can. MSS, WScale and Timestamp can all be wildcarded in a signature, so",
"# we'll use the already-set values if they're valid integers.",
"orig_opts",
"=",
"dict",
"(",
"pkt",
".",
"payload",
".",
"options",
")",
"int_only",
"=",
"lambda",
"val",
":",
"val",
"if",
"isinstance",
"(",
"val",
",",
"six",
".",
"integer_types",
")",
"else",
"None",
"mss_hint",
"=",
"int_only",
"(",
"orig_opts",
".",
"get",
"(",
"'MSS'",
")",
")",
"wscale_hint",
"=",
"int_only",
"(",
"orig_opts",
".",
"get",
"(",
"'WScale'",
")",
")",
"ts_hint",
"=",
"[",
"int_only",
"(",
"o",
")",
"for",
"o",
"in",
"orig_opts",
".",
"get",
"(",
"'Timestamp'",
",",
"(",
"None",
",",
"None",
")",
")",
"]",
"options",
"=",
"[",
"]",
"if",
"pers",
"[",
"4",
"]",
"!=",
"'.'",
":",
"for",
"opt",
"in",
"pers",
"[",
"4",
"]",
".",
"split",
"(",
"','",
")",
":",
"if",
"opt",
"[",
"0",
"]",
"==",
"'M'",
":",
"# MSS might have a maximum size because of window size",
"# specification",
"if",
"pers",
"[",
"0",
"]",
"[",
"0",
"]",
"==",
"'S'",
":",
"maxmss",
"=",
"(",
"2",
"**",
"16",
"-",
"1",
")",
"//",
"int",
"(",
"pers",
"[",
"0",
"]",
"[",
"1",
":",
"]",
")",
"else",
":",
"maxmss",
"=",
"(",
"2",
"**",
"16",
"-",
"1",
")",
"# disregard hint if out of range",
"if",
"mss_hint",
"and",
"not",
"0",
"<=",
"mss_hint",
"<=",
"maxmss",
":",
"mss_hint",
"=",
"None",
"# If we have to randomly pick up a value, we cannot use",
"# scapy RandXXX() functions, because the value has to be",
"# set in case we need it for the window size value. That's",
"# why we use random.randint()",
"if",
"opt",
"[",
"1",
":",
"]",
"==",
"'*'",
":",
"if",
"mss_hint",
"is",
"not",
"None",
":",
"options",
".",
"append",
"(",
"(",
"'MSS'",
",",
"mss_hint",
")",
")",
"else",
":",
"options",
".",
"append",
"(",
"(",
"'MSS'",
",",
"random",
".",
"randint",
"(",
"1",
",",
"maxmss",
")",
")",
")",
"elif",
"opt",
"[",
"1",
"]",
"==",
"'%'",
":",
"coef",
"=",
"int",
"(",
"opt",
"[",
"2",
":",
"]",
")",
"if",
"mss_hint",
"is",
"not",
"None",
"and",
"mss_hint",
"%",
"coef",
"==",
"0",
":",
"options",
".",
"append",
"(",
"(",
"'MSS'",
",",
"mss_hint",
")",
")",
"else",
":",
"options",
".",
"append",
"(",
"(",
"'MSS'",
",",
"coef",
"*",
"random",
".",
"randint",
"(",
"1",
",",
"maxmss",
"//",
"coef",
")",
")",
")",
"else",
":",
"options",
".",
"append",
"(",
"(",
"'MSS'",
",",
"int",
"(",
"opt",
"[",
"1",
":",
"]",
")",
")",
")",
"elif",
"opt",
"[",
"0",
"]",
"==",
"'W'",
":",
"if",
"wscale_hint",
"and",
"not",
"0",
"<=",
"wscale_hint",
"<",
"2",
"**",
"8",
":",
"wscale_hint",
"=",
"None",
"if",
"opt",
"[",
"1",
":",
"]",
"==",
"'*'",
":",
"if",
"wscale_hint",
"is",
"not",
"None",
":",
"options",
".",
"append",
"(",
"(",
"'WScale'",
",",
"wscale_hint",
")",
")",
"else",
":",
"options",
".",
"append",
"(",
"(",
"'WScale'",
",",
"RandByte",
"(",
")",
")",
")",
"elif",
"opt",
"[",
"1",
"]",
"==",
"'%'",
":",
"coef",
"=",
"int",
"(",
"opt",
"[",
"2",
":",
"]",
")",
"if",
"wscale_hint",
"is",
"not",
"None",
"and",
"wscale_hint",
"%",
"coef",
"==",
"0",
":",
"options",
".",
"append",
"(",
"(",
"'WScale'",
",",
"wscale_hint",
")",
")",
"else",
":",
"options",
".",
"append",
"(",
"(",
"'WScale'",
",",
"coef",
"*",
"RandNum",
"(",
"min",
"=",
"1",
",",
"max",
"=",
"(",
"2",
"**",
"8",
"-",
"1",
")",
"//",
"coef",
")",
")",
")",
"# noqa: E501",
"else",
":",
"options",
".",
"append",
"(",
"(",
"'WScale'",
",",
"int",
"(",
"opt",
"[",
"1",
":",
"]",
")",
")",
")",
"elif",
"opt",
"==",
"'T0'",
":",
"options",
".",
"append",
"(",
"(",
"'Timestamp'",
",",
"(",
"0",
",",
"0",
")",
")",
")",
"elif",
"opt",
"==",
"'T'",
":",
"# Determine first timestamp.",
"if",
"uptime",
"is",
"not",
"None",
":",
"ts_a",
"=",
"uptime",
"elif",
"ts_hint",
"[",
"0",
"]",
"and",
"0",
"<",
"ts_hint",
"[",
"0",
"]",
"<",
"2",
"**",
"32",
":",
"# Note: if first ts is 0, p0f registers it as \"T0\" not \"T\",",
"# hence we don't want to use the hint if it was 0.",
"ts_a",
"=",
"ts_hint",
"[",
"0",
"]",
"else",
":",
"ts_a",
"=",
"random",
".",
"randint",
"(",
"120",
",",
"100",
"*",
"60",
"*",
"60",
"*",
"24",
"*",
"365",
")",
"# Determine second timestamp.",
"if",
"'T'",
"not",
"in",
"pers",
"[",
"5",
"]",
":",
"ts_b",
"=",
"0",
"elif",
"ts_hint",
"[",
"1",
"]",
"and",
"0",
"<",
"ts_hint",
"[",
"1",
"]",
"<",
"2",
"**",
"32",
":",
"ts_b",
"=",
"ts_hint",
"[",
"1",
"]",
"else",
":",
"# FIXME: RandInt() here does not work (bug (?) in",
"# TCPOptionsField.m2i often raises \"OverflowError:",
"# long int too large to convert to int\" in:",
"# oval = struct.pack(ofmt, *oval)\"",
"# Actually, this is enough to often raise the error:",
"# struct.pack('I', RandInt())",
"ts_b",
"=",
"random",
".",
"randint",
"(",
"1",
",",
"2",
"**",
"32",
"-",
"1",
")",
"options",
".",
"append",
"(",
"(",
"'Timestamp'",
",",
"(",
"ts_a",
",",
"ts_b",
")",
")",
")",
"elif",
"opt",
"==",
"'S'",
":",
"options",
".",
"append",
"(",
"(",
"'SAckOK'",
",",
"''",
")",
")",
"elif",
"opt",
"==",
"'N'",
":",
"options",
".",
"append",
"(",
"(",
"'NOP'",
",",
"None",
")",
")",
"elif",
"opt",
"==",
"'E'",
":",
"options",
".",
"append",
"(",
"(",
"'EOL'",
",",
"None",
")",
")",
"elif",
"opt",
"[",
"0",
"]",
"==",
"'?'",
":",
"if",
"int",
"(",
"opt",
"[",
"1",
":",
"]",
")",
"in",
"TCPOptions",
"[",
"0",
"]",
":",
"optname",
"=",
"TCPOptions",
"[",
"0",
"]",
"[",
"int",
"(",
"opt",
"[",
"1",
":",
"]",
")",
"]",
"[",
"0",
"]",
"optstruct",
"=",
"TCPOptions",
"[",
"0",
"]",
"[",
"int",
"(",
"opt",
"[",
"1",
":",
"]",
")",
"]",
"[",
"1",
"]",
"options",
".",
"append",
"(",
"(",
"optname",
",",
"struct",
".",
"unpack",
"(",
"optstruct",
",",
"RandString",
"(",
"struct",
".",
"calcsize",
"(",
"optstruct",
")",
")",
".",
"_fix",
"(",
")",
")",
")",
")",
"# noqa: E501",
"else",
":",
"options",
".",
"append",
"(",
"(",
"int",
"(",
"opt",
"[",
"1",
":",
"]",
")",
",",
"''",
")",
")",
"# FIXME: qqP not handled",
"else",
":",
"warning",
"(",
"\"unhandled TCP option \"",
"+",
"opt",
")",
"pkt",
".",
"payload",
".",
"options",
"=",
"options",
"# window size",
"if",
"pers",
"[",
"0",
"]",
"==",
"'*'",
":",
"pkt",
".",
"payload",
".",
"window",
"=",
"RandShort",
"(",
")",
"elif",
"pers",
"[",
"0",
"]",
".",
"isdigit",
"(",
")",
":",
"pkt",
".",
"payload",
".",
"window",
"=",
"int",
"(",
"pers",
"[",
"0",
"]",
")",
"elif",
"pers",
"[",
"0",
"]",
"[",
"0",
"]",
"==",
"'%'",
":",
"coef",
"=",
"int",
"(",
"pers",
"[",
"0",
"]",
"[",
"1",
":",
"]",
")",
"pkt",
".",
"payload",
".",
"window",
"=",
"coef",
"*",
"RandNum",
"(",
"min",
"=",
"1",
",",
"max",
"=",
"(",
"2",
"**",
"16",
"-",
"1",
")",
"//",
"coef",
")",
"elif",
"pers",
"[",
"0",
"]",
"[",
"0",
"]",
"==",
"'T'",
":",
"pkt",
".",
"payload",
".",
"window",
"=",
"mtu",
"*",
"int",
"(",
"pers",
"[",
"0",
"]",
"[",
"1",
":",
"]",
")",
"elif",
"pers",
"[",
"0",
"]",
"[",
"0",
"]",
"==",
"'S'",
":",
"# needs MSS set",
"mss",
"=",
"[",
"x",
"for",
"x",
"in",
"options",
"if",
"x",
"[",
"0",
"]",
"==",
"'MSS'",
"]",
"if",
"not",
"mss",
":",
"raise",
"Scapy_Exception",
"(",
"\"TCP window value requires MSS, and MSS option not set\"",
")",
"# noqa: E501",
"pkt",
".",
"payload",
".",
"window",
"=",
"mss",
"[",
"0",
"]",
"[",
"1",
"]",
"*",
"int",
"(",
"pers",
"[",
"0",
"]",
"[",
"1",
":",
"]",
")",
"else",
":",
"raise",
"Scapy_Exception",
"(",
"'Unhandled window size specification'",
")",
"# ttl",
"pkt",
".",
"ttl",
"=",
"pers",
"[",
"1",
"]",
"-",
"extrahops",
"# DF flag",
"pkt",
".",
"flags",
"|=",
"(",
"2",
"*",
"pers",
"[",
"2",
"]",
")",
"# FIXME: ss (packet size) not handled (how ? may be with D quirk",
"# if present)",
"# Quirks",
"if",
"pers",
"[",
"5",
"]",
"!=",
"'.'",
":",
"for",
"qq",
"in",
"pers",
"[",
"5",
"]",
":",
"# FIXME: not handled: P, I, X, !",
"# T handled with the Timestamp option",
"if",
"qq",
"==",
"'Z'",
":",
"pkt",
".",
"id",
"=",
"0",
"elif",
"qq",
"==",
"'U'",
":",
"pkt",
".",
"payload",
".",
"urgptr",
"=",
"RandShort",
"(",
")",
"elif",
"qq",
"==",
"'A'",
":",
"pkt",
".",
"payload",
".",
"ack",
"=",
"RandInt",
"(",
")",
"elif",
"qq",
"==",
"'F'",
":",
"if",
"db",
"==",
"p0fo_kdb",
":",
"pkt",
".",
"payload",
".",
"flags",
"|=",
"0x20",
"# U",
"else",
":",
"pkt",
".",
"payload",
".",
"flags",
"|=",
"random",
".",
"choice",
"(",
"[",
"8",
",",
"32",
",",
"40",
"]",
")",
"# P/U/PU",
"elif",
"qq",
"==",
"'D'",
"and",
"db",
"!=",
"p0fo_kdb",
":",
"pkt",
"/=",
"conf",
".",
"raw_layer",
"(",
"load",
"=",
"RandString",
"(",
"random",
".",
"randint",
"(",
"1",
",",
"10",
")",
")",
")",
"# XXX p0fo.fp # noqa: E501",
"elif",
"qq",
"==",
"'Q'",
":",
"pkt",
".",
"payload",
".",
"seq",
"=",
"pkt",
".",
"payload",
".",
"ack",
"# elif qq == '0': pkt.payload.seq = 0",
"# if db == p0fr_kdb:",
"# '0' quirk is actually not only for p0fr.fp (see",
"# packet2p0f())",
"if",
"'0'",
"in",
"pers",
"[",
"5",
"]",
":",
"pkt",
".",
"payload",
".",
"seq",
"=",
"0",
"elif",
"pkt",
".",
"payload",
".",
"seq",
"==",
"0",
":",
"pkt",
".",
"payload",
".",
"seq",
"=",
"RandInt",
"(",
")",
"while",
"pkt",
".",
"underlayer",
":",
"pkt",
"=",
"pkt",
".",
"underlayer",
"return",
"pkt"
] |
Modifies pkt so that p0f will think it has been sent by a
specific OS. If osdetails is None, then we randomly pick up a
personality matching osgenre. If osgenre and signature are also None,
we use a local signature (using p0f_getlocalsigs). If signature is
specified (as a tuple), we use the signature.
For now, only TCP Syn packets are supported.
Some specifications of the p0f.fp file are not (yet) implemented.
|
[
"Modifies",
"pkt",
"so",
"that",
"p0f",
"will",
"think",
"it",
"has",
"been",
"sent",
"by",
"a",
"specific",
"OS",
".",
"If",
"osdetails",
"is",
"None",
"then",
"we",
"randomly",
"pick",
"up",
"a",
"personality",
"matching",
"osgenre",
".",
"If",
"osgenre",
"and",
"signature",
"are",
"also",
"None",
"we",
"use",
"a",
"local",
"signature",
"(",
"using",
"p0f_getlocalsigs",
")",
".",
"If",
"signature",
"is",
"specified",
"(",
"as",
"a",
"tuple",
")",
"we",
"use",
"the",
"signature",
"."
] |
python
|
train
|
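`p0f_impersonate` above rewrites a TCP SYN so that p0f fingerprints it as a chosen OS. A hedged sketch of calling it; it assumes scapy's p0f module and its signature database are available, and the address and OS genre below are placeholders:

```python
# Illustrative sketch only: feed a TCP SYN through p0f_impersonate from the record above.
from scapy.layers.inet import IP, TCP
from scapy.modules.p0f import p0f_impersonate

syn = IP(dst="192.0.2.1") / TCP(dport=80, flags="S")
faked = p0f_impersonate(syn, osgenre="Linux")  # pick any personality matching "Linux"
faked.show()
```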
gwastro/pycbc
|
pycbc/workflow/jobsetup.py
|
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/workflow/jobsetup.py#L506-L518
|
def pick_tile_size(self, seg_size, data_lengths, valid_chunks, valid_lengths):
    """ Choose job tiles size based on science segment length """
    if len(valid_lengths) == 1:
        return data_lengths[0], valid_chunks[0], valid_lengths[0]
    else:
        # Pick the tile size that is closest to 1/3 of the science segment
        target_size = seg_size / 3
        pick, pick_diff = 0, abs(valid_lengths[0] - target_size)
        for i, size in enumerate(valid_lengths):
            if abs(size - target_size) < pick_diff:
                pick, pick_diff = i, abs(size - target_size)
        return data_lengths[pick], valid_chunks[pick], valid_lengths[pick]
|
[
"def",
"pick_tile_size",
"(",
"self",
",",
"seg_size",
",",
"data_lengths",
",",
"valid_chunks",
",",
"valid_lengths",
")",
":",
"if",
"len",
"(",
"valid_lengths",
")",
"==",
"1",
":",
"return",
"data_lengths",
"[",
"0",
"]",
",",
"valid_chunks",
"[",
"0",
"]",
",",
"valid_lengths",
"[",
"0",
"]",
"else",
":",
"# Pick the tile size that is closest to 1/3 of the science segment",
"target_size",
"=",
"seg_size",
"/",
"3",
"pick",
",",
"pick_diff",
"=",
"0",
",",
"abs",
"(",
"valid_lengths",
"[",
"0",
"]",
"-",
"target_size",
")",
"for",
"i",
",",
"size",
"in",
"enumerate",
"(",
"valid_lengths",
")",
":",
"if",
"abs",
"(",
"size",
"-",
"target_size",
")",
"<",
"pick_diff",
":",
"pick",
",",
"pick_diff",
"=",
"i",
",",
"abs",
"(",
"size",
"-",
"target_size",
")",
"return",
"data_lengths",
"[",
"pick",
"]",
",",
"valid_chunks",
"[",
"pick",
"]",
",",
"valid_lengths",
"[",
"pick",
"]"
] |
Choose job tiles size based on science segment length
|
[
"Choose",
"job",
"tiles",
"size",
"based",
"on",
"science",
"segment",
"length"
] |
python
|
train
|
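The selection loop in `pick_tile_size` above scans for the candidate length closest to one third of the science segment. The same rule written as a standalone sketch with `min` over indices; the lengths are made up, and like the loop, ties go to the earliest candidate:

```python
# Standalone sketch of the selection rule in pick_tile_size above:
# choose the candidate whose length is closest to one third of the segment.
seg_size = 4096
valid_lengths = [512, 1024, 2048]   # made-up candidate tile lengths

target = seg_size / 3               # ~1365.3
pick = min(range(len(valid_lengths)), key=lambda i: abs(valid_lengths[i] - target))
print(pick, valid_lengths[pick])    # 1 1024
```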
OpenKMIP/PyKMIP
|
kmip/core/objects.py
|
https://github.com/OpenKMIP/PyKMIP/blob/b51c5b044bd05f8c85a1d65d13a583a4d8fc1b0e/kmip/core/objects.py#L250-L310
|
def read(self, input_buffer, kmip_version=enums.KMIPVersion.KMIP_2_0):
    """
    Read the data stream and decode the AttributeReference structure into
    its parts.
    Args:
        input_buffer (stream): A data stream containing encoded object
            data, supporting a read method.
        kmip_version (enum): A KMIPVersion enumeration defining the KMIP
            version with which the object will be decoded. Optional,
            defaults to KMIP 2.0.
    Raises:
        InvalidKmipEncoding: Raised if the vendor identification or
            attribute name is missing from the encoding.
        VersionNotSupported: Raised when a KMIP version is provided that
            does not support the AttributeReference structure.
    """
    if kmip_version < enums.KMIPVersion.KMIP_2_0:
        raise exceptions.VersionNotSupported(
            "KMIP {} does not support the AttributeReference "
            "object.".format(
                kmip_version.value
            )
        )

    super(AttributeReference, self).read(
        input_buffer,
        kmip_version=kmip_version
    )
    local_buffer = BytearrayStream(input_buffer.read(self.length))

    if self.is_tag_next(enums.Tags.VENDOR_IDENTIFICATION, local_buffer):
        self._vendor_identification = primitives.TextString(
            tag=enums.Tags.VENDOR_IDENTIFICATION
        )
        self._vendor_identification.read(
            local_buffer,
            kmip_version=kmip_version
        )
    else:
        raise exceptions.InvalidKmipEncoding(
            "The AttributeReference encoding is missing the vendor "
            "identification string."
        )

    if self.is_tag_next(enums.Tags.ATTRIBUTE_NAME, local_buffer):
        self._attribute_name = primitives.TextString(
            tag=enums.Tags.ATTRIBUTE_NAME
        )
        self._attribute_name.read(
            local_buffer,
            kmip_version=kmip_version
        )
    else:
        raise exceptions.InvalidKmipEncoding(
            "The AttributeReference encoding is missing the attribute "
            "name string."
        )

    self.is_oversized(local_buffer)
|
[
"def",
"read",
"(",
"self",
",",
"input_buffer",
",",
"kmip_version",
"=",
"enums",
".",
"KMIPVersion",
".",
"KMIP_2_0",
")",
":",
"if",
"kmip_version",
"<",
"enums",
".",
"KMIPVersion",
".",
"KMIP_2_0",
":",
"raise",
"exceptions",
".",
"VersionNotSupported",
"(",
"\"KMIP {} does not support the AttributeReference \"",
"\"object.\"",
".",
"format",
"(",
"kmip_version",
".",
"value",
")",
")",
"super",
"(",
"AttributeReference",
",",
"self",
")",
".",
"read",
"(",
"input_buffer",
",",
"kmip_version",
"=",
"kmip_version",
")",
"local_buffer",
"=",
"BytearrayStream",
"(",
"input_buffer",
".",
"read",
"(",
"self",
".",
"length",
")",
")",
"if",
"self",
".",
"is_tag_next",
"(",
"enums",
".",
"Tags",
".",
"VENDOR_IDENTIFICATION",
",",
"local_buffer",
")",
":",
"self",
".",
"_vendor_identification",
"=",
"primitives",
".",
"TextString",
"(",
"tag",
"=",
"enums",
".",
"Tags",
".",
"VENDOR_IDENTIFICATION",
")",
"self",
".",
"_vendor_identification",
".",
"read",
"(",
"local_buffer",
",",
"kmip_version",
"=",
"kmip_version",
")",
"else",
":",
"raise",
"exceptions",
".",
"InvalidKmipEncoding",
"(",
"\"The AttributeReference encoding is missing the vendor \"",
"\"identification string.\"",
")",
"if",
"self",
".",
"is_tag_next",
"(",
"enums",
".",
"Tags",
".",
"ATTRIBUTE_NAME",
",",
"local_buffer",
")",
":",
"self",
".",
"_attribute_name",
"=",
"primitives",
".",
"TextString",
"(",
"tag",
"=",
"enums",
".",
"Tags",
".",
"ATTRIBUTE_NAME",
")",
"self",
".",
"_attribute_name",
".",
"read",
"(",
"local_buffer",
",",
"kmip_version",
"=",
"kmip_version",
")",
"else",
":",
"raise",
"exceptions",
".",
"InvalidKmipEncoding",
"(",
"\"The AttributeReference encoding is missing the attribute \"",
"\"name string.\"",
")",
"self",
".",
"is_oversized",
"(",
"local_buffer",
")"
] |
Read the data stream and decode the AttributeReference structure into
its parts.
Args:
input_buffer (stream): A data stream containing encoded object
data, supporting a read method.
kmip_version (enum): A KMIPVersion enumeration defining the KMIP
version with which the object will be decoded. Optional,
defaults to KMIP 2.0.
Raises:
InvalidKmipEncoding: Raised if the vendor identification or
attribute name is missing from the encoding.
VersionNotSupported: Raised when a KMIP version is provided that
does not support the AttributeReference structure.
|
[
"Read",
"the",
"data",
"stream",
"and",
"decode",
"the",
"AttributeReference",
"structure",
"into",
"its",
"parts",
"."
] |
python
|
test
|
saulpw/visidata
|
visidata/vdtui.py
|
https://github.com/saulpw/visidata/blob/32771e0cea6c24fc7902683d14558391395c591f/visidata/vdtui.py#L2142-L2158
|
def getValue(self, row):
    'Memoize calcValue with key id(row)'
    if self._cachedValues is None:
        return self.calcValue(row)

    k = id(row)
    if k in self._cachedValues:
        return self._cachedValues[k]

    ret = self.calcValue(row)
    self._cachedValues[k] = ret

    cachesize = options.col_cache_size
    if cachesize > 0 and len(self._cachedValues) > cachesize:
        self._cachedValues.popitem(last=False)

    return ret
|
[
"def",
"getValue",
"(",
"self",
",",
"row",
")",
":",
"if",
"self",
".",
"_cachedValues",
"is",
"None",
":",
"return",
"self",
".",
"calcValue",
"(",
"row",
")",
"k",
"=",
"id",
"(",
"row",
")",
"if",
"k",
"in",
"self",
".",
"_cachedValues",
":",
"return",
"self",
".",
"_cachedValues",
"[",
"k",
"]",
"ret",
"=",
"self",
".",
"calcValue",
"(",
"row",
")",
"self",
".",
"_cachedValues",
"[",
"k",
"]",
"=",
"ret",
"cachesize",
"=",
"options",
".",
"col_cache_size",
"if",
"cachesize",
">",
"0",
"and",
"len",
"(",
"self",
".",
"_cachedValues",
")",
">",
"cachesize",
":",
"self",
".",
"_cachedValues",
".",
"popitem",
"(",
"last",
"=",
"False",
")",
"return",
"ret"
] |
Memoize calcValue with key id(row)
|
[
"Memoize",
"calcValue",
"with",
"key",
"id",
"(",
"row",
")"
] |
python
|
train
|
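`getValue` above memoizes a per-row computation keyed by `id(row)` and evicts the oldest entry once the cache exceeds a cap. A self-contained sketch of the same pattern with an `OrderedDict`; the cap and the computed function are made up stand-ins:

```python
# Standalone sketch of the cache pattern in getValue above: memoize an expensive
# per-row computation keyed by id(row), evicting the oldest entry past a size cap.
from collections import OrderedDict

CACHE_SIZE = 2            # made-up cap; visidata reads options.col_cache_size instead
_cache = OrderedDict()

def calc_value(row):      # stand-in for the expensive computation
    print('computing', row)
    return sum(row)

def get_value(row):
    k = id(row)
    if k in _cache:
        return _cache[k]
    ret = _cache[k] = calc_value(row)
    if CACHE_SIZE > 0 and len(_cache) > CACHE_SIZE:
        _cache.popitem(last=False)  # drop the oldest inserted entry
    return ret

r1, r2 = [1, 2], [3, 4]
get_value(r1); get_value(r1)        # second call is served from the cache
get_value(r2)
```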
jwkvam/bowtie
|
bowtie/auth.py
|
https://github.com/jwkvam/bowtie/blob/c494850671ac805bf186fbf2bdb07d2a34ae876d/bowtie/auth.py#L85-L96
|
def before_request(self) -> Optional[Response]:
    """Determine if a user is allowed to view this route."""
    auth = request.authorization
    if not auth or not self._check_auth(auth.username, auth.password):
        return Response(
            'Could not verify your access level for that URL.\n'
            'You have to login with proper credentials', 401,
            {'WWW-Authenticate': 'Basic realm="Login Required"'}
        )
    session['logged_in'] = auth.username
    # pylint wants this return statement
    return None
|
[
"def",
"before_request",
"(",
"self",
")",
"->",
"Optional",
"[",
"Response",
"]",
":",
"auth",
"=",
"request",
".",
"authorization",
"if",
"not",
"auth",
"or",
"not",
"self",
".",
"_check_auth",
"(",
"auth",
".",
"username",
",",
"auth",
".",
"password",
")",
":",
"return",
"Response",
"(",
"'Could not verify your access level for that URL.\\n'",
"'You have to login with proper credentials'",
",",
"401",
",",
"{",
"'WWW-Authenticate'",
":",
"'Basic realm=\"Login Required\"'",
"}",
")",
"session",
"[",
"'logged_in'",
"]",
"=",
"auth",
".",
"username",
"# pylint wants this return statement",
"return",
"None"
] |
Determine if a user is allowed to view this route.
|
[
"Determine",
"if",
"a",
"user",
"is",
"allowed",
"to",
"view",
"this",
"route",
"."
] |
python
|
train
|
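The before_request record above gates every route behind HTTP Basic Auth: a missing or failed Authorization header gets a 401 with a WWW-Authenticate challenge, otherwise the username is stored in the session. A hedged, standalone Flask sketch of that gate follows; check_auth and the hard-coded credentials are placeholders for Bowtie's real credential check.

from flask import Flask, Response, request, session

app = Flask(__name__)
app.secret_key = "change-me"  # placeholder; Flask sessions need a secret key

def check_auth(username, password):
    # stand-in for a real credential store
    return username == "admin" and password == "s3cret"

@app.before_request
def require_basic_auth():
    auth = request.authorization
    if not auth or not check_auth(auth.username, auth.password):
        # 401 plus the WWW-Authenticate header makes browsers prompt for credentials
        return Response("Login required", 401,
                        {"WWW-Authenticate": 'Basic realm="Login Required"'})
    session["logged_in"] = auth.username
    return None  # returning None lets the request continue to the view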
konstantint/pyliftover
|
pyliftover/intervaltree.py
|
https://github.com/konstantint/pyliftover/blob/5164eed9ae678ad0ddc164df8c2c5767e6a4b39f/pyliftover/intervaltree.py#L90-L101
|
def sort(self):
'''
    Must be invoked after all intervals have been added to sort mid_** arrays.
'''
if self.single_interval is None or self.single_interval != 0:
return # Nothing to do for empty and leaf trees.
self.mid_sorted_by_start.sort(key = lambda x: x[0])
self.mid_sorted_by_end.sort(key = lambda x: x[1], reverse=True)
if self.left_subtree is not None:
self.left_subtree.sort()
if self.right_subtree is not None:
self.right_subtree.sort()
|
[
"def",
"sort",
"(",
"self",
")",
":",
"if",
"self",
".",
"single_interval",
"is",
"None",
"or",
"self",
".",
"single_interval",
"!=",
"0",
":",
"return",
"# Nothing to do for empty and leaf trees.",
"self",
".",
"mid_sorted_by_start",
".",
"sort",
"(",
"key",
"=",
"lambda",
"x",
":",
"x",
"[",
"0",
"]",
")",
"self",
".",
"mid_sorted_by_end",
".",
"sort",
"(",
"key",
"=",
"lambda",
"x",
":",
"x",
"[",
"1",
"]",
",",
"reverse",
"=",
"True",
")",
"if",
"self",
".",
"left_subtree",
"is",
"not",
"None",
":",
"self",
".",
"left_subtree",
".",
"sort",
"(",
")",
"if",
"self",
".",
"right_subtree",
"is",
"not",
"None",
":",
"self",
".",
"right_subtree",
".",
"sort",
"(",
")"
] |
Must be invoked after all intervals have been added to sort mid_** arrays.
|
[
"Must",
"be",
"invoked",
"after",
"all",
"intevals",
"have",
"been",
"added",
"to",
"sort",
"mid_",
"**",
"arrays",
"."
] |
python
|
train
|
MaT1g3R/option
|
option/option_.py
|
https://github.com/MaT1g3R/option/blob/37c954e6e74273d48649b3236bc881a1286107d6/option/option_.py#L144-L168
|
def expect(self, msg) -> T:
"""
Unwraps the option. Raises an exception if the value is :py:data:`NONE`.
Args:
msg: The exception message.
Returns:
The wrapped value.
Raises:
``ValueError`` with message provided by ``msg`` if the value is :py:data:`NONE`.
Examples:
>>> Some(0).expect('sd')
0
>>> try:
... NONE.expect('Oh No!')
... except ValueError as e:
... print(e)
Oh No!
"""
if self._is_some:
return self._val
raise ValueError(msg)
|
[
"def",
"expect",
"(",
"self",
",",
"msg",
")",
"->",
"T",
":",
"if",
"self",
".",
"_is_some",
":",
"return",
"self",
".",
"_val",
"raise",
"ValueError",
"(",
"msg",
")"
] |
Unwraps the option. Raises an exception if the value is :py:data:`NONE`.
Args:
msg: The exception message.
Returns:
The wrapped value.
Raises:
``ValueError`` with message provided by ``msg`` if the value is :py:data:`NONE`.
Examples:
>>> Some(0).expect('sd')
0
>>> try:
... NONE.expect('Oh No!')
... except ValueError as e:
... print(e)
Oh No!
|
[
"Unwraps",
"the",
"option",
".",
"Raises",
"an",
"exception",
"if",
"the",
"value",
"is",
":",
"py",
":",
"data",
":",
"NONE",
"."
] |
python
|
train
|
RedHatInsights/insights-core
|
insights/util/__init__.py
|
https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/util/__init__.py#L88-L104
|
def keys_in(items, *args):
"""
Use this utility function to ensure multiple keys are in one or more
dicts. Returns `True` if all keys are present in at least one of the
given dicts, otherwise returns `False`.
:Parameters:
- `items`: Iterable of required keys
- Variable number of subsequent arguments, each one being a dict to check.
"""
found = dict((key, False) for key in items)
for d in args:
for item in items:
if not found[item] and item in d:
found[item] = True
return all(found.values())
|
[
"def",
"keys_in",
"(",
"items",
",",
"*",
"args",
")",
":",
"found",
"=",
"dict",
"(",
"(",
"key",
",",
"False",
")",
"for",
"key",
"in",
"items",
")",
"for",
"d",
"in",
"args",
":",
"for",
"item",
"in",
"items",
":",
"if",
"not",
"found",
"[",
"item",
"]",
"and",
"item",
"in",
"d",
":",
"found",
"[",
"item",
"]",
"=",
"True",
"return",
"all",
"(",
"found",
".",
"values",
"(",
")",
")"
] |
Use this utility function to ensure multiple keys are in one or more
dicts. Returns `True` if all keys are present in at least one of the
given dicts, otherwise returns `False`.
:Parameters:
- `items`: Iterable of required keys
- Variable number of subsequent arguments, each one being a dict to check.
|
[
"Use",
"this",
"utility",
"function",
"to",
"ensure",
"multiple",
"keys",
"are",
"in",
"one",
"or",
"more",
"dicts",
".",
"Returns",
"True",
"if",
"all",
"keys",
"are",
"present",
"in",
"at",
"least",
"one",
"of",
"the",
"given",
"dicts",
"otherwise",
"returns",
"False",
"."
] |
python
|
train
|
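keys_in above answers "is every required key present in at least one of these dicts?". A tiny usage sketch, assuming keys_in has been imported from insights.util; the dicts are made up.

required = ["host", "port"]
print(keys_in(required, {"host": "db1"}, {"port": 5432}))   # True: each key appears in some dict
print(keys_in(required, {"host": "db1"}, {"user": "app"}))  # False: "port" is missing from both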
woolfson-group/isambard
|
isambard/add_ons/filesystem.py
|
https://github.com/woolfson-group/isambard/blob/ebc33b48a28ad217e18f93b910dfba46e6e71e07/isambard/add_ons/filesystem.py#L381-L409
|
def get_mmcif(code, outfile=None):
""" Get mmcif file associated with code from PDBE.
Parameters
----------
code : str
PDB code.
outfile : str
Filepath. Writes returned value to this file.
Returns
-------
mmcif_file : str
Filepath to the mmcif file.
"""
pdbe_url = "http://www.ebi.ac.uk/pdbe/entry-files/download/{0}.cif".format(code)
r = requests.get(pdbe_url)
if r.status_code == 200:
mmcif_string = r.text
else:
print("Could not download mmcif file for {0}".format(code))
mmcif_string = None
# Write to file.
if outfile and mmcif_string:
with open(outfile, 'w') as foo:
foo.write(mmcif_string)
return mmcif_string
|
[
"def",
"get_mmcif",
"(",
"code",
",",
"outfile",
"=",
"None",
")",
":",
"pdbe_url",
"=",
"\"http://www.ebi.ac.uk/pdbe/entry-files/download/{0}.cif\"",
".",
"format",
"(",
"code",
")",
"r",
"=",
"requests",
".",
"get",
"(",
"pdbe_url",
")",
"if",
"r",
".",
"status_code",
"==",
"200",
":",
"mmcif_string",
"=",
"r",
".",
"text",
"else",
":",
"print",
"(",
"\"Could not download mmcif file for {0}\"",
".",
"format",
"(",
"code",
")",
")",
"mmcif_string",
"=",
"None",
"# Write to file.",
"if",
"outfile",
"and",
"mmcif_string",
":",
"with",
"open",
"(",
"outfile",
",",
"'w'",
")",
"as",
"foo",
":",
"foo",
".",
"write",
"(",
"mmcif_string",
")",
"return",
"mmcif_string"
] |
Get mmcif file associated with code from PDBE.
Parameters
----------
code : str
PDB code.
outfile : str
Filepath. Writes returned value to this file.
Returns
-------
mmcif_file : str
Filepath to the mmcif file.
|
[
"Get",
"mmcif",
"file",
"associated",
"with",
"code",
"from",
"PDBE",
"."
] |
python
|
train
|
totalgood/pugnlp
|
src/pugnlp/futil.py
|
https://github.com/totalgood/pugnlp/blob/c43445b14afddfdeadc5f3076675c9e8fc1ee67c/src/pugnlp/futil.py#L225-L313
|
def generate_files(path='', ext='', level=None, dirs=False, files=True, verbosity=0):
""" Recursively generate files (and thier stats) in the indicated directory
Filter by the indicated file name extension (ext)
Args:
path (str): Root/base path to search.
ext (str or list of str): File name extension(s).
Only file paths that ".endswith()" this string will be returned
level (int, optional): Depth of file tree to halt recursion at.
None = full recursion to as deep as it goes
0 = nonrecursive, just provide a list of files at the root level of the tree
1 = one level of depth deeper in the tree
typ (type): output type (default: list). if a mapping type is provided the keys will be the full paths (unique)
dirs (bool): Whether to yield dir paths along with file paths (default: False)
files (bool): Whether to yield file paths (default: True)
`dirs=True`, `files=False` is equivalent to `ls -d`
Returns:
list of dicts: dict keys are { 'path', 'name', 'bytes', 'created', 'modified', 'accessed', 'permissions' }
path (str): Full, absolute paths to file beneath the indicated directory and ending with `ext`
        name (str): File name only (everything after the last slash in the path)
size (int): File size in bytes
changed_any (datetime): Timestamp for modification of either metadata (e.g. permissions) or content
modified (datetime): File content modification timestamp from file system
accessed (datetime): File access timestamp from file system
permissions (int): File permissions bytes as a chown-style integer with a maximum of 4 digits
type (str): One of 'file', 'dir', 'symlink->file', 'symlink->dir', 'symlink->broken'
e.g.: 777 or 1755
Examples:
>>> 'util.py' in [d['name'] for d in generate_files(os.path.dirname(__file__), ext='.py', level=0)]
True
>>> next(d for d in generate_files(os.path.dirname(__file__), ext='.py')
... if d['name'] == 'util.py')['size'] > 1000
True
>>> sorted(next(generate_files()).keys())
['accessed', 'changed_any', 'dir', 'mode', 'modified', 'name', 'path', 'size', 'type']
There should be an __init__ file in the same directory as this script.
And it should be at the top of the list.
>>> sorted(d['name'] for d in generate_files(os.path.dirname(__file__), ext='.py', level=0))[0]
'__init__.py'
>>> len(list(generate_files(__file__, ext='.')))
0
>>> len(list(generate_files(__file__, ext=['invalidexttesting123', False])))
0
>>> len(list(generate_files(__file__, ext=['.py', '.pyc', 'invalidexttesting123', False]))) > 0
True
>>> sorted(generate_files(__file__))[0]['name'] == os.path.basename(__file__)
True
>>> sorted(list(generate_files())[0].keys())
['accessed', 'changed_any', 'dir', 'mode', 'modified', 'name', 'path', 'size', 'type']
>>> all(d['type'] in ('file', 'dir',
... 'symlink->file', 'symlink->dir', 'symlink->broken',
... 'mount-point->file', 'mount-point->dir',
... 'block-device', 'pipe', 'special', 'socket', 'unknown')
... for d in generate_files(level=1, files=True, dirs=True))
True
"""
path = expand_path(path or '.')
# None interpreted as '', False is interpreted as '.' (no ext will be accepted)
ext = '.' if ext is False else ext
# multiple extensions can be specified in a list or tuple
ext = ext if ext and isinstance(ext, (list, tuple)) else [ext]
# case-insensitive extensions, '.' ext means only no-extensions are accepted
ext = set(x.lower() if x else '.' if x is False else '' for x in ext)
if os.path.isfile(path):
fn = os.path.basename(path)
# only yield the stat dict if the extension is among those that match or files without any ext are desired
if not ext or any(path.lower().endswith(x) or (x == '.' and '.' not in fn) for x in ext):
yield path_status(os.path.dirname(path), os.path.basename(path), verbosity=verbosity)
else:
for dir_path, dir_names, filenames in walk_level(path, level=level):
if verbosity > 0:
print('Checking path "{}"'.format(dir_path))
if files:
for fn in filenames: # itertools.chain(filenames, dir_names)
if ext and not any((fn.lower().endswith(x) or (x == '.' and x not in fn) for x in ext)):
continue
stat = path_status(dir_path, fn, verbosity=verbosity)
if stat and stat['name'] and stat['path']:
yield stat
if dirs:
for fn in dir_names:
if ext and not any((fn.lower().endswith(x) or (x == '.' and x not in fn) for x in ext)):
continue
yield path_status(dir_path, fn, verbosity=verbosity)
|
[
"def",
"generate_files",
"(",
"path",
"=",
"''",
",",
"ext",
"=",
"''",
",",
"level",
"=",
"None",
",",
"dirs",
"=",
"False",
",",
"files",
"=",
"True",
",",
"verbosity",
"=",
"0",
")",
":",
"path",
"=",
"expand_path",
"(",
"path",
"or",
"'.'",
")",
"# None interpreted as '', False is interpreted as '.' (no ext will be accepted)",
"ext",
"=",
"'.'",
"if",
"ext",
"is",
"False",
"else",
"ext",
"# multiple extensions can be specified in a list or tuple",
"ext",
"=",
"ext",
"if",
"ext",
"and",
"isinstance",
"(",
"ext",
",",
"(",
"list",
",",
"tuple",
")",
")",
"else",
"[",
"ext",
"]",
"# case-insensitive extensions, '.' ext means only no-extensions are accepted",
"ext",
"=",
"set",
"(",
"x",
".",
"lower",
"(",
")",
"if",
"x",
"else",
"'.'",
"if",
"x",
"is",
"False",
"else",
"''",
"for",
"x",
"in",
"ext",
")",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"path",
")",
":",
"fn",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"path",
")",
"# only yield the stat dict if the extension is among those that match or files without any ext are desired",
"if",
"not",
"ext",
"or",
"any",
"(",
"path",
".",
"lower",
"(",
")",
".",
"endswith",
"(",
"x",
")",
"or",
"(",
"x",
"==",
"'.'",
"and",
"'.'",
"not",
"in",
"fn",
")",
"for",
"x",
"in",
"ext",
")",
":",
"yield",
"path_status",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"path",
")",
",",
"os",
".",
"path",
".",
"basename",
"(",
"path",
")",
",",
"verbosity",
"=",
"verbosity",
")",
"else",
":",
"for",
"dir_path",
",",
"dir_names",
",",
"filenames",
"in",
"walk_level",
"(",
"path",
",",
"level",
"=",
"level",
")",
":",
"if",
"verbosity",
">",
"0",
":",
"print",
"(",
"'Checking path \"{}\"'",
".",
"format",
"(",
"dir_path",
")",
")",
"if",
"files",
":",
"for",
"fn",
"in",
"filenames",
":",
"# itertools.chain(filenames, dir_names)",
"if",
"ext",
"and",
"not",
"any",
"(",
"(",
"fn",
".",
"lower",
"(",
")",
".",
"endswith",
"(",
"x",
")",
"or",
"(",
"x",
"==",
"'.'",
"and",
"x",
"not",
"in",
"fn",
")",
"for",
"x",
"in",
"ext",
")",
")",
":",
"continue",
"stat",
"=",
"path_status",
"(",
"dir_path",
",",
"fn",
",",
"verbosity",
"=",
"verbosity",
")",
"if",
"stat",
"and",
"stat",
"[",
"'name'",
"]",
"and",
"stat",
"[",
"'path'",
"]",
":",
"yield",
"stat",
"if",
"dirs",
":",
"for",
"fn",
"in",
"dir_names",
":",
"if",
"ext",
"and",
"not",
"any",
"(",
"(",
"fn",
".",
"lower",
"(",
")",
".",
"endswith",
"(",
"x",
")",
"or",
"(",
"x",
"==",
"'.'",
"and",
"x",
"not",
"in",
"fn",
")",
"for",
"x",
"in",
"ext",
")",
")",
":",
"continue",
"yield",
"path_status",
"(",
"dir_path",
",",
"fn",
",",
"verbosity",
"=",
"verbosity",
")"
] |
Recursively generate files (and their stats) in the indicated directory
Filter by the indicated file name extension (ext)
Args:
path (str): Root/base path to search.
ext (str or list of str): File name extension(s).
Only file paths that ".endswith()" this string will be returned
level (int, optional): Depth of file tree to halt recursion at.
None = full recursion to as deep as it goes
0 = nonrecursive, just provide a list of files at the root level of the tree
1 = one level of depth deeper in the tree
typ (type): output type (default: list). if a mapping type is provided the keys will be the full paths (unique)
dirs (bool): Whether to yield dir paths along with file paths (default: False)
files (bool): Whether to yield file paths (default: True)
`dirs=True`, `files=False` is equivalent to `ls -d`
Returns:
list of dicts: dict keys are { 'path', 'name', 'bytes', 'created', 'modified', 'accessed', 'permissions' }
path (str): Full, absolute paths to file beneath the indicated directory and ending with `ext`
name (str): File name only (everything after the last slash in the path)
size (int): File size in bytes
changed_any (datetime): Timestamp for modification of either metadata (e.g. permissions) or content
modified (datetime): File content modification timestamp from file system
accessed (datetime): File access timestamp from file system
permissions (int): File permissions bytes as a chown-style integer with a maximum of 4 digits
type (str): One of 'file', 'dir', 'symlink->file', 'symlink->dir', 'symlink->broken'
e.g.: 777 or 1755
Examples:
>>> 'util.py' in [d['name'] for d in generate_files(os.path.dirname(__file__), ext='.py', level=0)]
True
>>> next(d for d in generate_files(os.path.dirname(__file__), ext='.py')
... if d['name'] == 'util.py')['size'] > 1000
True
>>> sorted(next(generate_files()).keys())
['accessed', 'changed_any', 'dir', 'mode', 'modified', 'name', 'path', 'size', 'type']
There should be an __init__ file in the same directory as this script.
And it should be at the top of the list.
>>> sorted(d['name'] for d in generate_files(os.path.dirname(__file__), ext='.py', level=0))[0]
'__init__.py'
>>> len(list(generate_files(__file__, ext='.')))
0
>>> len(list(generate_files(__file__, ext=['invalidexttesting123', False])))
0
>>> len(list(generate_files(__file__, ext=['.py', '.pyc', 'invalidexttesting123', False]))) > 0
True
>>> sorted(generate_files(__file__))[0]['name'] == os.path.basename(__file__)
True
>>> sorted(list(generate_files())[0].keys())
['accessed', 'changed_any', 'dir', 'mode', 'modified', 'name', 'path', 'size', 'type']
>>> all(d['type'] in ('file', 'dir',
... 'symlink->file', 'symlink->dir', 'symlink->broken',
... 'mount-point->file', 'mount-point->dir',
... 'block-device', 'pipe', 'special', 'socket', 'unknown')
... for d in generate_files(level=1, files=True, dirs=True))
True
|
[
"Recursively",
"generate",
"files",
"(",
"and",
"thier",
"stats",
")",
"in",
"the",
"indicated",
"directory"
] |
python
|
train
|
spacetelescope/synphot_refactor
|
synphot/spectrum.py
|
https://github.com/spacetelescope/synphot_refactor/blob/9c064f3cff0c41dd8acadc0f67c6350931275b9f/synphot/spectrum.py#L608-L657
|
def taper(self, wavelengths=None):
"""Taper the spectrum or bandpass.
The wavelengths to use for the first and last points are
calculated by using the same ratio as for the 2 interior points.
Parameters
----------
wavelengths : array-like, `~astropy.units.quantity.Quantity`, or `None`
Wavelength values for tapering.
If not a Quantity, assumed to be in Angstrom.
If `None`, `waveset` is used.
Returns
-------
sp : `BaseSpectrum`
Tapered empirical spectrum or bandpass.
``self`` is returned if already tapered (e.g., box model).
"""
x = self._validate_wavelengths(wavelengths)
# Calculate new end points for tapering
w1 = x[0] ** 2 / x[1]
w2 = x[-1] ** 2 / x[-2]
# Special handling for empirical data.
# This is to be compatible with ASTROLIB PYSYNPHOT behavior.
if isinstance(self._model, Empirical1D):
y1 = self._model.lookup_table[0]
y2 = self._model.lookup_table[-1]
# Other models can just evaluate at new end points
else:
y1 = self(w1)
y2 = self(w2)
# Nothing to do
if y1 == 0 and y2 == 0:
return self # Do we need a deepcopy here?
y = self(x)
if y1 != 0:
x = np.insert(x, 0, w1)
y = np.insert(y, 0, 0.0)
if y2 != 0:
x = np.insert(x, x.size, w2)
y = np.insert(y, y.size, 0.0)
return self.__class__(Empirical1D, points=x, lookup_table=y)
|
[
"def",
"taper",
"(",
"self",
",",
"wavelengths",
"=",
"None",
")",
":",
"x",
"=",
"self",
".",
"_validate_wavelengths",
"(",
"wavelengths",
")",
"# Calculate new end points for tapering",
"w1",
"=",
"x",
"[",
"0",
"]",
"**",
"2",
"/",
"x",
"[",
"1",
"]",
"w2",
"=",
"x",
"[",
"-",
"1",
"]",
"**",
"2",
"/",
"x",
"[",
"-",
"2",
"]",
"# Special handling for empirical data.",
"# This is to be compatible with ASTROLIB PYSYNPHOT behavior.",
"if",
"isinstance",
"(",
"self",
".",
"_model",
",",
"Empirical1D",
")",
":",
"y1",
"=",
"self",
".",
"_model",
".",
"lookup_table",
"[",
"0",
"]",
"y2",
"=",
"self",
".",
"_model",
".",
"lookup_table",
"[",
"-",
"1",
"]",
"# Other models can just evaluate at new end points",
"else",
":",
"y1",
"=",
"self",
"(",
"w1",
")",
"y2",
"=",
"self",
"(",
"w2",
")",
"# Nothing to do",
"if",
"y1",
"==",
"0",
"and",
"y2",
"==",
"0",
":",
"return",
"self",
"# Do we need a deepcopy here?",
"y",
"=",
"self",
"(",
"x",
")",
"if",
"y1",
"!=",
"0",
":",
"x",
"=",
"np",
".",
"insert",
"(",
"x",
",",
"0",
",",
"w1",
")",
"y",
"=",
"np",
".",
"insert",
"(",
"y",
",",
"0",
",",
"0.0",
")",
"if",
"y2",
"!=",
"0",
":",
"x",
"=",
"np",
".",
"insert",
"(",
"x",
",",
"x",
".",
"size",
",",
"w2",
")",
"y",
"=",
"np",
".",
"insert",
"(",
"y",
",",
"y",
".",
"size",
",",
"0.0",
")",
"return",
"self",
".",
"__class__",
"(",
"Empirical1D",
",",
"points",
"=",
"x",
",",
"lookup_table",
"=",
"y",
")"
] |
Taper the spectrum or bandpass.
The wavelengths to use for the first and last points are
calculated by using the same ratio as for the 2 interior points.
Parameters
----------
wavelengths : array-like, `~astropy.units.quantity.Quantity`, or `None`
Wavelength values for tapering.
If not a Quantity, assumed to be in Angstrom.
If `None`, `waveset` is used.
Returns
-------
sp : `BaseSpectrum`
Tapered empirical spectrum or bandpass.
``self`` is returned if already tapered (e.g., box model).
|
[
"Taper",
"the",
"spectrum",
"or",
"bandpass",
"."
] |
python
|
train
|
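In the taper record above, the new end wavelengths are chosen so the grid keeps extending by the same ratio as its outermost interior step: w1 = x[0]**2 / x[1] and w2 = x[-1]**2 / x[-2]. A quick numpy check with made-up wavelengths:

import numpy as np

x = np.array([1000.0, 1100.0, 1200.0])  # made-up wavelengths
w1 = x[0] ** 2 / x[1]    # ~909.1, so x[0] / w1 equals x[1] / x[0]
w2 = x[-1] ** 2 / x[-2]  # ~1309.1, so w2 / x[-1] equals x[-1] / x[-2]
print(np.isclose(x[0] / w1, x[1] / x[0]), np.isclose(w2 / x[-1], x[-1] / x[-2]))  # True True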
mattlong/hermes
|
hermes/chatroom.py
|
https://github.com/mattlong/hermes/blob/63a5afcafe90ca99aeb44edeee9ed6f90baae431/hermes/chatroom.py#L65-L76
|
def is_member(self, m):
"""Check if a user is a member of the chatroom"""
if not m:
return False
elif isinstance(m, basestring):
jid = m
else:
jid = m['JID']
is_member = len(filter(lambda m: m['JID'] == jid and m.get('STATUS') in ('ACTIVE', 'INVITED'), self.params['MEMBERS'])) > 0
return is_member
|
[
"def",
"is_member",
"(",
"self",
",",
"m",
")",
":",
"if",
"not",
"m",
":",
"return",
"False",
"elif",
"isinstance",
"(",
"m",
",",
"basestring",
")",
":",
"jid",
"=",
"m",
"else",
":",
"jid",
"=",
"m",
"[",
"'JID'",
"]",
"is_member",
"=",
"len",
"(",
"filter",
"(",
"lambda",
"m",
":",
"m",
"[",
"'JID'",
"]",
"==",
"jid",
"and",
"m",
".",
"get",
"(",
"'STATUS'",
")",
"in",
"(",
"'ACTIVE'",
",",
"'INVITED'",
")",
",",
"self",
".",
"params",
"[",
"'MEMBERS'",
"]",
")",
")",
">",
"0",
"return",
"is_member"
] |
Check if a user is a member of the chatroom
|
[
"Check",
"if",
"a",
"user",
"is",
"a",
"member",
"of",
"the",
"chatroom"
] |
python
|
train
|
wecatch/app-turbo
|
turbo/model.py
|
https://github.com/wecatch/app-turbo/blob/75faf97371a9a138c53f92168d0a486636cb8a9c/turbo/model.py#L142-L146
|
def update_one(self, filter_, document, **kwargs):
"""update method
"""
self._valide_update_document(document)
return self.__collect.update_one(filter_, document, **kwargs)
|
[
"def",
"update_one",
"(",
"self",
",",
"filter_",
",",
"document",
",",
"*",
"*",
"kwargs",
")",
":",
"self",
".",
"_valide_update_document",
"(",
"document",
")",
"return",
"self",
".",
"__collect",
".",
"update_one",
"(",
"filter_",
",",
"document",
",",
"*",
"*",
"kwargs",
")"
] |
update method
|
[
"update",
"method"
] |
python
|
train
|
etal/biofrills
|
biofrills/pairutils.py
|
https://github.com/etal/biofrills/blob/36684bb6c7632f96215e8b2b4ebc86640f331bcd/biofrills/pairutils.py#L127-L133
|
def identity_avg(aseq, bseq):
"""Compute absolute identity (# matching sites) between sequence strings."""
match = identity_abs(aseq, bseq)
alen = len(aseq.replace('-', '').replace('.', ''))
blen = len(bseq.replace('-', '').replace('.', ''))
avg_len = 0.5 * (alen + blen)
return match / avg_len
|
[
"def",
"identity_avg",
"(",
"aseq",
",",
"bseq",
")",
":",
"match",
"=",
"identity_abs",
"(",
"aseq",
",",
"bseq",
")",
"alen",
"=",
"len",
"(",
"aseq",
".",
"replace",
"(",
"'-'",
",",
"''",
")",
".",
"replace",
"(",
"'.'",
",",
"''",
")",
")",
"blen",
"=",
"len",
"(",
"bseq",
".",
"replace",
"(",
"'-'",
",",
"''",
")",
".",
"replace",
"(",
"'.'",
",",
"''",
")",
")",
"avg_len",
"=",
"0.5",
"*",
"(",
"alen",
"+",
"blen",
")",
"return",
"match",
"/",
"avg_len"
] |
Compute absolute identity (# matching sites) between sequence strings.
|
[
"Compute",
"absolute",
"identity",
"(",
"#",
"matching",
"sites",
")",
"between",
"sequence",
"strings",
"."
] |
python
|
train
|
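identity_avg above divides the raw match count by the mean ungapped length of the two sequences. A worked example with made-up aligned strings; the match count of 3 is what identity_abs would presumably return for this pair.

aseq, bseq = "AC-GT", "ACAGA"   # 3 matching columns (A, C, G)
match = 3
alen = len(aseq.replace('-', '').replace('.', ''))  # 4 ungapped residues
blen = len(bseq.replace('-', '').replace('.', ''))  # 5 ungapped residues
print(match / (0.5 * (alen + blen)))  # 0.666..., identity relative to the mean length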
saltstack/salt
|
salt/states/virt.py
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/virt.py#L187-L210
|
def stopped(name, connection=None, username=None, password=None):
'''
Stops a VM by shutting it down nicely.
.. versionadded:: 2016.3.0
:param connection: libvirt connection URI, overriding defaults
.. versionadded:: 2019.2.0
:param username: username to connect with, overriding defaults
.. versionadded:: 2019.2.0
:param password: password to connect with, overriding defaults
.. versionadded:: 2019.2.0
.. code-block:: yaml
domain_name:
virt.stopped
'''
return _virt_call(name, 'shutdown', 'stopped', "Machine has been shut down",
connection=connection, username=username, password=password)
|
[
"def",
"stopped",
"(",
"name",
",",
"connection",
"=",
"None",
",",
"username",
"=",
"None",
",",
"password",
"=",
"None",
")",
":",
"return",
"_virt_call",
"(",
"name",
",",
"'shutdown'",
",",
"'stopped'",
",",
"\"Machine has been shut down\"",
",",
"connection",
"=",
"connection",
",",
"username",
"=",
"username",
",",
"password",
"=",
"password",
")"
] |
Stops a VM by shutting it down nicely.
.. versionadded:: 2016.3.0
:param connection: libvirt connection URI, overriding defaults
.. versionadded:: 2019.2.0
:param username: username to connect with, overriding defaults
.. versionadded:: 2019.2.0
:param password: password to connect with, overriding defaults
.. versionadded:: 2019.2.0
.. code-block:: yaml
domain_name:
virt.stopped
|
[
"Stops",
"a",
"VM",
"by",
"shutting",
"it",
"down",
"nicely",
"."
] |
python
|
train
|
carpedm20/fbchat
|
fbchat/_client.py
|
https://github.com/carpedm20/fbchat/blob/f480d68b5773473e6daba7f66075ee30e8d737a8/fbchat/_client.py#L445-L467
|
def setSession(self, session_cookies):
"""Loads session cookies
    :param session_cookies: A dictionary containing session cookies
:type session_cookies: dict
:return: False if `session_cookies` does not contain proper cookies
:rtype: bool
"""
# Quick check to see if session_cookies is formatted properly
if not session_cookies or "c_user" not in session_cookies:
return False
try:
# Load cookies into current session
self._session.cookies = requests.cookies.merge_cookies(
self._session.cookies, session_cookies
)
self._postLogin()
except Exception as e:
log.exception("Failed loading session")
self._resetValues()
return False
return True
|
[
"def",
"setSession",
"(",
"self",
",",
"session_cookies",
")",
":",
"# Quick check to see if session_cookies is formatted properly",
"if",
"not",
"session_cookies",
"or",
"\"c_user\"",
"not",
"in",
"session_cookies",
":",
"return",
"False",
"try",
":",
"# Load cookies into current session",
"self",
".",
"_session",
".",
"cookies",
"=",
"requests",
".",
"cookies",
".",
"merge_cookies",
"(",
"self",
".",
"_session",
".",
"cookies",
",",
"session_cookies",
")",
"self",
".",
"_postLogin",
"(",
")",
"except",
"Exception",
"as",
"e",
":",
"log",
".",
"exception",
"(",
"\"Failed loading session\"",
")",
"self",
".",
"_resetValues",
"(",
")",
"return",
"False",
"return",
"True"
] |
Loads session cookies
:param session_cookies: A dictionary containing session cookies
:type session_cookies: dict
:return: False if `session_cookies` does not contain proper cookies
:rtype: bool
|
[
"Loads",
"session",
"cookies"
] |
python
|
train
|
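The cookie-loading step in setSession above boils down to merging a saved cookie dict into the live requests session. A standalone sketch of just that step; the cookie values are placeholders, and the real client additionally verifies the session by calling _postLogin afterwards.

import requests
from requests.cookies import merge_cookies

session = requests.Session()
saved = {"c_user": "100000000000000", "xs": "placeholder-token"}  # placeholder cookie values
if saved and "c_user" in saved:  # same sanity check the client performs first
    session.cookies = merge_cookies(session.cookies, saved)
print(session.cookies.get("c_user"))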
Erotemic/utool
|
utool/util_type.py
|
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_type.py#L104-L133
|
def is_comparable_type(var, type_):
"""
Check to see if `var` is an instance of known compatible types for `type_`
Args:
var (?):
type_ (?):
Returns:
bool:
CommandLine:
python -m utool.util_type is_comparable_type --show
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_type import * # NOQA
>>> import utool as ut
>>> flags = []
>>> flags += [is_comparable_type(0, float)]
>>> flags += [is_comparable_type(0, np.float32)]
>>> flags += [is_comparable_type(0, np.int32)]
>>> flags += [is_comparable_type(0, int)]
>>> flags += [is_comparable_type(0.0, int)]
>>> result = ut.repr2(flags)
>>> print(result)
[True, True, True, True, False]
"""
other_types = COMPARABLE_TYPES.get(type_, type_)
return isinstance(var, other_types)
|
[
"def",
"is_comparable_type",
"(",
"var",
",",
"type_",
")",
":",
"other_types",
"=",
"COMPARABLE_TYPES",
".",
"get",
"(",
"type_",
",",
"type_",
")",
"return",
"isinstance",
"(",
"var",
",",
"other_types",
")"
] |
Check to see if `var` is an instance of known compatible types for `type_`
Args:
var (?):
type_ (?):
Returns:
bool:
CommandLine:
python -m utool.util_type is_comparable_type --show
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_type import * # NOQA
>>> import utool as ut
>>> flags = []
>>> flags += [is_comparable_type(0, float)]
>>> flags += [is_comparable_type(0, np.float32)]
>>> flags += [is_comparable_type(0, np.int32)]
>>> flags += [is_comparable_type(0, int)]
>>> flags += [is_comparable_type(0.0, int)]
>>> result = ut.repr2(flags)
>>> print(result)
[True, True, True, True, False]
|
[
"Check",
"to",
"see",
"if",
"var",
"is",
"an",
"instance",
"of",
"known",
"compatible",
"types",
"for",
"type_"
] |
python
|
train
|
koalalorenzo/python-digitalocean
|
digitalocean/baseapi.py
|
https://github.com/koalalorenzo/python-digitalocean/blob/d0221b57856fb1e131cafecf99d826f7b07a947c/digitalocean/baseapi.py#L68-L116
|
def __perform_request(self, url, type=GET, params=None):
"""
        This method will perform the real request;
in this way we can customize only the "output" of the API call by
using self.__call_api method.
This method will return the request object.
"""
if params is None:
params = {}
if not self.token:
raise TokenError("No token provided. Please use a valid token")
url = urlparse.urljoin(self.end_point, url)
# lookup table to find out the appropriate requests method,
# headers and payload type (json or query parameters)
identity = lambda x: x
json_dumps = lambda x: json.dumps(x)
lookup = {
GET: (self._session.get, {}, 'params', identity),
POST: (self._session.post, {'Content-type': 'application/json'}, 'data',
json_dumps),
PUT: (self._session.put, {'Content-type': 'application/json'}, 'data',
json_dumps),
DELETE: (self._session.delete,
{'content-type': 'application/json'},
'data', json_dumps),
}
requests_method, headers, payload, transform = lookup[type]
agent = "{0}/{1} {2}/{3}".format('python-digitalocean',
__version__,
requests.__name__,
requests.__version__)
headers.update({'Authorization': 'Bearer ' + self.token,
'User-Agent': agent})
kwargs = {'headers': headers, payload: transform(params)}
timeout = self.get_timeout()
if timeout:
kwargs['timeout'] = timeout
# remove token from log
headers_str = str(headers).replace(self.token.strip(), 'TOKEN')
self._log.debug('%s %s %s:%s %s %s' %
(type, url, payload, params, headers_str, timeout))
return requests_method(url, **kwargs)
|
[
"def",
"__perform_request",
"(",
"self",
",",
"url",
",",
"type",
"=",
"GET",
",",
"params",
"=",
"None",
")",
":",
"if",
"params",
"is",
"None",
":",
"params",
"=",
"{",
"}",
"if",
"not",
"self",
".",
"token",
":",
"raise",
"TokenError",
"(",
"\"No token provided. Please use a valid token\"",
")",
"url",
"=",
"urlparse",
".",
"urljoin",
"(",
"self",
".",
"end_point",
",",
"url",
")",
"# lookup table to find out the appropriate requests method,",
"# headers and payload type (json or query parameters)",
"identity",
"=",
"lambda",
"x",
":",
"x",
"json_dumps",
"=",
"lambda",
"x",
":",
"json",
".",
"dumps",
"(",
"x",
")",
"lookup",
"=",
"{",
"GET",
":",
"(",
"self",
".",
"_session",
".",
"get",
",",
"{",
"}",
",",
"'params'",
",",
"identity",
")",
",",
"POST",
":",
"(",
"self",
".",
"_session",
".",
"post",
",",
"{",
"'Content-type'",
":",
"'application/json'",
"}",
",",
"'data'",
",",
"json_dumps",
")",
",",
"PUT",
":",
"(",
"self",
".",
"_session",
".",
"put",
",",
"{",
"'Content-type'",
":",
"'application/json'",
"}",
",",
"'data'",
",",
"json_dumps",
")",
",",
"DELETE",
":",
"(",
"self",
".",
"_session",
".",
"delete",
",",
"{",
"'content-type'",
":",
"'application/json'",
"}",
",",
"'data'",
",",
"json_dumps",
")",
",",
"}",
"requests_method",
",",
"headers",
",",
"payload",
",",
"transform",
"=",
"lookup",
"[",
"type",
"]",
"agent",
"=",
"\"{0}/{1} {2}/{3}\"",
".",
"format",
"(",
"'python-digitalocean'",
",",
"__version__",
",",
"requests",
".",
"__name__",
",",
"requests",
".",
"__version__",
")",
"headers",
".",
"update",
"(",
"{",
"'Authorization'",
":",
"'Bearer '",
"+",
"self",
".",
"token",
",",
"'User-Agent'",
":",
"agent",
"}",
")",
"kwargs",
"=",
"{",
"'headers'",
":",
"headers",
",",
"payload",
":",
"transform",
"(",
"params",
")",
"}",
"timeout",
"=",
"self",
".",
"get_timeout",
"(",
")",
"if",
"timeout",
":",
"kwargs",
"[",
"'timeout'",
"]",
"=",
"timeout",
"# remove token from log",
"headers_str",
"=",
"str",
"(",
"headers",
")",
".",
"replace",
"(",
"self",
".",
"token",
".",
"strip",
"(",
")",
",",
"'TOKEN'",
")",
"self",
".",
"_log",
".",
"debug",
"(",
"'%s %s %s:%s %s %s'",
"%",
"(",
"type",
",",
"url",
",",
"payload",
",",
"params",
",",
"headers_str",
",",
"timeout",
")",
")",
"return",
"requests_method",
"(",
"url",
",",
"*",
"*",
"kwargs",
")"
] |
This method will perform the real request;
in this way we can customize only the "output" of the API call by
using self.__call_api method.
This method will return the request object.
|
[
"This",
"method",
"will",
"perform",
"the",
"real",
"request",
"in",
"this",
"way",
"we",
"can",
"customize",
"only",
"the",
"output",
"of",
"the",
"API",
"call",
"by",
"using",
"self",
".",
"__call_api",
"method",
".",
"This",
"method",
"will",
"return",
"the",
"request",
"object",
"."
] |
python
|
valid
|
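__perform_request above drives everything from a lookup table mapping each verb to (requests method, default headers, payload keyword, payload transform). A minimal standalone sketch of that dispatch idea; the token and the httpbin URL in the usage comment are placeholders, not part of python-digitalocean.

import json
import requests

session = requests.Session()
lookup = {
    "GET": (session.get, {}, "params", lambda p: p),
    "POST": (session.post, {"Content-type": "application/json"}, "data", json.dumps),
    "PUT": (session.put, {"Content-type": "application/json"}, "data", json.dumps),
    "DELETE": (session.delete, {"content-type": "application/json"}, "data", json.dumps),
}

def perform(verb, url, params=None, token="example-token"):
    method, headers, payload_key, transform = lookup[verb]
    headers = dict(headers, Authorization="Bearer " + token)
    # GET sends query parameters; the other verbs send a JSON body under "data"
    return method(url, headers=headers, **{payload_key: transform(params or {})})

# e.g. perform("POST", "https://httpbin.org/post", {"name": "demo"})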
chaoss/grimoirelab-sortinghat
|
sortinghat/matching/email_name.py
|
https://github.com/chaoss/grimoirelab-sortinghat/blob/391cd37a75fea26311dc6908bc1c953c540a8e04/sortinghat/matching/email_name.py#L77-L112
|
def match(self, a, b):
"""Determine if two unique identities are the same.
This method compares the email addresses or the names of each
identity to check if the given unique identities are the same.
When the given unique identities are the same object or share
the same UUID, this will also produce a positive match.
        Identities whose email addresses or names are in the blacklist
will be ignored during the matching.
:param a: unique identity to match
:param b: unique identity to match
:returns: True when both unique identities are likely to be the same.
Otherwise, returns False.
:raises ValueError: when any of the given unique identities is not
an instance of UniqueIdentity class
"""
if not isinstance(a, UniqueIdentity):
raise ValueError("<a> is not an instance of UniqueIdentity")
if not isinstance(b, UniqueIdentity):
raise ValueError("<b> is not an instance of UniqueIdentity")
if a.uuid and b.uuid and a.uuid == b.uuid:
return True
filtered_a = self.filter(a)
filtered_b = self.filter(b)
for fa in filtered_a:
for fb in filtered_b:
if self.match_filtered_identities(fa, fb):
return True
return False
|
[
"def",
"match",
"(",
"self",
",",
"a",
",",
"b",
")",
":",
"if",
"not",
"isinstance",
"(",
"a",
",",
"UniqueIdentity",
")",
":",
"raise",
"ValueError",
"(",
"\"<a> is not an instance of UniqueIdentity\"",
")",
"if",
"not",
"isinstance",
"(",
"b",
",",
"UniqueIdentity",
")",
":",
"raise",
"ValueError",
"(",
"\"<b> is not an instance of UniqueIdentity\"",
")",
"if",
"a",
".",
"uuid",
"and",
"b",
".",
"uuid",
"and",
"a",
".",
"uuid",
"==",
"b",
".",
"uuid",
":",
"return",
"True",
"filtered_a",
"=",
"self",
".",
"filter",
"(",
"a",
")",
"filtered_b",
"=",
"self",
".",
"filter",
"(",
"b",
")",
"for",
"fa",
"in",
"filtered_a",
":",
"for",
"fb",
"in",
"filtered_b",
":",
"if",
"self",
".",
"match_filtered_identities",
"(",
"fa",
",",
"fb",
")",
":",
"return",
"True",
"return",
"False"
] |
Determine if two unique identities are the same.
This method compares the email addresses or the names of each
identity to check if the given unique identities are the same.
When the given unique identities are the same object or share
the same UUID, this will also produce a positive match.
Identities whose email addresses or names are in the blacklist
will be ignored during the matching.
:param a: unique identity to match
:param b: unique identity to match
:returns: True when both unique identities are likely to be the same.
Otherwise, returns False.
:raises ValueError: when any of the given unique identities is not
an instance of UniqueIdentity class
|
[
"Determine",
"if",
"two",
"unique",
"identities",
"are",
"the",
"same",
"."
] |
python
|
train
|
zimeon/iiif
|
iiif/flask_utils.py
|
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/flask_utils.py#L270-L274
|
def add_compliance_header(self):
"""Add IIIF Compliance level header to response."""
if (self.manipulator.compliance_uri is not None):
self.headers['Link'] = '<' + \
self.manipulator.compliance_uri + '>;rel="profile"'
|
[
"def",
"add_compliance_header",
"(",
"self",
")",
":",
"if",
"(",
"self",
".",
"manipulator",
".",
"compliance_uri",
"is",
"not",
"None",
")",
":",
"self",
".",
"headers",
"[",
"'Link'",
"]",
"=",
"'<'",
"+",
"self",
".",
"manipulator",
".",
"compliance_uri",
"+",
"'>;rel=\"profile\"'"
] |
Add IIIF Compliance level header to response.
|
[
"Add",
"IIIF",
"Compliance",
"level",
"header",
"to",
"response",
"."
] |
python
|
train
|
keenlabs/KeenClient-Python
|
keen/api.py
|
https://github.com/keenlabs/KeenClient-Python/blob/266387c3376d1e000d117e17c45045ae3439d43f/keen/api.py#L353-L374
|
def add_access_key_permissions(self, access_key_id, permissions):
"""
Adds to the existing list of permissions on this key with the contents of this list.
Will not remove any existing permissions or modify the remainder of the key.
:param access_key_id: the 'key' value of the access key to add permissions to
:param permissions: the new permissions to add to the existing list of permissions
"""
# Get current state via HTTPS.
current_access_key = self.get_access_key(access_key_id)
# Copy and only change the single parameter.
payload_dict = KeenApi._build_access_key_dict(current_access_key)
# Turn into sets to avoid duplicates.
old_permissions = set(payload_dict["permitted"])
new_permissions = set(permissions)
combined_permissions = old_permissions.union(new_permissions)
payload_dict["permitted"] = list(combined_permissions)
# Now just treat it like a full update.
return self.update_access_key_full(access_key_id, **payload_dict)
|
[
"def",
"add_access_key_permissions",
"(",
"self",
",",
"access_key_id",
",",
"permissions",
")",
":",
"# Get current state via HTTPS.",
"current_access_key",
"=",
"self",
".",
"get_access_key",
"(",
"access_key_id",
")",
"# Copy and only change the single parameter.",
"payload_dict",
"=",
"KeenApi",
".",
"_build_access_key_dict",
"(",
"current_access_key",
")",
"# Turn into sets to avoid duplicates.",
"old_permissions",
"=",
"set",
"(",
"payload_dict",
"[",
"\"permitted\"",
"]",
")",
"new_permissions",
"=",
"set",
"(",
"permissions",
")",
"combined_permissions",
"=",
"old_permissions",
".",
"union",
"(",
"new_permissions",
")",
"payload_dict",
"[",
"\"permitted\"",
"]",
"=",
"list",
"(",
"combined_permissions",
")",
"# Now just treat it like a full update.",
"return",
"self",
".",
"update_access_key_full",
"(",
"access_key_id",
",",
"*",
"*",
"payload_dict",
")"
] |
Adds to the existing list of permissions on this key with the contents of this list.
Will not remove any existing permissions or modify the remainder of the key.
:param access_key_id: the 'key' value of the access key to add permissions to
:param permissions: the new permissions to add to the existing list of permissions
|
[
"Adds",
"to",
"the",
"existing",
"list",
"of",
"permissions",
"on",
"this",
"key",
"with",
"the",
"contents",
"of",
"this",
"list",
".",
"Will",
"not",
"remove",
"any",
"existing",
"permissions",
"or",
"modify",
"the",
"remainder",
"of",
"the",
"key",
"."
] |
python
|
train
|
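add_access_key_permissions above follows a read-merge-write pattern: fetch the current key, union the old and new permission sets, and push the whole payload back, which keeps repeated calls idempotent. A plain-dict sketch of the merge step; the key fields here are made up and no Keen API call is made.

def merge_permissions(current_key, new_permissions):
    """Return a payload whose 'permitted' list gains new_permissions without duplicates."""
    payload = dict(current_key)  # copy so the caller's dict is untouched
    payload["permitted"] = sorted(set(payload.get("permitted", [])) | set(new_permissions))
    return payload

key = {"name": "reader", "permitted": ["queries"]}
print(merge_permissions(key, ["queries", "saved_queries"]))
# {'name': 'reader', 'permitted': ['queries', 'saved_queries']}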
althonos/pronto
|
pronto/term.py
|
https://github.com/althonos/pronto/blob/a768adcba19fb34f26f67cde4a03d317f932c274/pronto/term.py#L398-L403
|
def children(self):
"""~TermList: the children of all the terms in the list.
"""
return TermList(unique_everseen(
y for x in self for y in x.children
))
|
[
"def",
"children",
"(",
"self",
")",
":",
"return",
"TermList",
"(",
"unique_everseen",
"(",
"y",
"for",
"x",
"in",
"self",
"for",
"y",
"in",
"x",
".",
"children",
")",
")"
] |
~TermList: the children of all the terms in the list.
|
[
"~TermList",
":",
"the",
"children",
"of",
"all",
"the",
"terms",
"in",
"the",
"list",
"."
] |
python
|
train
|
jxtech/wechatpy
|
wechatpy/client/api/scan.py
|
https://github.com/jxtech/wechatpy/blob/4df0da795618c0895a10f1c2cde9e9d5c0a93aaa/wechatpy/client/api/scan.py#L94-L109
|
def get_product(self, standard, key):
"""
        Query product information
        For details, please refer to
        http://mp.weixin.qq.com/wiki/15/7fa787701295b884410b5163e13313af.html
        :param standard: product code standard
        :param key: product code content
        :return: the returned JSON data packet
"""
data = {
'keystandard': standard,
'keystr': key,
}
return self._post('product/get', data=data)
|
[
"def",
"get_product",
"(",
"self",
",",
"standard",
",",
"key",
")",
":",
"data",
"=",
"{",
"'keystandard'",
":",
"standard",
",",
"'keystr'",
":",
"key",
",",
"}",
"return",
"self",
".",
"_post",
"(",
"'product/get'",
",",
"data",
"=",
"data",
")"
] |
Query product information
For details, please refer to
http://mp.weixin.qq.com/wiki/15/7fa787701295b884410b5163e13313af.html
:param standard: product code standard
:param key: product code content
:return: the returned JSON data packet
|
[
"查询商品信息"
] |
python
|
train
|
TrafficSenseMSD/SumoTools
|
traci/_polygon.py
|
https://github.com/TrafficSenseMSD/SumoTools/blob/8607b4f885f1d1798e43240be643efe6dccccdaa/traci/_polygon.py#L61-L69
|
def setType(self, polygonID, polygonType):
"""setType(string, string) -> None
Sets the (abstract) type of the polygon.
"""
self._connection._beginMessage(
tc.CMD_SET_POLYGON_VARIABLE, tc.VAR_TYPE, polygonID, 1 + 4 + len(polygonType))
self._connection._packString(polygonType)
self._connection._sendExact()
|
[
"def",
"setType",
"(",
"self",
",",
"polygonID",
",",
"polygonType",
")",
":",
"self",
".",
"_connection",
".",
"_beginMessage",
"(",
"tc",
".",
"CMD_SET_POLYGON_VARIABLE",
",",
"tc",
".",
"VAR_TYPE",
",",
"polygonID",
",",
"1",
"+",
"4",
"+",
"len",
"(",
"polygonType",
")",
")",
"self",
".",
"_connection",
".",
"_packString",
"(",
"polygonType",
")",
"self",
".",
"_connection",
".",
"_sendExact",
"(",
")"
] |
setType(string, string) -> None
Sets the (abstract) type of the polygon.
|
[
"setType",
"(",
"string",
"string",
")",
"-",
">",
"None"
] |
python
|
train
|
shmir/PyIxNetwork
|
ixnetwork/ixn_port.py
|
https://github.com/shmir/PyIxNetwork/blob/e7d7a89c08a5d3a1382b4dcfd915bbfc7eedd33f/ixnetwork/ixn_port.py#L52-L62
|
def wait_for_up(self, timeout=40):
""" Wait until port is up and running, including all parameters (admin state, oper state, license etc.).
:param timeout: max time to wait for port up.
"""
self.wait_for_states(timeout, 'up')
connectionStatus = self.get_attribute('connectionStatus').strip()
if connectionStatus.split(':')[0] != self.get_attribute('assignedTo').split(':')[0]:
raise TgnError('Failed to reach up state, port connection status is {} after {} seconds'.
format(connectionStatus, timeout))
|
[
"def",
"wait_for_up",
"(",
"self",
",",
"timeout",
"=",
"40",
")",
":",
"self",
".",
"wait_for_states",
"(",
"timeout",
",",
"'up'",
")",
"connectionStatus",
"=",
"self",
".",
"get_attribute",
"(",
"'connectionStatus'",
")",
".",
"strip",
"(",
")",
"if",
"connectionStatus",
".",
"split",
"(",
"':'",
")",
"[",
"0",
"]",
"!=",
"self",
".",
"get_attribute",
"(",
"'assignedTo'",
")",
".",
"split",
"(",
"':'",
")",
"[",
"0",
"]",
":",
"raise",
"TgnError",
"(",
"'Failed to reach up state, port connection status is {} after {} seconds'",
".",
"format",
"(",
"connectionStatus",
",",
"timeout",
")",
")"
] |
Wait until port is up and running, including all parameters (admin state, oper state, license etc.).
:param timeout: max time to wait for port up.
|
[
"Wait",
"until",
"port",
"is",
"up",
"and",
"running",
"including",
"all",
"parameters",
"(",
"admin",
"state",
"oper",
"state",
"license",
"etc",
".",
")",
"."
] |
python
|
train
|
holtjma/msbwt
|
MUS/MultiStringBWT.py
|
https://github.com/holtjma/msbwt/blob/7503346ec072ddb89520db86fef85569a9ba093a/MUS/MultiStringBWT.py#L555-L593
|
def getCharAtIndex(self, index):
'''
Used for searching, this function masks the complexity behind retrieving a specific character at a specific index
in our compressed BWT.
@param index - the index to retrieve the character from
@param return - return the character in our BWT that's at a particular index (integer format)
'''
#get the bin we should start from
binID = index >> self.bitPower
bwtIndex = self.refFM[binID]
#these are the values that indicate how far in we really are
trueIndex = np.sum(self.partialFM[binID])-self.offsetSum
dist = index-trueIndex
#calculate how big of a region we actually need to 'decompress'
if binID == self.refFM.shape[0]-1:
endRange = self.bwt.shape[0]
else:
endRange = self.refFM[binID+1]+1
while endRange < self.bwt.shape[0] and (self.bwt[endRange] & self.mask) == (self.bwt[endRange-1] & self.mask):
endRange += 1
#extract the symbols and counts associated with each byte
letters = np.bitwise_and(self.bwt[bwtIndex:endRange], self.mask)
counts = np.right_shift(self.bwt[bwtIndex:endRange], self.letterBits, dtype='<u8')
#numpy methods for find the powers
i = 1
same = (letters[0:-1] == letters[1:])
while np.count_nonzero(same) > 0:
(counts[i:])[same] *= self.numPower
i += 1
same = np.bitwise_and(same[0:-1], same[1:])
#these are the true counts after raising to the appropriate power
cs = np.cumsum(counts)
x = np.searchsorted(cs, dist, 'right')
return letters[x]
|
[
"def",
"getCharAtIndex",
"(",
"self",
",",
"index",
")",
":",
"#get the bin we should start from",
"binID",
"=",
"index",
">>",
"self",
".",
"bitPower",
"bwtIndex",
"=",
"self",
".",
"refFM",
"[",
"binID",
"]",
"#these are the values that indicate how far in we really are",
"trueIndex",
"=",
"np",
".",
"sum",
"(",
"self",
".",
"partialFM",
"[",
"binID",
"]",
")",
"-",
"self",
".",
"offsetSum",
"dist",
"=",
"index",
"-",
"trueIndex",
"#calculate how big of a region we actually need to 'decompress'",
"if",
"binID",
"==",
"self",
".",
"refFM",
".",
"shape",
"[",
"0",
"]",
"-",
"1",
":",
"endRange",
"=",
"self",
".",
"bwt",
".",
"shape",
"[",
"0",
"]",
"else",
":",
"endRange",
"=",
"self",
".",
"refFM",
"[",
"binID",
"+",
"1",
"]",
"+",
"1",
"while",
"endRange",
"<",
"self",
".",
"bwt",
".",
"shape",
"[",
"0",
"]",
"and",
"(",
"self",
".",
"bwt",
"[",
"endRange",
"]",
"&",
"self",
".",
"mask",
")",
"==",
"(",
"self",
".",
"bwt",
"[",
"endRange",
"-",
"1",
"]",
"&",
"self",
".",
"mask",
")",
":",
"endRange",
"+=",
"1",
"#extract the symbols and counts associated with each byte",
"letters",
"=",
"np",
".",
"bitwise_and",
"(",
"self",
".",
"bwt",
"[",
"bwtIndex",
":",
"endRange",
"]",
",",
"self",
".",
"mask",
")",
"counts",
"=",
"np",
".",
"right_shift",
"(",
"self",
".",
"bwt",
"[",
"bwtIndex",
":",
"endRange",
"]",
",",
"self",
".",
"letterBits",
",",
"dtype",
"=",
"'<u8'",
")",
"#numpy methods for find the powers",
"i",
"=",
"1",
"same",
"=",
"(",
"letters",
"[",
"0",
":",
"-",
"1",
"]",
"==",
"letters",
"[",
"1",
":",
"]",
")",
"while",
"np",
".",
"count_nonzero",
"(",
"same",
")",
">",
"0",
":",
"(",
"counts",
"[",
"i",
":",
"]",
")",
"[",
"same",
"]",
"*=",
"self",
".",
"numPower",
"i",
"+=",
"1",
"same",
"=",
"np",
".",
"bitwise_and",
"(",
"same",
"[",
"0",
":",
"-",
"1",
"]",
",",
"same",
"[",
"1",
":",
"]",
")",
"#these are the true counts after raising to the appropriate power",
"cs",
"=",
"np",
".",
"cumsum",
"(",
"counts",
")",
"x",
"=",
"np",
".",
"searchsorted",
"(",
"cs",
",",
"dist",
",",
"'right'",
")",
"return",
"letters",
"[",
"x",
"]"
] |
Used for searching, this function masks the complexity behind retrieving a specific character at a specific index
in our compressed BWT.
@param index - the index to retrieve the character from
@param return - return the character in our BWT that's at a particular index (integer format)
|
[
"Used",
"for",
"searching",
"this",
"function",
"masks",
"the",
"complexity",
"behind",
"retrieving",
"a",
"specific",
"character",
"at",
"a",
"specific",
"index",
"in",
"our",
"compressed",
"BWT",
"."
] |
python
|
train
|
sdispater/orator
|
orator/schema/blueprint.py
|
https://github.com/sdispater/orator/blob/bd90bf198ee897751848f9a92e49d18e60a74136/orator/schema/blueprint.py#L128-L139
|
def drop_column(self, *columns):
"""
Indicates that the given columns should be dropped.
:param columns: The columns to drop
:type columns: tuple
:rtype: Fluent
"""
columns = list(columns)
return self._add_command("drop_column", columns=columns)
|
[
"def",
"drop_column",
"(",
"self",
",",
"*",
"columns",
")",
":",
"columns",
"=",
"list",
"(",
"columns",
")",
"return",
"self",
".",
"_add_command",
"(",
"\"drop_column\"",
",",
"columns",
"=",
"columns",
")"
] |
Indicates that the given columns should be dropped.
:param columns: The columns to drop
:type columns: tuple
:rtype: Fluent
|
[
"Indicates",
"that",
"the",
"given",
"columns",
"should",
"be",
"dropped",
"."
] |
python
|
train
|
pydata/xarray
|
xarray/core/computation.py
|
https://github.com/pydata/xarray/blob/6d93a95d05bdbfc33fff24064f67d29dd891ab58/xarray/core/computation.py#L339-L384
|
def apply_dataset_vfunc(
func,
*args,
signature,
join='inner',
dataset_join='exact',
fill_value=_NO_FILL_VALUE,
exclude_dims=frozenset(),
keep_attrs=False
):
"""Apply a variable level function over Dataset, dict of DataArray,
DataArray, Variable and/or ndarray objects.
"""
from .dataset import Dataset
first_obj = args[0] # we'll copy attrs from this in case keep_attrs=True
if (dataset_join not in _JOINS_WITHOUT_FILL_VALUES and
fill_value is _NO_FILL_VALUE):
raise TypeError('to apply an operation to datasets with different '
'data variables with apply_ufunc, you must supply the '
'dataset_fill_value argument.')
if len(args) > 1:
args = deep_align(args, join=join, copy=False, exclude=exclude_dims,
raise_on_invalid=False)
list_of_coords = build_output_coords(args, signature, exclude_dims)
args = [getattr(arg, 'data_vars', arg) for arg in args]
result_vars = apply_dict_of_variables_vfunc(
func, *args, signature=signature, join=dataset_join,
fill_value=fill_value)
if signature.num_outputs > 1:
out = tuple(_fast_dataset(*args)
for args in zip(result_vars, list_of_coords))
else:
coord_vars, = list_of_coords
out = _fast_dataset(result_vars, coord_vars)
if keep_attrs and isinstance(first_obj, Dataset):
if isinstance(out, tuple):
out = tuple(ds._copy_attrs_from(first_obj) for ds in out)
else:
out._copy_attrs_from(first_obj)
return out
|
[
"def",
"apply_dataset_vfunc",
"(",
"func",
",",
"*",
"args",
",",
"signature",
",",
"join",
"=",
"'inner'",
",",
"dataset_join",
"=",
"'exact'",
",",
"fill_value",
"=",
"_NO_FILL_VALUE",
",",
"exclude_dims",
"=",
"frozenset",
"(",
")",
",",
"keep_attrs",
"=",
"False",
")",
":",
"from",
".",
"dataset",
"import",
"Dataset",
"first_obj",
"=",
"args",
"[",
"0",
"]",
"# we'll copy attrs from this in case keep_attrs=True",
"if",
"(",
"dataset_join",
"not",
"in",
"_JOINS_WITHOUT_FILL_VALUES",
"and",
"fill_value",
"is",
"_NO_FILL_VALUE",
")",
":",
"raise",
"TypeError",
"(",
"'to apply an operation to datasets with different '",
"'data variables with apply_ufunc, you must supply the '",
"'dataset_fill_value argument.'",
")",
"if",
"len",
"(",
"args",
")",
">",
"1",
":",
"args",
"=",
"deep_align",
"(",
"args",
",",
"join",
"=",
"join",
",",
"copy",
"=",
"False",
",",
"exclude",
"=",
"exclude_dims",
",",
"raise_on_invalid",
"=",
"False",
")",
"list_of_coords",
"=",
"build_output_coords",
"(",
"args",
",",
"signature",
",",
"exclude_dims",
")",
"args",
"=",
"[",
"getattr",
"(",
"arg",
",",
"'data_vars'",
",",
"arg",
")",
"for",
"arg",
"in",
"args",
"]",
"result_vars",
"=",
"apply_dict_of_variables_vfunc",
"(",
"func",
",",
"*",
"args",
",",
"signature",
"=",
"signature",
",",
"join",
"=",
"dataset_join",
",",
"fill_value",
"=",
"fill_value",
")",
"if",
"signature",
".",
"num_outputs",
">",
"1",
":",
"out",
"=",
"tuple",
"(",
"_fast_dataset",
"(",
"*",
"args",
")",
"for",
"args",
"in",
"zip",
"(",
"result_vars",
",",
"list_of_coords",
")",
")",
"else",
":",
"coord_vars",
",",
"=",
"list_of_coords",
"out",
"=",
"_fast_dataset",
"(",
"result_vars",
",",
"coord_vars",
")",
"if",
"keep_attrs",
"and",
"isinstance",
"(",
"first_obj",
",",
"Dataset",
")",
":",
"if",
"isinstance",
"(",
"out",
",",
"tuple",
")",
":",
"out",
"=",
"tuple",
"(",
"ds",
".",
"_copy_attrs_from",
"(",
"first_obj",
")",
"for",
"ds",
"in",
"out",
")",
"else",
":",
"out",
".",
"_copy_attrs_from",
"(",
"first_obj",
")",
"return",
"out"
] |
Apply a variable level function over Dataset, dict of DataArray,
DataArray, Variable and/or ndarray objects.
|
[
"Apply",
"a",
"variable",
"level",
"function",
"over",
"Dataset",
"dict",
"of",
"DataArray",
"DataArray",
"Variable",
"and",
"/",
"or",
"ndarray",
"objects",
"."
] |
python
|
train
|
greenbender/pynntp
|
nntp/nntp.py
|
https://github.com/greenbender/pynntp/blob/991a76331cdf5d8f9dbf5b18f6e29adc80749a2f/nntp/nntp.py#L395-L441
|
def command(self, verb, args=None):
"""Call a command on the server.
If the user has not authenticated then authentication will be done
as part of calling the command on the server.
For commands that don't return a status message the status message
will default to an empty string.
Args:
verb: The verb of the command to call.
args: The arguments of the command as a string (default None).
Returns:
A tuple of status code (as an integer) and status message.
Note:
You can run raw commands by supplying the full command (including
args) in the verb.
    Note: Although it is possible, you shouldn't issue more than one command
        at a time by adding newlines to the verb, as it will most likely lead
to undesirable results.
"""
if self.__generating:
raise NNTPSyncError("Command issued while a generator is active")
cmd = verb
if args:
cmd += " " + args
cmd += "\r\n"
self.socket.sendall(cmd)
try:
code, message = self.status()
except NNTPTemporaryError as e:
if e.code() != 480:
raise e
code, message = self.command("AUTHINFO USER", self.username)
if code == 381:
code, message = self.command("AUTHINFO PASS", self.password)
if code != 281:
raise NNTPReplyError(code, message)
code, message = self.command(verb, args)
return code, message
|
[
"def",
"command",
"(",
"self",
",",
"verb",
",",
"args",
"=",
"None",
")",
":",
"if",
"self",
".",
"__generating",
":",
"raise",
"NNTPSyncError",
"(",
"\"Command issued while a generator is active\"",
")",
"cmd",
"=",
"verb",
"if",
"args",
":",
"cmd",
"+=",
"\" \"",
"+",
"args",
"cmd",
"+=",
"\"\\r\\n\"",
"self",
".",
"socket",
".",
"sendall",
"(",
"cmd",
")",
"try",
":",
"code",
",",
"message",
"=",
"self",
".",
"status",
"(",
")",
"except",
"NNTPTemporaryError",
"as",
"e",
":",
"if",
"e",
".",
"code",
"(",
")",
"!=",
"480",
":",
"raise",
"e",
"code",
",",
"message",
"=",
"self",
".",
"command",
"(",
"\"AUTHINFO USER\"",
",",
"self",
".",
"username",
")",
"if",
"code",
"==",
"381",
":",
"code",
",",
"message",
"=",
"self",
".",
"command",
"(",
"\"AUTHINFO PASS\"",
",",
"self",
".",
"password",
")",
"if",
"code",
"!=",
"281",
":",
"raise",
"NNTPReplyError",
"(",
"code",
",",
"message",
")",
"code",
",",
"message",
"=",
"self",
".",
"command",
"(",
"verb",
",",
"args",
")",
"return",
"code",
",",
"message"
] |
Call a command on the server.
If the user has not authenticated then authentication will be done
as part of calling the command on the server.
For commands that don't return a status message the status message
will default to an empty string.
Args:
verb: The verb of the command to call.
args: The arguments of the command as a string (default None).
Returns:
A tuple of status code (as an integer) and status message.
Note:
You can run raw commands by supplying the full command (including
args) in the verb.
Note: Although it is possible, you shouldn't issue more than one command
at a time by adding newlines to the verb, as it will most likely lead
to undesirable results.
|
[
"Call",
"a",
"command",
"on",
"the",
"server",
"."
] |
python
|
test
|
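The interesting part of the NNTP command record above is the retry: a 480 reply triggers AUTHINFO USER / AUTHINFO PASS and the original command is then reissued once. A hedged sketch of that authenticate-and-retry shape, with a fake transport so it runs without a news server; none of these names come from pynntp.

class AuthRequired(Exception):
    """Stand-in for the 480 'authentication required' reply."""

def send_with_auth(send, authenticate, verb, args=None):
    """Send a command; if the server demands authentication, log in once and retry."""
    try:
        return send(verb, args)
    except AuthRequired:
        authenticate()
        return send(verb, args)

# fake transport: the first attempt fails until authenticate() has run
state = {"authed": False}
def fake_send(verb, args=None):
    if not state["authed"]:
        raise AuthRequired()
    return 215, "list follows"
def fake_auth():
    state["authed"] = True

print(send_with_auth(fake_send, fake_auth, "LIST"))  # (215, 'list follows')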
thiezn/iperf3-python
|
iperf3/iperf3.py
|
https://github.com/thiezn/iperf3-python/blob/094a6e043f44fb154988348603661b1473c23a50/iperf3/iperf3.py#L508-L511
|
def bandwidth(self):
"""Target bandwidth in bits/sec"""
self._bandwidth = self.lib.iperf_get_test_rate(self._test)
return self._bandwidth
|
[
"def",
"bandwidth",
"(",
"self",
")",
":",
"self",
".",
"_bandwidth",
"=",
"self",
".",
"lib",
".",
"iperf_get_test_rate",
"(",
"self",
".",
"_test",
")",
"return",
"self",
".",
"_bandwidth"
] |
Target bandwidth in bits/sec
|
[
"Target",
"bandwidth",
"in",
"bits",
"/",
"sec"
] |
python
|
train
|
isislovecruft/python-gnupg
|
pretty_bad_protocol/_logger.py
|
https://github.com/isislovecruft/python-gnupg/blob/784571449032e811587249743e183fc5e908a673/pretty_bad_protocol/_logger.py#L48-L99
|
def create_logger(level=logging.NOTSET):
"""Create a logger for python-gnupg at a specific message level.
:type level: :obj:`int` or :obj:`str`
:param level: A string or an integer for the lowest level to include in
logs.
**Available levels:**
==== ======== ========================================
int str description
==== ======== ========================================
0 NOTSET Disable all logging.
9 GNUPG Log GnuPG's internal status messages.
10 DEBUG Log module level debuging messages.
20 INFO Normal user-level messages.
30 WARN Warning messages.
40 ERROR Error messages and tracebacks.
50 CRITICAL Unhandled exceptions and tracebacks.
==== ======== ========================================
"""
_test = os.path.join(os.path.join(os.getcwd(), 'pretty_bad_protocol'), 'test')
_now = datetime.now().strftime("%Y-%m-%d_%H%M%S")
_fn = os.path.join(_test, "%s_test_gnupg.log" % _now)
_fmt = "%(relativeCreated)-4d L%(lineno)-4d:%(funcName)-18.18s %(levelname)-7.7s %(message)s"
## Add the GNUPG_STATUS_LEVEL LogRecord to all Loggers in the module:
logging.addLevelName(GNUPG_STATUS_LEVEL, "GNUPG")
logging.Logger.status = status
if level > logging.NOTSET:
logging.basicConfig(level=level, filename=_fn,
filemode="a", format=_fmt)
logging.logThreads = True
if hasattr(logging,'captureWarnings'):
logging.captureWarnings(True)
colouriser = _ansistrm.ColorizingStreamHandler
colouriser.level_map[9] = (None, 'blue', False)
colouriser.level_map[10] = (None, 'cyan', False)
handler = colouriser(sys.stderr)
handler.setLevel(level)
formatr = logging.Formatter(_fmt)
handler.setFormatter(formatr)
else:
handler = NullHandler()
log = logging.getLogger('gnupg')
log.addHandler(handler)
log.setLevel(level)
log.info("Log opened: %s UTC" % datetime.ctime(datetime.utcnow()))
return log
|
[
"def",
"create_logger",
"(",
"level",
"=",
"logging",
".",
"NOTSET",
")",
":",
"_test",
"=",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"getcwd",
"(",
")",
",",
"'pretty_bad_protocol'",
")",
",",
"'test'",
")",
"_now",
"=",
"datetime",
".",
"now",
"(",
")",
".",
"strftime",
"(",
"\"%Y-%m-%d_%H%M%S\"",
")",
"_fn",
"=",
"os",
".",
"path",
".",
"join",
"(",
"_test",
",",
"\"%s_test_gnupg.log\"",
"%",
"_now",
")",
"_fmt",
"=",
"\"%(relativeCreated)-4d L%(lineno)-4d:%(funcName)-18.18s %(levelname)-7.7s %(message)s\"",
"## Add the GNUPG_STATUS_LEVEL LogRecord to all Loggers in the module:",
"logging",
".",
"addLevelName",
"(",
"GNUPG_STATUS_LEVEL",
",",
"\"GNUPG\"",
")",
"logging",
".",
"Logger",
".",
"status",
"=",
"status",
"if",
"level",
">",
"logging",
".",
"NOTSET",
":",
"logging",
".",
"basicConfig",
"(",
"level",
"=",
"level",
",",
"filename",
"=",
"_fn",
",",
"filemode",
"=",
"\"a\"",
",",
"format",
"=",
"_fmt",
")",
"logging",
".",
"logThreads",
"=",
"True",
"if",
"hasattr",
"(",
"logging",
",",
"'captureWarnings'",
")",
":",
"logging",
".",
"captureWarnings",
"(",
"True",
")",
"colouriser",
"=",
"_ansistrm",
".",
"ColorizingStreamHandler",
"colouriser",
".",
"level_map",
"[",
"9",
"]",
"=",
"(",
"None",
",",
"'blue'",
",",
"False",
")",
"colouriser",
".",
"level_map",
"[",
"10",
"]",
"=",
"(",
"None",
",",
"'cyan'",
",",
"False",
")",
"handler",
"=",
"colouriser",
"(",
"sys",
".",
"stderr",
")",
"handler",
".",
"setLevel",
"(",
"level",
")",
"formatr",
"=",
"logging",
".",
"Formatter",
"(",
"_fmt",
")",
"handler",
".",
"setFormatter",
"(",
"formatr",
")",
"else",
":",
"handler",
"=",
"NullHandler",
"(",
")",
"log",
"=",
"logging",
".",
"getLogger",
"(",
"'gnupg'",
")",
"log",
".",
"addHandler",
"(",
"handler",
")",
"log",
".",
"setLevel",
"(",
"level",
")",
"log",
".",
"info",
"(",
"\"Log opened: %s UTC\"",
"%",
"datetime",
".",
"ctime",
"(",
"datetime",
".",
"utcnow",
"(",
")",
")",
")",
"return",
"log"
] |
Create a logger for python-gnupg at a specific message level.
:type level: :obj:`int` or :obj:`str`
:param level: A string or an integer for the lowest level to include in
logs.
**Available levels:**
==== ======== ========================================
int str description
==== ======== ========================================
0 NOTSET Disable all logging.
9 GNUPG Log GnuPG's internal status messages.
10 DEBUG Log module level debuging messages.
20 INFO Normal user-level messages.
30 WARN Warning messages.
40 ERROR Error messages and tracebacks.
50 CRITICAL Unhandled exceptions and tracebacks.
==== ======== ========================================
|
[
"Create",
"a",
"logger",
"for",
"python",
"-",
"gnupg",
"at",
"a",
"specific",
"message",
"level",
"."
] |
python
|
train
|
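A small usage sketch for create_logger() in the record above. The import location is an assumption, and note that the function logs to a file under a pretty_bad_protocol/test directory relative to the current working directory, so that directory is assumed to exist.

import logging
from pretty_bad_protocol import _logger   # assumed import location

# Create the module logger recording DEBUG and above (also enables the file log).
log = _logger.create_logger(level=logging.DEBUG)
log.debug("a debug-level message")
log.status("a GnuPG status message")   # custom GNUPG level registered by create_logger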
Alignak-monitoring/alignak
|
alignak/objects/itemgroup.py
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/objects/itemgroup.py#L144-L157
|
def add_members(self, members):
"""Add a new member to the members list
:param members: member name
:type members: str
:return: None
"""
if not isinstance(members, list):
members = [members]
if not getattr(self, 'members', None):
self.members = members
else:
self.members.extend(members)
|
[
"def",
"add_members",
"(",
"self",
",",
"members",
")",
":",
"if",
"not",
"isinstance",
"(",
"members",
",",
"list",
")",
":",
"members",
"=",
"[",
"members",
"]",
"if",
"not",
"getattr",
"(",
"self",
",",
"'members'",
",",
"None",
")",
":",
"self",
".",
"members",
"=",
"members",
"else",
":",
"self",
".",
"members",
".",
"extend",
"(",
"members",
")"
] |
Add a new member to the members list
:param members: member name
:type members: str
:return: None
|
[
"Add",
"a",
"new",
"member",
"to",
"the",
"members",
"list"
] |
python
|
train
|
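A quick illustration of how add_members() in the record above behaves; the demo class is only a stand-in so the function can be exercised outside of Alignak's itemgroup classes.

class DemoGroup(object):
    pass

DemoGroup.add_members = add_members   # attach the function shown above as a method

group = DemoGroup()
group.add_members('host1')              # a single name is wrapped in a list
group.add_members(['host2', 'host3'])   # a list extends the existing members
print(group.members)                    # -> ['host1', 'host2', 'host3']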
watson-developer-cloud/python-sdk
|
ibm_watson/text_to_speech_v1.py
|
https://github.com/watson-developer-cloud/python-sdk/blob/4c2c9df4466fcde88975da9ecd834e6ba95eb353/ibm_watson/text_to_speech_v1.py#L180-L295
|
def synthesize(self,
text,
voice=None,
customization_id=None,
accept=None,
**kwargs):
"""
Synthesize audio.
Synthesizes text to audio that is spoken in the specified voice. The service bases
its understanding of the language for the input text on the specified voice. Use a
voice that matches the language of the input text.
The method accepts a maximum of 5 KB of input text in the body of the request, and
8 KB for the URL and headers. The 5 KB limit includes any SSML tags that you
specify. The service returns the synthesized audio stream as an array of bytes.
**See also:** [The HTTP
interface](https://cloud.ibm.com/docs/services/text-to-speech/http.html).
### Audio formats (accept types)
The service can return audio in the following formats (MIME types).
* Where indicated, you can optionally specify the sampling rate (`rate`) of the
audio. You must specify a sampling rate for the `audio/l16` and `audio/mulaw`
formats. A specified sampling rate must lie in the range of 8 kHz to 192 kHz.
* For the `audio/l16` format, you can optionally specify the endianness
(`endianness`) of the audio: `endianness=big-endian` or
`endianness=little-endian`.
Use the `Accept` header or the `accept` parameter to specify the requested format
of the response audio. If you omit an audio format altogether, the service returns
the audio in Ogg format with the Opus codec (`audio/ogg;codecs=opus`). The service
always returns single-channel audio.
* `audio/basic`
The service returns audio with a sampling rate of 8000 Hz.
* `audio/flac`
You can optionally specify the `rate` of the audio. The default sampling rate is
22,050 Hz.
* `audio/l16`
You must specify the `rate` of the audio. You can optionally specify the
`endianness` of the audio. The default endianness is `little-endian`.
* `audio/mp3`
You can optionally specify the `rate` of the audio. The default sampling rate is
22,050 Hz.
* `audio/mpeg`
You can optionally specify the `rate` of the audio. The default sampling rate is
22,050 Hz.
* `audio/mulaw`
You must specify the `rate` of the audio.
* `audio/ogg`
The service returns the audio in the `vorbis` codec. You can optionally specify
the `rate` of the audio. The default sampling rate is 22,050 Hz.
* `audio/ogg;codecs=opus`
You can optionally specify the `rate` of the audio. The default sampling rate is
22,050 Hz.
* `audio/ogg;codecs=vorbis`
You can optionally specify the `rate` of the audio. The default sampling rate is
22,050 Hz.
* `audio/wav`
You can optionally specify the `rate` of the audio. The default sampling rate is
22,050 Hz.
* `audio/webm`
The service returns the audio in the `opus` codec. The service returns audio
with a sampling rate of 48,000 Hz.
* `audio/webm;codecs=opus`
The service returns audio with a sampling rate of 48,000 Hz.
* `audio/webm;codecs=vorbis`
You can optionally specify the `rate` of the audio. The default sampling rate is
22,050 Hz.
For more information about specifying an audio format, including additional
details about some of the formats, see [Audio
formats](https://cloud.ibm.com/docs/services/text-to-speech/audio-formats.html).
### Warning messages
If a request includes invalid query parameters, the service returns a `Warnings`
response header that provides messages about the invalid parameters. The warning
includes a descriptive message and a list of invalid argument strings. For
example, a message such as `\"Unknown arguments:\"` or `\"Unknown url query
arguments:\"` followed by a list of the form `\"{invalid_arg_1},
{invalid_arg_2}.\"` The request succeeds despite the warnings.
:param str text: The text to synthesize.
:param str voice: The voice to use for synthesis.
:param str customization_id: The customization ID (GUID) of a custom voice model
to use for the synthesis. If a custom voice model is specified, it is guaranteed
to work only if it matches the language of the indicated voice. You must make the
request with service credentials created for the instance of the service that owns
the custom model. Omit the parameter to use the specified voice with no
customization.
:param str accept: The requested format (MIME type) of the audio. You can use the
`Accept` header or the `accept` parameter to specify the audio format. For more
information about specifying an audio format, see **Audio formats (accept types)**
in the method description.
Default: `audio/ogg;codecs=opus`.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse
"""
if text is None:
raise ValueError('text must be provided')
headers = {'Accept': accept}
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
sdk_headers = get_sdk_headers('text_to_speech', 'V1', 'synthesize')
headers.update(sdk_headers)
params = {'voice': voice, 'customization_id': customization_id}
data = {'text': text}
url = '/v1/synthesize'
response = self.request(
method='POST',
url=url,
headers=headers,
params=params,
json=data,
accept_json=False)
return response
|
[
"def",
"synthesize",
"(",
"self",
",",
"text",
",",
"voice",
"=",
"None",
",",
"customization_id",
"=",
"None",
",",
"accept",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"text",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"'text must be provided'",
")",
"headers",
"=",
"{",
"'Accept'",
":",
"accept",
"}",
"if",
"'headers'",
"in",
"kwargs",
":",
"headers",
".",
"update",
"(",
"kwargs",
".",
"get",
"(",
"'headers'",
")",
")",
"sdk_headers",
"=",
"get_sdk_headers",
"(",
"'text_to_speech'",
",",
"'V1'",
",",
"'synthesize'",
")",
"headers",
".",
"update",
"(",
"sdk_headers",
")",
"params",
"=",
"{",
"'voice'",
":",
"voice",
",",
"'customization_id'",
":",
"customization_id",
"}",
"data",
"=",
"{",
"'text'",
":",
"text",
"}",
"url",
"=",
"'/v1/synthesize'",
"response",
"=",
"self",
".",
"request",
"(",
"method",
"=",
"'POST'",
",",
"url",
"=",
"url",
",",
"headers",
"=",
"headers",
",",
"params",
"=",
"params",
",",
"json",
"=",
"data",
",",
"accept_json",
"=",
"False",
")",
"return",
"response"
] |
Synthesize audio.
Synthesizes text to audio that is spoken in the specified voice. The service bases
its understanding of the language for the input text on the specified voice. Use a
voice that matches the language of the input text.
The method accepts a maximum of 5 KB of input text in the body of the request, and
8 KB for the URL and headers. The 5 KB limit includes any SSML tags that you
specify. The service returns the synthesized audio stream as an array of bytes.
**See also:** [The HTTP
interface](https://cloud.ibm.com/docs/services/text-to-speech/http.html).
### Audio formats (accept types)
The service can return audio in the following formats (MIME types).
* Where indicated, you can optionally specify the sampling rate (`rate`) of the
audio. You must specify a sampling rate for the `audio/l16` and `audio/mulaw`
formats. A specified sampling rate must lie in the range of 8 kHz to 192 kHz.
* For the `audio/l16` format, you can optionally specify the endianness
(`endianness`) of the audio: `endianness=big-endian` or
`endianness=little-endian`.
Use the `Accept` header or the `accept` parameter to specify the requested format
of the response audio. If you omit an audio format altogether, the service returns
the audio in Ogg format with the Opus codec (`audio/ogg;codecs=opus`). The service
always returns single-channel audio.
* `audio/basic`
The service returns audio with a sampling rate of 8000 Hz.
* `audio/flac`
You can optionally specify the `rate` of the audio. The default sampling rate is
22,050 Hz.
* `audio/l16`
You must specify the `rate` of the audio. You can optionally specify the
`endianness` of the audio. The default endianness is `little-endian`.
* `audio/mp3`
You can optionally specify the `rate` of the audio. The default sampling rate is
22,050 Hz.
* `audio/mpeg`
You can optionally specify the `rate` of the audio. The default sampling rate is
22,050 Hz.
* `audio/mulaw`
You must specify the `rate` of the audio.
* `audio/ogg`
The service returns the audio in the `vorbis` codec. You can optionally specify
the `rate` of the audio. The default sampling rate is 22,050 Hz.
* `audio/ogg;codecs=opus`
You can optionally specify the `rate` of the audio. The default sampling rate is
22,050 Hz.
* `audio/ogg;codecs=vorbis`
You can optionally specify the `rate` of the audio. The default sampling rate is
22,050 Hz.
* `audio/wav`
You can optionally specify the `rate` of the audio. The default sampling rate is
22,050 Hz.
* `audio/webm`
The service returns the audio in the `opus` codec. The service returns audio
with a sampling rate of 48,000 Hz.
* `audio/webm;codecs=opus`
The service returns audio with a sampling rate of 48,000 Hz.
* `audio/webm;codecs=vorbis`
You can optionally specify the `rate` of the audio. The default sampling rate is
22,050 Hz.
For more information about specifying an audio format, including additional
details about some of the formats, see [Audio
formats](https://cloud.ibm.com/docs/services/text-to-speech/audio-formats.html).
### Warning messages
If a request includes invalid query parameters, the service returns a `Warnings`
response header that provides messages about the invalid parameters. The warning
includes a descriptive message and a list of invalid argument strings. For
example, a message such as `\"Unknown arguments:\"` or `\"Unknown url query
arguments:\"` followed by a list of the form `\"{invalid_arg_1},
{invalid_arg_2}.\"` The request succeeds despite the warnings.
:param str text: The text to synthesize.
:param str voice: The voice to use for synthesis.
:param str customization_id: The customization ID (GUID) of a custom voice model
to use for the synthesis. If a custom voice model is specified, it is guaranteed
to work only if it matches the language of the indicated voice. You must make the
request with service credentials created for the instance of the service that owns
the custom model. Omit the parameter to use the specified voice with no
customization.
:param str accept: The requested format (MIME type) of the audio. You can use the
`Accept` header or the `accept` parameter to specify the audio format. For more
information about specifying an audio format, see **Audio formats (accept types)**
in the method description.
Default: `audio/ogg;codecs=opus`.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse
|
[
"Synthesize",
"audio",
"."
] |
python
|
train
|
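A hedged usage sketch for synthesize() in the record above. Client construction differs between ibm_watson releases; the iam_apikey form below matches older versions of the SDK, and the credentials, URL and voice name are placeholders.

from ibm_watson import TextToSpeechV1

tts = TextToSpeechV1(iam_apikey='YOUR_APIKEY',
                     url='https://example.watsonplatform.net/text-to-speech/api')

# Request MP3 audio for a short sentence with a specific voice.
response = tts.synthesize('Hello world',
                          voice='en-US_AllisonVoice',
                          accept='audio/mp3')

# DetailedResponse wraps the raw HTTP response; its content is the audio stream.
with open('hello.mp3', 'wb') as audio_file:
    audio_file.write(response.get_result().content)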
isislovecruft/python-gnupg
|
pretty_bad_protocol/_parsers.py
|
https://github.com/isislovecruft/python-gnupg/blob/784571449032e811587249743e183fc5e908a673/pretty_bad_protocol/_parsers.py#L49-L75
|
def _check_keyserver(location):
"""Check that a given keyserver is a known protocol and does not contain
shell escape characters.
:param str location: A string containing the default keyserver. This
should contain the desired keyserver protocol which
is supported by the keyserver, for example, the
default is ``'hkp://wwwkeys .pgp.net'``.
:rtype: :obj:`str` or :obj:`None`
:returns: A string specifying the protocol and keyserver hostname, if the
checks passed. If not, returns None.
"""
protocols = ['hkp://', 'hkps://', 'http://', 'https://', 'ldap://',
'mailto:'] ## xxx feels like i´m forgetting one...
for proto in protocols:
if location.startswith(proto):
url = location.replace(proto, str())
host, slash, extra = url.partition('/')
if extra: log.warn("URI text for %s: '%s'" % (host, extra))
log.debug("Got host string for keyserver setting: '%s'" % host)
host = _fix_unsafe(host)
if host:
log.debug("Cleaned host string: '%s'" % host)
keyserver = proto + host
return keyserver
return None
|
[
"def",
"_check_keyserver",
"(",
"location",
")",
":",
"protocols",
"=",
"[",
"'hkp://'",
",",
"'hkps://'",
",",
"'http://'",
",",
"'https://'",
",",
"'ldap://'",
",",
"'mailto:'",
"]",
"## xxx feels like i´m forgetting one...",
"for",
"proto",
"in",
"protocols",
":",
"if",
"location",
".",
"startswith",
"(",
"proto",
")",
":",
"url",
"=",
"location",
".",
"replace",
"(",
"proto",
",",
"str",
"(",
")",
")",
"host",
",",
"slash",
",",
"extra",
"=",
"url",
".",
"partition",
"(",
"'/'",
")",
"if",
"extra",
":",
"log",
".",
"warn",
"(",
"\"URI text for %s: '%s'\"",
"%",
"(",
"host",
",",
"extra",
")",
")",
"log",
".",
"debug",
"(",
"\"Got host string for keyserver setting: '%s'\"",
"%",
"host",
")",
"host",
"=",
"_fix_unsafe",
"(",
"host",
")",
"if",
"host",
":",
"log",
".",
"debug",
"(",
"\"Cleaned host string: '%s'\"",
"%",
"host",
")",
"keyserver",
"=",
"proto",
"+",
"host",
"return",
"keyserver",
"return",
"None"
] |
Check that a given keyserver is a known protocol and does not contain
shell escape characters.
:param str location: A string containing the default keyserver. This
should contain the desired keyserver protocol which
is supported by the keyserver, for example, the
default is ``'hkp://wwwkeys .pgp.net'``.
:rtype: :obj:`str` or :obj:`None`
:returns: A string specifying the protocol and keyserver hostname, if the
checks passed. If not, returns None.
|
[
"Check",
"that",
"a",
"given",
"keyserver",
"is",
"a",
"known",
"protocol",
"and",
"does",
"not",
"contain",
"shell",
"escape",
"characters",
"."
] |
python
|
train
|
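A short illustration of the inputs and outputs of _check_keyserver() in the record above. It is a private helper, so the import below is an assumption; the expected results follow from the code itself.

from pretty_bad_protocol._parsers import _check_keyserver   # assumed import of a private helper

print(_check_keyserver('hkp://keys.example.org'))          # -> 'hkp://keys.example.org'
print(_check_keyserver('https://keys.example.org/extra'))  # -> 'https://keys.example.org' (trailing path is logged and dropped)
print(_check_keyserver('ftp://keys.example.org'))          # -> None (unknown protocol)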
ArduPilot/MAVProxy
|
MAVProxy/modules/mavproxy_relay.py
|
https://github.com/ArduPilot/MAVProxy/blob/f50bdeff33064876f7dc8dc4683d278ff47f75d5/MAVProxy/modules/mavproxy_relay.py#L15-L37
|
def cmd_relay(self, args):
'''set relays'''
if len(args) == 0 or args[0] not in ['set', 'repeat']:
print("Usage: relay <set|repeat>")
return
if args[0] == "set":
if len(args) < 3:
print("Usage: relay set <RELAY_NUM> <0|1>")
return
self.master.mav.command_long_send(self.target_system,
self.target_component,
mavutil.mavlink.MAV_CMD_DO_SET_RELAY, 0,
int(args[1]), int(args[2]),
0, 0, 0, 0, 0)
if args[0] == "repeat":
if len(args) < 4:
print("Usage: relay repeat <RELAY_NUM> <COUNT> <PERIOD>")
return
self.master.mav.command_long_send(self.target_system,
self.target_component,
mavutil.mavlink.MAV_CMD_DO_REPEAT_RELAY, 0,
int(args[1]), int(args[2]), float(args[3]),
0, 0, 0, 0)
|
[
"def",
"cmd_relay",
"(",
"self",
",",
"args",
")",
":",
"if",
"len",
"(",
"args",
")",
"==",
"0",
"or",
"args",
"[",
"0",
"]",
"not",
"in",
"[",
"'set'",
",",
"'repeat'",
"]",
":",
"print",
"(",
"\"Usage: relay <set|repeat>\"",
")",
"return",
"if",
"args",
"[",
"0",
"]",
"==",
"\"set\"",
":",
"if",
"len",
"(",
"args",
")",
"<",
"3",
":",
"print",
"(",
"\"Usage: relay set <RELAY_NUM> <0|1>\"",
")",
"return",
"self",
".",
"master",
".",
"mav",
".",
"command_long_send",
"(",
"self",
".",
"target_system",
",",
"self",
".",
"target_component",
",",
"mavutil",
".",
"mavlink",
".",
"MAV_CMD_DO_SET_RELAY",
",",
"0",
",",
"int",
"(",
"args",
"[",
"1",
"]",
")",
",",
"int",
"(",
"args",
"[",
"2",
"]",
")",
",",
"0",
",",
"0",
",",
"0",
",",
"0",
",",
"0",
")",
"if",
"args",
"[",
"0",
"]",
"==",
"\"repeat\"",
":",
"if",
"len",
"(",
"args",
")",
"<",
"4",
":",
"print",
"(",
"\"Usage: relay repeat <RELAY_NUM> <COUNT> <PERIOD>\"",
")",
"return",
"self",
".",
"master",
".",
"mav",
".",
"command_long_send",
"(",
"self",
".",
"target_system",
",",
"self",
".",
"target_component",
",",
"mavutil",
".",
"mavlink",
".",
"MAV_CMD_DO_REPEAT_RELAY",
",",
"0",
",",
"int",
"(",
"args",
"[",
"1",
"]",
")",
",",
"int",
"(",
"args",
"[",
"2",
"]",
")",
",",
"float",
"(",
"args",
"[",
"3",
"]",
")",
",",
"0",
",",
"0",
",",
"0",
",",
"0",
")"
] |
set relays
|
[
"set",
"relays"
] |
python
|
train
|
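A minimal sketch of the argument shapes cmd_relay() in the record above expects. The relay module instance (with its MAVLink master and target system/component) comes from a running MAVProxy session, so it is passed in as an assumption here.

def demo_relay_commands(relay_module):
    relay_module.cmd_relay(['set', '0', '1'])            # switch relay 0 on
    relay_module.cmd_relay(['repeat', '1', '5', '2.0'])  # toggle relay 1 five times with a 2 s period
    relay_module.cmd_relay([])                           # prints the usage help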
orbingol/NURBS-Python
|
geomdl/visualization/VisMPL.py
|
https://github.com/orbingol/NURBS-Python/blob/b1c6a8b51cf143ff58761438e93ba6baef470627/geomdl/visualization/VisMPL.py#L298-L402
|
def animate(self, **kwargs):
""" Animates the surface.
This function only animates the triangulated surface. There will be no other elements, such as control points
grid or bounding box.
Keyword arguments:
* ``colormap``: applies colormap to the surface
Colormaps are a visualization feature of Matplotlib. They can be used for several types of surface plots via
the following import statement: ``from matplotlib import cm``
The following link displays the list of Matplolib colormaps and some examples on colormaps:
https://matplotlib.org/tutorials/colors/colormaps.html
"""
# Calling parent render function
super(VisSurface, self).render(**kwargs)
# Colormaps
surf_cmaps = kwargs.get('colormap', None)
# Initialize variables
tri_idxs = []
vert_coords = []
trisurf_params = []
frames = []
frames_tris = []
num_vertices = 0
# Start plotting of the surface and the control points grid
fig = plt.figure(figsize=self.vconf.figure_size, dpi=self.vconf.figure_dpi)
ax = Axes3D(fig)
# Start plotting
surf_count = 0
for plot in self._plots:
# Plot evaluated points
if plot['type'] == 'evalpts' and self.vconf.display_evalpts:
# Use internal triangulation algorithm instead of Qhull (MPL default)
verts = plot['ptsarr'][0]
tris = plot['ptsarr'][1]
# Extract zero-indexed vertex number list
tri_idxs += [[ti + num_vertices for ti in tri.data] for tri in tris]
# Extract vertex coordinates
vert_coords += [vert.data for vert in verts]
# Update number of vertices
num_vertices = len(vert_coords)
# Determine the color or the colormap of the triangulated plot
params = {}
if surf_cmaps:
try:
params['cmap'] = surf_cmaps[surf_count]
surf_count += 1
except IndexError:
params['color'] = plot['color']
else:
params['color'] = plot['color']
trisurf_params += [params for _ in range(len(tris))]
# Pre-processing for the animation
pts = np.array(vert_coords, dtype=self.vconf.dtype)
# Create the frames (Artists)
for tidx, pidx in zip(tri_idxs, trisurf_params):
frames_tris.append(tidx)
# Create MPL Triangulation object
triangulation = mpltri.Triangulation(pts[:, 0], pts[:, 1], triangles=frames_tris)
# Use custom Triangulation object and the choice of color/colormap to plot the surface
p3df = ax.plot_trisurf(triangulation, pts[:, 2], alpha=self.vconf.alpha, **pidx)
# Add to frames list
frames.append([p3df])
# Create MPL ArtistAnimation
ani = animation.ArtistAnimation(fig, frames, interval=100, blit=True, repeat_delay=1000)
# Remove axes
if not self.vconf.display_axes:
plt.axis('off')
# Set axes equal
if self.vconf.axes_equal:
self.vconf.set_axes_equal(ax)
# Axis labels
if self.vconf.display_labels:
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('z')
# Process keyword arguments
fig_filename = kwargs.get('fig_save_as', None)
fig_display = kwargs.get('display_plot', True)
# Display the plot
if fig_display:
plt.show()
else:
fig_filename = self.vconf.figure_image_filename if fig_filename is None else fig_filename
# Save the figure
self.vconf.save_figure_as(fig, fig_filename)
# Return the figure object
return fig
|
[
"def",
"animate",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"# Calling parent render function",
"super",
"(",
"VisSurface",
",",
"self",
")",
".",
"render",
"(",
"*",
"*",
"kwargs",
")",
"# Colormaps",
"surf_cmaps",
"=",
"kwargs",
".",
"get",
"(",
"'colormap'",
",",
"None",
")",
"# Initialize variables",
"tri_idxs",
"=",
"[",
"]",
"vert_coords",
"=",
"[",
"]",
"trisurf_params",
"=",
"[",
"]",
"frames",
"=",
"[",
"]",
"frames_tris",
"=",
"[",
"]",
"num_vertices",
"=",
"0",
"# Start plotting of the surface and the control points grid",
"fig",
"=",
"plt",
".",
"figure",
"(",
"figsize",
"=",
"self",
".",
"vconf",
".",
"figure_size",
",",
"dpi",
"=",
"self",
".",
"vconf",
".",
"figure_dpi",
")",
"ax",
"=",
"Axes3D",
"(",
"fig",
")",
"# Start plotting",
"surf_count",
"=",
"0",
"for",
"plot",
"in",
"self",
".",
"_plots",
":",
"# Plot evaluated points",
"if",
"plot",
"[",
"'type'",
"]",
"==",
"'evalpts'",
"and",
"self",
".",
"vconf",
".",
"display_evalpts",
":",
"# Use internal triangulation algorithm instead of Qhull (MPL default)",
"verts",
"=",
"plot",
"[",
"'ptsarr'",
"]",
"[",
"0",
"]",
"tris",
"=",
"plot",
"[",
"'ptsarr'",
"]",
"[",
"1",
"]",
"# Extract zero-indexed vertex number list",
"tri_idxs",
"+=",
"[",
"[",
"ti",
"+",
"num_vertices",
"for",
"ti",
"in",
"tri",
".",
"data",
"]",
"for",
"tri",
"in",
"tris",
"]",
"# Extract vertex coordinates",
"vert_coords",
"+=",
"[",
"vert",
".",
"data",
"for",
"vert",
"in",
"verts",
"]",
"# Update number of vertices",
"num_vertices",
"=",
"len",
"(",
"vert_coords",
")",
"# Determine the color or the colormap of the triangulated plot",
"params",
"=",
"{",
"}",
"if",
"surf_cmaps",
":",
"try",
":",
"params",
"[",
"'cmap'",
"]",
"=",
"surf_cmaps",
"[",
"surf_count",
"]",
"surf_count",
"+=",
"1",
"except",
"IndexError",
":",
"params",
"[",
"'color'",
"]",
"=",
"plot",
"[",
"'color'",
"]",
"else",
":",
"params",
"[",
"'color'",
"]",
"=",
"plot",
"[",
"'color'",
"]",
"trisurf_params",
"+=",
"[",
"params",
"for",
"_",
"in",
"range",
"(",
"len",
"(",
"tris",
")",
")",
"]",
"# Pre-processing for the animation",
"pts",
"=",
"np",
".",
"array",
"(",
"vert_coords",
",",
"dtype",
"=",
"self",
".",
"vconf",
".",
"dtype",
")",
"# Create the frames (Artists)",
"for",
"tidx",
",",
"pidx",
"in",
"zip",
"(",
"tri_idxs",
",",
"trisurf_params",
")",
":",
"frames_tris",
".",
"append",
"(",
"tidx",
")",
"# Create MPL Triangulation object",
"triangulation",
"=",
"mpltri",
".",
"Triangulation",
"(",
"pts",
"[",
":",
",",
"0",
"]",
",",
"pts",
"[",
":",
",",
"1",
"]",
",",
"triangles",
"=",
"frames_tris",
")",
"# Use custom Triangulation object and the choice of color/colormap to plot the surface",
"p3df",
"=",
"ax",
".",
"plot_trisurf",
"(",
"triangulation",
",",
"pts",
"[",
":",
",",
"2",
"]",
",",
"alpha",
"=",
"self",
".",
"vconf",
".",
"alpha",
",",
"*",
"*",
"pidx",
")",
"# Add to frames list",
"frames",
".",
"append",
"(",
"[",
"p3df",
"]",
")",
"# Create MPL ArtistAnimation",
"ani",
"=",
"animation",
".",
"ArtistAnimation",
"(",
"fig",
",",
"frames",
",",
"interval",
"=",
"100",
",",
"blit",
"=",
"True",
",",
"repeat_delay",
"=",
"1000",
")",
"# Remove axes",
"if",
"not",
"self",
".",
"vconf",
".",
"display_axes",
":",
"plt",
".",
"axis",
"(",
"'off'",
")",
"# Set axes equal",
"if",
"self",
".",
"vconf",
".",
"axes_equal",
":",
"self",
".",
"vconf",
".",
"set_axes_equal",
"(",
"ax",
")",
"# Axis labels",
"if",
"self",
".",
"vconf",
".",
"display_labels",
":",
"ax",
".",
"set_xlabel",
"(",
"'x'",
")",
"ax",
".",
"set_ylabel",
"(",
"'y'",
")",
"ax",
".",
"set_zlabel",
"(",
"'z'",
")",
"# Process keyword arguments",
"fig_filename",
"=",
"kwargs",
".",
"get",
"(",
"'fig_save_as'",
",",
"None",
")",
"fig_display",
"=",
"kwargs",
".",
"get",
"(",
"'display_plot'",
",",
"True",
")",
"# Display the plot",
"if",
"fig_display",
":",
"plt",
".",
"show",
"(",
")",
"else",
":",
"fig_filename",
"=",
"self",
".",
"vconf",
".",
"figure_image_filename",
"if",
"fig_filename",
"is",
"None",
"else",
"fig_filename",
"# Save the figure",
"self",
".",
"vconf",
".",
"save_figure_as",
"(",
"fig",
",",
"fig_filename",
")",
"# Return the figure object",
"return",
"fig"
] |
Animates the surface.
This function only animates the triangulated surface. There will be no other elements, such as control points
grid or bounding box.
Keyword arguments:
* ``colormap``: applies colormap to the surface
Colormaps are a visualization feature of Matplotlib. They can be used for several types of surface plots via
the following import statement: ``from matplotlib import cm``
The following link displays the list of Matplolib colormaps and some examples on colormaps:
https://matplotlib.org/tutorials/colors/colormaps.html
|
[
"Animates",
"the",
"surface",
"."
] |
python
|
train
|
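A hedged sketch of calling animate() from the record above. The keyword arguments shown (colormap, display_plot, fig_save_as) are read directly by the method; the VisSurface instance is assumed to be attached to a geomdl surface and already populated with evaluated points by the library's render pipeline.

from matplotlib import cm

def animate_surface(vis):
    # vis: a VisMPL.VisSurface component with its plot data already populated.
    return vis.animate(colormap=[cm.coolwarm],        # one colormap per plotted surface
                       display_plot=False,            # skip plt.show()
                       fig_save_as='surface_anim.png')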
paylogic/pip-accel
|
pip_accel/__init__.py
|
https://github.com/paylogic/pip-accel/blob/ccad1b784927a322d996db593403b1d2d2e22666/pip_accel/__init__.py#L618-L627
|
def cleanup_temporary_directories(self):
"""Delete the build directories and any temporary directories created by pip."""
while self.build_directories:
shutil.rmtree(self.build_directories.pop())
for requirement in self.reported_requirements:
requirement.remove_temporary_source()
while self.eggs_links:
symbolic_link = self.eggs_links.pop()
if os.path.islink(symbolic_link):
os.unlink(symbolic_link)
|
[
"def",
"cleanup_temporary_directories",
"(",
"self",
")",
":",
"while",
"self",
".",
"build_directories",
":",
"shutil",
".",
"rmtree",
"(",
"self",
".",
"build_directories",
".",
"pop",
"(",
")",
")",
"for",
"requirement",
"in",
"self",
".",
"reported_requirements",
":",
"requirement",
".",
"remove_temporary_source",
"(",
")",
"while",
"self",
".",
"eggs_links",
":",
"symbolic_link",
"=",
"self",
".",
"eggs_links",
".",
"pop",
"(",
")",
"if",
"os",
".",
"path",
".",
"islink",
"(",
"symbolic_link",
")",
":",
"os",
".",
"unlink",
"(",
"symbolic_link",
")"
] |
Delete the build directories and any temporary directories created by pip.
|
[
"Delete",
"the",
"build",
"directories",
"and",
"any",
"temporary",
"directories",
"created",
"by",
"pip",
"."
] |
python
|
train
|
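The same clean-up pattern as the record above, in isolation: pop and delete each tracked build directory, then unlink any remaining egg-link symlinks. The paths are placeholders.

import os
import shutil

build_directories = ['/tmp/pip-accel-build-1', '/tmp/pip-accel-build-2']
eggs_links = ['/tmp/site-packages/example.egg-link']

while build_directories:
    # ignore_errors avoids failing on directories that were never created
    shutil.rmtree(build_directories.pop(), ignore_errors=True)

while eggs_links:
    link = eggs_links.pop()
    if os.path.islink(link):
        os.unlink(link)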
Accelize/pycosio
|
pycosio/_core/io_base_raw.py
|
https://github.com/Accelize/pycosio/blob/1cc1f8fdf5394d92918b7bae2bfa682169ccc48c/pycosio/_core/io_base_raw.py#L401-L428
|
def write(self, b):
"""
Write the given bytes-like object, b, to the underlying raw stream,
and return the number of bytes written.
Args:
b (bytes-like object): Bytes to write.
Returns:
int: The number of bytes written.
"""
if not self._writable:
raise UnsupportedOperation('write')
# This function write data in a buffer
# "flush()" need to be called to really write content on
# Cloud Storage
size = len(b)
with self._seek_lock:
start = self._seek
end = start + size
self._seek = end
buffer = self._write_buffer
if end <= len(buffer):
buffer = memoryview(buffer)
buffer[start:end] = b
return size
|
[
"def",
"write",
"(",
"self",
",",
"b",
")",
":",
"if",
"not",
"self",
".",
"_writable",
":",
"raise",
"UnsupportedOperation",
"(",
"'write'",
")",
"# This function write data in a buffer",
"# \"flush()\" need to be called to really write content on",
"# Cloud Storage",
"size",
"=",
"len",
"(",
"b",
")",
"with",
"self",
".",
"_seek_lock",
":",
"start",
"=",
"self",
".",
"_seek",
"end",
"=",
"start",
"+",
"size",
"self",
".",
"_seek",
"=",
"end",
"buffer",
"=",
"self",
".",
"_write_buffer",
"if",
"end",
"<=",
"len",
"(",
"buffer",
")",
":",
"buffer",
"=",
"memoryview",
"(",
"buffer",
")",
"buffer",
"[",
"start",
":",
"end",
"]",
"=",
"b",
"return",
"size"
] |
Write the given bytes-like object, b, to the underlying raw stream,
and return the number of bytes written.
Args:
b (bytes-like object): Bytes to write.
Returns:
int: The number of bytes written.
|
[
"Write",
"the",
"given",
"bytes",
"-",
"like",
"object",
"b",
"to",
"the",
"underlying",
"raw",
"stream",
"and",
"return",
"the",
"number",
"of",
"bytes",
"written",
"."
] |
python
|
train
|
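A usage-level sketch of the buffering behaviour described in the record above: write() only copies bytes into the in-memory buffer and advances the seek position, and the data reaches the object store when the buffer is flushed or the file is closed. pycosio.open is the library's generic opener and the URL is a placeholder.

import pycosio

with pycosio.open('s3://my-bucket/example.bin', 'wb') as remote_file:
    written = remote_file.write(b'hello world')   # buffered only; returns 11
    # leaving the with-block flushes the buffered bytes to the storage backend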
brutus/wtforms-html5
|
wtforms_html5.py
|
https://github.com/brutus/wtforms-html5/blob/a00ab7c68e6238bfa317f40ec3de807dae8ed85e/wtforms_html5.py#L208-L254
|
def get_html5_kwargs(field, render_kw=None, force=False):
"""
Returns a copy of *render_kw* with keys added for a bound *field*.
If some *render_kw* are given, the new keys are added to a copy of them,
which is then returned. If none are given, a dictionary containing only
the automatically generated keys is returned.
.. important::
This might add new keys but won't changes any values if a key is
already in *render_kw*, unless *force* is used.
Raises:
ValueError: if *field* is an :cls:`UnboundField`.
The following keys are set automatically:
:required:
Sets the *required* key if the `required` flag is set for the
field (this is mostly the case if it is set by validators). The
`required` attribute is used by browsers to indicate a required field.
:invalid:
Set (or appends) 'invalid' to the fields CSS class(es), if the *field*
got any errors. 'invalid' is also set by browsers if they detect
errors on a field.
:min / max:
Sets *min* and / or *max* keys if a `Length` or `NumberRange`
validator is using them.
:title:
If the field got a *description* but no *title* key is set, the
*title* is set to *description*.
"""
if isinstance(field, UnboundField):
msg = 'This function needs a bound field not: {}'
raise ValueError(msg.format(field))
kwargs = render_kw.copy() if render_kw else {}
kwargs = set_required(field, kwargs, force) # is field required?
kwargs = set_invalid(field, kwargs) # is field invalid?
kwargs = set_minmax(field, kwargs, force) # check validators for min/max
kwargs = set_title(field, kwargs) # missing tile?
return kwargs
|
[
"def",
"get_html5_kwargs",
"(",
"field",
",",
"render_kw",
"=",
"None",
",",
"force",
"=",
"False",
")",
":",
"if",
"isinstance",
"(",
"field",
",",
"UnboundField",
")",
":",
"msg",
"=",
"'This function needs a bound field not: {}'",
"raise",
"ValueError",
"(",
"msg",
".",
"format",
"(",
"field",
")",
")",
"kwargs",
"=",
"render_kw",
".",
"copy",
"(",
")",
"if",
"render_kw",
"else",
"{",
"}",
"kwargs",
"=",
"set_required",
"(",
"field",
",",
"kwargs",
",",
"force",
")",
"# is field required?",
"kwargs",
"=",
"set_invalid",
"(",
"field",
",",
"kwargs",
")",
"# is field invalid?",
"kwargs",
"=",
"set_minmax",
"(",
"field",
",",
"kwargs",
",",
"force",
")",
"# check validators for min/max",
"kwargs",
"=",
"set_title",
"(",
"field",
",",
"kwargs",
")",
"# missing tile?",
"return",
"kwargs"
] |
Returns a copy of *render_kw* with keys added for a bound *field*.
If some *render_kw* are given, the new keys are added to a copy of them,
which is then returned. If none are given, a dictionary containing only
the automatically generated keys is returned.
.. important::
This might add new keys but won't changes any values if a key is
already in *render_kw*, unless *force* is used.
Raises:
ValueError: if *field* is an :cls:`UnboundField`.
The following keys are set automatically:
:required:
Sets the *required* key if the `required` flag is set for the
field (this is mostly the case if it is set by validators). The
`required` attribute is used by browsers to indicate a required field.
:invalid:
Set (or appends) 'invalid' to the fields CSS class(es), if the *field*
got any errors. 'invalid' is also set by browsers if they detect
errors on a field.
:min / max:
Sets *min* and / or *max* keys if a `Length` or `NumberRange`
validator is using them.
:title:
If the field got a *description* but no *title* key is set, the
*title* is set to *description*.
|
[
"Returns",
"a",
"copy",
"of",
"*",
"render_kw",
"*",
"with",
"keys",
"added",
"for",
"a",
"bound",
"*",
"field",
"*",
"."
] |
python
|
train
|
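A hedged usage sketch for get_html5_kwargs() in the record above; it needs a bound WTForms field, so the form below is only a minimal stand-in to show the kind of keys that come back.

from wtforms import Form, StringField, validators
from wtforms_html5 import get_html5_kwargs

class DemoForm(Form):
    name = StringField('Name', description='Your full name',
                       validators=[validators.InputRequired(),
                                   validators.Length(min=2, max=20)])

form = DemoForm()
kwargs = get_html5_kwargs(form.name, render_kw={'class': 'wide'})
# Expected to keep the original render_kw and add keys derived from the field,
# e.g. 'required', min/max values from the Length validator and a 'title'.
print(kwargs)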
EUDAT-B2SAFE/B2HANDLE
|
b2handle/handleclient.py
|
https://github.com/EUDAT-B2SAFE/B2HANDLE/blob/a6d216d459644e01fbdfd5b318a535950bc5cdbb/b2handle/handleclient.py#L731-L781
|
def add_additional_URL(self, handle, *urls, **attributes):
'''
Add a URL entry to the handle record's 10320/LOC entry. If 10320/LOC
does not exist yet, it is created. If the 10320/LOC entry already
contains the URL, it is not added a second time.
:param handle: The handle to add the URL to.
:param urls: The URL(s) to be added. Several URLs may be specified.
:param attributes: Optional. Additional key-value pairs to set as
attributes to the <location> elements, e.g. weight, http_role or
custom attributes. Note: If the URL already exists but the
attributes are different, they are updated!
:raises: :exc:`~b2handle.handleexceptions.HandleNotFoundException`
:raises: :exc:`~b2handle.handleexceptions.HandleSyntaxError`
:raises: :exc:`~b2handle.handleexceptions.HandleAuthenticationError`
'''
LOGGER.debug('add_additional_URL...')
handlerecord_json = self.retrieve_handle_record_json(handle)
if handlerecord_json is None:
msg = 'Cannot add URLS to unexisting handle!'
raise HandleNotFoundException(handle=handle, msg=msg)
list_of_entries = handlerecord_json['values']
is_new = False
for url in urls:
if not self.is_URL_contained_in_10320LOC(handle, url, handlerecord_json):
is_new = True
if not is_new:
LOGGER.debug("add_additional_URL: No new URL to be added (so no URL is added at all).")
else:
for url in urls:
self.__add_URL_to_10320LOC(url, list_of_entries, handle)
op = 'adding URLs'
resp, put_payload = self.__send_handle_put_request(handle, list_of_entries, overwrite=True, op=op)
# TODO FIXME (one day) Overwrite by index.
if hsresponses.handle_success(resp):
pass
else:
msg = 'Could not add URLs ' + str(urls)
raise GenericHandleError(
operation=op,
handle=handle,
reponse=resp,
msg=msg,
payload=put_payload
)
|
[
"def",
"add_additional_URL",
"(",
"self",
",",
"handle",
",",
"*",
"urls",
",",
"*",
"*",
"attributes",
")",
":",
"LOGGER",
".",
"debug",
"(",
"'add_additional_URL...'",
")",
"handlerecord_json",
"=",
"self",
".",
"retrieve_handle_record_json",
"(",
"handle",
")",
"if",
"handlerecord_json",
"is",
"None",
":",
"msg",
"=",
"'Cannot add URLS to unexisting handle!'",
"raise",
"HandleNotFoundException",
"(",
"handle",
"=",
"handle",
",",
"msg",
"=",
"msg",
")",
"list_of_entries",
"=",
"handlerecord_json",
"[",
"'values'",
"]",
"is_new",
"=",
"False",
"for",
"url",
"in",
"urls",
":",
"if",
"not",
"self",
".",
"is_URL_contained_in_10320LOC",
"(",
"handle",
",",
"url",
",",
"handlerecord_json",
")",
":",
"is_new",
"=",
"True",
"if",
"not",
"is_new",
":",
"LOGGER",
".",
"debug",
"(",
"\"add_additional_URL: No new URL to be added (so no URL is added at all).\"",
")",
"else",
":",
"for",
"url",
"in",
"urls",
":",
"self",
".",
"__add_URL_to_10320LOC",
"(",
"url",
",",
"list_of_entries",
",",
"handle",
")",
"op",
"=",
"'adding URLs'",
"resp",
",",
"put_payload",
"=",
"self",
".",
"__send_handle_put_request",
"(",
"handle",
",",
"list_of_entries",
",",
"overwrite",
"=",
"True",
",",
"op",
"=",
"op",
")",
"# TODO FIXME (one day) Overwrite by index.",
"if",
"hsresponses",
".",
"handle_success",
"(",
"resp",
")",
":",
"pass",
"else",
":",
"msg",
"=",
"'Could not add URLs '",
"+",
"str",
"(",
"urls",
")",
"raise",
"GenericHandleError",
"(",
"operation",
"=",
"op",
",",
"handle",
"=",
"handle",
",",
"reponse",
"=",
"resp",
",",
"msg",
"=",
"msg",
",",
"payload",
"=",
"put_payload",
")"
] |
Add a URL entry to the handle record's 10320/LOC entry. If 10320/LOC
does not exist yet, it is created. If the 10320/LOC entry already
contains the URL, it is not added a second time.
:param handle: The handle to add the URL to.
:param urls: The URL(s) to be added. Several URLs may be specified.
:param attributes: Optional. Additional key-value pairs to set as
attributes to the <location> elements, e.g. weight, http_role or
custom attributes. Note: If the URL already exists but the
attributes are different, they are updated!
:raises: :exc:`~b2handle.handleexceptions.HandleNotFoundException`
:raises: :exc:`~b2handle.handleexceptions.HandleSyntaxError`
:raises: :exc:`~b2handle.handleexceptions.HandleAuthenticationError`
|
[
"Add",
"a",
"URL",
"entry",
"to",
"the",
"handle",
"record",
"s",
"10320",
"/",
"LOC",
"entry",
".",
"If",
"10320",
"/",
"LOC",
"does",
"not",
"exist",
"yet",
"it",
"is",
"created",
".",
"If",
"the",
"10320",
"/",
"LOC",
"entry",
"already",
"contains",
"the",
"URL",
"it",
"is",
"not",
"added",
"a",
"second",
"time",
"."
] |
python
|
train
|
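A hedged usage sketch for add_additional_URL() in the record above. The client construction follows the usual b2handle pattern, but the factory method, server URL, credentials and handle name are placeholders/assumptions rather than part of the record.

from b2handle.handleclient import EUDATHandleClient

client = EUDATHandleClient.instantiate_with_username_and_password(
    'https://handle.example.org:8000', '300:11022/USER01', 'password')

# Add two locations to the handle's 10320/LOC entry, with an extra attribute
# applied to the <location> elements.
client.add_additional_URL('11022/example-handle',
                          'https://mirror-a.example.org/data',
                          'https://mirror-b.example.org/data',
                          http_role='conneg')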
Opentrons/opentrons
|
api/src/opentrons/protocol_api/contexts.py
|
https://github.com/Opentrons/opentrons/blob/a7c15cc2636ecb64ab56c7edc1d8a57163aaeadf/api/src/opentrons/protocol_api/contexts.py#L367-L374
|
def loaded_instruments(self) -> Dict[str, Optional['InstrumentContext']]:
""" Get the instruments that have been loaded into the protocol.
:returns: A dict mapping mount names in lowercase to the instrument
in that mount, or `None` if no instrument is present.
"""
return {mount.name.lower(): instr for mount, instr
in self._instruments.items()}
|
[
"def",
"loaded_instruments",
"(",
"self",
")",
"->",
"Dict",
"[",
"str",
",",
"Optional",
"[",
"'InstrumentContext'",
"]",
"]",
":",
"return",
"{",
"mount",
".",
"name",
".",
"lower",
"(",
")",
":",
"instr",
"for",
"mount",
",",
"instr",
"in",
"self",
".",
"_instruments",
".",
"items",
"(",
")",
"}"
] |
Get the instruments that have been loaded into the protocol.
:returns: A dict mapping mount names in lowercase to the instrument
in that mount, or `None` if no instrument is present.
|
[
"Get",
"the",
"instruments",
"that",
"have",
"been",
"loaded",
"into",
"the",
"protocol",
"."
] |
python
|
train
|
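A short sketch in the Opentrons Protocol API style showing where loaded_instruments (from the record above) fits; the pipette name and mount are illustrative and the exact dict shape may vary by API version.

from opentrons import protocol_api

def run(protocol: protocol_api.ProtocolContext):
    protocol.load_instrument('p300_single', 'right')
    for mount, pipette in protocol.loaded_instruments.items():
        # e.g. 'right' -> the loaded pipette, 'left' -> None
        protocol.comment('{}: {}'.format(mount, pipette))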
ewels/MultiQC
|
multiqc/modules/mirtrace/mirtrace.py
|
https://github.com/ewels/MultiQC/blob/2037d6322b2554146a74efbf869156ad20d4c4ec/multiqc/modules/mirtrace/mirtrace.py#L241-L271
|
def mirtrace_length_plot(self):
""" Generate the miRTrace Read Length Distribution"""
data = dict()
for s_name in self.length_data:
try:
data[s_name] = {int(d): int(self.length_data[s_name][d]) for d in self.length_data[s_name]}
except KeyError:
pass
if len(data) == 0:
log.debug('No valid data for read length distribution')
return None
config = {
'id': 'mirtrace_length_plot',
'title': 'miRTrace: Read Length Distribution',
'ylab': 'Read Count',
'xlab': 'Read Lenth (bp)',
'ymin': 0,
'xmin': 0,
'xDecimals': False,
'tt_label': '<b>Read Length (bp) {point.x}</b>: {point.y} Read Count',
'xPlotBands': [
{'from': 40, 'to': 50, 'color': '#ffebd1'},
{'from': 26, 'to': 40, 'color': '#e2f5ff'},
{'from': 18, 'to': 26, 'color': '#e5fce0'},
{'from': 0, 'to': 18, 'color': '#ffffe2'},
]
}
return linegraph.plot(data, config)
|
[
"def",
"mirtrace_length_plot",
"(",
"self",
")",
":",
"data",
"=",
"dict",
"(",
")",
"for",
"s_name",
"in",
"self",
".",
"length_data",
":",
"try",
":",
"data",
"[",
"s_name",
"]",
"=",
"{",
"int",
"(",
"d",
")",
":",
"int",
"(",
"self",
".",
"length_data",
"[",
"s_name",
"]",
"[",
"d",
"]",
")",
"for",
"d",
"in",
"self",
".",
"length_data",
"[",
"s_name",
"]",
"}",
"except",
"KeyError",
":",
"pass",
"if",
"len",
"(",
"data",
")",
"==",
"0",
":",
"log",
".",
"debug",
"(",
"'No valid data for read length distribution'",
")",
"return",
"None",
"config",
"=",
"{",
"'id'",
":",
"'mirtrace_length_plot'",
",",
"'title'",
":",
"'miRTrace: Read Length Distribution'",
",",
"'ylab'",
":",
"'Read Count'",
",",
"'xlab'",
":",
"'Read Lenth (bp)'",
",",
"'ymin'",
":",
"0",
",",
"'xmin'",
":",
"0",
",",
"'xDecimals'",
":",
"False",
",",
"'tt_label'",
":",
"'<b>Read Length (bp) {point.x}</b>: {point.y} Read Count'",
",",
"'xPlotBands'",
":",
"[",
"{",
"'from'",
":",
"40",
",",
"'to'",
":",
"50",
",",
"'color'",
":",
"'#ffebd1'",
"}",
",",
"{",
"'from'",
":",
"26",
",",
"'to'",
":",
"40",
",",
"'color'",
":",
"'#e2f5ff'",
"}",
",",
"{",
"'from'",
":",
"18",
",",
"'to'",
":",
"26",
",",
"'color'",
":",
"'#e5fce0'",
"}",
",",
"{",
"'from'",
":",
"0",
",",
"'to'",
":",
"18",
",",
"'color'",
":",
"'#ffffe2'",
"}",
",",
"]",
"}",
"return",
"linegraph",
".",
"plot",
"(",
"data",
",",
"config",
")"
] |
Generate the miRTrace Read Length Distribution
|
[
"Generate",
"the",
"miRTrace",
"Read",
"Length",
"Distribution"
] |
python
|
train
|
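The core data conversion from the record above in isolation: miRTrace reports read-length counts keyed by strings, and the plot wants integer keys and values per sample. Sample names and counts below are invented.

length_data = {
    'sample_1': {'18': '1200', '22': '53000', '30': '400'},
    'sample_2': {'18': '900', '22': '61000', '30': '350'},
}

data = {s_name: {int(length): int(count) for length, count in counts.items()}
        for s_name, counts in length_data.items()}
print(data['sample_1'][22])   # -> 53000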
reingart/pyafipws
|
wsltv.py
|
https://github.com/reingart/pyafipws/blob/ee87cfe4ac12285ab431df5fec257f103042d1ab/wsltv.py#L314-L409
|
def AnalizarLiquidacion(self, liq):
"Método interno para analizar la respuesta de AFIP"
# proceso los datos básicos de la liquidación (devuelto por consultar):
if liq:
cab = liq['cabecera']
self.CAE = str(cab['cae'])
self.FechaLiquidacion = cab['fechaLiquidacion']
self.NroComprobante = cab['nroComprobante']
tot = liq['totalesOperacion']
self.AlicuotaIVA = tot['alicuotaIVA']
self.ImporteNeto = tot['importeNeto']
self.ImporteIVA = tot['importeIVA']
self.Subtotal = tot['subtotal']
self.TotalRetenciones = tot['totalRetenciones']
self.TotalTributos = tot['totalTributos']
self.Total = tot['total']
# parámetros de salida:
self.params_out = dict(
tipo_cbte=liq['cabecera']['tipoComprobante'],
pto_vta=liq['cabecera']['puntoVenta'],
nro_cbte=liq['cabecera']['nroComprobante'],
fecha=liq['cabecera']['fechaLiquidacion'],
cod_deposito_acopio=liq['cabecera']['codDepositoAcopio'],
cae=str(liq['cabecera']['cae']),
domicilio_punto_venta=liq['cabecera']['domicilioPuntoVenta'],
domicilio_deposito_acopio=liq['cabecera']['domicilioDepositoAcopio'],
emisor=dict(
cuit=liq['emisor']['cuit'],
razon_social=liq['emisor']['razonSocial'],
situacion_iva=liq['emisor']['situacionIVA'],
domicilio=liq['emisor']['domicilio'],
fecha_inicio_actividad=liq['emisor']['fechaInicioActividad'],
),
receptor=dict(
cuit=liq['receptor']['cuit'],
razon_social=liq['receptor']['razonSocial'],
nro_fet=liq['receptor'].get('nroFET'),
nro_socio=liq['receptor'].get('nroSocio'),
situacion_iva=liq['receptor']['situacionIVA'],
domicilio=liq['receptor']['domicilio'],
iibb=liq['receptor'].get('iibb'),
),
control=liq['datosOperacion'].get('control'),
nro_interno=liq['datosOperacion'].get('nroInterno'),
condicion_venta=liq['datosOperacion'].get('condicionVenta'),
variedad_tabaco=liq['datosOperacion']['variedadTabaco'],
puerta=liq['datosOperacion'].get('puerta'),
nro_tarjeta=liq['datosOperacion'].get('nroTarjeta'),
horas=liq['datosOperacion'].get('horas'),
cod_provincia_origen_tabaco=liq['datosOperacion'].get('codProvinciaOrigenTabaco'),
tipo_compra=liq['datosOperacion'].get('tipoCompra'),
peso_total_fardos_kg=liq['detalleOperacion']['pesoTotalFardosKg'],
cantidad_total_fardos=liq['detalleOperacion']['cantidadTotalFardos'],
romaneos=[],
alicuota_iva=liq['totalesOperacion']['alicuotaIVA'],
importe_iva=liq['totalesOperacion']['importeIVA'],
importe_neto=liq['totalesOperacion']['importeNeto'],
subtotal=liq['totalesOperacion']['subtotal'],
total_retenciones=liq['totalesOperacion']['totalRetenciones'],
total_tributos=liq['totalesOperacion']['totalTributos'],
total=liq['totalesOperacion']['total'],
retenciones=[],
tributos=[],
cae_ajustado=liq.get("caeAjustado"),
pdf=liq.get('pdf'),
)
for romaneo in liq['detalleOperacion'].get('romaneo', []):
self.params_out['romaneos'].append(dict(
fecha_romaneo=romaneo['fechaRomaneo'],
nro_romaneo=romaneo['nroRomaneo'],
detalle_clase=[dict(
cantidad_fardos=det['cantidadFardos'],
cod_clase=det['codClase'],
importe=det['importe'],
peso_fardos_kg=det['pesoFardosKg'],
precio_x_kg_fardo=det['precioXKgFardo'],
) for det in romaneo['detalleClase']],
))
for ret in liq.get('retencion', []):
self.params_out['retenciones'].append(dict(
retencion_codigo=ret['codigo'],
retencion_importe=ret['importe'],
))
for trib in liq.get('tributo',[]):
self.params_out['tributos'].append(dict(
tributo_descripcion=trib.get('descripcion', ""),
tributo_base_imponible=trib['baseImponible'],
tributo_alicuota=trib['alicuota'],
tributo_codigo=trib['codigo'],
tributo_importe=trib['importe'],
))
if DEBUG:
import pprint
pprint.pprint(self.params_out)
self.params_out['errores'] = self.errores
|
[
"def",
"AnalizarLiquidacion",
"(",
"self",
",",
"liq",
")",
":",
"# proceso los datos básicos de la liquidación (devuelto por consultar):",
"if",
"liq",
":",
"cab",
"=",
"liq",
"[",
"'cabecera'",
"]",
"self",
".",
"CAE",
"=",
"str",
"(",
"cab",
"[",
"'cae'",
"]",
")",
"self",
".",
"FechaLiquidacion",
"=",
"cab",
"[",
"'fechaLiquidacion'",
"]",
"self",
".",
"NroComprobante",
"=",
"cab",
"[",
"'nroComprobante'",
"]",
"tot",
"=",
"liq",
"[",
"'totalesOperacion'",
"]",
"self",
".",
"AlicuotaIVA",
"=",
"tot",
"[",
"'alicuotaIVA'",
"]",
"self",
".",
"ImporteNeto",
"=",
"tot",
"[",
"'importeNeto'",
"]",
"self",
".",
"ImporteIVA",
"=",
"tot",
"[",
"'importeIVA'",
"]",
"self",
".",
"Subtotal",
"=",
"tot",
"[",
"'subtotal'",
"]",
"self",
".",
"TotalRetenciones",
"=",
"tot",
"[",
"'totalRetenciones'",
"]",
"self",
".",
"TotalTributos",
"=",
"tot",
"[",
"'totalTributos'",
"]",
"self",
".",
"Total",
"=",
"tot",
"[",
"'total'",
"]",
"# parámetros de salida:",
"self",
".",
"params_out",
"=",
"dict",
"(",
"tipo_cbte",
"=",
"liq",
"[",
"'cabecera'",
"]",
"[",
"'tipoComprobante'",
"]",
",",
"pto_vta",
"=",
"liq",
"[",
"'cabecera'",
"]",
"[",
"'puntoVenta'",
"]",
",",
"nro_cbte",
"=",
"liq",
"[",
"'cabecera'",
"]",
"[",
"'nroComprobante'",
"]",
",",
"fecha",
"=",
"liq",
"[",
"'cabecera'",
"]",
"[",
"'fechaLiquidacion'",
"]",
",",
"cod_deposito_acopio",
"=",
"liq",
"[",
"'cabecera'",
"]",
"[",
"'codDepositoAcopio'",
"]",
",",
"cae",
"=",
"str",
"(",
"liq",
"[",
"'cabecera'",
"]",
"[",
"'cae'",
"]",
")",
",",
"domicilio_punto_venta",
"=",
"liq",
"[",
"'cabecera'",
"]",
"[",
"'domicilioPuntoVenta'",
"]",
",",
"domicilio_deposito_acopio",
"=",
"liq",
"[",
"'cabecera'",
"]",
"[",
"'domicilioDepositoAcopio'",
"]",
",",
"emisor",
"=",
"dict",
"(",
"cuit",
"=",
"liq",
"[",
"'emisor'",
"]",
"[",
"'cuit'",
"]",
",",
"razon_social",
"=",
"liq",
"[",
"'emisor'",
"]",
"[",
"'razonSocial'",
"]",
",",
"situacion_iva",
"=",
"liq",
"[",
"'emisor'",
"]",
"[",
"'situacionIVA'",
"]",
",",
"domicilio",
"=",
"liq",
"[",
"'emisor'",
"]",
"[",
"'domicilio'",
"]",
",",
"fecha_inicio_actividad",
"=",
"liq",
"[",
"'emisor'",
"]",
"[",
"'fechaInicioActividad'",
"]",
",",
")",
",",
"receptor",
"=",
"dict",
"(",
"cuit",
"=",
"liq",
"[",
"'receptor'",
"]",
"[",
"'cuit'",
"]",
",",
"razon_social",
"=",
"liq",
"[",
"'receptor'",
"]",
"[",
"'razonSocial'",
"]",
",",
"nro_fet",
"=",
"liq",
"[",
"'receptor'",
"]",
".",
"get",
"(",
"'nroFET'",
")",
",",
"nro_socio",
"=",
"liq",
"[",
"'receptor'",
"]",
".",
"get",
"(",
"'nroSocio'",
")",
",",
"situacion_iva",
"=",
"liq",
"[",
"'receptor'",
"]",
"[",
"'situacionIVA'",
"]",
",",
"domicilio",
"=",
"liq",
"[",
"'receptor'",
"]",
"[",
"'domicilio'",
"]",
",",
"iibb",
"=",
"liq",
"[",
"'receptor'",
"]",
".",
"get",
"(",
"'iibb'",
")",
",",
")",
",",
"control",
"=",
"liq",
"[",
"'datosOperacion'",
"]",
".",
"get",
"(",
"'control'",
")",
",",
"nro_interno",
"=",
"liq",
"[",
"'datosOperacion'",
"]",
".",
"get",
"(",
"'nroInterno'",
")",
",",
"condicion_venta",
"=",
"liq",
"[",
"'datosOperacion'",
"]",
".",
"get",
"(",
"'condicionVenta'",
")",
",",
"variedad_tabaco",
"=",
"liq",
"[",
"'datosOperacion'",
"]",
"[",
"'variedadTabaco'",
"]",
",",
"puerta",
"=",
"liq",
"[",
"'datosOperacion'",
"]",
".",
"get",
"(",
"'puerta'",
")",
",",
"nro_tarjeta",
"=",
"liq",
"[",
"'datosOperacion'",
"]",
".",
"get",
"(",
"'nroTarjeta'",
")",
",",
"horas",
"=",
"liq",
"[",
"'datosOperacion'",
"]",
".",
"get",
"(",
"'horas'",
")",
",",
"cod_provincia_origen_tabaco",
"=",
"liq",
"[",
"'datosOperacion'",
"]",
".",
"get",
"(",
"'codProvinciaOrigenTabaco'",
")",
",",
"tipo_compra",
"=",
"liq",
"[",
"'datosOperacion'",
"]",
".",
"get",
"(",
"'tipoCompra'",
")",
",",
"peso_total_fardos_kg",
"=",
"liq",
"[",
"'detalleOperacion'",
"]",
"[",
"'pesoTotalFardosKg'",
"]",
",",
"cantidad_total_fardos",
"=",
"liq",
"[",
"'detalleOperacion'",
"]",
"[",
"'cantidadTotalFardos'",
"]",
",",
"romaneos",
"=",
"[",
"]",
",",
"alicuota_iva",
"=",
"liq",
"[",
"'totalesOperacion'",
"]",
"[",
"'alicuotaIVA'",
"]",
",",
"importe_iva",
"=",
"liq",
"[",
"'totalesOperacion'",
"]",
"[",
"'importeIVA'",
"]",
",",
"importe_neto",
"=",
"liq",
"[",
"'totalesOperacion'",
"]",
"[",
"'importeNeto'",
"]",
",",
"subtotal",
"=",
"liq",
"[",
"'totalesOperacion'",
"]",
"[",
"'subtotal'",
"]",
",",
"total_retenciones",
"=",
"liq",
"[",
"'totalesOperacion'",
"]",
"[",
"'totalRetenciones'",
"]",
",",
"total_tributos",
"=",
"liq",
"[",
"'totalesOperacion'",
"]",
"[",
"'totalTributos'",
"]",
",",
"total",
"=",
"liq",
"[",
"'totalesOperacion'",
"]",
"[",
"'total'",
"]",
",",
"retenciones",
"=",
"[",
"]",
",",
"tributos",
"=",
"[",
"]",
",",
"cae_ajustado",
"=",
"liq",
".",
"get",
"(",
"\"caeAjustado\"",
")",
",",
"pdf",
"=",
"liq",
".",
"get",
"(",
"'pdf'",
")",
",",
")",
"for",
"romaneo",
"in",
"liq",
"[",
"'detalleOperacion'",
"]",
".",
"get",
"(",
"'romaneo'",
",",
"[",
"]",
")",
":",
"self",
".",
"params_out",
"[",
"'romaneos'",
"]",
".",
"append",
"(",
"dict",
"(",
"fecha_romaneo",
"=",
"romaneo",
"[",
"'fechaRomaneo'",
"]",
",",
"nro_romaneo",
"=",
"romaneo",
"[",
"'nroRomaneo'",
"]",
",",
"detalle_clase",
"=",
"[",
"dict",
"(",
"cantidad_fardos",
"=",
"det",
"[",
"'cantidadFardos'",
"]",
",",
"cod_clase",
"=",
"det",
"[",
"'codClase'",
"]",
",",
"importe",
"=",
"det",
"[",
"'importe'",
"]",
",",
"peso_fardos_kg",
"=",
"det",
"[",
"'pesoFardosKg'",
"]",
",",
"precio_x_kg_fardo",
"=",
"det",
"[",
"'precioXKgFardo'",
"]",
",",
")",
"for",
"det",
"in",
"romaneo",
"[",
"'detalleClase'",
"]",
"]",
",",
")",
")",
"for",
"ret",
"in",
"liq",
".",
"get",
"(",
"'retencion'",
",",
"[",
"]",
")",
":",
"self",
".",
"params_out",
"[",
"'retenciones'",
"]",
".",
"append",
"(",
"dict",
"(",
"retencion_codigo",
"=",
"ret",
"[",
"'codigo'",
"]",
",",
"retencion_importe",
"=",
"ret",
"[",
"'importe'",
"]",
",",
")",
")",
"for",
"trib",
"in",
"liq",
".",
"get",
"(",
"'tributo'",
",",
"[",
"]",
")",
":",
"self",
".",
"params_out",
"[",
"'tributos'",
"]",
".",
"append",
"(",
"dict",
"(",
"tributo_descripcion",
"=",
"trib",
".",
"get",
"(",
"'descripcion'",
",",
"\"\"",
")",
",",
"tributo_base_imponible",
"=",
"trib",
"[",
"'baseImponible'",
"]",
",",
"tributo_alicuota",
"=",
"trib",
"[",
"'alicuota'",
"]",
",",
"tributo_codigo",
"=",
"trib",
"[",
"'codigo'",
"]",
",",
"tributo_importe",
"=",
"trib",
"[",
"'importe'",
"]",
",",
")",
")",
"if",
"DEBUG",
":",
"import",
"pprint",
"pprint",
".",
"pprint",
"(",
"self",
".",
"params_out",
")",
"self",
".",
"params_out",
"[",
"'errores'",
"]",
"=",
"self",
".",
"errores"
] |
Método interno para analizar la respuesta de AFIP
|
[
"Método",
"interno",
"para",
"analizar",
"la",
"respuesta",
"de",
"AFIP"
] |
python
|
train
|
sibirrer/lenstronomy
|
lenstronomy/LensModel/Optimizer/particle_swarm.py
|
https://github.com/sibirrer/lenstronomy/blob/4edb100a4f3f4fdc4fac9b0032d2b0283d0aa1d6/lenstronomy/LensModel/Optimizer/particle_swarm.py#L167-L174
|
def create(cls, paramCount):
"""
Creates a new particle without position, velocity and -inf as fitness
"""
return Particle(numpy.array([[]]*paramCount),
numpy.array([[]]*paramCount),
-numpy.Inf)
|
[
"def",
"create",
"(",
"cls",
",",
"paramCount",
")",
":",
"return",
"Particle",
"(",
"numpy",
".",
"array",
"(",
"[",
"[",
"]",
"]",
"*",
"paramCount",
")",
",",
"numpy",
".",
"array",
"(",
"[",
"[",
"]",
"]",
"*",
"paramCount",
")",
",",
"-",
"numpy",
".",
"Inf",
")"
] |
Creates a new particle without position, velocity and -inf as fitness
|
[
"Creates",
"a",
"new",
"particle",
"without",
"position",
"velocity",
"and",
"-",
"inf",
"as",
"fitness"
] |
python
|
train
|
pahaz/sshtunnel
|
sshtunnel.py
|
https://github.com/pahaz/sshtunnel/blob/66a923e4c6c8e41b8348420523fbf5ddfd53176c/sshtunnel.py#L1176-L1195
|
def _process_deprecated(attrib, deprecated_attrib, kwargs):
"""
Processes optional deprecate arguments
"""
if deprecated_attrib not in DEPRECATIONS:
raise ValueError('{0} not included in deprecations list'
.format(deprecated_attrib))
if deprecated_attrib in kwargs:
warnings.warn("'{0}' is DEPRECATED use '{1}' instead"
.format(deprecated_attrib,
DEPRECATIONS[deprecated_attrib]),
DeprecationWarning)
if attrib:
raise ValueError("You can't use both '{0}' and '{1}'. "
"Please only use one of them"
.format(deprecated_attrib,
DEPRECATIONS[deprecated_attrib]))
else:
return kwargs.pop(deprecated_attrib)
return attrib
|
[
"def",
"_process_deprecated",
"(",
"attrib",
",",
"deprecated_attrib",
",",
"kwargs",
")",
":",
"if",
"deprecated_attrib",
"not",
"in",
"DEPRECATIONS",
":",
"raise",
"ValueError",
"(",
"'{0} not included in deprecations list'",
".",
"format",
"(",
"deprecated_attrib",
")",
")",
"if",
"deprecated_attrib",
"in",
"kwargs",
":",
"warnings",
".",
"warn",
"(",
"\"'{0}' is DEPRECATED use '{1}' instead\"",
".",
"format",
"(",
"deprecated_attrib",
",",
"DEPRECATIONS",
"[",
"deprecated_attrib",
"]",
")",
",",
"DeprecationWarning",
")",
"if",
"attrib",
":",
"raise",
"ValueError",
"(",
"\"You can't use both '{0}' and '{1}'. \"",
"\"Please only use one of them\"",
".",
"format",
"(",
"deprecated_attrib",
",",
"DEPRECATIONS",
"[",
"deprecated_attrib",
"]",
")",
")",
"else",
":",
"return",
"kwargs",
".",
"pop",
"(",
"deprecated_attrib",
")",
"return",
"attrib"
] |
Processes optional deprecated arguments
|
[
"Processes",
"optional",
"deprecate",
"arguments"
] |
python
|
train
|
ejhigson/nestcheck
|
nestcheck/ns_run_utils.py
|
https://github.com/ejhigson/nestcheck/blob/29151c314deb89746fd674f27f6ce54b77603189/nestcheck/ns_run_utils.py#L218-L286
|
def combine_threads(threads, assert_birth_point=False):
"""
Combine list of threads into a single ns run.
This is different to combining runs as repeated threads are allowed, and as
some threads can start from log-likelihood contours on which no dead
point in the run is present.
Note that if all the thread labels are not unique and in ascending order,
the output will fail check_ns_run. However provided the thread labels are
not used it will work ok for calculations based on nlive, logl and theta.
Parameters
----------
threads: list of dicts
List of nested sampling run dicts, each representing a single thread.
assert_birth_point: bool, optional
Whether or not to assert there is exactly one point present in the run
with the log-likelihood at which each point was born. This is not true
for bootstrap resamples of runs, where birth points may be repeated or
not present at all.
Returns
-------
run: dict
Nested sampling run dict (see data_processing module docstring for more
details).
"""
thread_min_max = np.vstack([td['thread_min_max'] for td in threads])
assert len(threads) == thread_min_max.shape[0]
# construct samples array from the threads, including an updated nlive
samples_temp = np.vstack([array_given_run(thread) for thread in threads])
samples_temp = samples_temp[np.argsort(samples_temp[:, 0])]
# update the changes in live points column for threads which start part way
# through the run. These are only present in dynamic nested sampling.
logl_starts = thread_min_max[:, 0]
state = np.random.get_state() # save random state
    np.random.seed(0)  # seed to make sure any random assignment is reproducible
for logl_start in logl_starts[logl_starts != -np.inf]:
ind = np.where(samples_temp[:, 0] == logl_start)[0]
if assert_birth_point:
assert ind.shape == (1,), \
'No unique birth point! ' + str(ind.shape)
if ind.shape == (1,):
# If the point at which this thread started is present exactly
# once in this bootstrap replication:
samples_temp[ind[0], 2] += 1
elif ind.shape == (0,):
# If the point with the likelihood at which the thread started
# is not present in this particular bootstrap replication,
# approximate it with the point with the nearest likelihood.
ind_closest = np.argmin(np.abs(samples_temp[:, 0] - logl_start))
samples_temp[ind_closest, 2] += 1
else:
# If the point at which this thread started is present multiple
# times in this bootstrap replication, select one at random to
# increment nlive on. This avoids any systematic bias from e.g.
# always choosing the first point.
samples_temp[np.random.choice(ind), 2] += 1
np.random.set_state(state)
# make run
ns_run = dict_given_run_array(samples_temp, thread_min_max)
try:
check_ns_run_threads(ns_run)
except AssertionError:
# If the threads are not valid (e.g. for bootstrap resamples) then
# set them to None so they can't be accidentally used
ns_run['thread_labels'] = None
ns_run['thread_min_max'] = None
return ns_run
|
[
"def",
"combine_threads",
"(",
"threads",
",",
"assert_birth_point",
"=",
"False",
")",
":",
"thread_min_max",
"=",
"np",
".",
"vstack",
"(",
"[",
"td",
"[",
"'thread_min_max'",
"]",
"for",
"td",
"in",
"threads",
"]",
")",
"assert",
"len",
"(",
"threads",
")",
"==",
"thread_min_max",
".",
"shape",
"[",
"0",
"]",
"# construct samples array from the threads, including an updated nlive",
"samples_temp",
"=",
"np",
".",
"vstack",
"(",
"[",
"array_given_run",
"(",
"thread",
")",
"for",
"thread",
"in",
"threads",
"]",
")",
"samples_temp",
"=",
"samples_temp",
"[",
"np",
".",
"argsort",
"(",
"samples_temp",
"[",
":",
",",
"0",
"]",
")",
"]",
"# update the changes in live points column for threads which start part way",
"# through the run. These are only present in dynamic nested sampling.",
"logl_starts",
"=",
"thread_min_max",
"[",
":",
",",
"0",
"]",
"state",
"=",
"np",
".",
"random",
".",
"get_state",
"(",
")",
"# save random state",
"np",
".",
"random",
".",
"seed",
"(",
"0",
")",
"# seed to make sure any random assignment is repoducable",
"for",
"logl_start",
"in",
"logl_starts",
"[",
"logl_starts",
"!=",
"-",
"np",
".",
"inf",
"]",
":",
"ind",
"=",
"np",
".",
"where",
"(",
"samples_temp",
"[",
":",
",",
"0",
"]",
"==",
"logl_start",
")",
"[",
"0",
"]",
"if",
"assert_birth_point",
":",
"assert",
"ind",
".",
"shape",
"==",
"(",
"1",
",",
")",
",",
"'No unique birth point! '",
"+",
"str",
"(",
"ind",
".",
"shape",
")",
"if",
"ind",
".",
"shape",
"==",
"(",
"1",
",",
")",
":",
"# If the point at which this thread started is present exactly",
"# once in this bootstrap replication:",
"samples_temp",
"[",
"ind",
"[",
"0",
"]",
",",
"2",
"]",
"+=",
"1",
"elif",
"ind",
".",
"shape",
"==",
"(",
"0",
",",
")",
":",
"# If the point with the likelihood at which the thread started",
"# is not present in this particular bootstrap replication,",
"# approximate it with the point with the nearest likelihood.",
"ind_closest",
"=",
"np",
".",
"argmin",
"(",
"np",
".",
"abs",
"(",
"samples_temp",
"[",
":",
",",
"0",
"]",
"-",
"logl_start",
")",
")",
"samples_temp",
"[",
"ind_closest",
",",
"2",
"]",
"+=",
"1",
"else",
":",
"# If the point at which this thread started is present multiple",
"# times in this bootstrap replication, select one at random to",
"# increment nlive on. This avoids any systematic bias from e.g.",
"# always choosing the first point.",
"samples_temp",
"[",
"np",
".",
"random",
".",
"choice",
"(",
"ind",
")",
",",
"2",
"]",
"+=",
"1",
"np",
".",
"random",
".",
"set_state",
"(",
"state",
")",
"# make run",
"ns_run",
"=",
"dict_given_run_array",
"(",
"samples_temp",
",",
"thread_min_max",
")",
"try",
":",
"check_ns_run_threads",
"(",
"ns_run",
")",
"except",
"AssertionError",
":",
"# If the threads are not valid (e.g. for bootstrap resamples) then",
"# set them to None so they can't be accidentally used",
"ns_run",
"[",
"'thread_labels'",
"]",
"=",
"None",
"ns_run",
"[",
"'thread_min_max'",
"]",
"=",
"None",
"return",
"ns_run"
] |
Combine list of threads into a single ns run.
This is different to combining runs as repeated threads are allowed, and as
some threads can start from log-likelihood contours on which no dead
point in the run is present.
Note that if all the thread labels are not unique and in ascending order,
the output will fail check_ns_run. However provided the thread labels are
not used it will work ok for calculations based on nlive, logl and theta.
Parameters
----------
threads: list of dicts
List of nested sampling run dicts, each representing a single thread.
assert_birth_point: bool, optional
Whether or not to assert there is exactly one point present in the run
with the log-likelihood at which each point was born. This is not true
for bootstrap resamples of runs, where birth points may be repeated or
not present at all.
Returns
-------
run: dict
Nested sampling run dict (see data_processing module docstring for more
details).
|
[
"Combine",
"list",
"of",
"threads",
"into",
"a",
"single",
"ns",
"run",
".",
"This",
"is",
"different",
"to",
"combining",
"runs",
"as",
"repeated",
"threads",
"are",
"allowed",
"and",
"as",
"some",
"threads",
"can",
"start",
"from",
"log",
"-",
"likelihood",
"contours",
"on",
"which",
"no",
"dead",
"point",
"in",
"the",
"run",
"is",
"present",
"."
] |
python
|
train
|
mitsei/dlkit
|
dlkit/services/authorization.py
|
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/services/authorization.py#L1155-L1163
|
def use_plenary_authorization_view(self):
"""Pass through to provider AuthorizationLookupSession.use_plenary_authorization_view"""
self._object_views['authorization'] = PLENARY
# self._get_provider_session('authorization_lookup_session') # To make sure the session is tracked
for session in self._get_provider_sessions():
try:
session.use_plenary_authorization_view()
except AttributeError:
pass
|
[
"def",
"use_plenary_authorization_view",
"(",
"self",
")",
":",
"self",
".",
"_object_views",
"[",
"'authorization'",
"]",
"=",
"PLENARY",
"# self._get_provider_session('authorization_lookup_session') # To make sure the session is tracked",
"for",
"session",
"in",
"self",
".",
"_get_provider_sessions",
"(",
")",
":",
"try",
":",
"session",
".",
"use_plenary_authorization_view",
"(",
")",
"except",
"AttributeError",
":",
"pass"
] |
Pass through to provider AuthorizationLookupSession.use_plenary_authorization_view
|
[
"Pass",
"through",
"to",
"provider",
"AuthorizationLookupSession",
".",
"use_plenary_authorization_view"
] |
python
|
train
|
cytoscape/py2cytoscape
|
py2cytoscape/cyrest/apply.py
|
https://github.com/cytoscape/py2cytoscape/blob/dd34de8d028f512314d0057168df7fef7c5d5195/py2cytoscape/cyrest/apply.py#L132-L143
|
def getLayout(self, algorithmName, verbose=None):
"""
Returns all the details, including names, parameters, and compatible column types for the Layout algorithm specified by the `algorithmName` parameter.
:param algorithmName: Name of the Layout algorithm
:param verbose: print more
:returns: 200: successful operation
"""
response=api(url=self.___url+'apply/layouts/'+str(algorithmName)+'', method="H", verbose=verbose, parse_params=False)
return response
|
[
"def",
"getLayout",
"(",
"self",
",",
"algorithmName",
",",
"verbose",
"=",
"None",
")",
":",
"response",
"=",
"api",
"(",
"url",
"=",
"self",
".",
"___url",
"+",
"'apply/layouts/'",
"+",
"str",
"(",
"algorithmName",
")",
"+",
"''",
",",
"method",
"=",
"\"H\"",
",",
"verbose",
"=",
"verbose",
",",
"parse_params",
"=",
"False",
")",
"return",
"response"
] |
Returns all the details, including names, parameters, and compatible column types for the Layout algorithm specified by the `algorithmName` parameter.
:param algorithmName: Name of the Layout algorithm
:param verbose: print more
:returns: 200: successful operation
|
[
"Returns",
"all",
"the",
"details",
"including",
"names",
"parameters",
"and",
"compatible",
"column",
"types",
"for",
"the",
"Layout",
"algorithm",
"specified",
"by",
"the",
"algorithmName",
"parameter",
"."
] |
python
|
train
|
saltstack/salt
|
salt/states/azurearm_compute.py
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/azurearm_compute.py#L266-L326
|
def availability_set_absent(name, resource_group, connection_auth=None):
'''
.. versionadded:: 2019.2.0
Ensure an availability set does not exist in a resource group.
:param name:
Name of the availability set.
:param resource_group:
Name of the resource group containing the availability set.
:param connection_auth:
A dict with subscription and authentication parameters to be used in connecting to the
Azure Resource Manager API.
'''
ret = {
'name': name,
'result': False,
'comment': '',
'changes': {}
}
if not isinstance(connection_auth, dict):
ret['comment'] = 'Connection information must be specified via connection_auth dictionary!'
return ret
aset = __salt__['azurearm_compute.availability_set_get'](
name,
resource_group,
azurearm_log_level='info',
**connection_auth
)
if 'error' in aset:
ret['result'] = True
ret['comment'] = 'Availability set {0} was not found.'.format(name)
return ret
elif __opts__['test']:
ret['comment'] = 'Availability set {0} would be deleted.'.format(name)
ret['result'] = None
ret['changes'] = {
'old': aset,
'new': {},
}
return ret
deleted = __salt__['azurearm_compute.availability_set_delete'](name, resource_group, **connection_auth)
if deleted:
ret['result'] = True
ret['comment'] = 'Availability set {0} has been deleted.'.format(name)
ret['changes'] = {
'old': aset,
'new': {}
}
return ret
ret['comment'] = 'Failed to delete availability set {0}!'.format(name)
return ret
|
[
"def",
"availability_set_absent",
"(",
"name",
",",
"resource_group",
",",
"connection_auth",
"=",
"None",
")",
":",
"ret",
"=",
"{",
"'name'",
":",
"name",
",",
"'result'",
":",
"False",
",",
"'comment'",
":",
"''",
",",
"'changes'",
":",
"{",
"}",
"}",
"if",
"not",
"isinstance",
"(",
"connection_auth",
",",
"dict",
")",
":",
"ret",
"[",
"'comment'",
"]",
"=",
"'Connection information must be specified via connection_auth dictionary!'",
"return",
"ret",
"aset",
"=",
"__salt__",
"[",
"'azurearm_compute.availability_set_get'",
"]",
"(",
"name",
",",
"resource_group",
",",
"azurearm_log_level",
"=",
"'info'",
",",
"*",
"*",
"connection_auth",
")",
"if",
"'error'",
"in",
"aset",
":",
"ret",
"[",
"'result'",
"]",
"=",
"True",
"ret",
"[",
"'comment'",
"]",
"=",
"'Availability set {0} was not found.'",
".",
"format",
"(",
"name",
")",
"return",
"ret",
"elif",
"__opts__",
"[",
"'test'",
"]",
":",
"ret",
"[",
"'comment'",
"]",
"=",
"'Availability set {0} would be deleted.'",
".",
"format",
"(",
"name",
")",
"ret",
"[",
"'result'",
"]",
"=",
"None",
"ret",
"[",
"'changes'",
"]",
"=",
"{",
"'old'",
":",
"aset",
",",
"'new'",
":",
"{",
"}",
",",
"}",
"return",
"ret",
"deleted",
"=",
"__salt__",
"[",
"'azurearm_compute.availability_set_delete'",
"]",
"(",
"name",
",",
"resource_group",
",",
"*",
"*",
"connection_auth",
")",
"if",
"deleted",
":",
"ret",
"[",
"'result'",
"]",
"=",
"True",
"ret",
"[",
"'comment'",
"]",
"=",
"'Availability set {0} has been deleted.'",
".",
"format",
"(",
"name",
")",
"ret",
"[",
"'changes'",
"]",
"=",
"{",
"'old'",
":",
"aset",
",",
"'new'",
":",
"{",
"}",
"}",
"return",
"ret",
"ret",
"[",
"'comment'",
"]",
"=",
"'Failed to delete availability set {0}!'",
".",
"format",
"(",
"name",
")",
"return",
"ret"
] |
.. versionadded:: 2019.2.0
Ensure an availability set does not exist in a resource group.
:param name:
Name of the availability set.
:param resource_group:
Name of the resource group containing the availability set.
:param connection_auth:
A dict with subscription and authentication parameters to be used in connecting to the
Azure Resource Manager API.
|
[
"..",
"versionadded",
"::",
"2019",
".",
"2",
".",
"0"
] |
python
|
train
|
monarch-initiative/dipper
|
dipper/sources/Bgee.py
|
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/Bgee.py#L157-L174
|
def parse(self, limit=None):
"""
Given the input taxa, expects files in the raw directory
with the name {tax_id}_anat_entity_all_data_Pan_troglodytes.tsv.zip
:param limit: int Limit to top ranked anatomy associations per group
:return: None
"""
files_to_download, ftp = self._get_file_list(
self.files['anat_entity']['path'],
self.files['anat_entity']['pattern'])
for dlname in files_to_download:
localfile = '/'.join((self.rawdir, dlname))
with gzip.open(localfile, 'rt', encoding='ISO-8859-1') as fh:
LOG.info("Processing %s", localfile)
self._parse_gene_anatomy(fh, limit)
return
|
[
"def",
"parse",
"(",
"self",
",",
"limit",
"=",
"None",
")",
":",
"files_to_download",
",",
"ftp",
"=",
"self",
".",
"_get_file_list",
"(",
"self",
".",
"files",
"[",
"'anat_entity'",
"]",
"[",
"'path'",
"]",
",",
"self",
".",
"files",
"[",
"'anat_entity'",
"]",
"[",
"'pattern'",
"]",
")",
"for",
"dlname",
"in",
"files_to_download",
":",
"localfile",
"=",
"'/'",
".",
"join",
"(",
"(",
"self",
".",
"rawdir",
",",
"dlname",
")",
")",
"with",
"gzip",
".",
"open",
"(",
"localfile",
",",
"'rt'",
",",
"encoding",
"=",
"'ISO-8859-1'",
")",
"as",
"fh",
":",
"LOG",
".",
"info",
"(",
"\"Processing %s\"",
",",
"localfile",
")",
"self",
".",
"_parse_gene_anatomy",
"(",
"fh",
",",
"limit",
")",
"return"
] |
Given the input taxa, expects files in the raw directory
with the name {tax_id}_anat_entity_all_data_Pan_troglodytes.tsv.zip
:param limit: int Limit to top ranked anatomy associations per group
:return: None
|
[
"Given",
"the",
"input",
"taxa",
"expects",
"files",
"in",
"the",
"raw",
"directory",
"with",
"the",
"name",
"{",
"tax_id",
"}",
"_anat_entity_all_data_Pan_troglodytes",
".",
"tsv",
".",
"zip"
] |
python
|
train
|
dwavesystems/dwave_networkx
|
dwave_networkx/drawing/pegasus_layout.py
|
https://github.com/dwavesystems/dwave_networkx/blob/9ea1223ddbc7e86db2f90b8b23e250e6642c3d68/dwave_networkx/drawing/pegasus_layout.py#L234-L287
|
def draw_pegasus_embedding(G, *args, **kwargs):
"""Draws an embedding onto the pegasus graph G, according to layout.
If interaction_edges is not None, then only display the couplers in that
    list. If embedded_graph is not None, then only display the couplers between
chains with intended couplings according to embedded_graph.
Parameters
----------
G : NetworkX graph
Should be a Pegasus graph or a subgraph of a Pegasus graph.
This should be the product of dwave_networkx.pegasus_graph
emb : dict
A dict of chains associated with each node in G. Should be
of the form {node: chain, ...}. Chains should be iterables
of qubit labels (qubits are nodes in G).
embedded_graph : NetworkX graph (optional, default None)
A graph which contains all keys of emb as nodes. If specified,
edges of G will be considered interactions if and only if they
exist between two chains of emb if their keys are connected by
an edge in embedded_graph
interaction_edges : list (optional, default None)
A list of edges which will be used as interactions.
show_labels: boolean (optional, default False)
If show_labels is True, then each chain in emb is labelled with its key.
chain_color : dict (optional, default None)
A dict of colors associated with each key in emb. Should be
of the form {node: rgba_color, ...}. Colors should be length-4
tuples of floats between 0 and 1 inclusive. If chain_color is None,
each chain will be assigned a different color.
unused_color : tuple (optional, default (0.9,0.9,0.9,1.0))
The color to use for nodes and edges of G which are not involved
in chains, and edges which are neither chain edges nor interactions.
If unused_color is None, these nodes and edges will not be shown at all.
crosses: boolean (optional, default False)
If crosses is True, K_4,4 subgraphs are shown in a cross
rather than L configuration. Ignored if G was defined with
nice_coordinates=True.
kwargs : optional keywords
See networkx.draw_networkx() for a description of optional keywords,
with the exception of the `pos` parameter which is not used by this
function. If `linear_biases` or `quadratic_biases` are provided,
any provided `node_color` or `edge_color` arguments are ignored.
"""
crosses = kwargs.pop("crosses", False)
draw_embedding(G, pegasus_layout(G, crosses=crosses), *args, **kwargs)
|
[
"def",
"draw_pegasus_embedding",
"(",
"G",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"crosses",
"=",
"kwargs",
".",
"pop",
"(",
"\"crosses\"",
",",
"False",
")",
"draw_embedding",
"(",
"G",
",",
"pegasus_layout",
"(",
"G",
",",
"crosses",
"=",
"crosses",
")",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] |
Draws an embedding onto the pegasus graph G, according to layout.
If interaction_edges is not None, then only display the couplers in that
list. If embedded_graph is not None, then only display the couplers between
chains with intended couplings according to embedded_graph.
Parameters
----------
G : NetworkX graph
Should be a Pegasus graph or a subgraph of a Pegasus graph.
This should be the product of dwave_networkx.pegasus_graph
emb : dict
A dict of chains associated with each node in G. Should be
of the form {node: chain, ...}. Chains should be iterables
of qubit labels (qubits are nodes in G).
embedded_graph : NetworkX graph (optional, default None)
A graph which contains all keys of emb as nodes. If specified,
edges of G will be considered interactions if and only if they
exist between two chains of emb if their keys are connected by
an edge in embedded_graph
interaction_edges : list (optional, default None)
A list of edges which will be used as interactions.
show_labels: boolean (optional, default False)
If show_labels is True, then each chain in emb is labelled with its key.
chain_color : dict (optional, default None)
A dict of colors associated with each key in emb. Should be
of the form {node: rgba_color, ...}. Colors should be length-4
tuples of floats between 0 and 1 inclusive. If chain_color is None,
each chain will be assigned a different color.
unused_color : tuple (optional, default (0.9,0.9,0.9,1.0))
The color to use for nodes and edges of G which are not involved
in chains, and edges which are neither chain edges nor interactions.
If unused_color is None, these nodes and edges will not be shown at all.
crosses: boolean (optional, default False)
If crosses is True, K_4,4 subgraphs are shown in a cross
rather than L configuration. Ignored if G was defined with
nice_coordinates=True.
kwargs : optional keywords
See networkx.draw_networkx() for a description of optional keywords,
with the exception of the `pos` parameter which is not used by this
function. If `linear_biases` or `quadratic_biases` are provided,
any provided `node_color` or `edge_color` arguments are ignored.
|
[
"Draws",
"an",
"embedding",
"onto",
"the",
"pegasus",
"graph",
"G",
"according",
"to",
"layout",
"."
] |
python
|
train
|
airspeed-velocity/asv
|
asv/extern/asizeof.py
|
https://github.com/airspeed-velocity/asv/blob/d23bb8b74e8adacbfa3cf5724bda55fb39d56ba6/asv/extern/asizeof.py#L716-L724
|
def _len_dict(obj):
'''Dict length in items (estimate).
'''
n = len(obj) # active items
if n < 6: # ma_smalltable ...
n = 0 # ... in basicsize
else: # at least one unused
n = _power2(n + 1)
return n
|
[
"def",
"_len_dict",
"(",
"obj",
")",
":",
"n",
"=",
"len",
"(",
"obj",
")",
"# active items",
"if",
"n",
"<",
"6",
":",
"# ma_smalltable ...",
"n",
"=",
"0",
"# ... in basicsize",
"else",
":",
"# at least one unused",
"n",
"=",
"_power2",
"(",
"n",
"+",
"1",
")",
"return",
"n"
] |
Dict length in items (estimate).
|
[
"Dict",
"length",
"in",
"items",
"(",
"estimate",
")",
"."
] |
python
|
train
|
spyder-ide/spyder
|
spyder/utils/sourcecode.py
|
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/utils/sourcecode.py#L173-L195
|
def disambiguate_fname(files_path_list, filename):
    """Get tab title without ambiguity."""
fname = os.path.basename(filename)
same_name_files = get_same_name_files(files_path_list, fname)
if len(same_name_files) > 1:
compare_path = shortest_path(same_name_files)
if compare_path == filename:
same_name_files.remove(path_components(filename))
compare_path = shortest_path(same_name_files)
diff_path = differentiate_prefix(path_components(filename),
path_components(compare_path))
diff_path_length = len(diff_path)
path_component = path_components(diff_path)
if (diff_path_length > 20 and len(path_component) > 2):
if path_component[0] != '/' and path_component[0] != '':
path_component = [path_component[0], '...',
path_component[-1]]
else:
path_component = [path_component[2], '...',
path_component[-1]]
diff_path = os.path.join(*path_component)
fname = fname + " - " + diff_path
return fname
|
[
"def",
"disambiguate_fname",
"(",
"files_path_list",
",",
"filename",
")",
":",
"fname",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"filename",
")",
"same_name_files",
"=",
"get_same_name_files",
"(",
"files_path_list",
",",
"fname",
")",
"if",
"len",
"(",
"same_name_files",
")",
">",
"1",
":",
"compare_path",
"=",
"shortest_path",
"(",
"same_name_files",
")",
"if",
"compare_path",
"==",
"filename",
":",
"same_name_files",
".",
"remove",
"(",
"path_components",
"(",
"filename",
")",
")",
"compare_path",
"=",
"shortest_path",
"(",
"same_name_files",
")",
"diff_path",
"=",
"differentiate_prefix",
"(",
"path_components",
"(",
"filename",
")",
",",
"path_components",
"(",
"compare_path",
")",
")",
"diff_path_length",
"=",
"len",
"(",
"diff_path",
")",
"path_component",
"=",
"path_components",
"(",
"diff_path",
")",
"if",
"(",
"diff_path_length",
">",
"20",
"and",
"len",
"(",
"path_component",
")",
">",
"2",
")",
":",
"if",
"path_component",
"[",
"0",
"]",
"!=",
"'/'",
"and",
"path_component",
"[",
"0",
"]",
"!=",
"''",
":",
"path_component",
"=",
"[",
"path_component",
"[",
"0",
"]",
",",
"'...'",
",",
"path_component",
"[",
"-",
"1",
"]",
"]",
"else",
":",
"path_component",
"=",
"[",
"path_component",
"[",
"2",
"]",
",",
"'...'",
",",
"path_component",
"[",
"-",
"1",
"]",
"]",
"diff_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"*",
"path_component",
")",
"fname",
"=",
"fname",
"+",
"\" - \"",
"+",
"diff_path",
"return",
"fname"
] |
Get tab title without ambiguity.
|
[
"Get",
"tab",
"title",
"without",
"ambiguation",
"."
] |
python
|
train
|
lsst-sqre/documenteer
|
documenteer/sphinxconfig/stackconf.py
|
https://github.com/lsst-sqre/documenteer/blob/75f02901a80042b28d074df1cc1dca32eb8e38c8/documenteer/sphinxconfig/stackconf.py#L533-L647
|
def build_pipelines_lsst_io_configs(*, project_name, copyright=None):
"""Build a `dict` of Sphinx configurations that populate the ``conf.py``
of the main pipelines_lsst_io Sphinx project for LSST Science Pipelines
documentation.
The ``conf.py`` file can ingest these configurations via::
from documenteer.sphinxconfig.stackconf import \
build_pipelines_lsst_io_configs
_g = globals()
_g.update(build_pipelines_lsst_io_configs(
project_name='LSST Science Pipelines')
You can subsequently customize the Sphinx configuration by directly
assigning global variables, as usual in a Sphinx ``config.py``, e.g.::
copyright = '2016 Association of Universities for '
'Research in Astronomy, Inc.'
Parameters
----------
project_name : `str`
Name of the project
copyright : `str`, optional
Copyright statement. Do not include the 'Copyright (c)' string; it'll
be added automatically.
Returns
-------
c : dict
Dictionary of configurations that should be added to the ``conf.py``
global namespace via::
_g = global()
_g.update(c)
"""
# Work around Sphinx bug related to large and highly-nested source files
sys.setrecursionlimit(2000)
c = {}
c = _insert_common_sphinx_configs(
c,
project_name=project_name)
# HTML theme
c = _insert_html_configs(
c,
project_name=project_name,
short_project_name=project_name)
# Sphinx extension modules
c = _insert_extensions(c)
# Intersphinx configuration
c = _insert_intersphinx_mapping(c)
# Breathe extension configuration
# FIXME configure this for multiple sites
# Automodapi and numpydoc configurations
c = _insert_automodapi_configs(c)
# Matplotlib configurations
c = _insert_matplotlib_configs(c)
# Graphviz configurations
c = _insert_graphviz_configs(c)
# Add versioning information
c = _insert_eups_version(c)
# Always use "now" as the date for the main site's docs because we can't
# look at the Git history of each stack package.
date = datetime.datetime.now()
c['today'] = date.strftime('%Y-%m-%d')
# Use this copyright for now. Ultimately we want to gather COPYRIGHT files
# and build an integrated copyright that way.
c['copyright'] = '2015-{year} LSST contributors'.format(
year=date.year)
# Hide todo directives in the "published" documentation on the main site.
c['todo_include_todos'] = False
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
c['exclude_patterns'] = [
'README.rst',
# Build products
'_build',
# Source for release notes (contents are included in built pages)
'releases/note-source/*.rst',
'releases/tickets-source/*.rst',
# EUPS configuration directory
'ups',
# Recommended directory for pip installing doc eng Python packages
'.pyvenv',
# GitHub templates
'.github',
# This 'home' directory is created by the docubase image for the
# sqre/infra/documenteer ci.lsst.codes Jenkins job. Ideally this
# shouldn't be in the directory at all, but we certainly need to
        # ignore it while it's here.
'home',
]
# Insert rst_epilog configuration
c = _insert_rst_epilog(c)
# Set up the context for the sphinx-jinja extension
c = _insert_jinja_configuration(c)
return c
|
[
"def",
"build_pipelines_lsst_io_configs",
"(",
"*",
",",
"project_name",
",",
"copyright",
"=",
"None",
")",
":",
"# Work around Sphinx bug related to large and highly-nested source files",
"sys",
".",
"setrecursionlimit",
"(",
"2000",
")",
"c",
"=",
"{",
"}",
"c",
"=",
"_insert_common_sphinx_configs",
"(",
"c",
",",
"project_name",
"=",
"project_name",
")",
"# HTML theme",
"c",
"=",
"_insert_html_configs",
"(",
"c",
",",
"project_name",
"=",
"project_name",
",",
"short_project_name",
"=",
"project_name",
")",
"# Sphinx extension modules",
"c",
"=",
"_insert_extensions",
"(",
"c",
")",
"# Intersphinx configuration",
"c",
"=",
"_insert_intersphinx_mapping",
"(",
"c",
")",
"# Breathe extension configuration",
"# FIXME configure this for multiple sites",
"# Automodapi and numpydoc configurations",
"c",
"=",
"_insert_automodapi_configs",
"(",
"c",
")",
"# Matplotlib configurations",
"c",
"=",
"_insert_matplotlib_configs",
"(",
"c",
")",
"# Graphviz configurations",
"c",
"=",
"_insert_graphviz_configs",
"(",
"c",
")",
"# Add versioning information",
"c",
"=",
"_insert_eups_version",
"(",
"c",
")",
"# Always use \"now\" as the date for the main site's docs because we can't",
"# look at the Git history of each stack package.",
"date",
"=",
"datetime",
".",
"datetime",
".",
"now",
"(",
")",
"c",
"[",
"'today'",
"]",
"=",
"date",
".",
"strftime",
"(",
"'%Y-%m-%d'",
")",
"# Use this copyright for now. Ultimately we want to gather COPYRIGHT files",
"# and build an integrated copyright that way.",
"c",
"[",
"'copyright'",
"]",
"=",
"'2015-{year} LSST contributors'",
".",
"format",
"(",
"year",
"=",
"date",
".",
"year",
")",
"# Hide todo directives in the \"published\" documentation on the main site.",
"c",
"[",
"'todo_include_todos'",
"]",
"=",
"False",
"# List of patterns, relative to source directory, that match files and",
"# directories to ignore when looking for source files.",
"c",
"[",
"'exclude_patterns'",
"]",
"=",
"[",
"'README.rst'",
",",
"# Build products",
"'_build'",
",",
"# Source for release notes (contents are included in built pages)",
"'releases/note-source/*.rst'",
",",
"'releases/tickets-source/*.rst'",
",",
"# EUPS configuration directory",
"'ups'",
",",
"# Recommended directory for pip installing doc eng Python packages",
"'.pyvenv'",
",",
"# GitHub templates",
"'.github'",
",",
"# This 'home' directory is created by the docubase image for the",
"# sqre/infra/documenteer ci.lsst.codes Jenkins job. Ideally this",
"# shouldn't be in the directory at all, but we certainly need to",
"# ignore it while its here.",
"'home'",
",",
"]",
"# Insert rst_epilog configuration",
"c",
"=",
"_insert_rst_epilog",
"(",
"c",
")",
"# Set up the context for the sphinx-jinja extension",
"c",
"=",
"_insert_jinja_configuration",
"(",
"c",
")",
"return",
"c"
] |
Build a `dict` of Sphinx configurations that populate the ``conf.py``
of the main pipelines_lsst_io Sphinx project for LSST Science Pipelines
documentation.
The ``conf.py`` file can ingest these configurations via::
from documenteer.sphinxconfig.stackconf import \
build_pipelines_lsst_io_configs
_g = globals()
_g.update(build_pipelines_lsst_io_configs(
project_name='LSST Science Pipelines')
You can subsequently customize the Sphinx configuration by directly
assigning global variables, as usual in a Sphinx ``config.py``, e.g.::
copyright = '2016 Association of Universities for '
'Research in Astronomy, Inc.'
Parameters
----------
project_name : `str`
Name of the project
copyright : `str`, optional
Copyright statement. Do not include the 'Copyright (c)' string; it'll
be added automatically.
Returns
-------
c : dict
Dictionary of configurations that should be added to the ``conf.py``
global namespace via::
_g = global()
_g.update(c)
|
[
"Build",
"a",
"dict",
"of",
"Sphinx",
"configurations",
"that",
"populate",
"the",
"conf",
".",
"py",
"of",
"the",
"main",
"pipelines_lsst_io",
"Sphinx",
"project",
"for",
"LSST",
"Science",
"Pipelines",
"documentation",
"."
] |
python
|
train
|
wavycloud/pyboto3
|
pyboto3/support.py
|
https://github.com/wavycloud/pyboto3/blob/924957ccf994303713a4eed90b775ff2ab95b2e5/pyboto3/support.py#L244-L352
|
def describe_cases(caseIdList=None, displayId=None, afterTime=None, beforeTime=None, includeResolvedCases=None, nextToken=None, maxResults=None, language=None, includeCommunications=None):
"""
Returns a list of cases that you specify by passing one or more case IDs. In addition, you can filter the cases by date by setting values for the afterTime and beforeTime request parameters. You can set values for the includeResolvedCases and includeCommunications request parameters to control how much information is returned.
Case data is available for 12 months after creation. If a case was created more than 12 months ago, a request for data might cause an error.
The response returns the following in JSON format:
See also: AWS API Documentation
:example: response = client.describe_cases(
caseIdList=[
'string',
],
displayId='string',
afterTime='string',
beforeTime='string',
includeResolvedCases=True|False,
nextToken='string',
maxResults=123,
language='string',
includeCommunications=True|False
)
:type caseIdList: list
:param caseIdList: A list of ID numbers of the support cases you want returned. The maximum number of cases is 100.
(string) --
:type displayId: string
:param displayId: The ID displayed for a case in the AWS Support Center user interface.
:type afterTime: string
:param afterTime: The start date for a filtered date search on support case communications. Case communications are available for 12 months after creation.
:type beforeTime: string
:param beforeTime: The end date for a filtered date search on support case communications. Case communications are available for 12 months after creation.
:type includeResolvedCases: boolean
:param includeResolvedCases: Specifies whether resolved support cases should be included in the DescribeCases results. The default is false .
:type nextToken: string
:param nextToken: A resumption point for pagination.
:type maxResults: integer
:param maxResults: The maximum number of results to return before paginating.
:type language: string
:param language: The ISO 639-1 code for the language in which AWS provides support. AWS Support currently supports English ('en') and Japanese ('ja'). Language parameters must be passed explicitly for operations that take them.
:type includeCommunications: boolean
:param includeCommunications: Specifies whether communications should be included in the DescribeCases results. The default is true .
:rtype: dict
:return: {
'cases': [
{
'caseId': 'string',
'displayId': 'string',
'subject': 'string',
'status': 'string',
'serviceCode': 'string',
'categoryCode': 'string',
'severityCode': 'string',
'submittedBy': 'string',
'timeCreated': 'string',
'recentCommunications': {
'communications': [
{
'caseId': 'string',
'body': 'string',
'submittedBy': 'string',
'timeCreated': 'string',
'attachmentSet': [
{
'attachmentId': 'string',
'fileName': 'string'
},
]
},
],
'nextToken': 'string'
},
'ccEmailAddresses': [
'string',
],
'language': 'string'
},
],
'nextToken': 'string'
}
:returns:
caseIdList (list) -- A list of ID numbers of the support cases you want returned. The maximum number of cases is 100.
(string) --
displayId (string) -- The ID displayed for a case in the AWS Support Center user interface.
afterTime (string) -- The start date for a filtered date search on support case communications. Case communications are available for 12 months after creation.
beforeTime (string) -- The end date for a filtered date search on support case communications. Case communications are available for 12 months after creation.
includeResolvedCases (boolean) -- Specifies whether resolved support cases should be included in the DescribeCases results. The default is false .
nextToken (string) -- A resumption point for pagination.
maxResults (integer) -- The maximum number of results to return before paginating.
language (string) -- The ISO 639-1 code for the language in which AWS provides support. AWS Support currently supports English ("en") and Japanese ("ja"). Language parameters must be passed explicitly for operations that take them.
includeCommunications (boolean) -- Specifies whether communications should be included in the DescribeCases results. The default is true .
"""
pass
|
[
"def",
"describe_cases",
"(",
"caseIdList",
"=",
"None",
",",
"displayId",
"=",
"None",
",",
"afterTime",
"=",
"None",
",",
"beforeTime",
"=",
"None",
",",
"includeResolvedCases",
"=",
"None",
",",
"nextToken",
"=",
"None",
",",
"maxResults",
"=",
"None",
",",
"language",
"=",
"None",
",",
"includeCommunications",
"=",
"None",
")",
":",
"pass"
] |
Returns a list of cases that you specify by passing one or more case IDs. In addition, you can filter the cases by date by setting values for the afterTime and beforeTime request parameters. You can set values for the includeResolvedCases and includeCommunications request parameters to control how much information is returned.
Case data is available for 12 months after creation. If a case was created more than 12 months ago, a request for data might cause an error.
The response returns the following in JSON format:
See also: AWS API Documentation
:example: response = client.describe_cases(
caseIdList=[
'string',
],
displayId='string',
afterTime='string',
beforeTime='string',
includeResolvedCases=True|False,
nextToken='string',
maxResults=123,
language='string',
includeCommunications=True|False
)
:type caseIdList: list
:param caseIdList: A list of ID numbers of the support cases you want returned. The maximum number of cases is 100.
(string) --
:type displayId: string
:param displayId: The ID displayed for a case in the AWS Support Center user interface.
:type afterTime: string
:param afterTime: The start date for a filtered date search on support case communications. Case communications are available for 12 months after creation.
:type beforeTime: string
:param beforeTime: The end date for a filtered date search on support case communications. Case communications are available for 12 months after creation.
:type includeResolvedCases: boolean
:param includeResolvedCases: Specifies whether resolved support cases should be included in the DescribeCases results. The default is false .
:type nextToken: string
:param nextToken: A resumption point for pagination.
:type maxResults: integer
:param maxResults: The maximum number of results to return before paginating.
:type language: string
:param language: The ISO 639-1 code for the language in which AWS provides support. AWS Support currently supports English ('en') and Japanese ('ja'). Language parameters must be passed explicitly for operations that take them.
:type includeCommunications: boolean
:param includeCommunications: Specifies whether communications should be included in the DescribeCases results. The default is true .
:rtype: dict
:return: {
'cases': [
{
'caseId': 'string',
'displayId': 'string',
'subject': 'string',
'status': 'string',
'serviceCode': 'string',
'categoryCode': 'string',
'severityCode': 'string',
'submittedBy': 'string',
'timeCreated': 'string',
'recentCommunications': {
'communications': [
{
'caseId': 'string',
'body': 'string',
'submittedBy': 'string',
'timeCreated': 'string',
'attachmentSet': [
{
'attachmentId': 'string',
'fileName': 'string'
},
]
},
],
'nextToken': 'string'
},
'ccEmailAddresses': [
'string',
],
'language': 'string'
},
],
'nextToken': 'string'
}
:returns:
caseIdList (list) -- A list of ID numbers of the support cases you want returned. The maximum number of cases is 100.
(string) --
displayId (string) -- The ID displayed for a case in the AWS Support Center user interface.
afterTime (string) -- The start date for a filtered date search on support case communications. Case communications are available for 12 months after creation.
beforeTime (string) -- The end date for a filtered date search on support case communications. Case communications are available for 12 months after creation.
includeResolvedCases (boolean) -- Specifies whether resolved support cases should be included in the DescribeCases results. The default is false .
nextToken (string) -- A resumption point for pagination.
maxResults (integer) -- The maximum number of results to return before paginating.
language (string) -- The ISO 639-1 code for the language in which AWS provides support. AWS Support currently supports English ("en") and Japanese ("ja"). Language parameters must be passed explicitly for operations that take them.
includeCommunications (boolean) -- Specifies whether communications should be included in the DescribeCases results. The default is true .
|
[
"Returns",
"a",
"list",
"of",
"cases",
"that",
"you",
"specify",
"by",
"passing",
"one",
"or",
"more",
"case",
"IDs",
".",
"In",
"addition",
"you",
"can",
"filter",
"the",
"cases",
"by",
"date",
"by",
"setting",
"values",
"for",
"the",
"afterTime",
"and",
"beforeTime",
"request",
"parameters",
".",
"You",
"can",
"set",
"values",
"for",
"the",
"includeResolvedCases",
"and",
"includeCommunications",
"request",
"parameters",
"to",
"control",
"how",
"much",
"information",
"is",
"returned",
".",
"Case",
"data",
"is",
"available",
"for",
"12",
"months",
"after",
"creation",
".",
"If",
"a",
"case",
"was",
"created",
"more",
"than",
"12",
"months",
"ago",
"a",
"request",
"for",
"data",
"might",
"cause",
"an",
"error",
".",
"The",
"response",
"returns",
"the",
"following",
"in",
"JSON",
"format",
":",
"See",
"also",
":",
"AWS",
"API",
"Documentation",
":",
"example",
":",
"response",
"=",
"client",
".",
"describe_cases",
"(",
"caseIdList",
"=",
"[",
"string",
"]",
"displayId",
"=",
"string",
"afterTime",
"=",
"string",
"beforeTime",
"=",
"string",
"includeResolvedCases",
"=",
"True|False",
"nextToken",
"=",
"string",
"maxResults",
"=",
"123",
"language",
"=",
"string",
"includeCommunications",
"=",
"True|False",
")",
":",
"type",
"caseIdList",
":",
"list",
":",
"param",
"caseIdList",
":",
"A",
"list",
"of",
"ID",
"numbers",
"of",
"the",
"support",
"cases",
"you",
"want",
"returned",
".",
"The",
"maximum",
"number",
"of",
"cases",
"is",
"100",
".",
"(",
"string",
")",
"--"
] |
python
|
train
|
bhmm/bhmm
|
bhmm/util/statistics.py
|
https://github.com/bhmm/bhmm/blob/9804d18c2ddb684fb4d90b544cc209617a89ca9a/bhmm/util/statistics.py#L34-L75
|
def confidence_interval(data, alpha):
"""
Computes the mean and alpha-confidence interval of the given sample set
Parameters
----------
data : ndarray
a 1D-array of samples
alpha : float in [0,1]
the confidence level, i.e. percentage of data included in the interval
Returns
-------
[m,l,r] where m is the mean of the data, and (l,r) are the m-alpha/2 and m+alpha/2
confidence interval boundaries.
"""
if alpha < 0 or alpha > 1:
raise ValueError('Not a meaningful confidence level: '+str(alpha))
# compute mean
m = np.mean(data)
# sort data
sdata = np.sort(data)
# index of the mean
im = np.searchsorted(sdata, m)
if im == 0 or im == len(sdata):
pm = im
else:
pm = (im-1) + (m-sdata[im-1]) / (sdata[im]-sdata[im-1])
# left interval boundary
pl = pm - alpha * pm
il1 = max(0, int(math.floor(pl)))
il2 = min(len(sdata)-1, int(math.ceil(pl)))
l = sdata[il1] + (pl - il1)*(sdata[il2] - sdata[il1])
# right interval boundary
pr = pm + alpha * (len(data)-im)
ir1 = max(0, int(math.floor(pr)))
ir2 = min(len(sdata)-1, int(math.ceil(pr)))
r = sdata[ir1] + (pr - ir1)*(sdata[ir2] - sdata[ir1])
# return
return m, l, r
|
[
"def",
"confidence_interval",
"(",
"data",
",",
"alpha",
")",
":",
"if",
"alpha",
"<",
"0",
"or",
"alpha",
">",
"1",
":",
"raise",
"ValueError",
"(",
"'Not a meaningful confidence level: '",
"+",
"str",
"(",
"alpha",
")",
")",
"# compute mean",
"m",
"=",
"np",
".",
"mean",
"(",
"data",
")",
"# sort data",
"sdata",
"=",
"np",
".",
"sort",
"(",
"data",
")",
"# index of the mean",
"im",
"=",
"np",
".",
"searchsorted",
"(",
"sdata",
",",
"m",
")",
"if",
"im",
"==",
"0",
"or",
"im",
"==",
"len",
"(",
"sdata",
")",
":",
"pm",
"=",
"im",
"else",
":",
"pm",
"=",
"(",
"im",
"-",
"1",
")",
"+",
"(",
"m",
"-",
"sdata",
"[",
"im",
"-",
"1",
"]",
")",
"/",
"(",
"sdata",
"[",
"im",
"]",
"-",
"sdata",
"[",
"im",
"-",
"1",
"]",
")",
"# left interval boundary",
"pl",
"=",
"pm",
"-",
"alpha",
"*",
"pm",
"il1",
"=",
"max",
"(",
"0",
",",
"int",
"(",
"math",
".",
"floor",
"(",
"pl",
")",
")",
")",
"il2",
"=",
"min",
"(",
"len",
"(",
"sdata",
")",
"-",
"1",
",",
"int",
"(",
"math",
".",
"ceil",
"(",
"pl",
")",
")",
")",
"l",
"=",
"sdata",
"[",
"il1",
"]",
"+",
"(",
"pl",
"-",
"il1",
")",
"*",
"(",
"sdata",
"[",
"il2",
"]",
"-",
"sdata",
"[",
"il1",
"]",
")",
"# right interval boundary",
"pr",
"=",
"pm",
"+",
"alpha",
"*",
"(",
"len",
"(",
"data",
")",
"-",
"im",
")",
"ir1",
"=",
"max",
"(",
"0",
",",
"int",
"(",
"math",
".",
"floor",
"(",
"pr",
")",
")",
")",
"ir2",
"=",
"min",
"(",
"len",
"(",
"sdata",
")",
"-",
"1",
",",
"int",
"(",
"math",
".",
"ceil",
"(",
"pr",
")",
")",
")",
"r",
"=",
"sdata",
"[",
"ir1",
"]",
"+",
"(",
"pr",
"-",
"ir1",
")",
"*",
"(",
"sdata",
"[",
"ir2",
"]",
"-",
"sdata",
"[",
"ir1",
"]",
")",
"# return",
"return",
"m",
",",
"l",
",",
"r"
] |
Computes the mean and alpha-confidence interval of the given sample set
Parameters
----------
data : ndarray
a 1D-array of samples
alpha : float in [0,1]
the confidence level, i.e. percentage of data included in the interval
Returns
-------
[m,l,r] where m is the mean of the data, and (l,r) are the m-alpha/2 and m+alpha/2
confidence interval boundaries.
|
[
"Computes",
"the",
"mean",
"and",
"alpha",
"-",
"confidence",
"interval",
"of",
"the",
"given",
"sample",
"set"
] |
python
|
train
|
PythonCharmers/python-future
|
src/future/types/newstr.py
|
https://github.com/PythonCharmers/python-future/blob/c423752879acc05eebc29b0bb9909327bd5c7308/src/future/types/newstr.py#L275-L286
|
def splitlines(self, keepends=False):
"""
S.splitlines(keepends=False) -> list of strings
Return a list of the lines in S, breaking at line boundaries.
Line breaks are not included in the resulting list unless keepends
is given and true.
"""
# Py2 unicode.splitlines() takes keepends as an optional parameter,
# not as a keyword argument as in Python 3 str.
parts = super(newstr, self).splitlines(keepends)
return [newstr(part) for part in parts]
|
[
"def",
"splitlines",
"(",
"self",
",",
"keepends",
"=",
"False",
")",
":",
"# Py2 unicode.splitlines() takes keepends as an optional parameter,",
"# not as a keyword argument as in Python 3 str.",
"parts",
"=",
"super",
"(",
"newstr",
",",
"self",
")",
".",
"splitlines",
"(",
"keepends",
")",
"return",
"[",
"newstr",
"(",
"part",
")",
"for",
"part",
"in",
"parts",
"]"
] |
S.splitlines(keepends=False) -> list of strings
Return a list of the lines in S, breaking at line boundaries.
Line breaks are not included in the resulting list unless keepends
is given and true.
|
[
"S",
".",
"splitlines",
"(",
"keepends",
"=",
"False",
")",
"-",
">",
"list",
"of",
"strings"
] |
python
|
train
|
almcc/cinder-data
|
cinder_data/store.py
|
https://github.com/almcc/cinder-data/blob/4159a5186c4b4fc32354749892e86130530f6ec5/cinder_data/store.py#L41-L55
|
def peek_record(self, model_class, record_id):
"""Return an instance of the model_class from the cache if it is present.
Args:
model_class (:class:`cinder_data.model.CinderModel`): A subclass of
:class:`cinder_data.model.CinderModel` of your chosen model.
record_id (int): The id of the record requested.
Returns:
:class:`cinder_data.model.CinderModel`: An instance of model_class or None.
"""
if self._cache:
return self._cache.get_record(model_class.__name__, record_id)
else:
return None
|
[
"def",
"peek_record",
"(",
"self",
",",
"model_class",
",",
"record_id",
")",
":",
"if",
"self",
".",
"_cache",
":",
"return",
"self",
".",
"_cache",
".",
"get_record",
"(",
"model_class",
".",
"__name__",
",",
"record_id",
")",
"else",
":",
"return",
"None"
] |
Return an instance of the model_class from the cache if it is present.
Args:
model_class (:class:`cinder_data.model.CinderModel`): A subclass of
:class:`cinder_data.model.CinderModel` of your chosen model.
record_id (int): The id of the record requested.
Returns:
:class:`cinder_data.model.CinderModel`: An instance of model_class or None.
|
[
"Return",
"an",
"instance",
"of",
"the",
"model_class",
"from",
"the",
"cache",
"if",
"it",
"is",
"present",
"."
] |
python
|
train
|
Vagrants/blackbird
|
blackbird/utils/configread.py
|
https://github.com/Vagrants/blackbird/blob/3b38cd5650caae362e0668dbd38bf8f88233e079/blackbird/utils/configread.py#L380-L422
|
def _get_raw_specs(self, config):
"""
        This method extracts only the "Validate.spec" from
modules that were collected by ConfigReader._get_modules().
        And, this method appends "Validate.spec" to raw_specs.
This method creates a dictionary like the following:
raw_specs = {
'redis': (
"[redis]",
"host = string(default='127.0.0.1')",
"port = integer(0, 65535, default=6379)",
"db = integer(default=0)",
"charset = string(default='utf-8')",
"password = string(default=None)"
),
...
}
raw_specs is used by ConfigReader._create_specs().
"""
        # spec_name is hard-coded
raw_specs = {}
spec_name = 'Validator'
modules = self._get_modules()
for section, options in config.items():
if section == 'global':
continue
try:
name = options['module']
except KeyError:
raise ConfigMissingValue(section, 'module')
try:
spec = getattr(modules[name], spec_name)().spec
raw_specs[name] = spec
except KeyError:
raise NotSupportedError(name)
return raw_specs
|
[
"def",
"_get_raw_specs",
"(",
"self",
",",
"config",
")",
":",
"# spec_name is hard-corded",
"raw_specs",
"=",
"{",
"}",
"spec_name",
"=",
"'Validator'",
"modules",
"=",
"self",
".",
"_get_modules",
"(",
")",
"for",
"section",
",",
"options",
"in",
"config",
".",
"items",
"(",
")",
":",
"if",
"section",
"==",
"'global'",
":",
"continue",
"try",
":",
"name",
"=",
"options",
"[",
"'module'",
"]",
"except",
"KeyError",
":",
"raise",
"ConfigMissingValue",
"(",
"section",
",",
"'module'",
")",
"try",
":",
"spec",
"=",
"getattr",
"(",
"modules",
"[",
"name",
"]",
",",
"spec_name",
")",
"(",
")",
".",
"spec",
"raw_specs",
"[",
"name",
"]",
"=",
"spec",
"except",
"KeyError",
":",
"raise",
"NotSupportedError",
"(",
"name",
")",
"return",
"raw_specs"
] |
This method extracts only the "Validate.spec" from
modules that were collected by ConfigReader._get_modules().
And, this method appends "Validate.spec" to raw_specs.
This method creates a dictionary like the following:
raw_specs = {
'redis': (
"[redis]",
"host = string(default='127.0.0.1')",
"port = integer(0, 65535, default=6379)",
"db = integer(default=0)",
"charset = string(default='utf-8')",
"password = string(default=None)"
),
...
}
raw_specs is used by ConfigReader._create_specs().
|
[
"This",
"method",
"extract",
"only",
"the",
"Validate",
".",
"spec",
"from",
"modules",
"that",
"were",
"collected",
"by",
"ConfigReader",
".",
"_get_modules",
"()",
".",
"And",
"this",
"method",
"append",
"Validate",
".",
"spec",
"to",
"raw_specs",
".",
"This",
"method",
"creates",
"a",
"dictionary",
"like",
"the",
"following",
":",
"raw_specs",
"=",
"{",
"redis",
":",
"(",
"[",
"redis",
"]",
"host",
"=",
"string",
"(",
"default",
"=",
"127",
".",
"0",
".",
"0",
".",
"1",
")",
"port",
"=",
"integer",
"(",
"0",
"65535",
"default",
"=",
"6379",
")",
"db",
"=",
"integer",
"(",
"default",
"=",
"0",
")",
"charset",
"=",
"string",
"(",
"default",
"=",
"utf",
"-",
"8",
")",
"password",
"=",
"string",
"(",
"default",
"=",
"None",
")",
")",
"...",
"}"
] |
python
|
train
|
lrq3000/pyFileFixity
|
pyFileFixity/lib/profilers/visual/runsnakerun/runsnake.py
|
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/runsnakerun/runsnake.py#L474-L485
|
def OnOpenFile(self, event):
"""Request to open a new profile file"""
dialog = wx.FileDialog(self, style=wx.OPEN|wx.FD_MULTIPLE)
if dialog.ShowModal() == wx.ID_OK:
paths = dialog.GetPaths()
if self.loader:
# we've already got a displayed data-set, open new window...
frame = MainFrame()
frame.Show(True)
frame.load(*paths)
else:
self.load(*paths)
|
[
"def",
"OnOpenFile",
"(",
"self",
",",
"event",
")",
":",
"dialog",
"=",
"wx",
".",
"FileDialog",
"(",
"self",
",",
"style",
"=",
"wx",
".",
"OPEN",
"|",
"wx",
".",
"FD_MULTIPLE",
")",
"if",
"dialog",
".",
"ShowModal",
"(",
")",
"==",
"wx",
".",
"ID_OK",
":",
"paths",
"=",
"dialog",
".",
"GetPaths",
"(",
")",
"if",
"self",
".",
"loader",
":",
"# we've already got a displayed data-set, open new window...",
"frame",
"=",
"MainFrame",
"(",
")",
"frame",
".",
"Show",
"(",
"True",
")",
"frame",
".",
"load",
"(",
"*",
"paths",
")",
"else",
":",
"self",
".",
"load",
"(",
"*",
"paths",
")"
] |
Request to open a new profile file
|
[
"Request",
"to",
"open",
"a",
"new",
"profile",
"file"
] |
python
|
train
|
Microsoft/malmo
|
MalmoEnv/malmoenv/commands.py
|
https://github.com/Microsoft/malmo/blob/4139cd6f3e52f6e893a931a1d4b70d35f8e70e5a/MalmoEnv/malmoenv/commands.py#L85-L135
|
def get_actions(self, commands):
"""Get parameterized actions from command list based on command type and verb."""
actions = []
for type, turn_based, verb in commands:
if len(self.action_filter) != 0 and verb not in self.action_filter:
continue
if type == 'DiscreteMovement':
if verb in {"move", "turn", "look",
"strafe", "jumpmove", "jumpstrafe"}:
actions.append(verb + " 1")
actions.append(verb + " -1")
elif verb in {"jumpeast", "jumpnorth", "jumpsouth",
"jumpwest", "movenorth", "moveeast",
"movesouth", "movewest", "jumpuse",
"use", "attack", "jump"}:
actions.append(verb + " 1")
else:
raise CommandHandlerException("Invalid discrete command")
elif type == 'ContinuousMovement':
# Translate to discrete.
if verb in {"move", "strafe", "pitch", "turn"}:
actions.append(verb + " 1")
actions.append(verb + " -1")
elif verb in {"crouch", "jump", "attack", "use"}:
actions.append(verb + " 1")
actions.append(verb + " 0")
else:
raise CommandHandlerException("Invalid continuous command")
elif type == 'HumanLevel':
if verb == 'moveMouse':
actions.append('mouseMove 0 0')
elif verb in {'forward', 'back', 'left', 'right'}:
actions.append(verb + ' 1')
actions.append(verb + ' 0')
else:
actions.append(verb)
elif type == 'MissionQuit':
if verb != 'quit':
raise CommandHandlerException("Invalid quit command")
actions.append(verb)
elif type == 'Chat':
if verb != 'chat':
raise CommandHandlerException("Invalid chat command")
actions.append(verb)
elif type == 'SimpleCraft':
if verb != 'craft':
raise CommandHandlerException("Invalid craft command")
actions.append(verb)
elif type == 'AbsoluteMovement' or 'Inventory':
actions.append(verb)
return actions
|
[
"def",
"get_actions",
"(",
"self",
",",
"commands",
")",
":",
"actions",
"=",
"[",
"]",
"for",
"type",
",",
"turn_based",
",",
"verb",
"in",
"commands",
":",
"if",
"len",
"(",
"self",
".",
"action_filter",
")",
"!=",
"0",
"and",
"verb",
"not",
"in",
"self",
".",
"action_filter",
":",
"continue",
"if",
"type",
"==",
"'DiscreteMovement'",
":",
"if",
"verb",
"in",
"{",
"\"move\"",
",",
"\"turn\"",
",",
"\"look\"",
",",
"\"strafe\"",
",",
"\"jumpmove\"",
",",
"\"jumpstrafe\"",
"}",
":",
"actions",
".",
"append",
"(",
"verb",
"+",
"\" 1\"",
")",
"actions",
".",
"append",
"(",
"verb",
"+",
"\" -1\"",
")",
"elif",
"verb",
"in",
"{",
"\"jumpeast\"",
",",
"\"jumpnorth\"",
",",
"\"jumpsouth\"",
",",
"\"jumpwest\"",
",",
"\"movenorth\"",
",",
"\"moveeast\"",
",",
"\"movesouth\"",
",",
"\"movewest\"",
",",
"\"jumpuse\"",
",",
"\"use\"",
",",
"\"attack\"",
",",
"\"jump\"",
"}",
":",
"actions",
".",
"append",
"(",
"verb",
"+",
"\" 1\"",
")",
"else",
":",
"raise",
"CommandHandlerException",
"(",
"\"Invalid discrete command\"",
")",
"elif",
"type",
"==",
"'ContinuousMovement'",
":",
"# Translate to discrete.",
"if",
"verb",
"in",
"{",
"\"move\"",
",",
"\"strafe\"",
",",
"\"pitch\"",
",",
"\"turn\"",
"}",
":",
"actions",
".",
"append",
"(",
"verb",
"+",
"\" 1\"",
")",
"actions",
".",
"append",
"(",
"verb",
"+",
"\" -1\"",
")",
"elif",
"verb",
"in",
"{",
"\"crouch\"",
",",
"\"jump\"",
",",
"\"attack\"",
",",
"\"use\"",
"}",
":",
"actions",
".",
"append",
"(",
"verb",
"+",
"\" 1\"",
")",
"actions",
".",
"append",
"(",
"verb",
"+",
"\" 0\"",
")",
"else",
":",
"raise",
"CommandHandlerException",
"(",
"\"Invalid continuous command\"",
")",
"elif",
"type",
"==",
"'HumanLevel'",
":",
"if",
"verb",
"==",
"'moveMouse'",
":",
"actions",
".",
"append",
"(",
"'mouseMove 0 0'",
")",
"elif",
"verb",
"in",
"{",
"'forward'",
",",
"'back'",
",",
"'left'",
",",
"'right'",
"}",
":",
"actions",
".",
"append",
"(",
"verb",
"+",
"' 1'",
")",
"actions",
".",
"append",
"(",
"verb",
"+",
"' 0'",
")",
"else",
":",
"actions",
".",
"append",
"(",
"verb",
")",
"elif",
"type",
"==",
"'MissionQuit'",
":",
"if",
"verb",
"!=",
"'quit'",
":",
"raise",
"CommandHandlerException",
"(",
"\"Invalid quit command\"",
")",
"actions",
".",
"append",
"(",
"verb",
")",
"elif",
"type",
"==",
"'Chat'",
":",
"if",
"verb",
"!=",
"'chat'",
":",
"raise",
"CommandHandlerException",
"(",
"\"Invalid chat command\"",
")",
"actions",
".",
"append",
"(",
"verb",
")",
"elif",
"type",
"==",
"'SimpleCraft'",
":",
"if",
"verb",
"!=",
"'craft'",
":",
"raise",
"CommandHandlerException",
"(",
"\"Invalid craft command\"",
")",
"actions",
".",
"append",
"(",
"verb",
")",
"elif",
"type",
"==",
"'AbsoluteMovement'",
"or",
"'Inventory'",
":",
"actions",
".",
"append",
"(",
"verb",
")",
"return",
"actions"
] |
Get parameterized actions from command list based on command type and verb.
|
[
"Get",
"parameterized",
"actions",
"from",
"command",
"list",
"based",
"on",
"command",
"type",
"and",
"verb",
"."
] |
python
|
train
|
KE-works/pykechain
|
pykechain/models/part.py
|
https://github.com/KE-works/pykechain/blob/b0296cf34328fd41660bf6f0b9114fd0167c40c4/pykechain/models/part.py#L255-L281
|
def proxy_model(self):
"""
Retrieve the proxy model of this proxied `Part` as a `Part`.
Allows you to retrieve the model of a proxy. But trying to get the catalog model of a part that
has no proxy, will raise an :exc:`NotFoundError`. Only models can have a proxy.
:return: :class:`Part` with category `MODEL` and from which the current part is proxied
:raises NotFoundError: When no proxy model is found
Example
-------
>>> proxy_part = project.model('Proxy based on catalog model')
>>> catalog_model_of_proxy_part = proxy_part.proxy_model()
>>> proxied_material_of_the_bolt_model = project.model('Bolt Material')
>>> proxy_basis_for_the_material_model = proxied_material_of_the_bolt_model.proxy_model()
"""
if self.category != Category.MODEL:
raise IllegalArgumentError("Part {} is not a model, therefore it cannot have a proxy model".format(self))
if 'proxy' in self._json_data and self._json_data.get('proxy'):
catalog_model_id = self._json_data['proxy'].get('id')
return self._client.model(pk=catalog_model_id)
else:
raise NotFoundError("Part {} is not a proxy".format(self.name))
|
[
"def",
"proxy_model",
"(",
"self",
")",
":",
"if",
"self",
".",
"category",
"!=",
"Category",
".",
"MODEL",
":",
"raise",
"IllegalArgumentError",
"(",
"\"Part {} is not a model, therefore it cannot have a proxy model\"",
".",
"format",
"(",
"self",
")",
")",
"if",
"'proxy'",
"in",
"self",
".",
"_json_data",
"and",
"self",
".",
"_json_data",
".",
"get",
"(",
"'proxy'",
")",
":",
"catalog_model_id",
"=",
"self",
".",
"_json_data",
"[",
"'proxy'",
"]",
".",
"get",
"(",
"'id'",
")",
"return",
"self",
".",
"_client",
".",
"model",
"(",
"pk",
"=",
"catalog_model_id",
")",
"else",
":",
"raise",
"NotFoundError",
"(",
"\"Part {} is not a proxy\"",
".",
"format",
"(",
"self",
".",
"name",
")",
")"
] |
Retrieve the proxy model of this proxied `Part` as a `Part`.
Allows you to retrieve the model of a proxy. But trying to get the catalog model of a part that
has no proxy, will raise an :exc:`NotFoundError`. Only models can have a proxy.
:return: :class:`Part` with category `MODEL` and from which the current part is proxied
:raises NotFoundError: When no proxy model is found
Example
-------
>>> proxy_part = project.model('Proxy based on catalog model')
>>> catalog_model_of_proxy_part = proxy_part.proxy_model()
>>> proxied_material_of_the_bolt_model = project.model('Bolt Material')
>>> proxy_basis_for_the_material_model = proxied_material_of_the_bolt_model.proxy_model()
|
[
"Retrieve",
"the",
"proxy",
"model",
"of",
"this",
"proxied",
"Part",
"as",
"a",
"Part",
"."
] |
python
|
train
|
apache/incubator-mxnet
|
python/mxnet/symbol/symbol.py
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/symbol/symbol.py#L2718-L2739
|
def load_json(json_str):
"""Loads symbol from json string.
Parameters
----------
json_str : str
A JSON string.
Returns
-------
sym : Symbol
The loaded symbol.
See Also
--------
Symbol.tojson : Used to save symbol into json string.
"""
if not isinstance(json_str, string_types):
raise TypeError('fname required to be string')
handle = SymbolHandle()
check_call(_LIB.MXSymbolCreateFromJSON(c_str(json_str), ctypes.byref(handle)))
return Symbol(handle)
|
[
"def",
"load_json",
"(",
"json_str",
")",
":",
"if",
"not",
"isinstance",
"(",
"json_str",
",",
"string_types",
")",
":",
"raise",
"TypeError",
"(",
"'fname required to be string'",
")",
"handle",
"=",
"SymbolHandle",
"(",
")",
"check_call",
"(",
"_LIB",
".",
"MXSymbolCreateFromJSON",
"(",
"c_str",
"(",
"json_str",
")",
",",
"ctypes",
".",
"byref",
"(",
"handle",
")",
")",
")",
"return",
"Symbol",
"(",
"handle",
")"
] |
Loads symbol from json string.
Parameters
----------
json_str : str
A JSON string.
Returns
-------
sym : Symbol
The loaded symbol.
See Also
--------
Symbol.tojson : Used to save symbol into json string.
|
[
"Loads",
"symbol",
"from",
"json",
"string",
"."
] |
python
|
train
|
diging/tethne
|
tethne/analyze/collection.py
|
https://github.com/diging/tethne/blob/ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f/tethne/analyze/collection.py#L72-L101
|
def connected(G, method_name, **kwargs):
"""
Performs analysis methods from networkx.connected on each graph in the
collection.
Parameters
----------
G : :class:`.GraphCollection`
The :class:`.GraphCollection` to analyze. The specified method will be
applied to each graph in ``G``.
method : string
Name of method in networkx.connected.
**kwargs : kwargs
Keyword arguments, passed directly to method.
Returns
-------
results : dict
Keys are graph indices, values are output of method for that graph.
Raises
------
ValueError
If name is not in networkx.connected, or if no such method exists.
"""
warnings.warn("To be removed in 0.8. Use GraphCollection.analyze instead.",
DeprecationWarning)
return G.analyze(['connected', method_name], **kwargs)
|
[
"def",
"connected",
"(",
"G",
",",
"method_name",
",",
"*",
"*",
"kwargs",
")",
":",
"warnings",
".",
"warn",
"(",
"\"To be removed in 0.8. Use GraphCollection.analyze instead.\"",
",",
"DeprecationWarning",
")",
"return",
"G",
".",
"analyze",
"(",
"[",
"'connected'",
",",
"method_name",
"]",
",",
"*",
"*",
"kwargs",
")"
] |
Performs analysis methods from networkx.connected on each graph in the
collection.
Parameters
----------
G : :class:`.GraphCollection`
The :class:`.GraphCollection` to analyze. The specified method will be
applied to each graph in ``G``.
method : string
Name of method in networkx.connected.
**kwargs : kwargs
Keyword arguments, passed directly to method.
Returns
-------
results : dict
Keys are graph indices, values are output of method for that graph.
Raises
------
ValueError
If name is not in networkx.connected, or if no such method exists.
|
[
"Performs",
"analysis",
"methods",
"from",
"networkx",
".",
"connected",
"on",
"each",
"graph",
"in",
"the",
"collection",
"."
] |
python
|
train
|
peri-source/peri
|
peri/logger.py
|
https://github.com/peri-source/peri/blob/61beed5deaaf978ab31ed716e8470d86ba639867/peri/logger.py#L82-L88
|
def set_formatter(self, formatter='standard', handlers=None):
"""
Set the text format of messages to one of the pre-determined forms,
one of ['quiet', 'minimal', 'standard', 'verbose']
"""
for h in self.get_handlers(handlers):
h.setFormatter(logging.Formatter(formatters[formatter]))
|
[
"def",
"set_formatter",
"(",
"self",
",",
"formatter",
"=",
"'standard'",
",",
"handlers",
"=",
"None",
")",
":",
"for",
"h",
"in",
"self",
".",
"get_handlers",
"(",
"handlers",
")",
":",
"h",
".",
"setFormatter",
"(",
"logging",
".",
"Formatter",
"(",
"formatters",
"[",
"formatter",
"]",
")",
")"
] |
Set the text format of messages to one of the pre-determined forms,
one of ['quiet', 'minimal', 'standard', 'verbose']
|
[
"Set",
"the",
"text",
"format",
"of",
"messages",
"to",
"one",
"of",
"the",
"pre",
"-",
"determined",
"forms",
"one",
"of",
"[",
"quiet",
"minimal",
"standard",
"verbose",
"]"
] |
python
|
valid
|
theislab/anndata
|
anndata/readwrite/read.py
|
https://github.com/theislab/anndata/blob/34f4eb63710628fbc15e7050e5efcac1d7806062/anndata/readwrite/read.py#L202-L217
|
def read_mtx(filename: PathLike, dtype: str='float32') -> AnnData:
"""Read ``.mtx`` file.
Parameters
----------
filename
The filename.
dtype
Numpy data type.
"""
from scipy.io import mmread
# could be rewritten accounting for dtype to be more performant
X = mmread(fspath(filename)).astype(dtype)
from scipy.sparse import csr_matrix
X = csr_matrix(X)
return AnnData(X, dtype=dtype)
|
[
"def",
"read_mtx",
"(",
"filename",
":",
"PathLike",
",",
"dtype",
":",
"str",
"=",
"'float32'",
")",
"->",
"AnnData",
":",
"from",
"scipy",
".",
"io",
"import",
"mmread",
"# could be rewritten accounting for dtype to be more performant",
"X",
"=",
"mmread",
"(",
"fspath",
"(",
"filename",
")",
")",
".",
"astype",
"(",
"dtype",
")",
"from",
"scipy",
".",
"sparse",
"import",
"csr_matrix",
"X",
"=",
"csr_matrix",
"(",
"X",
")",
"return",
"AnnData",
"(",
"X",
",",
"dtype",
"=",
"dtype",
")"
] |
Read ``.mtx`` file.
Parameters
----------
filename
The filename.
dtype
Numpy data type.
|
[
"Read",
".",
"mtx",
"file",
"."
] |
python
|
train
|
google/grr
|
grr/server/grr_response_server/databases/db_compat.py
|
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/databases/db_compat.py#L70-L96
|
def ProcessHuntFlowError(flow_obj,
error_message=None,
backtrace=None,
status_msg=None):
"""Processes error and status message for a given hunt-induced flow."""
if not hunt.IsLegacyHunt(flow_obj.parent_hunt_id):
hunt.StopHuntIfCPUOrNetworkLimitsExceeded(flow_obj.parent_hunt_id)
return
hunt_urn = rdfvalue.RDFURN("hunts").Add(flow_obj.parent_hunt_id)
client_urn = rdf_client.ClientURN(flow_obj.client_id)
error = rdf_hunts.HuntError(client_id=flow_obj.client_id, backtrace=backtrace)
if error_message is not None:
error.log_message = error_message
with data_store.DB.GetMutationPool() as pool:
grr_collections.HuntErrorCollection.StaticAdd(
hunt_urn.Add("ErrorClients"), error, mutation_pool=pool)
grr_collections.ClientUrnCollection.StaticAdd(
hunt_urn.Add("CompletedClients"), client_urn, mutation_pool=pool)
if status_msg is not None:
with aff4.FACTORY.Open(hunt_urn, mode="rw") as fd:
# Legacy AFF4 code expects token to be set.
fd.token = access_control.ACLToken(username=fd.creator)
fd.GetRunner().SaveResourceUsage(flow_obj.client_id, status_msg)
|
[
"def",
"ProcessHuntFlowError",
"(",
"flow_obj",
",",
"error_message",
"=",
"None",
",",
"backtrace",
"=",
"None",
",",
"status_msg",
"=",
"None",
")",
":",
"if",
"not",
"hunt",
".",
"IsLegacyHunt",
"(",
"flow_obj",
".",
"parent_hunt_id",
")",
":",
"hunt",
".",
"StopHuntIfCPUOrNetworkLimitsExceeded",
"(",
"flow_obj",
".",
"parent_hunt_id",
")",
"return",
"hunt_urn",
"=",
"rdfvalue",
".",
"RDFURN",
"(",
"\"hunts\"",
")",
".",
"Add",
"(",
"flow_obj",
".",
"parent_hunt_id",
")",
"client_urn",
"=",
"rdf_client",
".",
"ClientURN",
"(",
"flow_obj",
".",
"client_id",
")",
"error",
"=",
"rdf_hunts",
".",
"HuntError",
"(",
"client_id",
"=",
"flow_obj",
".",
"client_id",
",",
"backtrace",
"=",
"backtrace",
")",
"if",
"error_message",
"is",
"not",
"None",
":",
"error",
".",
"log_message",
"=",
"error_message",
"with",
"data_store",
".",
"DB",
".",
"GetMutationPool",
"(",
")",
"as",
"pool",
":",
"grr_collections",
".",
"HuntErrorCollection",
".",
"StaticAdd",
"(",
"hunt_urn",
".",
"Add",
"(",
"\"ErrorClients\"",
")",
",",
"error",
",",
"mutation_pool",
"=",
"pool",
")",
"grr_collections",
".",
"ClientUrnCollection",
".",
"StaticAdd",
"(",
"hunt_urn",
".",
"Add",
"(",
"\"CompletedClients\"",
")",
",",
"client_urn",
",",
"mutation_pool",
"=",
"pool",
")",
"if",
"status_msg",
"is",
"not",
"None",
":",
"with",
"aff4",
".",
"FACTORY",
".",
"Open",
"(",
"hunt_urn",
",",
"mode",
"=",
"\"rw\"",
")",
"as",
"fd",
":",
"# Legacy AFF4 code expects token to be set.",
"fd",
".",
"token",
"=",
"access_control",
".",
"ACLToken",
"(",
"username",
"=",
"fd",
".",
"creator",
")",
"fd",
".",
"GetRunner",
"(",
")",
".",
"SaveResourceUsage",
"(",
"flow_obj",
".",
"client_id",
",",
"status_msg",
")"
] |
Processes error and status message for a given hunt-induced flow.
|
[
"Processes",
"error",
"and",
"status",
"message",
"for",
"a",
"given",
"hunt",
"-",
"induced",
"flow",
"."
] |
python
|
train
|
ska-sa/purr
|
Purr/LogEntry.py
|
https://github.com/ska-sa/purr/blob/4c848768d0485d0f88b30850d0d5372221b21b66/Purr/LogEntry.py#L386-L388
|
def generateIndex(self, refresh=0, refresh_index=0):
"""Writes the index file"""
open(self.index_file, "wt").write(self.renderIndex(refresh=refresh, refresh_index=refresh_index))
|
[
"def",
"generateIndex",
"(",
"self",
",",
"refresh",
"=",
"0",
",",
"refresh_index",
"=",
"0",
")",
":",
"open",
"(",
"self",
".",
"index_file",
",",
"\"wt\"",
")",
".",
"write",
"(",
"self",
".",
"renderIndex",
"(",
"refresh",
"=",
"refresh",
",",
"refresh_index",
"=",
"refresh_index",
")",
")"
] |
Writes the index file
|
[
"Writes",
"the",
"index",
"file"
] |
python
|
train
|
devopshq/crosspm
|
crosspm/helpers/output.py
|
https://github.com/devopshq/crosspm/blob/c831442ecfaa1d43c66cb148857096cea292c950/crosspm/helpers/output.py#L55-L68
|
def register_output_format(name):
"""
Load output format function to dictionary (decorator with this function name)
"""
def check_decorator(fn):
_output_format_map[name] = fn
def wrapper(*args, **kwargs):
return fn(*args, **kwargs)
return wrapper
return check_decorator
|
[
"def",
"register_output_format",
"(",
"name",
")",
":",
"def",
"check_decorator",
"(",
"fn",
")",
":",
"_output_format_map",
"[",
"name",
"]",
"=",
"fn",
"def",
"wrapper",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"fn",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"wrapper",
"return",
"check_decorator"
] |
Load output format function to dictionary (decorator with this function name)
|
[
"Load",
"output",
"format",
"function",
"to",
"dictionary",
"(",
"decorator",
"with",
"this",
"function",
"name",
")"
] |
python
|
train
|
junaruga/rpm-py-installer
|
install.py
|
https://github.com/junaruga/rpm-py-installer/blob/12f45feb0ba533dec8d0d16ef1e9b7fb8cfbd4ed/install.py#L1282-L1284
|
def create_installer(self, rpm_py_version, **kwargs):
"""Create Installer object."""
return DebianInstaller(rpm_py_version, self.python, self.rpm, **kwargs)
|
[
"def",
"create_installer",
"(",
"self",
",",
"rpm_py_version",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"DebianInstaller",
"(",
"rpm_py_version",
",",
"self",
".",
"python",
",",
"self",
".",
"rpm",
",",
"*",
"*",
"kwargs",
")"
] |
Create Installer object.
|
[
"Create",
"Installer",
"object",
"."
] |
python
|
train
|
redhat-cip/python-dciclient
|
dciclient/v1/shell_commands/job.py
|
https://github.com/redhat-cip/python-dciclient/blob/a4aa5899062802bbe4c30a075d8447f8d222d214/dciclient/v1/shell_commands/job.py#L314-L335
|
def file_upload(context, id, name, path, jobstate_id, test_id, mime):
"""file_upload(context, id, path)
Upload a file in a job
>>> dcictl job-upload-file [OPTIONS]
:param string id: ID of the job to attach file to [required]
:param string name: Name of the file [required]
:param string path: Path to the file to upload [required]
:param string jobstate_id: ID of the jobstate to attach the file
:param string test_id: ID of the test if the file is a test result
:param string mime: The mime type of the file
"""
result = dci_file.create_with_stream(context,
name=name,
job_id=id,
file_path=path,
jobstate_id=jobstate_id,
test_id=test_id,
mime=mime)
utils.format_output(result, context.format)
|
[
"def",
"file_upload",
"(",
"context",
",",
"id",
",",
"name",
",",
"path",
",",
"jobstate_id",
",",
"test_id",
",",
"mime",
")",
":",
"result",
"=",
"dci_file",
".",
"create_with_stream",
"(",
"context",
",",
"name",
"=",
"name",
",",
"job_id",
"=",
"id",
",",
"file_path",
"=",
"path",
",",
"jobstate_id",
"=",
"jobstate_id",
",",
"test_id",
"=",
"test_id",
",",
"mime",
"=",
"mime",
")",
"utils",
".",
"format_output",
"(",
"result",
",",
"context",
".",
"format",
")"
] |
file_upload(context, id, path)
Upload a file in a job
>>> dcictl job-upload-file [OPTIONS]
:param string id: ID of the job to attach file to [required]
:param string name: Name of the file [required]
:param string path: Path to the file to upload [required]
:param string jobstate_id: ID of the jobstate to attach the file
:param string test_id: ID of the test if the file is a test result
:param string mime: The mime type of the file
|
[
"file_upload",
"(",
"context",
"id",
"path",
")"
] |
python
|
train
|
sentinel-hub/sentinelhub-py
|
sentinelhub/ogc.py
|
https://github.com/sentinel-hub/sentinelhub-py/blob/08a83b7f1e289187159a643336995d8369860fea/sentinelhub/ogc.py#L524-L558
|
def _fetch_features(self):
"""Collects data from WFS service
:return: dictionary containing info about product tiles
:rtype: dict
"""
if self.feature_offset is None:
return
main_url = '{}{}/{}?'.format(self.base_url, ServiceType.WFS.value, self.instance_id)
params = {'SERVICE': ServiceType.WFS.value,
'REQUEST': 'GetFeature',
'TYPENAMES': DataSource.get_wfs_typename(self.data_source),
'BBOX': str(self.bbox.reverse()) if self.bbox.crs is CRS.WGS84 else str(self.bbox),
'OUTPUTFORMAT': MimeType.get_string(MimeType.JSON),
'SRSNAME': CRS.ogc_string(self.bbox.crs),
'TIME': '{}/{}'.format(self.time_interval[0], self.time_interval[1]),
'MAXCC': 100.0 * self.maxcc,
'MAXFEATURES': SHConfig().max_wfs_records_per_query,
'FEATURE_OFFSET': self.feature_offset}
url = main_url + urlencode(params)
LOGGER.debug("URL=%s", url)
response = get_json(url)
is_sentinel1 = self.data_source.is_sentinel1()
for tile_info in response["features"]:
if not is_sentinel1 or self._sentinel1_product_check(tile_info['properties']['id'], self.data_source):
self.tile_list.append(tile_info)
if len(response["features"]) < SHConfig().max_wfs_records_per_query:
self.feature_offset = None
else:
self.feature_offset += SHConfig().max_wfs_records_per_query
|
[
"def",
"_fetch_features",
"(",
"self",
")",
":",
"if",
"self",
".",
"feature_offset",
"is",
"None",
":",
"return",
"main_url",
"=",
"'{}{}/{}?'",
".",
"format",
"(",
"self",
".",
"base_url",
",",
"ServiceType",
".",
"WFS",
".",
"value",
",",
"self",
".",
"instance_id",
")",
"params",
"=",
"{",
"'SERVICE'",
":",
"ServiceType",
".",
"WFS",
".",
"value",
",",
"'REQUEST'",
":",
"'GetFeature'",
",",
"'TYPENAMES'",
":",
"DataSource",
".",
"get_wfs_typename",
"(",
"self",
".",
"data_source",
")",
",",
"'BBOX'",
":",
"str",
"(",
"self",
".",
"bbox",
".",
"reverse",
"(",
")",
")",
"if",
"self",
".",
"bbox",
".",
"crs",
"is",
"CRS",
".",
"WGS84",
"else",
"str",
"(",
"self",
".",
"bbox",
")",
",",
"'OUTPUTFORMAT'",
":",
"MimeType",
".",
"get_string",
"(",
"MimeType",
".",
"JSON",
")",
",",
"'SRSNAME'",
":",
"CRS",
".",
"ogc_string",
"(",
"self",
".",
"bbox",
".",
"crs",
")",
",",
"'TIME'",
":",
"'{}/{}'",
".",
"format",
"(",
"self",
".",
"time_interval",
"[",
"0",
"]",
",",
"self",
".",
"time_interval",
"[",
"1",
"]",
")",
",",
"'MAXCC'",
":",
"100.0",
"*",
"self",
".",
"maxcc",
",",
"'MAXFEATURES'",
":",
"SHConfig",
"(",
")",
".",
"max_wfs_records_per_query",
",",
"'FEATURE_OFFSET'",
":",
"self",
".",
"feature_offset",
"}",
"url",
"=",
"main_url",
"+",
"urlencode",
"(",
"params",
")",
"LOGGER",
".",
"debug",
"(",
"\"URL=%s\"",
",",
"url",
")",
"response",
"=",
"get_json",
"(",
"url",
")",
"is_sentinel1",
"=",
"self",
".",
"data_source",
".",
"is_sentinel1",
"(",
")",
"for",
"tile_info",
"in",
"response",
"[",
"\"features\"",
"]",
":",
"if",
"not",
"is_sentinel1",
"or",
"self",
".",
"_sentinel1_product_check",
"(",
"tile_info",
"[",
"'properties'",
"]",
"[",
"'id'",
"]",
",",
"self",
".",
"data_source",
")",
":",
"self",
".",
"tile_list",
".",
"append",
"(",
"tile_info",
")",
"if",
"len",
"(",
"response",
"[",
"\"features\"",
"]",
")",
"<",
"SHConfig",
"(",
")",
".",
"max_wfs_records_per_query",
":",
"self",
".",
"feature_offset",
"=",
"None",
"else",
":",
"self",
".",
"feature_offset",
"+=",
"SHConfig",
"(",
")",
".",
"max_wfs_records_per_query"
] |
Collects data from WFS service
:return: dictionary containing info about product tiles
:rtype: dict
|
[
"Collects",
"data",
"from",
"WFS",
"service"
] |
python
|
train
|
nabetama/slacky
|
slacky/rest/rest.py
|
https://github.com/nabetama/slacky/blob/dde62ce49af9b8f581729c36d2ac790310b570e4/slacky/rest/rest.py#L552-L560
|
def set_purpose(self, group_name, purpose):
""" https://api.slack.com/methods/groups.setPurpose
"""
group_id = self.get_group_id(group_name)
self.params.update({
'channel': group_id,
'purpose': purpose,
})
return FromUrl('https://slack.com/api/groups.setPurpose', self._requests)(data=self.params).post()
|
[
"def",
"set_purpose",
"(",
"self",
",",
"group_name",
",",
"purpose",
")",
":",
"group_id",
"=",
"self",
".",
"get_group_id",
"(",
"group_name",
")",
"self",
".",
"params",
".",
"update",
"(",
"{",
"'channel'",
":",
"group_id",
",",
"'purpose'",
":",
"purpose",
",",
"}",
")",
"return",
"FromUrl",
"(",
"'https://slack.com/api/groups.setPurpose'",
",",
"self",
".",
"_requests",
")",
"(",
"data",
"=",
"self",
".",
"params",
")",
".",
"post",
"(",
")"
] |
https://api.slack.com/methods/groups.setPurpose
|
[
"https",
":",
"//",
"api",
".",
"slack",
".",
"com",
"/",
"methods",
"/",
"groups",
".",
"setPurpose"
] |
python
|
train
|
MisterY/price-database
|
pricedb/app.py
|
https://github.com/MisterY/price-database/blob/b4fd366b7763891c690fe3000b8840e656da023e/pricedb/app.py#L174-L185
|
def get_prices_on(self, on_date: str, namespace: str, symbol: str):
""" Returns the latest price on the date """
repo = self.get_price_repository()
query = (
repo.query.filter(dal.Price.namespace == namespace)
.filter(dal.Price.symbol == symbol)
.filter(dal.Price.date == on_date)
.order_by(dal.Price.time.desc())
)
result = query.first()
# logging.debug(result)
return result
|
[
"def",
"get_prices_on",
"(",
"self",
",",
"on_date",
":",
"str",
",",
"namespace",
":",
"str",
",",
"symbol",
":",
"str",
")",
":",
"repo",
"=",
"self",
".",
"get_price_repository",
"(",
")",
"query",
"=",
"(",
"repo",
".",
"query",
".",
"filter",
"(",
"dal",
".",
"Price",
".",
"namespace",
"==",
"namespace",
")",
".",
"filter",
"(",
"dal",
".",
"Price",
".",
"symbol",
"==",
"symbol",
")",
".",
"filter",
"(",
"dal",
".",
"Price",
".",
"date",
"==",
"on_date",
")",
".",
"order_by",
"(",
"dal",
".",
"Price",
".",
"time",
".",
"desc",
"(",
")",
")",
")",
"result",
"=",
"query",
".",
"first",
"(",
")",
"# logging.debug(result)",
"return",
"result"
] |
Returns the latest price on the date
|
[
"Returns",
"the",
"latest",
"price",
"on",
"the",
"date"
] |
python
|
test
|
apache/incubator-mxnet
|
python/mxnet/recordio.py
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/recordio.py#L268-L276
|
def seek(self, idx):
"""Sets the current read pointer position.
This function is internally called by `read_idx(idx)` to find the current
reader pointer position. It doesn't return anything."""
assert not self.writable
self._check_pid(allow_reset=True)
pos = ctypes.c_size_t(self.idx[idx])
check_call(_LIB.MXRecordIOReaderSeek(self.handle, pos))
|
[
"def",
"seek",
"(",
"self",
",",
"idx",
")",
":",
"assert",
"not",
"self",
".",
"writable",
"self",
".",
"_check_pid",
"(",
"allow_reset",
"=",
"True",
")",
"pos",
"=",
"ctypes",
".",
"c_size_t",
"(",
"self",
".",
"idx",
"[",
"idx",
"]",
")",
"check_call",
"(",
"_LIB",
".",
"MXRecordIOReaderSeek",
"(",
"self",
".",
"handle",
",",
"pos",
")",
")"
] |
Sets the current read pointer position.
This function is internally called by `read_idx(idx)` to find the current
reader pointer position. It doesn't return anything.
|
[
"Sets",
"the",
"current",
"read",
"pointer",
"position",
"."
] |
python
|
train
|
JarryShaw/PyPCAPKit
|
src/const/ipv6/qs_function.py
|
https://github.com/JarryShaw/PyPCAPKit/blob/c7f0da9aebc2cf210bf8f8b912f7d3cbb98ca10e/src/const/ipv6/qs_function.py#L16-L22
|
def get(key, default=-1):
"""Backport support for original codes."""
if isinstance(key, int):
return QS_Function(key)
if key not in QS_Function._member_map_:
extend_enum(QS_Function, key, default)
return QS_Function[key]
|
[
"def",
"get",
"(",
"key",
",",
"default",
"=",
"-",
"1",
")",
":",
"if",
"isinstance",
"(",
"key",
",",
"int",
")",
":",
"return",
"QS_Function",
"(",
"key",
")",
"if",
"key",
"not",
"in",
"QS_Function",
".",
"_member_map_",
":",
"extend_enum",
"(",
"QS_Function",
",",
"key",
",",
"default",
")",
"return",
"QS_Function",
"[",
"key",
"]"
] |
Backport support for original codes.
|
[
"Backport",
"support",
"for",
"original",
"codes",
"."
] |
python
|
train
|
agoragames/haigha
|
haigha/connections/rabbit_connection.py
|
https://github.com/agoragames/haigha/blob/7b004e1c0316ec14b94fec1c54554654c38b1a25/haigha/connections/rabbit_connection.py#L227-L238
|
def _recv_nack(self, method_frame):
'''Receive a nack from the broker.'''
if self._nack_listener:
delivery_tag = method_frame.args.read_longlong()
multiple, requeue = method_frame.args.read_bits(2)
if multiple:
while self._last_ack_id < delivery_tag:
self._last_ack_id += 1
self._nack_listener(self._last_ack_id, requeue)
else:
self._last_ack_id = delivery_tag
self._nack_listener(self._last_ack_id, requeue)
|
[
"def",
"_recv_nack",
"(",
"self",
",",
"method_frame",
")",
":",
"if",
"self",
".",
"_nack_listener",
":",
"delivery_tag",
"=",
"method_frame",
".",
"args",
".",
"read_longlong",
"(",
")",
"multiple",
",",
"requeue",
"=",
"method_frame",
".",
"args",
".",
"read_bits",
"(",
"2",
")",
"if",
"multiple",
":",
"while",
"self",
".",
"_last_ack_id",
"<",
"delivery_tag",
":",
"self",
".",
"_last_ack_id",
"+=",
"1",
"self",
".",
"_nack_listener",
"(",
"self",
".",
"_last_ack_id",
",",
"requeue",
")",
"else",
":",
"self",
".",
"_last_ack_id",
"=",
"delivery_tag",
"self",
".",
"_nack_listener",
"(",
"self",
".",
"_last_ack_id",
",",
"requeue",
")"
] |
Receive a nack from the broker.
|
[
"Receive",
"a",
"nack",
"from",
"the",
"broker",
"."
] |
python
|
train
|
synw/dataswim
|
dataswim/maps/__init__.py
|
https://github.com/synw/dataswim/blob/4a4a53f80daa7cd8e8409d76a19ce07296269da2/dataswim/maps/__init__.py#L94-L101
|
def map(self, lat, long, zoom=13, tiles="map"):
"""
Sets a map
"""
try:
self.dsmap = self._map(lat, long, zoom, tiles)
except Exception as e:
self.err(e, self.map, "Can not get map")
|
[
"def",
"map",
"(",
"self",
",",
"lat",
",",
"long",
",",
"zoom",
"=",
"13",
",",
"tiles",
"=",
"\"map\"",
")",
":",
"try",
":",
"self",
".",
"dsmap",
"=",
"self",
".",
"_map",
"(",
"lat",
",",
"long",
",",
"zoom",
",",
"tiles",
")",
"except",
"Exception",
"as",
"e",
":",
"self",
".",
"err",
"(",
"e",
",",
"self",
".",
"map",
",",
"\"Can not get map\"",
")"
] |
Sets a map
|
[
"Sets",
"a",
"map"
] |
python
|
train
|
Zsailer/pandas_flavor
|
pandas_flavor/register.py
|
https://github.com/Zsailer/pandas_flavor/blob/1953aeee09424300d69a11dd2ffd3460a806fb65/pandas_flavor/register.py#L38-L57
|
def register_series_method(method):
"""Register a function as a method attached to the Pandas Series.
"""
def inner(*args, **kwargs):
class AccessorMethod(object):
__doc__ = method.__doc__
def __init__(self, pandas_obj):
self._obj = pandas_obj
@wraps(method)
def __call__(self, *args, **kwargs):
return method(self._obj, *args, **kwargs)
register_series_accessor(method.__name__)(AccessorMethod)
return method
return inner()
|
[
"def",
"register_series_method",
"(",
"method",
")",
":",
"def",
"inner",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"class",
"AccessorMethod",
"(",
"object",
")",
":",
"__doc__",
"=",
"method",
".",
"__doc__",
"def",
"__init__",
"(",
"self",
",",
"pandas_obj",
")",
":",
"self",
".",
"_obj",
"=",
"pandas_obj",
"@",
"wraps",
"(",
"method",
")",
"def",
"__call__",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"method",
"(",
"self",
".",
"_obj",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"register_series_accessor",
"(",
"method",
".",
"__name__",
")",
"(",
"AccessorMethod",
")",
"return",
"method",
"return",
"inner",
"(",
")"
] |
Register a function as a method attached to the Pandas Series.
|
[
"Register",
"a",
"function",
"as",
"a",
"method",
"attached",
"to",
"the",
"Pandas",
"Series",
"."
] |
python
|
train
|
pycontribs/pyrax
|
pyrax/autoscale.py
|
https://github.com/pycontribs/pyrax/blob/9ddfd5064b3a292d7337906f3b2d5dce95b50b99/pyrax/autoscale.py#L669-L683
|
def replace_webhook(self, scaling_group, policy, webhook, name,
metadata=None):
"""
Replace an existing webhook. All of the attributes must be specified.
If you wish to delete any of the optional attributes, pass them in as
None.
"""
uri = "/%s/%s/policies/%s/webhooks/%s" % (self.uri_base,
utils.get_id(scaling_group), utils.get_id(policy),
utils.get_id(webhook))
group_id = utils.get_id(scaling_group)
policy_id = utils.get_id(policy)
webhook_id = utils.get_id(webhook)
body = self._create_webhook_body(name, metadata=metadata)
resp, resp_body = self.api.method_put(uri, body=body)
|
[
"def",
"replace_webhook",
"(",
"self",
",",
"scaling_group",
",",
"policy",
",",
"webhook",
",",
"name",
",",
"metadata",
"=",
"None",
")",
":",
"uri",
"=",
"\"/%s/%s/policies/%s/webhooks/%s\"",
"%",
"(",
"self",
".",
"uri_base",
",",
"utils",
".",
"get_id",
"(",
"scaling_group",
")",
",",
"utils",
".",
"get_id",
"(",
"policy",
")",
",",
"utils",
".",
"get_id",
"(",
"webhook",
")",
")",
"group_id",
"=",
"utils",
".",
"get_id",
"(",
"scaling_group",
")",
"policy_id",
"=",
"utils",
".",
"get_id",
"(",
"policy",
")",
"webhook_id",
"=",
"utils",
".",
"get_id",
"(",
"webhook",
")",
"body",
"=",
"self",
".",
"_create_webhook_body",
"(",
"name",
",",
"metadata",
"=",
"metadata",
")",
"resp",
",",
"resp_body",
"=",
"self",
".",
"api",
".",
"method_put",
"(",
"uri",
",",
"body",
"=",
"body",
")"
] |
Replace an existing webhook. All of the attributes must be specified.
If you wish to delete any of the optional attributes, pass them in as
None.
|
[
"Replace",
"an",
"existing",
"webhook",
".",
"All",
"of",
"the",
"attributes",
"must",
"be",
"specified",
".",
"If",
"you",
"wish",
"to",
"delete",
"any",
"of",
"the",
"optional",
"attributes",
"pass",
"them",
"in",
"as",
"None",
"."
] |
python
|
train
|
pydata/xarray
|
xarray/tutorial.py
|
https://github.com/pydata/xarray/blob/6d93a95d05bdbfc33fff24064f67d29dd891ab58/xarray/tutorial.py#L26-L89
|
def open_dataset(name, cache=True, cache_dir=_default_cache_dir,
github_url='https://github.com/pydata/xarray-data',
branch='master', **kws):
"""
Load a dataset from the online repository (requires internet).
If a local copy is found then always use that to avoid network traffic.
Parameters
----------
name : str
Name of the netcdf file containing the dataset
ie. 'air_temperature'
cache_dir : string, optional
The directory in which to search for and write cached data.
cache : boolean, optional
If True, then cache data locally for use on subsequent calls
github_url : string
Github repository where the data is stored
branch : string
The git branch to download from
kws : dict, optional
Passed to xarray.open_dataset
See Also
--------
xarray.open_dataset
"""
longdir = _os.path.expanduser(cache_dir)
fullname = name + '.nc'
localfile = _os.sep.join((longdir, fullname))
md5name = name + '.md5'
md5file = _os.sep.join((longdir, md5name))
if not _os.path.exists(localfile):
# This will always leave this directory on disk.
# May want to add an option to remove it.
if not _os.path.isdir(longdir):
_os.mkdir(longdir)
url = '/'.join((github_url, 'raw', branch, fullname))
urlretrieve(url, localfile)
url = '/'.join((github_url, 'raw', branch, md5name))
urlretrieve(url, md5file)
localmd5 = file_md5_checksum(localfile)
with open(md5file, 'r') as f:
remotemd5 = f.read()
if localmd5 != remotemd5:
_os.remove(localfile)
msg = """
MD5 checksum does not match, try downloading dataset again.
"""
raise IOError(msg)
ds = _open_dataset(localfile, **kws)
if not cache:
ds = ds.load()
_os.remove(localfile)
return ds
|
[
"def",
"open_dataset",
"(",
"name",
",",
"cache",
"=",
"True",
",",
"cache_dir",
"=",
"_default_cache_dir",
",",
"github_url",
"=",
"'https://github.com/pydata/xarray-data'",
",",
"branch",
"=",
"'master'",
",",
"*",
"*",
"kws",
")",
":",
"longdir",
"=",
"_os",
".",
"path",
".",
"expanduser",
"(",
"cache_dir",
")",
"fullname",
"=",
"name",
"+",
"'.nc'",
"localfile",
"=",
"_os",
".",
"sep",
".",
"join",
"(",
"(",
"longdir",
",",
"fullname",
")",
")",
"md5name",
"=",
"name",
"+",
"'.md5'",
"md5file",
"=",
"_os",
".",
"sep",
".",
"join",
"(",
"(",
"longdir",
",",
"md5name",
")",
")",
"if",
"not",
"_os",
".",
"path",
".",
"exists",
"(",
"localfile",
")",
":",
"# This will always leave this directory on disk.",
"# May want to add an option to remove it.",
"if",
"not",
"_os",
".",
"path",
".",
"isdir",
"(",
"longdir",
")",
":",
"_os",
".",
"mkdir",
"(",
"longdir",
")",
"url",
"=",
"'/'",
".",
"join",
"(",
"(",
"github_url",
",",
"'raw'",
",",
"branch",
",",
"fullname",
")",
")",
"urlretrieve",
"(",
"url",
",",
"localfile",
")",
"url",
"=",
"'/'",
".",
"join",
"(",
"(",
"github_url",
",",
"'raw'",
",",
"branch",
",",
"md5name",
")",
")",
"urlretrieve",
"(",
"url",
",",
"md5file",
")",
"localmd5",
"=",
"file_md5_checksum",
"(",
"localfile",
")",
"with",
"open",
"(",
"md5file",
",",
"'r'",
")",
"as",
"f",
":",
"remotemd5",
"=",
"f",
".",
"read",
"(",
")",
"if",
"localmd5",
"!=",
"remotemd5",
":",
"_os",
".",
"remove",
"(",
"localfile",
")",
"msg",
"=",
"\"\"\"\n MD5 checksum does not match, try downloading dataset again.\n \"\"\"",
"raise",
"IOError",
"(",
"msg",
")",
"ds",
"=",
"_open_dataset",
"(",
"localfile",
",",
"*",
"*",
"kws",
")",
"if",
"not",
"cache",
":",
"ds",
"=",
"ds",
".",
"load",
"(",
")",
"_os",
".",
"remove",
"(",
"localfile",
")",
"return",
"ds"
] |
Load a dataset from the online repository (requires internet).
If a local copy is found then always use that to avoid network traffic.
Parameters
----------
name : str
Name of the netcdf file containing the dataset
ie. 'air_temperature'
cache_dir : string, optional
The directory in which to search for and write cached data.
cache : boolean, optional
If True, then cache data locally for use on subsequent calls
github_url : string
Github repository where the data is stored
branch : string
The git branch to download from
kws : dict, optional
Passed to xarray.open_dataset
See Also
--------
xarray.open_dataset
|
[
"Load",
"a",
"dataset",
"from",
"the",
"online",
"repository",
"(",
"requires",
"internet",
")",
"."
] |
python
|
train
|
titusjan/argos
|
argos/repo/repotreeview.py
|
https://github.com/titusjan/argos/blob/20d0a3cae26c36ea789a5d219c02ca7df21279dd/argos/repo/repotreeview.py#L265-L305
|
def reloadFileOfCurrentItem(self, rtiRegItem=None):
""" Finds the repo tree item that holds the file of the current item and reloads it.
Reloading is done by removing the repo tree item and inserting a new one.
The new item will have by of type rtiRegItem.cls. If rtiRegItem is None (the default),
the new rtiClass will be the same as the old one.
The rtiRegItem.cls will be imported. If this fails the old class will be used, and a
warning will be logged.
"""
logger.debug("reloadFileOfCurrentItem, rtiClass={}".format(rtiRegItem))
currentIndex = self.getRowCurrentIndex()
if not currentIndex.isValid():
return
currentItem, _ = self.getCurrentItem()
oldPath = currentItem.nodePath
fileRtiIndex = self.model().findFileRtiIndex(currentIndex)
isExpanded = self.isExpanded(fileRtiIndex)
if rtiRegItem is None:
rtiClass = None
else:
rtiRegItem.tryImportClass()
rtiClass = rtiRegItem.cls
newRtiIndex = self.model().reloadFileAtIndex(fileRtiIndex, rtiClass=rtiClass)
try:
# Expand and select the name with the old path
_lastItem, lastIndex = self.expandPath(oldPath)
self.setCurrentIndex(lastIndex)
return lastIndex
except Exception as ex:
# The old path may not exist anymore. In that case select file RTI
logger.warning("Unable to select {!r} beause of: {}".format(oldPath, ex))
self.setExpanded(newRtiIndex, isExpanded)
self.setCurrentIndex(newRtiIndex)
return newRtiIndex
|
[
"def",
"reloadFileOfCurrentItem",
"(",
"self",
",",
"rtiRegItem",
"=",
"None",
")",
":",
"logger",
".",
"debug",
"(",
"\"reloadFileOfCurrentItem, rtiClass={}\"",
".",
"format",
"(",
"rtiRegItem",
")",
")",
"currentIndex",
"=",
"self",
".",
"getRowCurrentIndex",
"(",
")",
"if",
"not",
"currentIndex",
".",
"isValid",
"(",
")",
":",
"return",
"currentItem",
",",
"_",
"=",
"self",
".",
"getCurrentItem",
"(",
")",
"oldPath",
"=",
"currentItem",
".",
"nodePath",
"fileRtiIndex",
"=",
"self",
".",
"model",
"(",
")",
".",
"findFileRtiIndex",
"(",
"currentIndex",
")",
"isExpanded",
"=",
"self",
".",
"isExpanded",
"(",
"fileRtiIndex",
")",
"if",
"rtiRegItem",
"is",
"None",
":",
"rtiClass",
"=",
"None",
"else",
":",
"rtiRegItem",
".",
"tryImportClass",
"(",
")",
"rtiClass",
"=",
"rtiRegItem",
".",
"cls",
"newRtiIndex",
"=",
"self",
".",
"model",
"(",
")",
".",
"reloadFileAtIndex",
"(",
"fileRtiIndex",
",",
"rtiClass",
"=",
"rtiClass",
")",
"try",
":",
"# Expand and select the name with the old path",
"_lastItem",
",",
"lastIndex",
"=",
"self",
".",
"expandPath",
"(",
"oldPath",
")",
"self",
".",
"setCurrentIndex",
"(",
"lastIndex",
")",
"return",
"lastIndex",
"except",
"Exception",
"as",
"ex",
":",
"# The old path may not exist anymore. In that case select file RTI",
"logger",
".",
"warning",
"(",
"\"Unable to select {!r} beause of: {}\"",
".",
"format",
"(",
"oldPath",
",",
"ex",
")",
")",
"self",
".",
"setExpanded",
"(",
"newRtiIndex",
",",
"isExpanded",
")",
"self",
".",
"setCurrentIndex",
"(",
"newRtiIndex",
")",
"return",
"newRtiIndex"
] |
Finds the repo tree item that holds the file of the current item and reloads it.
Reloading is done by removing the repo tree item and inserting a new one.
The new item will have by of type rtiRegItem.cls. If rtiRegItem is None (the default),
the new rtiClass will be the same as the old one.
The rtiRegItem.cls will be imported. If this fails the old class will be used, and a
warning will be logged.
|
[
"Finds",
"the",
"repo",
"tree",
"item",
"that",
"holds",
"the",
"file",
"of",
"the",
"current",
"item",
"and",
"reloads",
"it",
".",
"Reloading",
"is",
"done",
"by",
"removing",
"the",
"repo",
"tree",
"item",
"and",
"inserting",
"a",
"new",
"one",
"."
] |
python
|
train
|
jaysonsantos/python-binary-memcached
|
bmemcached/client/distributed.py
|
https://github.com/jaysonsantos/python-binary-memcached/blob/6a792829349c69204d9c5045e5c34b4231216dd6/bmemcached/client/distributed.py#L61-L86
|
def set_multi(self, mappings, time=0, compress_level=-1):
"""
Set multiple keys with it's values on server.
:param mappings: A dict with keys/values
:type mappings: dict
:param time: Time in seconds that your key will expire.
:type time: int
:param compress_level: How much to compress.
0 = no compression, 1 = fastest, 9 = slowest but best,
-1 = default compression level.
:type compress_level: int
:return: True in case of success and False in case of failure
:rtype: bool
"""
returns = []
if not mappings:
return False
server_mappings = defaultdict(dict)
for key, value in mappings.items():
server_key = self._get_server(key)
server_mappings[server_key].update([(key, value)])
for server, m in server_mappings.items():
returns.append(server.set_multi(m, time, compress_level))
return all(returns)
|
[
"def",
"set_multi",
"(",
"self",
",",
"mappings",
",",
"time",
"=",
"0",
",",
"compress_level",
"=",
"-",
"1",
")",
":",
"returns",
"=",
"[",
"]",
"if",
"not",
"mappings",
":",
"return",
"False",
"server_mappings",
"=",
"defaultdict",
"(",
"dict",
")",
"for",
"key",
",",
"value",
"in",
"mappings",
".",
"items",
"(",
")",
":",
"server_key",
"=",
"self",
".",
"_get_server",
"(",
"key",
")",
"server_mappings",
"[",
"server_key",
"]",
".",
"update",
"(",
"[",
"(",
"key",
",",
"value",
")",
"]",
")",
"for",
"server",
",",
"m",
"in",
"server_mappings",
".",
"items",
"(",
")",
":",
"returns",
".",
"append",
"(",
"server",
".",
"set_multi",
"(",
"m",
",",
"time",
",",
"compress_level",
")",
")",
"return",
"all",
"(",
"returns",
")"
] |
Set multiple keys with it's values on server.
:param mappings: A dict with keys/values
:type mappings: dict
:param time: Time in seconds that your key will expire.
:type time: int
:param compress_level: How much to compress.
0 = no compression, 1 = fastest, 9 = slowest but best,
-1 = default compression level.
:type compress_level: int
:return: True in case of success and False in case of failure
:rtype: bool
|
[
"Set",
"multiple",
"keys",
"with",
"it",
"s",
"values",
"on",
"server",
"."
] |
python
|
train
|
apple/turicreate
|
src/external/coremltools_wrap/coremltools/coremltools/converters/keras/_layers.py
|
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/converters/keras/_layers.py#L107-L117
|
def _same_elements_per_channel(x):
"""
Test if a 3D (H,W,C) matrix x has the same element in each (H,W) matrix for each channel
"""
eps = 1e-5
dims = x.shape
for c in range(dims[-1]):
xc = x[:,:,c].flatten()
if not np.all(np.absolute(xc - xc[0]) < eps):
return False
return True
|
[
"def",
"_same_elements_per_channel",
"(",
"x",
")",
":",
"eps",
"=",
"1e-5",
"dims",
"=",
"x",
".",
"shape",
"for",
"c",
"in",
"range",
"(",
"dims",
"[",
"-",
"1",
"]",
")",
":",
"xc",
"=",
"x",
"[",
":",
",",
":",
",",
"c",
"]",
".",
"flatten",
"(",
")",
"if",
"not",
"np",
".",
"all",
"(",
"np",
".",
"absolute",
"(",
"xc",
"-",
"xc",
"[",
"0",
"]",
")",
"<",
"eps",
")",
":",
"return",
"False",
"return",
"True"
] |
Test if a 3D (H,W,C) matrix x has the same element in each (H,W) matrix for each channel
|
[
"Test",
"if",
"a",
"3D",
"(",
"H",
"W",
"C",
")",
"matrix",
"x",
"has",
"the",
"same",
"element",
"in",
"each",
"(",
"H",
"W",
")",
"matrix",
"for",
"each",
"channel"
] |
python
|
train
|
sirfz/tesserocr
|
setup.py
|
https://github.com/sirfz/tesserocr/blob/052fb5d7d4e1398c8a07958b389b37e1090fb897/setup.py#L80-L115
|
def package_config():
"""Use pkg-config to get library build parameters and tesseract version."""
p = subprocess.Popen(['pkg-config', '--exists', '--atleast-version={}'.format(_TESSERACT_MIN_VERSION),
'--print-errors', 'tesseract'],
stderr=subprocess.PIPE)
_, error = p.communicate()
if p.returncode != 0:
raise Exception(error)
p = subprocess.Popen(['pkg-config', '--libs', '--cflags', 'tesseract'], stdout=subprocess.PIPE)
output, _ = p.communicate()
flags = _read_string(output).strip().split()
p = subprocess.Popen(['pkg-config', '--libs', '--cflags', 'lept'], stdout=subprocess.PIPE)
output, _ = p.communicate()
flags2 = _read_string(output).strip().split()
options = {'-L': 'library_dirs',
'-I': 'include_dirs',
'-l': 'libraries'}
config = {}
import itertools
for f in itertools.chain(flags, flags2):
try:
opt = options[f[:2]]
except KeyError:
continue
val = f[2:]
if opt == 'include_dirs' and psplit(val)[1].strip(os.sep) in ('leptonica', 'tesseract'):
val = dirname(val)
config.setdefault(opt, set()).add(val)
config = {k: list(v) for k, v in config.items()}
p = subprocess.Popen(['pkg-config', '--modversion', 'tesseract'], stdout=subprocess.PIPE)
version, _ = p.communicate()
version = _read_string(version).strip()
_LOGGER.info("Supporting tesseract v{}".format(version))
config['cython_compile_time_env'] = {'TESSERACT_VERSION': version_to_int(version)}
_LOGGER.info("Configs from pkg-config: {}".format(config))
return config
|
[
"def",
"package_config",
"(",
")",
":",
"p",
"=",
"subprocess",
".",
"Popen",
"(",
"[",
"'pkg-config'",
",",
"'--exists'",
",",
"'--atleast-version={}'",
".",
"format",
"(",
"_TESSERACT_MIN_VERSION",
")",
",",
"'--print-errors'",
",",
"'tesseract'",
"]",
",",
"stderr",
"=",
"subprocess",
".",
"PIPE",
")",
"_",
",",
"error",
"=",
"p",
".",
"communicate",
"(",
")",
"if",
"p",
".",
"returncode",
"!=",
"0",
":",
"raise",
"Exception",
"(",
"error",
")",
"p",
"=",
"subprocess",
".",
"Popen",
"(",
"[",
"'pkg-config'",
",",
"'--libs'",
",",
"'--cflags'",
",",
"'tesseract'",
"]",
",",
"stdout",
"=",
"subprocess",
".",
"PIPE",
")",
"output",
",",
"_",
"=",
"p",
".",
"communicate",
"(",
")",
"flags",
"=",
"_read_string",
"(",
"output",
")",
".",
"strip",
"(",
")",
".",
"split",
"(",
")",
"p",
"=",
"subprocess",
".",
"Popen",
"(",
"[",
"'pkg-config'",
",",
"'--libs'",
",",
"'--cflags'",
",",
"'lept'",
"]",
",",
"stdout",
"=",
"subprocess",
".",
"PIPE",
")",
"output",
",",
"_",
"=",
"p",
".",
"communicate",
"(",
")",
"flags2",
"=",
"_read_string",
"(",
"output",
")",
".",
"strip",
"(",
")",
".",
"split",
"(",
")",
"options",
"=",
"{",
"'-L'",
":",
"'library_dirs'",
",",
"'-I'",
":",
"'include_dirs'",
",",
"'-l'",
":",
"'libraries'",
"}",
"config",
"=",
"{",
"}",
"import",
"itertools",
"for",
"f",
"in",
"itertools",
".",
"chain",
"(",
"flags",
",",
"flags2",
")",
":",
"try",
":",
"opt",
"=",
"options",
"[",
"f",
"[",
":",
"2",
"]",
"]",
"except",
"KeyError",
":",
"continue",
"val",
"=",
"f",
"[",
"2",
":",
"]",
"if",
"opt",
"==",
"'include_dirs'",
"and",
"psplit",
"(",
"val",
")",
"[",
"1",
"]",
".",
"strip",
"(",
"os",
".",
"sep",
")",
"in",
"(",
"'leptonica'",
",",
"'tesseract'",
")",
":",
"val",
"=",
"dirname",
"(",
"val",
")",
"config",
".",
"setdefault",
"(",
"opt",
",",
"set",
"(",
")",
")",
".",
"add",
"(",
"val",
")",
"config",
"=",
"{",
"k",
":",
"list",
"(",
"v",
")",
"for",
"k",
",",
"v",
"in",
"config",
".",
"items",
"(",
")",
"}",
"p",
"=",
"subprocess",
".",
"Popen",
"(",
"[",
"'pkg-config'",
",",
"'--modversion'",
",",
"'tesseract'",
"]",
",",
"stdout",
"=",
"subprocess",
".",
"PIPE",
")",
"version",
",",
"_",
"=",
"p",
".",
"communicate",
"(",
")",
"version",
"=",
"_read_string",
"(",
"version",
")",
".",
"strip",
"(",
")",
"_LOGGER",
".",
"info",
"(",
"\"Supporting tesseract v{}\"",
".",
"format",
"(",
"version",
")",
")",
"config",
"[",
"'cython_compile_time_env'",
"]",
"=",
"{",
"'TESSERACT_VERSION'",
":",
"version_to_int",
"(",
"version",
")",
"}",
"_LOGGER",
".",
"info",
"(",
"\"Configs from pkg-config: {}\"",
".",
"format",
"(",
"config",
")",
")",
"return",
"config"
] |
Use pkg-config to get library build parameters and tesseract version.
|
[
"Use",
"pkg",
"-",
"config",
"to",
"get",
"library",
"build",
"parameters",
"and",
"tesseract",
"version",
"."
] |
python
|
train
|
ArduPilot/MAVProxy
|
MAVProxy/modules/mavproxy_adsb.py
|
https://github.com/ArduPilot/MAVProxy/blob/f50bdeff33064876f7dc8dc4683d278ff47f75d5/MAVProxy/modules/mavproxy_adsb.py#L154-L158
|
def get_v_distance(self, latlonalt1, latlonalt2):
'''get the horizontal distance between threat and vehicle'''
(lat1, lon1, alt1) = latlonalt1
(lat2, lon2, alt2) = latlonalt2
return alt2 - alt1
|
[
"def",
"get_v_distance",
"(",
"self",
",",
"latlonalt1",
",",
"latlonalt2",
")",
":",
"(",
"lat1",
",",
"lon1",
",",
"alt1",
")",
"=",
"latlonalt1",
"(",
"lat2",
",",
"lon2",
",",
"alt2",
")",
"=",
"latlonalt2",
"return",
"alt2",
"-",
"alt1"
] |
get the horizontal distance between threat and vehicle
|
[
"get",
"the",
"horizontal",
"distance",
"between",
"threat",
"and",
"vehicle"
] |
python
|
train
|
vinci1it2000/schedula
|
schedula/utils/dsp.py
|
https://github.com/vinci1it2000/schedula/blob/addb9fd685be81544b796c51383ac00a31543ce9/schedula/utils/dsp.py#L34-L71
|
def combine_dicts(*dicts, copy=False, base=None):
"""
Combines multiple dicts in one.
:param dicts:
A sequence of dicts.
:type dicts: dict
:param copy:
If True, it returns a deepcopy of input values.
:type copy: bool, optional
:param base:
Base dict where combine multiple dicts in one.
:type base: dict, optional
:return:
A unique dict.
:rtype: dict
Example::
>>> sorted(combine_dicts({'a': 3, 'c': 3}, {'a': 1, 'b': 2}).items())
[('a', 1), ('b', 2), ('c', 3)]
"""
if len(dicts) == 1 and base is None: # Only one input dict.
cd = dicts[0].copy()
else:
cd = {} if base is None else base # Initialize empty dict.
for d in dicts: # Combine dicts.
if d:
# noinspection PyTypeChecker
cd.update(d)
# Return combined dict.
return {k: _copy.deepcopy(v) for k, v in cd.items()} if copy else cd
|
[
"def",
"combine_dicts",
"(",
"*",
"dicts",
",",
"copy",
"=",
"False",
",",
"base",
"=",
"None",
")",
":",
"if",
"len",
"(",
"dicts",
")",
"==",
"1",
"and",
"base",
"is",
"None",
":",
"# Only one input dict.",
"cd",
"=",
"dicts",
"[",
"0",
"]",
".",
"copy",
"(",
")",
"else",
":",
"cd",
"=",
"{",
"}",
"if",
"base",
"is",
"None",
"else",
"base",
"# Initialize empty dict.",
"for",
"d",
"in",
"dicts",
":",
"# Combine dicts.",
"if",
"d",
":",
"# noinspection PyTypeChecker",
"cd",
".",
"update",
"(",
"d",
")",
"# Return combined dict.",
"return",
"{",
"k",
":",
"_copy",
".",
"deepcopy",
"(",
"v",
")",
"for",
"k",
",",
"v",
"in",
"cd",
".",
"items",
"(",
")",
"}",
"if",
"copy",
"else",
"cd"
] |
Combines multiple dicts in one.
:param dicts:
A sequence of dicts.
:type dicts: dict
:param copy:
If True, it returns a deepcopy of input values.
:type copy: bool, optional
:param base:
Base dict where combine multiple dicts in one.
:type base: dict, optional
:return:
A unique dict.
:rtype: dict
Example::
>>> sorted(combine_dicts({'a': 3, 'c': 3}, {'a': 1, 'b': 2}).items())
[('a', 1), ('b', 2), ('c', 3)]
|
[
"Combines",
"multiple",
"dicts",
"in",
"one",
"."
] |
python
|
train
|
clalancette/pycdlib
|
pycdlib/rockridge.py
|
https://github.com/clalancette/pycdlib/blob/1e7b77a809e905d67dc71e12d70e850be26b6233/pycdlib/rockridge.py#L1558-L1592
|
def parse(self, rrstr):
# type: (bytes) -> None
'''
Parse a Rock Ridge Time Stamp record out of a string.
Parameters:
rrstr - The string to parse the record out of.
Returns:
Nothing.
'''
if self._initialized:
raise pycdlibexception.PyCdlibInternalError('TF record already initialized!')
# We assume that the caller has already checked the su_entry_version,
# so we don't bother.
(su_len, su_entry_version_unused, self.time_flags,) = struct.unpack_from('=BBB', rrstr[:5], 2)
if su_len < 5:
raise pycdlibexception.PyCdlibInvalidISO('Not enough bytes in the TF record')
tflen = 7
if self.time_flags & (1 << 7):
tflen = 17
offset = 5
for index, fieldname in enumerate(self.FIELDNAMES):
if self.time_flags & (1 << index):
if tflen == 7:
setattr(self, fieldname, dates.DirectoryRecordDate())
elif tflen == 17:
setattr(self, fieldname, dates.VolumeDescriptorDate())
getattr(self, fieldname).parse(rrstr[offset:offset + tflen])
offset += tflen
self._initialized = True
|
[
"def",
"parse",
"(",
"self",
",",
"rrstr",
")",
":",
"# type: (bytes) -> None",
"if",
"self",
".",
"_initialized",
":",
"raise",
"pycdlibexception",
".",
"PyCdlibInternalError",
"(",
"'TF record already initialized!'",
")",
"# We assume that the caller has already checked the su_entry_version,",
"# so we don't bother.",
"(",
"su_len",
",",
"su_entry_version_unused",
",",
"self",
".",
"time_flags",
",",
")",
"=",
"struct",
".",
"unpack_from",
"(",
"'=BBB'",
",",
"rrstr",
"[",
":",
"5",
"]",
",",
"2",
")",
"if",
"su_len",
"<",
"5",
":",
"raise",
"pycdlibexception",
".",
"PyCdlibInvalidISO",
"(",
"'Not enough bytes in the TF record'",
")",
"tflen",
"=",
"7",
"if",
"self",
".",
"time_flags",
"&",
"(",
"1",
"<<",
"7",
")",
":",
"tflen",
"=",
"17",
"offset",
"=",
"5",
"for",
"index",
",",
"fieldname",
"in",
"enumerate",
"(",
"self",
".",
"FIELDNAMES",
")",
":",
"if",
"self",
".",
"time_flags",
"&",
"(",
"1",
"<<",
"index",
")",
":",
"if",
"tflen",
"==",
"7",
":",
"setattr",
"(",
"self",
",",
"fieldname",
",",
"dates",
".",
"DirectoryRecordDate",
"(",
")",
")",
"elif",
"tflen",
"==",
"17",
":",
"setattr",
"(",
"self",
",",
"fieldname",
",",
"dates",
".",
"VolumeDescriptorDate",
"(",
")",
")",
"getattr",
"(",
"self",
",",
"fieldname",
")",
".",
"parse",
"(",
"rrstr",
"[",
"offset",
":",
"offset",
"+",
"tflen",
"]",
")",
"offset",
"+=",
"tflen",
"self",
".",
"_initialized",
"=",
"True"
] |
Parse a Rock Ridge Time Stamp record out of a string.
Parameters:
rrstr - The string to parse the record out of.
Returns:
Nothing.
|
[
"Parse",
"a",
"Rock",
"Ridge",
"Time",
"Stamp",
"record",
"out",
"of",
"a",
"string",
"."
] |
python
|
train
|
facelessuser/pyspelling
|
pyspelling/filters/context.py
|
https://github.com/facelessuser/pyspelling/blob/c25d5292cc2687ad65891a12ead43f7182ca8bb3/pyspelling/filters/context.py#L48-L76
|
def setup(self):
"""Setup."""
self.context_visible_first = self.config['context_visible_first']
self.delimiters = []
self.escapes = None
self.line_endings = self.config['normalize_line_endings']
escapes = []
for delimiter in self.config['delimiters']:
if not isinstance(delimiter, dict):
continue
group = util.random_name_gen()
while (
group in delimiter['open'] or
group in delimiter['close'] or
group in delimiter.get('content', DEFAULT_CONTENT)
):
group = util.random_name_gen()
pattern = r'%s(?P<%s>%s)(?:%s|\Z)' % (
delimiter['open'],
group,
delimiter.get('content', DEFAULT_CONTENT),
delimiter['close']
)
self.delimiters.append((re.compile(pattern, re.M), group))
escapes = self.config['escapes']
if escapes:
self.escapes = re.compile(escapes)
|
[
"def",
"setup",
"(",
"self",
")",
":",
"self",
".",
"context_visible_first",
"=",
"self",
".",
"config",
"[",
"'context_visible_first'",
"]",
"self",
".",
"delimiters",
"=",
"[",
"]",
"self",
".",
"escapes",
"=",
"None",
"self",
".",
"line_endings",
"=",
"self",
".",
"config",
"[",
"'normalize_line_endings'",
"]",
"escapes",
"=",
"[",
"]",
"for",
"delimiter",
"in",
"self",
".",
"config",
"[",
"'delimiters'",
"]",
":",
"if",
"not",
"isinstance",
"(",
"delimiter",
",",
"dict",
")",
":",
"continue",
"group",
"=",
"util",
".",
"random_name_gen",
"(",
")",
"while",
"(",
"group",
"in",
"delimiter",
"[",
"'open'",
"]",
"or",
"group",
"in",
"delimiter",
"[",
"'close'",
"]",
"or",
"group",
"in",
"delimiter",
".",
"get",
"(",
"'content'",
",",
"DEFAULT_CONTENT",
")",
")",
":",
"group",
"=",
"util",
".",
"random_name_gen",
"(",
")",
"pattern",
"=",
"r'%s(?P<%s>%s)(?:%s|\\Z)'",
"%",
"(",
"delimiter",
"[",
"'open'",
"]",
",",
"group",
",",
"delimiter",
".",
"get",
"(",
"'content'",
",",
"DEFAULT_CONTENT",
")",
",",
"delimiter",
"[",
"'close'",
"]",
")",
"self",
".",
"delimiters",
".",
"append",
"(",
"(",
"re",
".",
"compile",
"(",
"pattern",
",",
"re",
".",
"M",
")",
",",
"group",
")",
")",
"escapes",
"=",
"self",
".",
"config",
"[",
"'escapes'",
"]",
"if",
"escapes",
":",
"self",
".",
"escapes",
"=",
"re",
".",
"compile",
"(",
"escapes",
")"
] |
Setup.
|
[
"Setup",
"."
] |
python
|
train
|