repo (string, len 7-55) | path (string, len 4-223) | url (string, len 87-315) | code (string, len 75-104k) | code_tokens (list) | docstring (string, len 1-46.9k) | docstring_tokens (list) | language (string, 1 class) | partition (string, 3 values) | avg_line_len (float64, 7.91-980)
---|---|---|---|---|---|---|---|---|---|
tensorflow/tensor2tensor | tensor2tensor/models/research/attention_lm_moe.py | https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/attention_lm_moe.py#L733-L744 | def attention_lm_moe_memory_efficient():
"""Memory-efficient version."""
hparams = attention_lm_moe_large()
hparams.diet_experts = True
hparams.layer_preprocess_sequence = "n"
hparams.layer_postprocess_sequence = "da"
hparams.layer_prepostprocess_dropout = 0.0
hparams.memory_efficient_ffn = True
hparams.attention_type = AttentionType.MEMORY_EFFICIENT
hparams.num_heads = 8
hparams.factored_logits = True
return hparams | [
"def",
"attention_lm_moe_memory_efficient",
"(",
")",
":",
"hparams",
"=",
"attention_lm_moe_large",
"(",
")",
"hparams",
".",
"diet_experts",
"=",
"True",
"hparams",
".",
"layer_preprocess_sequence",
"=",
"\"n\"",
"hparams",
".",
"layer_postprocess_sequence",
"=",
"\"da\"",
"hparams",
".",
"layer_prepostprocess_dropout",
"=",
"0.0",
"hparams",
".",
"memory_efficient_ffn",
"=",
"True",
"hparams",
".",
"attention_type",
"=",
"AttentionType",
".",
"MEMORY_EFFICIENT",
"hparams",
".",
"num_heads",
"=",
"8",
"hparams",
".",
"factored_logits",
"=",
"True",
"return",
"hparams"
] | Memory-efficient version. | [
"Memory",
"-",
"efficient",
"version",
"."
] | python | train | 35.916667 |
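A minimal sketch of exercising the hparams builder in the record above, assuming the surrounding tensor2tensor module context (neither the function nor `AttentionType` is redefined here):

```python
# Hedged sketch: depends on the tensor2tensor module shown in the record.
hparams = attention_lm_moe_memory_efficient()

# The returned hparams carry the memory-saving switches set by the builder.
assert hparams.memory_efficient_ffn is True
assert hparams.attention_type == AttentionType.MEMORY_EFFICIENT
assert hparams.num_heads == 8
```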
sphinx-gallery/sphinx-gallery | sphinx_gallery/scrapers.py | https://github.com/sphinx-gallery/sphinx-gallery/blob/b0c1f6701bf3f4cef238757e1105cf3686b5e674/sphinx_gallery/scrapers.py#L251-L282 | def figure_rst(figure_list, sources_dir):
"""Generate RST for a list of PNG filenames.
Depending on whether we have one or more figures, we use a
single rst call to 'image' or a horizontal list.
Parameters
----------
figure_list : list
List of strings of the figures' absolute paths.
sources_dir : str
absolute path of Sphinx documentation sources
Returns
-------
images_rst : str
rst code to embed the images in the document
"""
figure_paths = [os.path.relpath(figure_path, sources_dir)
.replace(os.sep, '/').lstrip('/')
for figure_path in figure_list]
images_rst = ""
if len(figure_paths) == 1:
figure_name = figure_paths[0]
images_rst = SINGLE_IMAGE % figure_name
elif len(figure_paths) > 1:
images_rst = HLIST_HEADER
for figure_name in figure_paths:
images_rst += HLIST_IMAGE_TEMPLATE % figure_name
return images_rst | [
"def",
"figure_rst",
"(",
"figure_list",
",",
"sources_dir",
")",
":",
"figure_paths",
"=",
"[",
"os",
".",
"path",
".",
"relpath",
"(",
"figure_path",
",",
"sources_dir",
")",
".",
"replace",
"(",
"os",
".",
"sep",
",",
"'/'",
")",
".",
"lstrip",
"(",
"'/'",
")",
"for",
"figure_path",
"in",
"figure_list",
"]",
"images_rst",
"=",
"\"\"",
"if",
"len",
"(",
"figure_paths",
")",
"==",
"1",
":",
"figure_name",
"=",
"figure_paths",
"[",
"0",
"]",
"images_rst",
"=",
"SINGLE_IMAGE",
"%",
"figure_name",
"elif",
"len",
"(",
"figure_paths",
")",
">",
"1",
":",
"images_rst",
"=",
"HLIST_HEADER",
"for",
"figure_name",
"in",
"figure_paths",
":",
"images_rst",
"+=",
"HLIST_IMAGE_TEMPLATE",
"%",
"figure_name",
"return",
"images_rst"
] | Generate RST for a list of PNG filenames.
Depending on whether we have one or more figures, we use a
single rst call to 'image' or a horizontal list.
Parameters
----------
figure_list : list
List of strings of the figures' absolute paths.
sources_dir : str
absolute path of Sphinx documentation sources
Returns
-------
images_rst : str
rst code to embed the images in the document | [
"Generate",
"RST",
"for",
"a",
"list",
"of",
"PNG",
"filenames",
"."
] | python | train | 30.1875 |
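A short usage sketch for the `figure_rst` record above; the paths are hypothetical and only illustrate the relative-path conversion:

```python
# Assumes sphinx-gallery is installed; figure_rst is the helper shown above.
from sphinx_gallery.scrapers import figure_rst

rst = figure_rst(
    ['/home/docs/src/auto_examples/images/plot_demo_001.png'],  # hypothetical figure path
    '/home/docs/src',  # Sphinx documentation sources dir
)
# rst now embeds 'auto_examples/images/plot_demo_001.png' via a single image directive.
print(rst)
```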
satellogic/telluric | telluric/georaster.py | https://github.com/satellogic/telluric/blob/e752cd3ee71e339f79717e526fde362e80055d9e/telluric/georaster.py#L332-L362 | def _stack_bands(one, other):
# type: (_Raster, _Raster) -> _Raster
"""Merges two rasters with non overlapping bands by stacking the bands.
"""
assert set(one.band_names).intersection(set(other.band_names)) == set()
    # We raise an error if the bands are the same. See above.
if one.band_names == other.band_names:
raise ValueError("rasters have the same bands, use another merge strategy")
# Apply "or" to the mask in the same way rasterio does, see
# https://mapbox.github.io/rasterio/topics/masks.html#dataset-masks
# In other words, mask the values that are already masked in either
# of the two rasters, since one mask per band is not supported
new_mask = np.ma.getmaskarray(one.image)[0] | np.ma.getmaskarray(other.image)[0]
# Concatenate the data along the band axis and apply the mask
new_image = np.ma.masked_array(
np.concatenate([
one.image.data,
other.image.data
]),
mask=[new_mask] * (one.image.shape[0] + other.image.shape[0])
)
new_bands = one.band_names + other.band_names
# We don't copy image and mask here, due to performance issues,
    # this output should not be used without eventually being copied
    # In this context we copy the object at the end of merge_all, merge_first and merge
return _Raster(image=new_image, band_names=new_bands) | [
"def",
"_stack_bands",
"(",
"one",
",",
"other",
")",
":",
"# type: (_Raster, _Raster) -> _Raster",
"assert",
"set",
"(",
"one",
".",
"band_names",
")",
".",
"intersection",
"(",
"set",
"(",
"other",
".",
"band_names",
")",
")",
"==",
"set",
"(",
")",
"# We raise an error in the bands are the same. See above.",
"if",
"one",
".",
"band_names",
"==",
"other",
".",
"band_names",
":",
"raise",
"ValueError",
"(",
"\"rasters have the same bands, use another merge strategy\"",
")",
"# Apply \"or\" to the mask in the same way rasterio does, see",
"# https://mapbox.github.io/rasterio/topics/masks.html#dataset-masks",
"# In other words, mask the values that are already masked in either",
"# of the two rasters, since one mask per band is not supported",
"new_mask",
"=",
"np",
".",
"ma",
".",
"getmaskarray",
"(",
"one",
".",
"image",
")",
"[",
"0",
"]",
"|",
"np",
".",
"ma",
".",
"getmaskarray",
"(",
"other",
".",
"image",
")",
"[",
"0",
"]",
"# Concatenate the data along the band axis and apply the mask",
"new_image",
"=",
"np",
".",
"ma",
".",
"masked_array",
"(",
"np",
".",
"concatenate",
"(",
"[",
"one",
".",
"image",
".",
"data",
",",
"other",
".",
"image",
".",
"data",
"]",
")",
",",
"mask",
"=",
"[",
"new_mask",
"]",
"*",
"(",
"one",
".",
"image",
".",
"shape",
"[",
"0",
"]",
"+",
"other",
".",
"image",
".",
"shape",
"[",
"0",
"]",
")",
")",
"new_bands",
"=",
"one",
".",
"band_names",
"+",
"other",
".",
"band_names",
"# We don't copy image and mask here, due to performance issues,",
"# this output should not use without eventually being copied",
"# In this context we are copying the object in the end of merge_all merge_first and merge",
"return",
"_Raster",
"(",
"image",
"=",
"new_image",
",",
"band_names",
"=",
"new_bands",
")"
] | Merges two rasters with non overlapping bands by stacking the bands. | [
"Merges",
"two",
"rasters",
"with",
"non",
"overlapping",
"bands",
"by",
"stacking",
"the",
"bands",
"."
] | python | train | 43.935484 |
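The mask handling in `_stack_bands` is plain NumPy; a self-contained sketch of the same mask-OR-and-concatenate idea, independent of telluric:

```python
import numpy as np

# Two single-band "rasters" as masked arrays of shape (bands, rows, cols).
a = np.ma.masked_array(np.ones((1, 2, 2)), mask=[[[True, False], [False, False]]])
b = np.ma.masked_array(np.zeros((1, 2, 2)), mask=[[[False, False], [False, True]]])

# "Or" the first-band masks, as rasterio does for dataset masks.
new_mask = np.ma.getmaskarray(a)[0] | np.ma.getmaskarray(b)[0]

stacked = np.ma.masked_array(
    np.concatenate([a.data, b.data]),
    mask=[new_mask] * 2,  # one shared mask, repeated per band
)
assert stacked.shape == (2, 2, 2)
assert stacked.mask[0, 0, 0] and stacked.mask[1, 1, 1]
```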
zeth/inputs | inputs.py | https://github.com/zeth/inputs/blob/a46681dbf77d6ab07834f550e5855c1f50701f99/inputs.py#L3587-L3594 | def handle_abs(self):
"""Gets the state as the raw abolute numbers."""
# pylint: disable=no-member
x_raw = self.microbit.accelerometer.get_x()
y_raw = self.microbit.accelerometer.get_y()
x_abs = ('Absolute', 0x00, x_raw)
y_abs = ('Absolute', 0x01, y_raw)
return x_abs, y_abs | [
"def",
"handle_abs",
"(",
"self",
")",
":",
"# pylint: disable=no-member",
"x_raw",
"=",
"self",
".",
"microbit",
".",
"accelerometer",
".",
"get_x",
"(",
")",
"y_raw",
"=",
"self",
".",
"microbit",
".",
"accelerometer",
".",
"get_y",
"(",
")",
"x_abs",
"=",
"(",
"'Absolute'",
",",
"0x00",
",",
"x_raw",
")",
"y_abs",
"=",
"(",
"'Absolute'",
",",
"0x01",
",",
"y_raw",
")",
"return",
"x_abs",
",",
"y_abs"
] | Gets the state as the raw absolute numbers. | [
"Gets",
"the",
"state",
"as",
"the",
"raw",
"abolute",
"numbers",
"."
] | python | train | 40.375 |
brian-rose/climlab | climlab/utils/walk.py | https://github.com/brian-rose/climlab/blob/eae188a2ae9308229b8cbb8fe0b65f51b50ee1e6/climlab/utils/walk.py#L3-L71 | def walk_processes(top, topname='top', topdown=True, ignoreFlag=False):
"""Generator for recursive tree of climlab processes
Starts walking from climlab process ``top`` and generates a complete
list of all processes and sub-processes that are managed from ``top`` process.
    ``level`` indicates the rank of a specific process in the process hierarchy:
.. note::
* level 0: ``top`` process
* level 1: sub-processes of ``top`` process
* level 2: sub-sub-processes of ``top`` process (=subprocesses of level 1 processes)
The method is based on os.walk().
:param top: top process from where walking should start
:type top: :class:`~climlab.process.process.Process`
:param str topname: name of top process [default: 'top']
    :param bool topdown: whether to generate *process_types* in regular or
in reverse order [default: True]
:param bool ignoreFlag: whether ``topdown`` flag should be ignored or not
[default: False]
:returns: name (str), proc (process), level (int)
:Example:
::
>>> import climlab
>>> from climlab.utils import walk
>>> model = climlab.EBM()
>>> for name, proc, top_proc in walk.walk_processes(model):
... print name
...
top
diffusion
LW
iceline
cold_albedo
warm_albedo
albedo
insolation
"""
if not ignoreFlag:
flag = topdown
else:
flag = True
proc = top
level = 0
if flag:
yield topname, proc, level
if len(proc.subprocess) > 0: # there are sub-processes
level += 1
for name, subproc in proc.subprocess.items():
for name2, subproc2, level2 in walk_processes(subproc,
topname=name,
topdown=subproc.topdown,
ignoreFlag=ignoreFlag):
yield name2, subproc2, level+level2
if not flag:
yield topname, proc, level | [
"def",
"walk_processes",
"(",
"top",
",",
"topname",
"=",
"'top'",
",",
"topdown",
"=",
"True",
",",
"ignoreFlag",
"=",
"False",
")",
":",
"if",
"not",
"ignoreFlag",
":",
"flag",
"=",
"topdown",
"else",
":",
"flag",
"=",
"True",
"proc",
"=",
"top",
"level",
"=",
"0",
"if",
"flag",
":",
"yield",
"topname",
",",
"proc",
",",
"level",
"if",
"len",
"(",
"proc",
".",
"subprocess",
")",
">",
"0",
":",
"# there are sub-processes",
"level",
"+=",
"1",
"for",
"name",
",",
"subproc",
"in",
"proc",
".",
"subprocess",
".",
"items",
"(",
")",
":",
"for",
"name2",
",",
"subproc2",
",",
"level2",
"in",
"walk_processes",
"(",
"subproc",
",",
"topname",
"=",
"name",
",",
"topdown",
"=",
"subproc",
".",
"topdown",
",",
"ignoreFlag",
"=",
"ignoreFlag",
")",
":",
"yield",
"name2",
",",
"subproc2",
",",
"level",
"+",
"level2",
"if",
"not",
"flag",
":",
"yield",
"topname",
",",
"proc",
",",
"level"
] | Generator for recursive tree of climlab processes
Starts walking from climlab process ``top`` and generates a complete
list of all processes and sub-processes that are managed from ``top`` process.
``level`` indicates the rank of a specific process in the process hierarchy:
.. note::
* level 0: ``top`` process
* level 1: sub-processes of ``top`` process
* level 2: sub-sub-processes of ``top`` process (=subprocesses of level 1 processes)
The method is based on os.walk().
:param top: top process from where walking should start
:type top: :class:`~climlab.process.process.Process`
:param str topname: name of top process [default: 'top']
:param bool topdown: whether to generate *process_types* in regular or
in reverse order [default: True]
:param bool ignoreFlag: whether ``topdown`` flag should be ignored or not
[default: False]
:returns: name (str), proc (process), level (int)
:Example:
::
>>> import climlab
>>> from climlab.utils import walk
>>> model = climlab.EBM()
>>> for name, proc, top_proc in walk.walk_processes(model):
... print name
...
top
diffusion
LW
iceline
cold_albedo
warm_albedo
albedo
insolation | [
"Generator",
"for",
"recursive",
"tree",
"of",
"climlab",
"processes"
] | python | train | 31.913043 |
zhmcclient/python-zhmcclient | zhmcclient/_console.py | https://github.com/zhmcclient/python-zhmcclient/blob/9657563e5d9184c51d3c903442a58b9725fdf335/zhmcclient/_console.py#L427-L469 | def get_audit_log(self, begin_time=None, end_time=None):
"""
Return the console audit log entries, optionally filtered by their
creation time.
Authorization requirements:
* Task permission to the "Audit and Log Management" task.
Parameters:
begin_time (:class:`~py:datetime.datetime`):
Begin time for filtering. Log entries with a creation time older
than the begin time will be omitted from the results.
If `None`, no such filtering is performed (and the oldest available
log entries will be included).
end_time (:class:`~py:datetime.datetime`):
End time for filtering. Log entries with a creation time newer
than the end time will be omitted from the results.
If `None`, no such filtering is performed (and the newest available
log entries will be included).
Returns:
:term:`json object`:
A JSON object with the log entries, as described in section
'Response body contents' of operation 'Get Console Audit Log' in
the :term:`HMC API` book.
Raises:
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError`
"""
query_parms = self._time_query_parms(begin_time, end_time)
uri = self.uri + '/operations/get-audit-log' + query_parms
result = self.manager.session.post(uri)
return result | [
"def",
"get_audit_log",
"(",
"self",
",",
"begin_time",
"=",
"None",
",",
"end_time",
"=",
"None",
")",
":",
"query_parms",
"=",
"self",
".",
"_time_query_parms",
"(",
"begin_time",
",",
"end_time",
")",
"uri",
"=",
"self",
".",
"uri",
"+",
"'/operations/get-audit-log'",
"+",
"query_parms",
"result",
"=",
"self",
".",
"manager",
".",
"session",
".",
"post",
"(",
"uri",
")",
"return",
"result"
] | Return the console audit log entries, optionally filtered by their
creation time.
Authorization requirements:
* Task permission to the "Audit and Log Management" task.
Parameters:
begin_time (:class:`~py:datetime.datetime`):
Begin time for filtering. Log entries with a creation time older
than the begin time will be omitted from the results.
If `None`, no such filtering is performed (and the oldest available
log entries will be included).
end_time (:class:`~py:datetime.datetime`):
End time for filtering. Log entries with a creation time newer
than the end time will be omitted from the results.
If `None`, no such filtering is performed (and the newest available
log entries will be included).
Returns:
:term:`json object`:
A JSON object with the log entries, as described in section
'Response body contents' of operation 'Get Console Audit Log' in
the :term:`HMC API` book.
Raises:
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError` | [
"Return",
"the",
"console",
"audit",
"log",
"entries",
"optionally",
"filtered",
"by",
"their",
"creation",
"time",
"."
] | python | train | 35.418605 |
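A hedged usage sketch for `get_audit_log` (host and credentials are placeholders; `Session`, `Client`, and the `consoles.console` accessor come from the zhmcclient package this method belongs to):

```python
from datetime import datetime, timedelta
import zhmcclient

session = zhmcclient.Session('hmc.example.com', 'user', 'password')  # hypothetical HMC
client = zhmcclient.Client(session)
console = client.consoles.console

# Only entries created in the last 24 hours.
log = console.get_audit_log(begin_time=datetime.now() - timedelta(days=1))
```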
scottwoodall/python-pgextras | pgextras/__init__.py | https://github.com/scottwoodall/python-pgextras/blob/d3aa83081d41b14b7c1f003cd837c812a2b5fff5/pgextras/__init__.py#L422-L443 | def locks(self):
"""
Display queries with active locks.
Record(
procpid=31776,
relname=None,
transactionid=None,
granted=True,
query_snippet='select * from hello;',
age=datetime.timedelta(0, 0, 288174),
)
:returns: list of Records
"""
return self.execute(
sql.LOCKS.format(
pid_column=self.pid_column,
query_column=self.query_column
)
) | [
"def",
"locks",
"(",
"self",
")",
":",
"return",
"self",
".",
"execute",
"(",
"sql",
".",
"LOCKS",
".",
"format",
"(",
"pid_column",
"=",
"self",
".",
"pid_column",
",",
"query_column",
"=",
"self",
".",
"query_column",
")",
")"
] | Display queries with active locks.
Record(
procpid=31776,
relname=None,
transactionid=None,
granted=True,
query_snippet='select * from hello;',
age=datetime.timedelta(0, 0, 288174),
)
:returns: list of Records | [
"Display",
"queries",
"with",
"active",
"locks",
"."
] | python | train | 23.181818 |
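A usage sketch for the `locks` record above, assuming the pgextras package context (the DSN is a placeholder):

```python
from pgextras import PgExtras

# PgExtras wraps a psycopg2 connection and is used as a context manager.
with PgExtras(dsn='dbname=mydb user=postgres') as pg:  # hypothetical DSN
    for record in pg.locks():
        print(record.procpid, record.query_snippet, record.age)
```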
saltstack/salt | salt/modules/x509.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/x509.py#L616-L644 | def read_csr(csr):
'''
Returns a dict containing details of a certificate request.
:depends: - OpenSSL command line tool
csr:
A path or PEM encoded string containing the CSR to read.
CLI Example:
.. code-block:: bash
salt '*' x509.read_csr /etc/pki/mycert.csr
'''
csr = _get_request_obj(csr)
ret = {
# X509 Version 3 has a value of 2 in the field.
# Version 2 has a value of 1.
# https://tools.ietf.org/html/rfc5280#section-4.1.2.1
'Version': csr.get_version() + 1,
# Get size returns in bytes. The world thinks of key sizes in bits.
'Subject': _parse_subject(csr.get_subject()),
'Subject Hash': _dec2hex(csr.get_subject().as_hash()),
'Public Key Hash': hashlib.sha1(csr.get_pubkey().get_modulus()).hexdigest(),
'X509v3 Extensions': _get_csr_extensions(csr),
}
return ret | [
"def",
"read_csr",
"(",
"csr",
")",
":",
"csr",
"=",
"_get_request_obj",
"(",
"csr",
")",
"ret",
"=",
"{",
"# X509 Version 3 has a value of 2 in the field.",
"# Version 2 has a value of 1.",
"# https://tools.ietf.org/html/rfc5280#section-4.1.2.1",
"'Version'",
":",
"csr",
".",
"get_version",
"(",
")",
"+",
"1",
",",
"# Get size returns in bytes. The world thinks of key sizes in bits.",
"'Subject'",
":",
"_parse_subject",
"(",
"csr",
".",
"get_subject",
"(",
")",
")",
",",
"'Subject Hash'",
":",
"_dec2hex",
"(",
"csr",
".",
"get_subject",
"(",
")",
".",
"as_hash",
"(",
")",
")",
",",
"'Public Key Hash'",
":",
"hashlib",
".",
"sha1",
"(",
"csr",
".",
"get_pubkey",
"(",
")",
".",
"get_modulus",
"(",
")",
")",
".",
"hexdigest",
"(",
")",
",",
"'X509v3 Extensions'",
":",
"_get_csr_extensions",
"(",
"csr",
")",
",",
"}",
"return",
"ret"
] | Returns a dict containing details of a certificate request.
:depends: - OpenSSL command line tool
csr:
A path or PEM encoded string containing the CSR to read.
CLI Example:
.. code-block:: bash
salt '*' x509.read_csr /etc/pki/mycert.csr | [
"Returns",
"a",
"dict",
"containing",
"details",
"of",
"a",
"certificate",
"request",
"."
] | python | train | 30.448276 |
jlafon/django-rest-framework-oauth | rest_framework_oauth/authentication.py | https://github.com/jlafon/django-rest-framework-oauth/blob/05ff90623fa811f166a000d8d58aa855c07c7435/rest_framework_oauth/authentication.py#L168-L187 | def authenticate_credentials(self, request, access_token):
"""
Authenticate the request, given the access token.
"""
try:
token = oauth2_provider.oauth2.models.AccessToken.objects.select_related('user')
# provider_now switches to timezone aware datetime when
# the oauth2_provider version supports to it.
token = token.get(token=access_token, expires__gt=provider_now())
except oauth2_provider.oauth2.models.AccessToken.DoesNotExist:
raise exceptions.AuthenticationFailed('Invalid token')
user = token.user
if not user.is_active:
msg = 'User inactive or deleted: %s' % user.username
raise exceptions.AuthenticationFailed(msg)
return (user, token) | [
"def",
"authenticate_credentials",
"(",
"self",
",",
"request",
",",
"access_token",
")",
":",
"try",
":",
"token",
"=",
"oauth2_provider",
".",
"oauth2",
".",
"models",
".",
"AccessToken",
".",
"objects",
".",
"select_related",
"(",
"'user'",
")",
"# provider_now switches to timezone aware datetime when",
"# the oauth2_provider version supports to it.",
"token",
"=",
"token",
".",
"get",
"(",
"token",
"=",
"access_token",
",",
"expires__gt",
"=",
"provider_now",
"(",
")",
")",
"except",
"oauth2_provider",
".",
"oauth2",
".",
"models",
".",
"AccessToken",
".",
"DoesNotExist",
":",
"raise",
"exceptions",
".",
"AuthenticationFailed",
"(",
"'Invalid token'",
")",
"user",
"=",
"token",
".",
"user",
"if",
"not",
"user",
".",
"is_active",
":",
"msg",
"=",
"'User inactive or deleted: %s'",
"%",
"user",
".",
"username",
"raise",
"exceptions",
".",
"AuthenticationFailed",
"(",
"msg",
")",
"return",
"(",
"user",
",",
"token",
")"
] | Authenticate the request, given the access token. | [
"Authenticate",
"the",
"request",
"given",
"the",
"access",
"token",
"."
] | python | train | 38.95 |
emory-libraries/eulfedora | eulfedora/syncutil.py | https://github.com/emory-libraries/eulfedora/blob/161826f3fdcdab4007f6fa7dfd9f1ecabc4bcbe4/eulfedora/syncutil.py#L413-L466 | def encoded_datastream(self):
'''Generator for datastream content. Takes a list of sections
of data within the current chunk (split on binaryContent start and
end tags), runs a base64 decode, and yields the data. Computes
datastream size and MD5 as data is decoded for sanity-checking
purposes. If binary content is not completed within the current
chunk, it will retrieve successive chunks of export data until it
finds the end. Sets a flag when partial content is left within
the current chunk for continued processing by :meth:`object_data`.
:param sections: list of export data split on binary content start
and end tags, starting with the first section of binary content
'''
# return a generator of data to be uploaded to fedora
size = 0
if self.verify:
md5 = hashlib.md5()
leftover = None
while self.within_file:
content = self.get_next_section()
if content == BINARY_CONTENT_END:
if self.verify:
logger.info('Decoded content size %s (%s) MD5 %s',
size, humanize_file_size(size), md5.hexdigest())
self.within_file = False
elif self.within_file:
# if there was leftover binary content from the last chunk,
# add it to the content now
if leftover is not None:
content = b''.join([leftover, content])
leftover = None
try:
# decode method used by base64.decode
decoded_content = binascii.a2b_base64(content)
except binascii.Error:
# decoding can fail with a padding error when
# a line of encoded content runs across a read chunk
lines = content.split(b'\n')
# decode and yield all but the last line of encoded content
decoded_content = binascii.a2b_base64(b''.join(lines[:-1]))
# store the leftover to be decoded with the next chunk
leftover = lines[-1]
if decoded_content is not None:
if self.verify:
md5.update(decoded_content)
size += len(decoded_content)
yield decoded_content | [
"def",
"encoded_datastream",
"(",
"self",
")",
":",
"# return a generator of data to be uploaded to fedora",
"size",
"=",
"0",
"if",
"self",
".",
"verify",
":",
"md5",
"=",
"hashlib",
".",
"md5",
"(",
")",
"leftover",
"=",
"None",
"while",
"self",
".",
"within_file",
":",
"content",
"=",
"self",
".",
"get_next_section",
"(",
")",
"if",
"content",
"==",
"BINARY_CONTENT_END",
":",
"if",
"self",
".",
"verify",
":",
"logger",
".",
"info",
"(",
"'Decoded content size %s (%s) MD5 %s'",
",",
"size",
",",
"humanize_file_size",
"(",
"size",
")",
",",
"md5",
".",
"hexdigest",
"(",
")",
")",
"self",
".",
"within_file",
"=",
"False",
"elif",
"self",
".",
"within_file",
":",
"# if there was leftover binary content from the last chunk,",
"# add it to the content now",
"if",
"leftover",
"is",
"not",
"None",
":",
"content",
"=",
"b''",
".",
"join",
"(",
"[",
"leftover",
",",
"content",
"]",
")",
"leftover",
"=",
"None",
"try",
":",
"# decode method used by base64.decode",
"decoded_content",
"=",
"binascii",
".",
"a2b_base64",
"(",
"content",
")",
"except",
"binascii",
".",
"Error",
":",
"# decoding can fail with a padding error when",
"# a line of encoded content runs across a read chunk",
"lines",
"=",
"content",
".",
"split",
"(",
"b'\\n'",
")",
"# decode and yield all but the last line of encoded content",
"decoded_content",
"=",
"binascii",
".",
"a2b_base64",
"(",
"b''",
".",
"join",
"(",
"lines",
"[",
":",
"-",
"1",
"]",
")",
")",
"# store the leftover to be decoded with the next chunk",
"leftover",
"=",
"lines",
"[",
"-",
"1",
"]",
"if",
"decoded_content",
"is",
"not",
"None",
":",
"if",
"self",
".",
"verify",
":",
"md5",
".",
"update",
"(",
"decoded_content",
")",
"size",
"+=",
"len",
"(",
"decoded_content",
")",
"yield",
"decoded_content"
] | Generator for datastream content. Takes a list of sections
of data within the current chunk (split on binaryContent start and
end tags), runs a base64 decode, and yields the data. Computes
datastream size and MD5 as data is decoded for sanity-checking
purposes. If binary content is not completed within the current
chunk, it will retrieve successive chunks of export data until it
finds the end. Sets a flag when partial content is left within
the current chunk for continued processing by :meth:`object_data`.
:param sections: list of export data split on binary content start
and end tags, starting with the first section of binary content | [
"Generator",
"for",
"datastream",
"content",
".",
"Takes",
"a",
"list",
"of",
"sections",
"of",
"data",
"within",
"the",
"current",
"chunk",
"(",
"split",
"on",
"binaryContent",
"start",
"and",
"end",
"tags",
")",
"runs",
"a",
"base64",
"decode",
"and",
"yields",
"the",
"data",
".",
"Computes",
"datastream",
"size",
"and",
"MD5",
"as",
"data",
"is",
"decoded",
"for",
"sanity",
"-",
"checking",
"purposes",
".",
"If",
"binary",
"content",
"is",
"not",
"completed",
"within",
"the",
"current",
"chunk",
"it",
"will",
"retrieve",
"successive",
"chunks",
"of",
"export",
"data",
"until",
"it",
"finds",
"the",
"end",
".",
"Sets",
"a",
"flag",
"when",
"partial",
"content",
"is",
"left",
"within",
"the",
"current",
"chunk",
"for",
"continued",
"processing",
"by",
":",
"meth",
":",
"object_data",
"."
] | python | train | 44.388889 |
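The leftover-handling trick in `encoded_datastream` stands on its own; a self-contained sketch of decoding base64 text that arrives split at arbitrary chunk boundaries:

```python
import binascii

def decode_chunks(chunks):
    """Yield decoded bytes from base64 text chunks split at arbitrary points."""
    leftover = b''
    for chunk in chunks:
        content = leftover + chunk
        leftover = b''
        try:
            yield binascii.a2b_base64(content)
        except binascii.Error:
            # An encoded line ran across the chunk boundary: decode the
            # complete lines and carry the partial line into the next chunk.
            lines = content.split(b'\n')
            yield binascii.a2b_base64(b''.join(lines[:-1]))
            leftover = lines[-1]

assert b''.join(decode_chunks([b'aGVsb', b'G8gd29ybGQ=\n'])) == b'hello world'
```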
IBM/ibm-cos-sdk-python-s3transfer | ibm_s3transfer/aspera/futures.py | https://github.com/IBM/ibm-cos-sdk-python-s3transfer/blob/24ba53137213e26e6b8fc2c3ec1e8198d507d22b/ibm_s3transfer/aspera/futures.py#L584-L592 | def set_transfer_spec(self):
        ''' run the function to set the transfer spec; on error set the associated exception '''
_ret = False
try:
self._args.transfer_spec_func(self._args)
_ret = True
except Exception as ex:
self.notify_exception(AsperaTransferSpecError(ex), False)
return _ret | [
"def",
"set_transfer_spec",
"(",
"self",
")",
":",
"_ret",
"=",
"False",
"try",
":",
"self",
".",
"_args",
".",
"transfer_spec_func",
"(",
"self",
".",
"_args",
")",
"_ret",
"=",
"True",
"except",
"Exception",
"as",
"ex",
":",
"self",
".",
"notify_exception",
"(",
"AsperaTransferSpecError",
"(",
"ex",
")",
",",
"False",
")",
"return",
"_ret"
] | run the function to set the transfer spec; on error set the associated exception | [
"run",
"the",
"function",
"to",
"set",
"the",
"transfer",
"spec",
"on",
"error",
"set",
"associated",
"exception"
] | python | train | 38.444444 |
googleapis/google-cloud-python | api_core/google/api_core/gapic_v1/method.py | https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/api_core/google/api_core/gapic_v1/method.py#L50-L86 | def _determine_timeout(default_timeout, specified_timeout, retry):
"""Determines how timeout should be applied to a wrapped method.
Args:
default_timeout (Optional[Timeout]): The default timeout specified
at method creation time.
specified_timeout (Optional[Timeout]): The timeout specified at
invocation time. If :attr:`DEFAULT`, this will be set to
the ``default_timeout``.
retry (Optional[Retry]): The retry specified at invocation time.
Returns:
Optional[Timeout]: The timeout to apply to the method or ``None``.
"""
if specified_timeout is DEFAULT:
specified_timeout = default_timeout
if specified_timeout is default_timeout:
# If timeout is the default and the default timeout is exponential and
# a non-default retry is specified, make sure the timeout's deadline
# matches the retry's. This handles the case where the user leaves
# the timeout default but specifies a lower deadline via the retry.
if (
retry
and retry is not DEFAULT
and isinstance(default_timeout, timeout.ExponentialTimeout)
):
return default_timeout.with_deadline(retry._deadline)
else:
return default_timeout
# If timeout is specified as a number instead of a Timeout instance,
# convert it to a ConstantTimeout.
if isinstance(specified_timeout, (int, float)):
return timeout.ConstantTimeout(specified_timeout)
else:
return specified_timeout | [
"def",
"_determine_timeout",
"(",
"default_timeout",
",",
"specified_timeout",
",",
"retry",
")",
":",
"if",
"specified_timeout",
"is",
"DEFAULT",
":",
"specified_timeout",
"=",
"default_timeout",
"if",
"specified_timeout",
"is",
"default_timeout",
":",
"# If timeout is the default and the default timeout is exponential and",
"# a non-default retry is specified, make sure the timeout's deadline",
"# matches the retry's. This handles the case where the user leaves",
"# the timeout default but specifies a lower deadline via the retry.",
"if",
"(",
"retry",
"and",
"retry",
"is",
"not",
"DEFAULT",
"and",
"isinstance",
"(",
"default_timeout",
",",
"timeout",
".",
"ExponentialTimeout",
")",
")",
":",
"return",
"default_timeout",
".",
"with_deadline",
"(",
"retry",
".",
"_deadline",
")",
"else",
":",
"return",
"default_timeout",
"# If timeout is specified as a number instead of a Timeout instance,",
"# convert it to a ConstantTimeout.",
"if",
"isinstance",
"(",
"specified_timeout",
",",
"(",
"int",
",",
"float",
")",
")",
":",
"return",
"timeout",
".",
"ConstantTimeout",
"(",
"specified_timeout",
")",
"else",
":",
"return",
"specified_timeout"
] | Determines how timeout should be applied to a wrapped method.
Args:
default_timeout (Optional[Timeout]): The default timeout specified
at method creation time.
specified_timeout (Optional[Timeout]): The timeout specified at
invocation time. If :attr:`DEFAULT`, this will be set to
the ``default_timeout``.
retry (Optional[Retry]): The retry specified at invocation time.
Returns:
Optional[Timeout]: The timeout to apply to the method or ``None``. | [
"Determines",
"how",
"timeout",
"should",
"be",
"applied",
"to",
"a",
"wrapped",
"method",
"."
] | python | train | 41.513514 |
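A quick sketch of the promotion rule at the end of `_determine_timeout` (assuming the google.api_core module context shown above):

```python
from google.api_core import timeout

# A plain number given at invocation time is promoted to a ConstantTimeout.
resolved = _determine_timeout(
    default_timeout=timeout.ExponentialTimeout(),
    specified_timeout=30.0,  # seconds
    retry=None,
)
assert isinstance(resolved, timeout.ConstantTimeout)
```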
FPGAwars/apio | apio/commands/time.py | https://github.com/FPGAwars/apio/blob/5c6310f11a061a760764c6b5847bfb431dc3d0bc/apio/commands/time.py#L37-L54 | def cli(ctx, board, fpga, pack, type, size, project_dir,
verbose, verbose_yosys, verbose_arachne):
"""Bitstream timing analysis."""
# Run scons
exit_code = SCons(project_dir).time({
'board': board,
'fpga': fpga,
'size': size,
'type': type,
'pack': pack,
'verbose': {
'all': verbose,
'yosys': verbose_yosys,
'arachne': verbose_arachne
}
})
ctx.exit(exit_code) | [
"def",
"cli",
"(",
"ctx",
",",
"board",
",",
"fpga",
",",
"pack",
",",
"type",
",",
"size",
",",
"project_dir",
",",
"verbose",
",",
"verbose_yosys",
",",
"verbose_arachne",
")",
":",
"# Run scons",
"exit_code",
"=",
"SCons",
"(",
"project_dir",
")",
".",
"time",
"(",
"{",
"'board'",
":",
"board",
",",
"'fpga'",
":",
"fpga",
",",
"'size'",
":",
"size",
",",
"'type'",
":",
"type",
",",
"'pack'",
":",
"pack",
",",
"'verbose'",
":",
"{",
"'all'",
":",
"verbose",
",",
"'yosys'",
":",
"verbose_yosys",
",",
"'arachne'",
":",
"verbose_arachne",
"}",
"}",
")",
"ctx",
".",
"exit",
"(",
"exit_code",
")"
] | Bitstream timing analysis. | [
"Bitstream",
"timing",
"analysis",
"."
] | python | train | 25.666667 |
dnanhkhoa/logone | logone/logone.py | https://github.com/dnanhkhoa/logone/blob/7345a59e54ae59418a2c35ae7e7af5b2784fa1b5/logone/logone.py#L87-L96 | def set_level(self, level):
"""
Set the logging level of this logger.
:param level: must be an int or a str.
"""
for handler in self.__coloredlogs_handlers:
handler.setLevel(level=level)
self.logger.setLevel(level=level) | [
"def",
"set_level",
"(",
"self",
",",
"level",
")",
":",
"for",
"handler",
"in",
"self",
".",
"__coloredlogs_handlers",
":",
"handler",
".",
"setLevel",
"(",
"level",
"=",
"level",
")",
"self",
".",
"logger",
".",
"setLevel",
"(",
"level",
"=",
"level",
")"
] | Set the logging level of this logger.
:param level: must be an int or a str. | [
"Set",
"the",
"logging",
"level",
"of",
"this",
"logger",
"."
] | python | train | 27.3 |
jrabbit/taskd-client-py | taskc/simple.py | https://github.com/jrabbit/taskd-client-py/blob/473f121eca7fdb358874c9c00827f9a6ecdcda4e/taskc/simple.py#L16-L28 | def _is_path(instance, attribute, s, exists=True):
"Validator for path-yness"
if not s:
# allow False as a default
return
if exists:
if os.path.exists(s):
return
else:
raise OSError("path does not exist")
else:
# how do we tell if it's a path if it doesn't exist?
raise TypeError("Not a path?") | [
"def",
"_is_path",
"(",
"instance",
",",
"attribute",
",",
"s",
",",
"exists",
"=",
"True",
")",
":",
"if",
"not",
"s",
":",
"# allow False as a default",
"return",
"if",
"exists",
":",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"s",
")",
":",
"return",
"else",
":",
"raise",
"OSError",
"(",
"\"path does not exist\"",
")",
"else",
":",
"# how do we tell if it's a path if it doesn't exist?",
"raise",
"TypeError",
"(",
"\"Not a path?\"",
")"
] | Validator for path-yness | [
"Validator",
"for",
"path",
"-",
"yness"
] | python | train | 28.461538 |
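Since `_is_path` follows the attrs `(instance, attribute, value)` validator signature with an extra keyword, it can be bound with `functools.partial`; a hedged sketch (the class and field are hypothetical):

```python
import functools
import attr

@attr.s
class TaskdConfig:
    # Hypothetical field: require that the certificate path exists on disk.
    certificate = attr.ib(validator=functools.partial(_is_path, exists=True))

# Raises OSError at construction time if the path does not exist.
config = TaskdConfig(certificate='/etc/taskd/client.cert.pem')
```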
sanger-pathogens/ariba | ariba/assembly_compare.py | https://github.com/sanger-pathogens/ariba/blob/16a0b1916ce0e886bd22550ba2d648542977001b/ariba/assembly_compare.py#L139-L168 | def nucmer_hits_to_ref_and_qry_coords(cls, nucmer_hits, contig=None):
'''Same as nucmer_hits_to_ref_coords, except removes containing hits first,
and returns ref and qry coords lists'''
if contig is None:
ctg_coords = {key: [] for key in nucmer_hits.keys()}
else:
ctg_coords = {contig: []}
ref_coords = {}
for key in ctg_coords:
hits = copy.copy(nucmer_hits[key])
hits.sort(key=lambda x: len(x.ref_coords()))
if len(hits) > 1:
i = 0
while i < len(hits) - 1:
c1 = hits[i].ref_coords()
c2 = hits[i+1].ref_coords()
if c2.contains(c1):
hits.pop(i)
else:
i += 1
ref_coords[key] = [hit.ref_coords() for hit in hits]
ctg_coords[key] = [hit.qry_coords() for hit in hits]
pyfastaq.intervals.merge_overlapping_in_list(ref_coords[key])
pyfastaq.intervals.merge_overlapping_in_list(ctg_coords[key])
return ctg_coords, ref_coords | [
"def",
"nucmer_hits_to_ref_and_qry_coords",
"(",
"cls",
",",
"nucmer_hits",
",",
"contig",
"=",
"None",
")",
":",
"if",
"contig",
"is",
"None",
":",
"ctg_coords",
"=",
"{",
"key",
":",
"[",
"]",
"for",
"key",
"in",
"nucmer_hits",
".",
"keys",
"(",
")",
"}",
"else",
":",
"ctg_coords",
"=",
"{",
"contig",
":",
"[",
"]",
"}",
"ref_coords",
"=",
"{",
"}",
"for",
"key",
"in",
"ctg_coords",
":",
"hits",
"=",
"copy",
".",
"copy",
"(",
"nucmer_hits",
"[",
"key",
"]",
")",
"hits",
".",
"sort",
"(",
"key",
"=",
"lambda",
"x",
":",
"len",
"(",
"x",
".",
"ref_coords",
"(",
")",
")",
")",
"if",
"len",
"(",
"hits",
")",
">",
"1",
":",
"i",
"=",
"0",
"while",
"i",
"<",
"len",
"(",
"hits",
")",
"-",
"1",
":",
"c1",
"=",
"hits",
"[",
"i",
"]",
".",
"ref_coords",
"(",
")",
"c2",
"=",
"hits",
"[",
"i",
"+",
"1",
"]",
".",
"ref_coords",
"(",
")",
"if",
"c2",
".",
"contains",
"(",
"c1",
")",
":",
"hits",
".",
"pop",
"(",
"i",
")",
"else",
":",
"i",
"+=",
"1",
"ref_coords",
"[",
"key",
"]",
"=",
"[",
"hit",
".",
"ref_coords",
"(",
")",
"for",
"hit",
"in",
"hits",
"]",
"ctg_coords",
"[",
"key",
"]",
"=",
"[",
"hit",
".",
"qry_coords",
"(",
")",
"for",
"hit",
"in",
"hits",
"]",
"pyfastaq",
".",
"intervals",
".",
"merge_overlapping_in_list",
"(",
"ref_coords",
"[",
"key",
"]",
")",
"pyfastaq",
".",
"intervals",
".",
"merge_overlapping_in_list",
"(",
"ctg_coords",
"[",
"key",
"]",
")",
"return",
"ctg_coords",
",",
"ref_coords"
] | Same as nucmer_hits_to_ref_coords, except removes containing hits first,
and returns ref and qry coords lists | [
"Same",
"as",
"nucmer_hits_to_ref_coords",
"except",
"removes",
"containing",
"hits",
"first",
"and",
"returns",
"ref",
"and",
"qry",
"coords",
"lists"
] | python | train | 37.3 |
openstack/networking-cisco | networking_cisco/plugins/cisco/cfg_agent/device_drivers/iosxe/iosxe_routing_driver.py | https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/plugins/cisco/cfg_agent/device_drivers/iosxe/iosxe_routing_driver.py#L355-L365 | def _get_capabilities(self):
"""Get the servers NETCONF capabilities.
:return: List of server capabilities.
"""
conn = self._get_connection()
capabilities = []
for c in conn.server_capabilities:
capabilities.append(c)
LOG.debug("Server capabilities: %s", capabilities)
return capabilities | [
"def",
"_get_capabilities",
"(",
"self",
")",
":",
"conn",
"=",
"self",
".",
"_get_connection",
"(",
")",
"capabilities",
"=",
"[",
"]",
"for",
"c",
"in",
"conn",
".",
"server_capabilities",
":",
"capabilities",
".",
"append",
"(",
"c",
")",
"LOG",
".",
"debug",
"(",
"\"Server capabilities: %s\"",
",",
"capabilities",
")",
"return",
"capabilities"
] | Get the server's NETCONF capabilities.
:return: List of server capabilities. | [
"Get",
"the",
"servers",
"NETCONF",
"capabilities",
"."
] | python | train | 32.272727 |
aholkner/bacon | bacon/sound.py | https://github.com/aholkner/bacon/blob/edf3810dcb211942d392a8637945871399b0650d/bacon/sound.py#L169-L179 | def set_loop_points(self, start_sample=-1, end_sample=0):
'''Set the loop points within the sound.
The sound must have been created with ``loop=True``. The default parameters cause the loop points to be set to
the entire sound duration.
:note: There is currently no API for converting sample numbers to times.
:param start_sample: sample number to loop back to
:param end_sample: sample number to loop at
'''
lib.SetVoiceLoopPoints(self._handle, start_sample, end_sample) | [
"def",
"set_loop_points",
"(",
"self",
",",
"start_sample",
"=",
"-",
"1",
",",
"end_sample",
"=",
"0",
")",
":",
"lib",
".",
"SetVoiceLoopPoints",
"(",
"self",
".",
"_handle",
",",
"start_sample",
",",
"end_sample",
")"
] | Set the loop points within the sound.
The sound must have been created with ``loop=True``. The default parameters cause the loop points to be set to
the entire sound duration.
:note: There is currently no API for converting sample numbers to times.
:param start_sample: sample number to loop back to
:param end_sample: sample number to loop at | [
"Set",
"the",
"loop",
"points",
"within",
"the",
"sound",
"."
] | python | test | 48 |
urinieto/msaf | msaf/base.py | https://github.com/urinieto/msaf/blob/9dbb57d77a1310465a65cc40f1641d083ca74385/msaf/base.py#L357-L383 | def _compute_all_features(self):
"""Computes all the features (beatsync, framesync) from the audio."""
# Read actual audio waveform
self._audio, _ = librosa.load(self.file_struct.audio_file,
sr=self.sr)
# Get duration of audio file
self.dur = len(self._audio) / float(self.sr)
# Compute actual features
self._framesync_features = self.compute_features()
# Compute framesync times
self._compute_framesync_times()
# Compute/Read beats
self._est_beats_times, self._est_beats_frames = self.estimate_beats()
self._ann_beats_times, self._ann_beats_frames = self.read_ann_beats()
# Beat-Synchronize
pad = True # Always append to the end of the features
self._est_beatsync_features, self._est_beatsync_times = \
self.compute_beat_sync_features(self._est_beats_frames,
self._est_beats_times, pad)
self._ann_beatsync_features, self._ann_beatsync_times = \
self.compute_beat_sync_features(self._ann_beats_frames,
self._ann_beats_times, pad) | [
"def",
"_compute_all_features",
"(",
"self",
")",
":",
"# Read actual audio waveform",
"self",
".",
"_audio",
",",
"_",
"=",
"librosa",
".",
"load",
"(",
"self",
".",
"file_struct",
".",
"audio_file",
",",
"sr",
"=",
"self",
".",
"sr",
")",
"# Get duration of audio file",
"self",
".",
"dur",
"=",
"len",
"(",
"self",
".",
"_audio",
")",
"/",
"float",
"(",
"self",
".",
"sr",
")",
"# Compute actual features",
"self",
".",
"_framesync_features",
"=",
"self",
".",
"compute_features",
"(",
")",
"# Compute framesync times",
"self",
".",
"_compute_framesync_times",
"(",
")",
"# Compute/Read beats",
"self",
".",
"_est_beats_times",
",",
"self",
".",
"_est_beats_frames",
"=",
"self",
".",
"estimate_beats",
"(",
")",
"self",
".",
"_ann_beats_times",
",",
"self",
".",
"_ann_beats_frames",
"=",
"self",
".",
"read_ann_beats",
"(",
")",
"# Beat-Synchronize",
"pad",
"=",
"True",
"# Always append to the end of the features",
"self",
".",
"_est_beatsync_features",
",",
"self",
".",
"_est_beatsync_times",
"=",
"self",
".",
"compute_beat_sync_features",
"(",
"self",
".",
"_est_beats_frames",
",",
"self",
".",
"_est_beats_times",
",",
"pad",
")",
"self",
".",
"_ann_beatsync_features",
",",
"self",
".",
"_ann_beatsync_times",
"=",
"self",
".",
"compute_beat_sync_features",
"(",
"self",
".",
"_ann_beats_frames",
",",
"self",
".",
"_ann_beats_times",
",",
"pad",
")"
] | Computes all the features (beatsync, framesync) from the audio. | [
"Computes",
"all",
"the",
"features",
"(",
"beatsync",
"framesync",
")",
"from",
"the",
"audio",
"."
] | python | test | 43.962963 |
thunder-project/thunder | thunder/series/series.py | https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/series/series.py#L628-L649 | def aggregate_by_index(self, function, level=0):
"""
        Aggregate data in each record, grouping by index values.
For each unique value of the index, applies a function to the group
indexed by that value. Returns a Series indexed by those unique values.
For the result to be a valid Series object, the aggregating function should
return a simple numeric type. Also allows selection of levels within a
multi-index. See select_by_index for more info on indices and multi-indices.
Parameters
----------
function : function
Aggregating function to map to Series values. Should take a list or ndarray
as input and return a simple numeric value.
level : list of ints, optional, default=0
Specifies the levels of the multi-index to use when determining unique index values.
If only a single level is desired, can be an int.
"""
result = self._map_by_index(function, level=level)
return result.map(lambda v: array(v), index=result.index) | [
"def",
"aggregate_by_index",
"(",
"self",
",",
"function",
",",
"level",
"=",
"0",
")",
":",
"result",
"=",
"self",
".",
"_map_by_index",
"(",
"function",
",",
"level",
"=",
"level",
")",
"return",
"result",
".",
"map",
"(",
"lambda",
"v",
":",
"array",
"(",
"v",
")",
",",
"index",
"=",
"result",
".",
"index",
")"
] | Aggregate data in each record, grouping by index values.
For each unique value of the index, applies a function to the group
indexed by that value. Returns a Series indexed by those unique values.
For the result to be a valid Series object, the aggregating function should
return a simple numeric type. Also allows selection of levels within a
multi-index. See select_by_index for more info on indices and multi-indices.
Parameters
----------
function : function
Aggregating function to map to Series values. Should take a list or ndarray
as input and return a simple numeric value.
level : list of ints, optional, default=0
Specifies the levels of the multi-index to use when determining unique index values.
If only a single level is desired, can be an int. | [
"Aggregrate",
"data",
"in",
"each",
"record",
"grouping",
"by",
"index",
"values",
"."
] | python | train | 48.545455 |
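A usage sketch for `aggregate_by_index` with a small in-memory Series (hedged: assumes thunder 1.x, where `td.series.fromarray` accepts an `index` argument):

```python
import numpy as np
import thunder as td

# Two records of six samples each, sharing index labels 0, 0, 1, 1, 2, 2.
series = td.series.fromarray(np.arange(12).reshape(2, 6), index=[0, 0, 1, 1, 2, 2])

# One aggregated value per unique index label, per record.
means = series.aggregate_by_index(np.mean)
print(means.index)  # [0, 1, 2]
```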
ThreatConnect-Inc/tcex | tcex/tcex_ti_indicator.py | https://github.com/ThreatConnect-Inc/tcex/blob/dd4d7a1ef723af1561687120191886b9a2fd4b47/tcex/tcex_ti_indicator.py#L141-L148 | def association(self, group_xid):
"""Add association using xid value.
Args:
group_xid (str): The external id of the Group to associate.
"""
association = {'groupXid': group_xid}
self._indicator_data.setdefault('associatedGroups', []).append(association) | [
"def",
"association",
"(",
"self",
",",
"group_xid",
")",
":",
"association",
"=",
"{",
"'groupXid'",
":",
"group_xid",
"}",
"self",
".",
"_indicator_data",
".",
"setdefault",
"(",
"'associatedGroups'",
",",
"[",
"]",
")",
".",
"append",
"(",
"association",
")"
] | Add association using xid value.
Args:
group_xid (str): The external id of the Group to associate. | [
"Add",
"association",
"using",
"xid",
"value",
"."
] | python | train | 37.375 |
pokerregion/poker | poker/website/twoplustwo.py | https://github.com/pokerregion/poker/blob/2d8cf208fdf2b26bdc935972dcbe7a983a9e9768/poker/website/twoplustwo.py#L125-L129 | def _get_timezone(self, root):
"""Find timezone informatation on bottom of the page."""
tz_str = root.xpath('//div[@class="smallfont" and @align="center"]')[0].text
hours = int(self._tz_re.search(tz_str).group(1))
return tzoffset(tz_str, hours * 60) | [
"def",
"_get_timezone",
"(",
"self",
",",
"root",
")",
":",
"tz_str",
"=",
"root",
".",
"xpath",
"(",
"'//div[@class=\"smallfont\" and @align=\"center\"]'",
")",
"[",
"0",
"]",
".",
"text",
"hours",
"=",
"int",
"(",
"self",
".",
"_tz_re",
".",
"search",
"(",
"tz_str",
")",
".",
"group",
"(",
"1",
")",
")",
"return",
"tzoffset",
"(",
"tz_str",
",",
"hours",
"*",
"60",
")"
] | Find timezone information on bottom of the page. | [
"Find",
"timezone",
"informatation",
"on",
"bottom",
"of",
"the",
"page",
"."
] | python | train | 55.4 |
DiamondLightSource/python-workflows | workflows/contrib/status_monitor.py | https://github.com/DiamondLightSource/python-workflows/blob/7ef47b457655b96f4d2ef7ee9863cf1b6d20e023/workflows/contrib/status_monitor.py#L128-L144 | def _erase_card(self, number):
"""Destroy cards with this or higher number."""
with self._lock:
if number < (len(self.cards) - 1):
self._erase_card(number + 1)
if number > (len(self.cards) - 1):
return
max_cards_horiz = int(curses.COLS / 35)
obliterate = curses.newwin(
6,
35,
7 + 6 * (number // max_cards_horiz),
35 * (number % max_cards_horiz),
)
obliterate.erase()
obliterate.noutrefresh()
del self.cards[number] | [
"def",
"_erase_card",
"(",
"self",
",",
"number",
")",
":",
"with",
"self",
".",
"_lock",
":",
"if",
"number",
"<",
"(",
"len",
"(",
"self",
".",
"cards",
")",
"-",
"1",
")",
":",
"self",
".",
"_erase_card",
"(",
"number",
"+",
"1",
")",
"if",
"number",
">",
"(",
"len",
"(",
"self",
".",
"cards",
")",
"-",
"1",
")",
":",
"return",
"max_cards_horiz",
"=",
"int",
"(",
"curses",
".",
"COLS",
"/",
"35",
")",
"obliterate",
"=",
"curses",
".",
"newwin",
"(",
"6",
",",
"35",
",",
"7",
"+",
"6",
"*",
"(",
"number",
"//",
"max_cards_horiz",
")",
",",
"35",
"*",
"(",
"number",
"%",
"max_cards_horiz",
")",
",",
")",
"obliterate",
".",
"erase",
"(",
")",
"obliterate",
".",
"noutrefresh",
"(",
")",
"del",
"self",
".",
"cards",
"[",
"number",
"]"
] | Destroy cards with this or higher number. | [
"Destroy",
"cards",
"with",
"this",
"or",
"higher",
"number",
"."
] | python | train | 35.705882 |
docker/docker-py | docker/api/service.py | https://github.com/docker/docker-py/blob/613d6aad83acc9931ff2ecfd6a6c7bd8061dc125/docker/api/service.py#L224-L239 | def inspect_task(self, task):
"""
Retrieve information about a task.
Args:
task (str): Task ID
Returns:
(dict): Information about the task.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
url = self._url('/tasks/{0}', task)
return self._result(self._get(url), True) | [
"def",
"inspect_task",
"(",
"self",
",",
"task",
")",
":",
"url",
"=",
"self",
".",
"_url",
"(",
"'/tasks/{0}'",
",",
"task",
")",
"return",
"self",
".",
"_result",
"(",
"self",
".",
"_get",
"(",
"url",
")",
",",
"True",
")"
] | Retrieve information about a task.
Args:
task (str): Task ID
Returns:
(dict): Information about the task.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error. | [
"Retrieve",
"information",
"about",
"a",
"task",
"."
] | python | train | 25 |
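A usage sketch for `inspect_task` against the low-level docker-py client (the task ID is a placeholder):

```python
import docker

client = docker.APIClient(base_url='unix://var/run/docker.sock')
task = client.inspect_task('8p57qyhh92xr')  # hypothetical swarm task ID
print(task['Status']['State'])
```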
googleapis/google-cloud-python | runtimeconfig/google/cloud/runtimeconfig/_helpers.py | https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/runtimeconfig/google/cloud/runtimeconfig/_helpers.py#L18-L42 | def config_name_from_full_name(full_name):
"""Extract the config name from a full resource name.
>>> config_name_from_full_name('projects/my-proj/configs/my-config')
"my-config"
:type full_name: str
:param full_name:
The full resource name of a config. The full resource name looks like
``projects/project-name/configs/config-name`` and is returned as the
``name`` field of a config resource. See
https://cloud.google.com/deployment-manager/runtime-configurator/reference/rest/v1beta1/projects.configs
:rtype: str
:returns: The config's short name, given its full resource name.
:raises: :class:`ValueError` if ``full_name`` is not the expected format
"""
projects, _, configs, result = full_name.split("/")
if projects != "projects" or configs != "configs":
raise ValueError(
"Unexpected format of resource",
full_name,
'Expected "projects/{proj}/configs/{cfg}"',
)
return result | [
"def",
"config_name_from_full_name",
"(",
"full_name",
")",
":",
"projects",
",",
"_",
",",
"configs",
",",
"result",
"=",
"full_name",
".",
"split",
"(",
"\"/\"",
")",
"if",
"projects",
"!=",
"\"projects\"",
"or",
"configs",
"!=",
"\"configs\"",
":",
"raise",
"ValueError",
"(",
"\"Unexpected format of resource\"",
",",
"full_name",
",",
"'Expected \"projects/{proj}/configs/{cfg}\"'",
",",
")",
"return",
"result"
] | Extract the config name from a full resource name.
>>> config_name_from_full_name('projects/my-proj/configs/my-config')
"my-config"
:type full_name: str
:param full_name:
The full resource name of a config. The full resource name looks like
``projects/project-name/configs/config-name`` and is returned as the
``name`` field of a config resource. See
https://cloud.google.com/deployment-manager/runtime-configurator/reference/rest/v1beta1/projects.configs
:rtype: str
:returns: The config's short name, given its full resource name.
:raises: :class:`ValueError` if ``full_name`` is not the expected format | [
"Extract",
"the",
"config",
"name",
"from",
"a",
"full",
"resource",
"name",
"."
] | python | train | 39.84 |
yougov/pmxbot | pmxbot/commands.py | https://github.com/yougov/pmxbot/blob/5da84a3258a0fd73cb35b60e39769a5d7bfb2ba7/pmxbot/commands.py#L184-L193 | def golfclap(rest):
"Clap for something"
clapv = random.choice(phrases.clapvl)
adv = random.choice(phrases.advl)
adj = random.choice(phrases.adjl)
if rest:
clapee = rest.strip()
karma.Karma.store.change(clapee, 1)
return "/me claps %s for %s, %s %s." % (clapv, rest, adv, adj)
return "/me claps %s, %s %s." % (clapv, adv, adj) | [
"def",
"golfclap",
"(",
"rest",
")",
":",
"clapv",
"=",
"random",
".",
"choice",
"(",
"phrases",
".",
"clapvl",
")",
"adv",
"=",
"random",
".",
"choice",
"(",
"phrases",
".",
"advl",
")",
"adj",
"=",
"random",
".",
"choice",
"(",
"phrases",
".",
"adjl",
")",
"if",
"rest",
":",
"clapee",
"=",
"rest",
".",
"strip",
"(",
")",
"karma",
".",
"Karma",
".",
"store",
".",
"change",
"(",
"clapee",
",",
"1",
")",
"return",
"\"/me claps %s for %s, %s %s.\"",
"%",
"(",
"clapv",
",",
"rest",
",",
"adv",
",",
"adj",
")",
"return",
"\"/me claps %s, %s %s.\"",
"%",
"(",
"clapv",
",",
"adv",
",",
"adj",
")"
] | Clap for something | [
"Clap",
"for",
"something"
] | python | train | 32.9 |
bheinzerling/descriptors | descriptors/Descriptor.py | https://github.com/bheinzerling/descriptors/blob/04fff864649fba9bd6a2d8f8b649cf30994e0e46/descriptors/Descriptor.py#L171-L184 | def exc_thrown_by_descriptor():
"""Return True if the last exception was thrown by a
Descriptor instance.
"""
traceback = sys.exc_info()[2]
tb_locals = traceback.tb_frame.f_locals
# relying on naming convention to get the object that threw
# the exception
if "self" in tb_locals:
if not isinstance(tb_locals["self"], Descriptor):
return False
return True
return False | [
"def",
"exc_thrown_by_descriptor",
"(",
")",
":",
"traceback",
"=",
"sys",
".",
"exc_info",
"(",
")",
"[",
"2",
"]",
"tb_locals",
"=",
"traceback",
".",
"tb_frame",
".",
"f_locals",
"# relying on naming convention to get the object that threw",
"# the exception",
"if",
"\"self\"",
"in",
"tb_locals",
":",
"if",
"not",
"isinstance",
"(",
"tb_locals",
"[",
"\"self\"",
"]",
",",
"Descriptor",
")",
":",
"return",
"False",
"return",
"True",
"return",
"False"
] | Return True if the last exception was thrown by a
Descriptor instance. | [
"Return",
"True",
"if",
"the",
"last",
"exception",
"was",
"thrown",
"by",
"a",
"Descriptor",
"instance",
"."
] | python | test | 33.357143 |
mpg-age-bioinformatics/AGEpy | AGEpy/bed.py | https://github.com/mpg-age-bioinformatics/AGEpy/blob/887808a7a2c1504f39ce8d8cb36c15c1721cd29f/AGEpy/bed.py#L194-L369 | def AnnotateBED(bed, GTF, genome_file, bedcols=None, promoter=[1000,200]):
"""
Annotates a bed file.
:param bed: either a /path/to/file.bed or a Pandas dataframe in bed format. /path/to/file.bed implies bedcols.
:param GTF: /path/to/file.gtf
    :param genome_file: /path/to/file.genome - a tab-separated file of chromosome name and size information
:param bedcols: a comma separated string of column headers to use when reading in a bed file. eg: "chr,start,end,name"
:param promoter: a list containing the upstream start of the promoter region from the TSS and the downstream end of the promoter region from the TSS.
    :returns: a Pandas dataframe with the annotated bed file. Exons and promoters will be reported as well in the annotated_gene_features column.
"""
if type(bed) == type("string"):
bed=pd.read_table(bed,header=None)
bed.columns=bedcols.split(",")
print("Reading GTF file.")
sys.stdout.flush()
GTF=readGTF(GTF)
GTF["gene_name"]=retrieve_GTF_field("gene_name", GTF)
GTF["gene_id"]=retrieve_GTF_field("gene_id", GTF)
GTF["gene_name"]=GTF["gene_name"]+"/"+GTF["gene_id"]
GTF=GTF.drop(["gene_id"],axis=1)
print("Generating promoters annotation.")
sys.stdout.flush()
promoters=GTF[GTF["feature"]=="transcript"]
promoters_plus=promoters[promoters["strand"]=="+"]
promoters_minus=promoters[promoters["strand"]=="-"]
upstream=promoter[0]
downstream=promoter[1]
promoters_plus.loc[:,"promoter_start"]=promoters_plus.loc[:,"start"].astype(int)-upstream
promoters_plus.loc[:,"promoter_end"]=promoters_plus.loc[:,"start"].astype(int)+downstream
promoters_minus.loc[:,"promoter_start"]=promoters_minus["end"].astype(int)-downstream
promoters_minus.loc[:,"promoter_end"]=promoters_minus["end"].astype(int)+upstream
promoters=pd.concat([promoters_plus,promoters_minus])
promoters=promoters[["seqname","feature","promoter_start","promoter_end","gene_name"]]
promoters.columns=["seqname","feature","start","end","gene_name"]
promoters.loc[:,"feature"]="promoter"
promoters.drop_duplicates(inplace=True)
promoters.reset_index(inplace=True, drop=True)
chr_sizes=pd.read_table(genome_file,header=None)
chr_sizes.columns=["seqname","size"]
chr_sizes.loc[:,"seqname"]=chr_sizes["seqname"].astype(str)
promoters.loc[:,"seqname"]=promoters["seqname"].astype(str)
promoters=pd.merge(promoters,chr_sizes,how="left",on=["seqname"])
def CorrectStart(df):
s=df["start"]
if s < 0:
s=0
return s
def CorrectEnd(df):
s=df["end"]
e=df["size"]
if s > e:
s=e
return s
promoters.loc[:,"start"]=promoters.apply(CorrectStart,axis=1)
promoters.loc[:,"end"]=promoters.apply(CorrectEnd,axis=1)
promoters.drop(["size"],axis=1, inplace=True)
GTFs=GTF[["seqname","feature","start","end","gene_name"]]
GTFs=GTFs[ GTFs["feature"]!= "gene"]
GTFs.drop_duplicates(inplace=True)
GTFs.reset_index(inplace=True, drop=True)
GTFs=pd.concat([GTFs,promoters])
def NewName(df):
name=df["gene_name"]
feature=df["feature"]
if feature == "transcript":
res=name
else:
res=name+":"+feature
return res
GTFs.loc[:,"gene_name"]=GTFs.apply(NewName, axis=1)
GTFs=GTFs[["seqname","start","end","gene_name"]]
print( "Intersecting annotation tables and bed." )
sys.stdout.flush()
refGTF=dfTObedtool(GTFs)
pos=dfTObedtool(bed)
colsGTF=GTFs.columns.tolist()
newCols=bed.columns.tolist()
for f in colsGTF:
newCols.append(f+"_")
newCols_=[ s for s in newCols if s not in ["seqname_","start_", "end_"]]
pos=pos.intersect(refGTF, loj=True)
pos=pd.read_table(pos.fn , names=newCols)
pos=pos[newCols_]
print("Merging features.")
sys.stdout.flush()
def GetFeature(x):
if ":" in x:
res=x.split(":")[1]
else:
res=np.nan
return res
def GetName(x):
if ":" in x:
res=x.split(":")[0]
elif type(x) == type("string"):
if x != ".":
res=x
else:
res=np.nan
else:
res=np.nan
return res
pos["gene_feature_"]=pos["gene_name_"].apply( lambda x: GetFeature(x) )
pos["gene_name_"]=pos["gene_name_"].apply( lambda x: GetName(x) )
refcol=pos.columns.tolist()
refcol=[ s for s in refcol if s != "gene_feature_" ]
def CombineAnn(df):
def JOIN(x):
return ', '.join([ str(s) for s in list(set(df[x])) if str(s) != "nan" ] )
return pd.Series(dict( gene_feature_ = JOIN("gene_feature_") ) )
pos_=pos.groupby(refcol).apply(CombineAnn)
pos_.reset_index(inplace=True, drop=False)
def MergeNameFeatures(df):
name=df["gene_name_"]
feature=df["gene_feature_"]
if (type(name) == type("string")) & (name != ".") :
if type(feature) == type("string"):
if len(feature) > 0:
res=name+": "+feature
else:
res=name
else:
res=name
else:
res=np.nan
return res
pos_["annotated_gene_features"]=pos_.apply(MergeNameFeatures,axis=1)
pos_=pos_.drop(["gene_name_","gene_feature_"],axis=1)
def CombineAnn(df):
def JOIN(x):
return '; '.join([ str(s) for s in list(set(df[x])) if str(s) != "nan" ] )
return pd.Series(dict( annotated_gene_features = JOIN("annotated_gene_features") ) )
refcol=[ s for s in refcol if s != "gene_name_" ]
pos_=pos_.groupby(refcol).apply(CombineAnn)
pos_.reset_index(inplace=True, drop=False)
return pos_ | [
"def",
"AnnotateBED",
"(",
"bed",
",",
"GTF",
",",
"genome_file",
",",
"bedcols",
"=",
"None",
",",
"promoter",
"=",
"[",
"1000",
",",
"200",
"]",
")",
":",
"if",
"type",
"(",
"bed",
")",
"==",
"type",
"(",
"\"string\"",
")",
":",
"bed",
"=",
"pd",
".",
"read_table",
"(",
"bed",
",",
"header",
"=",
"None",
")",
"bed",
".",
"columns",
"=",
"bedcols",
".",
"split",
"(",
"\",\"",
")",
"print",
"(",
"\"Reading GTF file.\"",
")",
"sys",
".",
"stdout",
".",
"flush",
"(",
")",
"GTF",
"=",
"readGTF",
"(",
"GTF",
")",
"GTF",
"[",
"\"gene_name\"",
"]",
"=",
"retrieve_GTF_field",
"(",
"\"gene_name\"",
",",
"GTF",
")",
"GTF",
"[",
"\"gene_id\"",
"]",
"=",
"retrieve_GTF_field",
"(",
"\"gene_id\"",
",",
"GTF",
")",
"GTF",
"[",
"\"gene_name\"",
"]",
"=",
"GTF",
"[",
"\"gene_name\"",
"]",
"+",
"\"/\"",
"+",
"GTF",
"[",
"\"gene_id\"",
"]",
"GTF",
"=",
"GTF",
".",
"drop",
"(",
"[",
"\"gene_id\"",
"]",
",",
"axis",
"=",
"1",
")",
"print",
"(",
"\"Generating promoters annotation.\"",
")",
"sys",
".",
"stdout",
".",
"flush",
"(",
")",
"promoters",
"=",
"GTF",
"[",
"GTF",
"[",
"\"feature\"",
"]",
"==",
"\"transcript\"",
"]",
"promoters_plus",
"=",
"promoters",
"[",
"promoters",
"[",
"\"strand\"",
"]",
"==",
"\"+\"",
"]",
"promoters_minus",
"=",
"promoters",
"[",
"promoters",
"[",
"\"strand\"",
"]",
"==",
"\"-\"",
"]",
"upstream",
"=",
"promoter",
"[",
"0",
"]",
"downstream",
"=",
"promoter",
"[",
"1",
"]",
"promoters_plus",
".",
"loc",
"[",
":",
",",
"\"promoter_start\"",
"]",
"=",
"promoters_plus",
".",
"loc",
"[",
":",
",",
"\"start\"",
"]",
".",
"astype",
"(",
"int",
")",
"-",
"upstream",
"promoters_plus",
".",
"loc",
"[",
":",
",",
"\"promoter_end\"",
"]",
"=",
"promoters_plus",
".",
"loc",
"[",
":",
",",
"\"start\"",
"]",
".",
"astype",
"(",
"int",
")",
"+",
"downstream",
"promoters_minus",
".",
"loc",
"[",
":",
",",
"\"promoter_start\"",
"]",
"=",
"promoters_minus",
"[",
"\"end\"",
"]",
".",
"astype",
"(",
"int",
")",
"-",
"downstream",
"promoters_minus",
".",
"loc",
"[",
":",
",",
"\"promoter_end\"",
"]",
"=",
"promoters_minus",
"[",
"\"end\"",
"]",
".",
"astype",
"(",
"int",
")",
"+",
"upstream",
"promoters",
"=",
"pd",
".",
"concat",
"(",
"[",
"promoters_plus",
",",
"promoters_minus",
"]",
")",
"promoters",
"=",
"promoters",
"[",
"[",
"\"seqname\"",
",",
"\"feature\"",
",",
"\"promoter_start\"",
",",
"\"promoter_end\"",
",",
"\"gene_name\"",
"]",
"]",
"promoters",
".",
"columns",
"=",
"[",
"\"seqname\"",
",",
"\"feature\"",
",",
"\"start\"",
",",
"\"end\"",
",",
"\"gene_name\"",
"]",
"promoters",
".",
"loc",
"[",
":",
",",
"\"feature\"",
"]",
"=",
"\"promoter\"",
"promoters",
".",
"drop_duplicates",
"(",
"inplace",
"=",
"True",
")",
"promoters",
".",
"reset_index",
"(",
"inplace",
"=",
"True",
",",
"drop",
"=",
"True",
")",
"chr_sizes",
"=",
"pd",
".",
"read_table",
"(",
"genome_file",
",",
"header",
"=",
"None",
")",
"chr_sizes",
".",
"columns",
"=",
"[",
"\"seqname\"",
",",
"\"size\"",
"]",
"chr_sizes",
".",
"loc",
"[",
":",
",",
"\"seqname\"",
"]",
"=",
"chr_sizes",
"[",
"\"seqname\"",
"]",
".",
"astype",
"(",
"str",
")",
"promoters",
".",
"loc",
"[",
":",
",",
"\"seqname\"",
"]",
"=",
"promoters",
"[",
"\"seqname\"",
"]",
".",
"astype",
"(",
"str",
")",
"promoters",
"=",
"pd",
".",
"merge",
"(",
"promoters",
",",
"chr_sizes",
",",
"how",
"=",
"\"left\"",
",",
"on",
"=",
"[",
"\"seqname\"",
"]",
")",
"def",
"CorrectStart",
"(",
"df",
")",
":",
"s",
"=",
"df",
"[",
"\"start\"",
"]",
"if",
"s",
"<",
"0",
":",
"s",
"=",
"0",
"return",
"s",
"def",
"CorrectEnd",
"(",
"df",
")",
":",
"s",
"=",
"df",
"[",
"\"end\"",
"]",
"e",
"=",
"df",
"[",
"\"size\"",
"]",
"if",
"s",
">",
"e",
":",
"s",
"=",
"e",
"return",
"s",
"promoters",
".",
"loc",
"[",
":",
",",
"\"start\"",
"]",
"=",
"promoters",
".",
"apply",
"(",
"CorrectStart",
",",
"axis",
"=",
"1",
")",
"promoters",
".",
"loc",
"[",
":",
",",
"\"end\"",
"]",
"=",
"promoters",
".",
"apply",
"(",
"CorrectEnd",
",",
"axis",
"=",
"1",
")",
"promoters",
".",
"drop",
"(",
"[",
"\"size\"",
"]",
",",
"axis",
"=",
"1",
",",
"inplace",
"=",
"True",
")",
"GTFs",
"=",
"GTF",
"[",
"[",
"\"seqname\"",
",",
"\"feature\"",
",",
"\"start\"",
",",
"\"end\"",
",",
"\"gene_name\"",
"]",
"]",
"GTFs",
"=",
"GTFs",
"[",
"GTFs",
"[",
"\"feature\"",
"]",
"!=",
"\"gene\"",
"]",
"GTFs",
".",
"drop_duplicates",
"(",
"inplace",
"=",
"True",
")",
"GTFs",
".",
"reset_index",
"(",
"inplace",
"=",
"True",
",",
"drop",
"=",
"True",
")",
"GTFs",
"=",
"pd",
".",
"concat",
"(",
"[",
"GTFs",
",",
"promoters",
"]",
")",
"def",
"NewName",
"(",
"df",
")",
":",
"name",
"=",
"df",
"[",
"\"gene_name\"",
"]",
"feature",
"=",
"df",
"[",
"\"feature\"",
"]",
"if",
"feature",
"==",
"\"transcript\"",
":",
"res",
"=",
"name",
"else",
":",
"res",
"=",
"name",
"+",
"\":\"",
"+",
"feature",
"return",
"res",
"GTFs",
".",
"loc",
"[",
":",
",",
"\"gene_name\"",
"]",
"=",
"GTFs",
".",
"apply",
"(",
"NewName",
",",
"axis",
"=",
"1",
")",
"GTFs",
"=",
"GTFs",
"[",
"[",
"\"seqname\"",
",",
"\"start\"",
",",
"\"end\"",
",",
"\"gene_name\"",
"]",
"]",
"print",
"(",
"\"Intersecting annotation tables and bed.\"",
")",
"sys",
".",
"stdout",
".",
"flush",
"(",
")",
"refGTF",
"=",
"dfTObedtool",
"(",
"GTFs",
")",
"pos",
"=",
"dfTObedtool",
"(",
"bed",
")",
"colsGTF",
"=",
"GTFs",
".",
"columns",
".",
"tolist",
"(",
")",
"newCols",
"=",
"bed",
".",
"columns",
".",
"tolist",
"(",
")",
"for",
"f",
"in",
"colsGTF",
":",
"newCols",
".",
"append",
"(",
"f",
"+",
"\"_\"",
")",
"newCols_",
"=",
"[",
"s",
"for",
"s",
"in",
"newCols",
"if",
"s",
"not",
"in",
"[",
"\"seqname_\"",
",",
"\"start_\"",
",",
"\"end_\"",
"]",
"]",
"pos",
"=",
"pos",
".",
"intersect",
"(",
"refGTF",
",",
"loj",
"=",
"True",
")",
"pos",
"=",
"pd",
".",
"read_table",
"(",
"pos",
".",
"fn",
",",
"names",
"=",
"newCols",
")",
"pos",
"=",
"pos",
"[",
"newCols_",
"]",
"print",
"(",
"\"Merging features.\"",
")",
"sys",
".",
"stdout",
".",
"flush",
"(",
")",
"def",
"GetFeature",
"(",
"x",
")",
":",
"if",
"\":\"",
"in",
"x",
":",
"res",
"=",
"x",
".",
"split",
"(",
"\":\"",
")",
"[",
"1",
"]",
"else",
":",
"res",
"=",
"np",
".",
"nan",
"return",
"res",
"def",
"GetName",
"(",
"x",
")",
":",
"if",
"\":\"",
"in",
"x",
":",
"res",
"=",
"x",
".",
"split",
"(",
"\":\"",
")",
"[",
"0",
"]",
"elif",
"type",
"(",
"x",
")",
"==",
"type",
"(",
"\"string\"",
")",
":",
"if",
"x",
"!=",
"\".\"",
":",
"res",
"=",
"x",
"else",
":",
"res",
"=",
"np",
".",
"nan",
"else",
":",
"res",
"=",
"np",
".",
"nan",
"return",
"res",
"pos",
"[",
"\"gene_feature_\"",
"]",
"=",
"pos",
"[",
"\"gene_name_\"",
"]",
".",
"apply",
"(",
"lambda",
"x",
":",
"GetFeature",
"(",
"x",
")",
")",
"pos",
"[",
"\"gene_name_\"",
"]",
"=",
"pos",
"[",
"\"gene_name_\"",
"]",
".",
"apply",
"(",
"lambda",
"x",
":",
"GetName",
"(",
"x",
")",
")",
"refcol",
"=",
"pos",
".",
"columns",
".",
"tolist",
"(",
")",
"refcol",
"=",
"[",
"s",
"for",
"s",
"in",
"refcol",
"if",
"s",
"!=",
"\"gene_feature_\"",
"]",
"def",
"CombineAnn",
"(",
"df",
")",
":",
"def",
"JOIN",
"(",
"x",
")",
":",
"return",
"', '",
".",
"join",
"(",
"[",
"str",
"(",
"s",
")",
"for",
"s",
"in",
"list",
"(",
"set",
"(",
"df",
"[",
"x",
"]",
")",
")",
"if",
"str",
"(",
"s",
")",
"!=",
"\"nan\"",
"]",
")",
"return",
"pd",
".",
"Series",
"(",
"dict",
"(",
"gene_feature_",
"=",
"JOIN",
"(",
"\"gene_feature_\"",
")",
")",
")",
"pos_",
"=",
"pos",
".",
"groupby",
"(",
"refcol",
")",
".",
"apply",
"(",
"CombineAnn",
")",
"pos_",
".",
"reset_index",
"(",
"inplace",
"=",
"True",
",",
"drop",
"=",
"False",
")",
"def",
"MergeNameFeatures",
"(",
"df",
")",
":",
"name",
"=",
"df",
"[",
"\"gene_name_\"",
"]",
"feature",
"=",
"df",
"[",
"\"gene_feature_\"",
"]",
"if",
"(",
"type",
"(",
"name",
")",
"==",
"type",
"(",
"\"string\"",
")",
")",
"&",
"(",
"name",
"!=",
"\".\"",
")",
":",
"if",
"type",
"(",
"feature",
")",
"==",
"type",
"(",
"\"string\"",
")",
":",
"if",
"len",
"(",
"feature",
")",
">",
"0",
":",
"res",
"=",
"name",
"+",
"\": \"",
"+",
"feature",
"else",
":",
"res",
"=",
"name",
"else",
":",
"res",
"=",
"name",
"else",
":",
"res",
"=",
"np",
".",
"nan",
"return",
"res",
"pos_",
"[",
"\"annotated_gene_features\"",
"]",
"=",
"pos_",
".",
"apply",
"(",
"MergeNameFeatures",
",",
"axis",
"=",
"1",
")",
"pos_",
"=",
"pos_",
".",
"drop",
"(",
"[",
"\"gene_name_\"",
",",
"\"gene_feature_\"",
"]",
",",
"axis",
"=",
"1",
")",
"def",
"CombineAnn",
"(",
"df",
")",
":",
"def",
"JOIN",
"(",
"x",
")",
":",
"return",
"'; '",
".",
"join",
"(",
"[",
"str",
"(",
"s",
")",
"for",
"s",
"in",
"list",
"(",
"set",
"(",
"df",
"[",
"x",
"]",
")",
")",
"if",
"str",
"(",
"s",
")",
"!=",
"\"nan\"",
"]",
")",
"return",
"pd",
".",
"Series",
"(",
"dict",
"(",
"annotated_gene_features",
"=",
"JOIN",
"(",
"\"annotated_gene_features\"",
")",
")",
")",
"refcol",
"=",
"[",
"s",
"for",
"s",
"in",
"refcol",
"if",
"s",
"!=",
"\"gene_name_\"",
"]",
"pos_",
"=",
"pos_",
".",
"groupby",
"(",
"refcol",
")",
".",
"apply",
"(",
"CombineAnn",
")",
"pos_",
".",
"reset_index",
"(",
"inplace",
"=",
"True",
",",
"drop",
"=",
"False",
")",
"return",
"pos_"
] | Annotates a bed file.
:param bed: either a /path/to/file.bed or a Pandas dataframe in bed format. /path/to/file.bed implies bedcols.
:param GTF: /path/to/file.gtf
    :param genome_file: /path/to/file.genome - a tab-separated file of chromosome names and sizes
    :param bedcols: a comma-separated string of column headers to use when reading in a bed file, e.g. "chr,start,end,name"
    :param promoter: a list containing the upstream start of the promoter region from the TSS and the downstream end of the promoter region from the TSS.
    :returns: a Pandas dataframe with the annotated bed file. Exons and promoters will be reported as well in the annotated_gene_features column. | [
"Annotates",
"a",
"bed",
"file",
"."
] | python | train | 32.107955 |
RonenNess/Fileter | fileter/iterators/grep.py | https://github.com/RonenNess/Fileter/blob/5372221b4049d5d46a9926573b91af17681c81f3/fileter/iterators/grep.py#L39-L55 | def process_file(self, path, dryrun):
"""
        Scan the file and return its lines matching the expression (or just the path on a dry run).
"""
# if dryrun just return files
if dryrun:
return path
# scan file and match lines
ret = []
with open(path, "r") as infile:
for line in infile:
if re.search(self.__exp, line):
ret.append(line)
# if found matches return list of lines, else return None
return ret if len(ret) > 0 else None | [
"def",
"process_file",
"(",
"self",
",",
"path",
",",
"dryrun",
")",
":",
"# if dryrun just return files",
"if",
"dryrun",
":",
"return",
"path",
"# scan file and match lines",
"ret",
"=",
"[",
"]",
"with",
"open",
"(",
"path",
",",
"\"r\"",
")",
"as",
"infile",
":",
"for",
"line",
"in",
"infile",
":",
"if",
"re",
".",
"search",
"(",
"self",
".",
"__exp",
",",
"line",
")",
":",
"ret",
".",
"append",
"(",
"line",
")",
"# if found matches return list of lines, else return None",
"return",
"ret",
"if",
"len",
"(",
"ret",
")",
">",
"0",
"else",
"None"
] | Scan the file and return its lines matching the expression (or just the path on a dry run). | [
    "Scan",
    "the",
    "file",
    "and",
    "return",
    "its",
    "lines",
    "matching",
    "the",
    "expression",
    "(",
    "or",
    "just",
    "the",
    "path",
    "on",
    "a",
    "dry",
    "run",
    ")",
    "."
] | python | train | 27.941176 |
cloud9ers/gurumate | environment/lib/python2.7/site-packages/IPython/lib/kernel.py | https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/lib/kernel.py#L207-L253 | def tunnel_to_kernel(connection_info, sshserver, sshkey=None):
"""tunnel connections to a kernel via ssh
This will open four SSH tunnels from localhost on this machine to the
ports associated with the kernel. They can be either direct
localhost-localhost tunnels, or if an intermediate server is necessary,
the kernel must be listening on a public IP.
Parameters
----------
connection_info : dict or str (path)
Either a connection dict, or the path to a JSON connection file
sshserver : str
        The ssh server to use to tunnel to the kernel. Can be a full
`user@server:port` string. ssh config aliases are respected.
sshkey : str [optional]
Path to file containing ssh key to use for authentication.
Only necessary if your ssh config does not already associate
a keyfile with the host.
Returns
-------
(shell, iopub, stdin, hb) : ints
The four ports on localhost that have been forwarded to the kernel.
"""
if isinstance(connection_info, basestring):
# it's a path, unpack it
with open(connection_info) as f:
connection_info = json.loads(f.read())
cf = connection_info
lports = tunnel.select_random_ports(4)
rports = cf['shell_port'], cf['iopub_port'], cf['stdin_port'], cf['hb_port']
remote_ip = cf['ip']
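    # Prefer passwordless (key-based) ssh; otherwise prompt for the password once.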
if tunnel.try_passwordless_ssh(sshserver, sshkey):
password=False
else:
password = getpass("SSH Password for %s: "%sshserver)
for lp,rp in zip(lports, rports):
tunnel.ssh_tunnel(lp, rp, sshserver, remote_ip, sshkey, password)
return tuple(lports) | [
"def",
"tunnel_to_kernel",
"(",
"connection_info",
",",
"sshserver",
",",
"sshkey",
"=",
"None",
")",
":",
"if",
"isinstance",
"(",
"connection_info",
",",
"basestring",
")",
":",
"# it's a path, unpack it",
"with",
"open",
"(",
"connection_info",
")",
"as",
"f",
":",
"connection_info",
"=",
"json",
".",
"loads",
"(",
"f",
".",
"read",
"(",
")",
")",
"cf",
"=",
"connection_info",
"lports",
"=",
"tunnel",
".",
"select_random_ports",
"(",
"4",
")",
"rports",
"=",
"cf",
"[",
"'shell_port'",
"]",
",",
"cf",
"[",
"'iopub_port'",
"]",
",",
"cf",
"[",
"'stdin_port'",
"]",
",",
"cf",
"[",
"'hb_port'",
"]",
"remote_ip",
"=",
"cf",
"[",
"'ip'",
"]",
"if",
"tunnel",
".",
"try_passwordless_ssh",
"(",
"sshserver",
",",
"sshkey",
")",
":",
"password",
"=",
"False",
"else",
":",
"password",
"=",
"getpass",
"(",
"\"SSH Password for %s: \"",
"%",
"sshserver",
")",
"for",
"lp",
",",
"rp",
"in",
"zip",
"(",
"lports",
",",
"rports",
")",
":",
"tunnel",
".",
"ssh_tunnel",
"(",
"lp",
",",
"rp",
",",
"sshserver",
",",
"remote_ip",
",",
"sshkey",
",",
"password",
")",
"return",
"tuple",
"(",
"lports",
")"
] | tunnel connections to a kernel via ssh
This will open four SSH tunnels from localhost on this machine to the
ports associated with the kernel. They can be either direct
localhost-localhost tunnels, or if an intermediate server is necessary,
the kernel must be listening on a public IP.
Parameters
----------
connection_info : dict or str (path)
Either a connection dict, or the path to a JSON connection file
sshserver : str
        The ssh server to use to tunnel to the kernel. Can be a full
`user@server:port` string. ssh config aliases are respected.
sshkey : str [optional]
Path to file containing ssh key to use for authentication.
Only necessary if your ssh config does not already associate
a keyfile with the host.
Returns
-------
(shell, iopub, stdin, hb) : ints
The four ports on localhost that have been forwarded to the kernel. | [
"tunnel",
"connections",
"to",
"a",
"kernel",
"via",
"ssh",
"This",
"will",
"open",
"four",
"SSH",
"tunnels",
"from",
"localhost",
"on",
"this",
"machine",
"to",
"the",
"ports",
"associated",
"with",
"the",
"kernel",
".",
"They",
"can",
"be",
"either",
"direct",
"localhost",
"-",
"localhost",
"tunnels",
"or",
"if",
"an",
"intermediate",
"server",
"is",
"necessary",
"the",
"kernel",
"must",
"be",
"listening",
"on",
"a",
"public",
"IP",
".",
"Parameters",
"----------",
"connection_info",
":",
"dict",
"or",
"str",
"(",
"path",
")",
"Either",
"a",
"connection",
"dict",
"or",
"the",
"path",
"to",
"a",
"JSON",
"connection",
"file",
"sshserver",
":",
"str",
"The",
"ssh",
"sever",
"to",
"use",
"to",
"tunnel",
"to",
"the",
"kernel",
".",
"Can",
"be",
"a",
"full",
"user"
] | python | test | 35.148936 |
pawel-kow/domainconnect_python | domainconnect/domainconnect.py | https://github.com/pawel-kow/domainconnect_python/blob/2467093cc4e997234e0fb5c55e71f76b856c1ab1/domainconnect/domainconnect.py#L218-L239 | def get_domain_config(self, domain):
"""Makes a discovery of domain name and resolves configuration of DNS provider
:param domain: str
domain name
:return: DomainConnectConfig
domain connect config
:raises: NoDomainConnectRecordException
when no _domainconnect record found
:raises: NoDomainConnectSettingsException
when settings are not found
"""
domain_root = self.identify_domain_root(domain)
host = ''
if len(domain_root) != len(domain):
host = domain.replace('.' + domain_root, '')
domain_connect_api = self._identify_domain_connect_api(domain_root)
ret = self._get_domain_config_for_root(domain_root, domain_connect_api)
return DomainConnectConfig(domain, domain_root, host, ret) | [
"def",
"get_domain_config",
"(",
"self",
",",
"domain",
")",
":",
"domain_root",
"=",
"self",
".",
"identify_domain_root",
"(",
"domain",
")",
"host",
"=",
"''",
"if",
"len",
"(",
"domain_root",
")",
"!=",
"len",
"(",
"domain",
")",
":",
"host",
"=",
"domain",
".",
"replace",
"(",
"'.'",
"+",
"domain_root",
",",
"''",
")",
"domain_connect_api",
"=",
"self",
".",
"_identify_domain_connect_api",
"(",
"domain_root",
")",
"ret",
"=",
"self",
".",
"_get_domain_config_for_root",
"(",
"domain_root",
",",
"domain_connect_api",
")",
"return",
"DomainConnectConfig",
"(",
"domain",
",",
"domain_root",
",",
"host",
",",
"ret",
")"
] | Makes a discovery of domain name and resolves configuration of DNS provider
:param domain: str
domain name
:return: DomainConnectConfig
domain connect config
:raises: NoDomainConnectRecordException
when no _domainconnect record found
:raises: NoDomainConnectSettingsException
when settings are not found | [
"Makes",
"a",
"discovery",
"of",
"domain",
"name",
"and",
"resolves",
"configuration",
"of",
"DNS",
"provider"
] | python | train | 37.454545 |
aio-libs/aiohttp | examples/legacy/tcp_protocol_parser.py | https://github.com/aio-libs/aiohttp/blob/9504fe2affaaff673fa4f3754c1c44221f8ba47d/examples/legacy/tcp_protocol_parser.py#L23-L46 | def my_protocol_parser(out, buf):
"""Parser is used with StreamParser for incremental protocol parsing.
Parser is a generator function, but it is not a coroutine. Usually
parsers are implemented as a state machine.
    More details in asyncio/parsers.py.
    Existing parsers:
* HTTP protocol parsers asyncio/http/protocol.py
* websocket parser asyncio/http/websocket.py
"""
while True:
tp = yield from buf.read(5)
if tp in (MSG_PING, MSG_PONG):
# skip line
yield from buf.skipuntil(b'\r\n')
out.feed_data(Message(tp, None))
elif tp == MSG_STOP:
out.feed_data(Message(tp, None))
elif tp == MSG_TEXT:
# read text
text = yield from buf.readuntil(b'\r\n')
out.feed_data(Message(tp, text.strip().decode('utf-8')))
else:
raise ValueError('Unknown protocol prefix.') | [
"def",
"my_protocol_parser",
"(",
"out",
",",
"buf",
")",
":",
"while",
"True",
":",
"tp",
"=",
"yield",
"from",
"buf",
".",
"read",
"(",
"5",
")",
"if",
"tp",
"in",
"(",
"MSG_PING",
",",
"MSG_PONG",
")",
":",
"# skip line",
"yield",
"from",
"buf",
".",
"skipuntil",
"(",
"b'\\r\\n'",
")",
"out",
".",
"feed_data",
"(",
"Message",
"(",
"tp",
",",
"None",
")",
")",
"elif",
"tp",
"==",
"MSG_STOP",
":",
"out",
".",
"feed_data",
"(",
"Message",
"(",
"tp",
",",
"None",
")",
")",
"elif",
"tp",
"==",
"MSG_TEXT",
":",
"# read text",
"text",
"=",
"yield",
"from",
"buf",
".",
"readuntil",
"(",
"b'\\r\\n'",
")",
"out",
".",
"feed_data",
"(",
"Message",
"(",
"tp",
",",
"text",
".",
"strip",
"(",
")",
".",
"decode",
"(",
"'utf-8'",
")",
")",
")",
"else",
":",
"raise",
"ValueError",
"(",
"'Unknown protocol prefix.'",
")"
] | Parser is used with StreamParser for incremental protocol parsing.
Parser is a generator function, but it is not a coroutine. Usually
parsers are implemented as a state machine.
    More details in asyncio/parsers.py.
    Existing parsers:
* HTTP protocol parsers asyncio/http/protocol.py
* websocket parser asyncio/http/websocket.py | [
"Parser",
"is",
"used",
"with",
"StreamParser",
"for",
"incremental",
"protocol",
"parsing",
".",
"Parser",
"is",
"a",
"generator",
"function",
"but",
"it",
"is",
"not",
"a",
"coroutine",
".",
"Usually",
"parsers",
"are",
"implemented",
"as",
"a",
"state",
"machine",
"."
] | python | train | 37.708333 |
Erotemic/ubelt | ubelt/util_hash.py | https://github.com/Erotemic/ubelt/blob/db802f3ad8abba025db74b54f86e6892b8927325/ubelt/util_hash.py#L709-L769 | def hash_data(data, hasher=NoParam, base=NoParam, types=False,
hashlen=NoParam, convert=False):
"""
Get a unique hash depending on the state of the data.
Args:
data (object):
Any sort of loosely organized data
hasher (str or HASHER):
Hash algorithm from hashlib, defaults to `sha512`.
base (str or List[str]):
Shorthand key or a list of symbols. Valid keys are: 'abc', 'hex',
and 'dec'. Defaults to 'hex'.
types (bool):
If True data types are included in the hash, otherwise only the raw
data is hashed. Defaults to False.
hashlen (int):
Maximum number of symbols in the returned hash. If not specified,
all are returned. DEPRECATED. Use slice syntax instead.
        convert (bool, optional, default=False):
            if True, try to convert the data to json and the json is hashed
instead. This can improve runtime in some instances, however the
hash may differ from the case where convert=False.
Notes:
alphabet26 is a pretty nice base, I recommend it.
However we default to hex because it is standard.
        This means the output of hash_data with hasher='sha1' (and the default
        hex base) will be the same as the output of `sha1sum`.
Returns:
str: text - hash string
Example:
>>> import ubelt as ub
>>> print(ub.hash_data([1, 2, (3, '4')], convert=False))
60b758587f599663931057e6ebdf185a...
>>> print(ub.hash_data([1, 2, (3, '4')], base='abc', hasher='sha512')[:32])
hsrgqvfiuxvvhcdnypivhhthmrolkzej
"""
if convert and isinstance(data, six.string_types): # nocover
try:
data = json.dumps(data)
except TypeError as ex:
# import warnings
# warnings.warn('Unable to encode input as json due to: {!r}'.format(ex))
pass
base = _rectify_base(base)
hashlen = _rectify_hashlen(hashlen)
hasher = _rectify_hasher(hasher)()
# Feed the data into the hasher
_update_hasher(hasher, data, types=types)
# Get the hashed representation
text = _digest_hasher(hasher, hashlen, base)
return text | [
"def",
"hash_data",
"(",
"data",
",",
"hasher",
"=",
"NoParam",
",",
"base",
"=",
"NoParam",
",",
"types",
"=",
"False",
",",
"hashlen",
"=",
"NoParam",
",",
"convert",
"=",
"False",
")",
":",
"if",
"convert",
"and",
"isinstance",
"(",
"data",
",",
"six",
".",
"string_types",
")",
":",
"# nocover",
"try",
":",
"data",
"=",
"json",
".",
"dumps",
"(",
"data",
")",
"except",
"TypeError",
"as",
"ex",
":",
"# import warnings",
"# warnings.warn('Unable to encode input as json due to: {!r}'.format(ex))",
"pass",
"base",
"=",
"_rectify_base",
"(",
"base",
")",
"hashlen",
"=",
"_rectify_hashlen",
"(",
"hashlen",
")",
"hasher",
"=",
"_rectify_hasher",
"(",
"hasher",
")",
"(",
")",
"# Feed the data into the hasher",
"_update_hasher",
"(",
"hasher",
",",
"data",
",",
"types",
"=",
"types",
")",
"# Get the hashed representation",
"text",
"=",
"_digest_hasher",
"(",
"hasher",
",",
"hashlen",
",",
"base",
")",
"return",
"text"
] | Get a unique hash depending on the state of the data.
Args:
data (object):
Any sort of loosely organized data
hasher (str or HASHER):
Hash algorithm from hashlib, defaults to `sha512`.
base (str or List[str]):
Shorthand key or a list of symbols. Valid keys are: 'abc', 'hex',
and 'dec'. Defaults to 'hex'.
types (bool):
If True data types are included in the hash, otherwise only the raw
data is hashed. Defaults to False.
hashlen (int):
Maximum number of symbols in the returned hash. If not specified,
all are returned. DEPRECATED. Use slice syntax instead.
        convert (bool, optional, default=False):
            if True, try to convert the data to json and the json is hashed
instead. This can improve runtime in some instances, however the
hash may differ from the case where convert=False.
Notes:
alphabet26 is a pretty nice base, I recommend it.
However we default to hex because it is standard.
        This means the output of hash_data with hasher='sha1' (and the default
        hex base) will be the same as the output of `sha1sum`.
Returns:
str: text - hash string
Example:
>>> import ubelt as ub
>>> print(ub.hash_data([1, 2, (3, '4')], convert=False))
60b758587f599663931057e6ebdf185a...
>>> print(ub.hash_data([1, 2, (3, '4')], base='abc', hasher='sha512')[:32])
hsrgqvfiuxvvhcdnypivhhthmrolkzej | [
"Get",
"a",
"unique",
"hash",
"depending",
"on",
"the",
"state",
"of",
"the",
"data",
"."
] | python | valid | 35.737705 |
flatangle/flatlib | flatlib/dignities/accidental.py | https://github.com/flatangle/flatlib/blob/44e05b2991a296c678adbc17a1d51b6a21bc867c/flatlib/dignities/accidental.py#L74-L122 | def haiz(obj, chart):
""" Returns if an object is in Haiz. """
objGender = obj.gender()
objFaction = obj.faction()
if obj.id == const.MERCURY:
# Gender and faction of mercury depends on orientality
sun = chart.getObject(const.SUN)
orientalityM = orientality(obj, sun)
if orientalityM == ORIENTAL:
objGender = const.MASCULINE
objFaction = const.DIURNAL
else:
objGender = const.FEMININE
objFaction = const.NOCTURNAL
# Object gender match sign gender?
signGender = props.sign.gender[obj.sign]
genderConformity = (objGender == signGender)
# Match faction
factionConformity = False
diurnalChart = chart.isDiurnal()
if obj.id == const.SUN and not diurnalChart:
# Sun is in conformity only when above horizon
factionConformity = False
else:
# Get list of houses in the chart's diurnal faction
if diurnalChart:
diurnalFaction = props.house.aboveHorizon
nocturnalFaction = props.house.belowHorizon
else:
diurnalFaction = props.house.belowHorizon
nocturnalFaction = props.house.aboveHorizon
# Get the object's house and match factions
objHouse = chart.houses.getObjectHouse(obj)
if (objFaction == const.DIURNAL and objHouse.id in diurnalFaction or
objFaction == const.NOCTURNAL and objHouse.id in nocturnalFaction):
factionConformity = True
# Match things
if (genderConformity and factionConformity):
return HAIZ
elif (not genderConformity and not factionConformity):
return CHAIZ
else:
return None | [
"def",
"haiz",
"(",
"obj",
",",
"chart",
")",
":",
"objGender",
"=",
"obj",
".",
"gender",
"(",
")",
"objFaction",
"=",
"obj",
".",
"faction",
"(",
")",
"if",
"obj",
".",
"id",
"==",
"const",
".",
"MERCURY",
":",
"# Gender and faction of mercury depends on orientality",
"sun",
"=",
"chart",
".",
"getObject",
"(",
"const",
".",
"SUN",
")",
"orientalityM",
"=",
"orientality",
"(",
"obj",
",",
"sun",
")",
"if",
"orientalityM",
"==",
"ORIENTAL",
":",
"objGender",
"=",
"const",
".",
"MASCULINE",
"objFaction",
"=",
"const",
".",
"DIURNAL",
"else",
":",
"objGender",
"=",
"const",
".",
"FEMININE",
"objFaction",
"=",
"const",
".",
"NOCTURNAL",
"# Object gender match sign gender?",
"signGender",
"=",
"props",
".",
"sign",
".",
"gender",
"[",
"obj",
".",
"sign",
"]",
"genderConformity",
"=",
"(",
"objGender",
"==",
"signGender",
")",
"# Match faction",
"factionConformity",
"=",
"False",
"diurnalChart",
"=",
"chart",
".",
"isDiurnal",
"(",
")",
"if",
"obj",
".",
"id",
"==",
"const",
".",
"SUN",
"and",
"not",
"diurnalChart",
":",
"# Sun is in conformity only when above horizon",
"factionConformity",
"=",
"False",
"else",
":",
"# Get list of houses in the chart's diurnal faction",
"if",
"diurnalChart",
":",
"diurnalFaction",
"=",
"props",
".",
"house",
".",
"aboveHorizon",
"nocturnalFaction",
"=",
"props",
".",
"house",
".",
"belowHorizon",
"else",
":",
"diurnalFaction",
"=",
"props",
".",
"house",
".",
"belowHorizon",
"nocturnalFaction",
"=",
"props",
".",
"house",
".",
"aboveHorizon",
"# Get the object's house and match factions",
"objHouse",
"=",
"chart",
".",
"houses",
".",
"getObjectHouse",
"(",
"obj",
")",
"if",
"(",
"objFaction",
"==",
"const",
".",
"DIURNAL",
"and",
"objHouse",
".",
"id",
"in",
"diurnalFaction",
"or",
"objFaction",
"==",
"const",
".",
"NOCTURNAL",
"and",
"objHouse",
".",
"id",
"in",
"nocturnalFaction",
")",
":",
"factionConformity",
"=",
"True",
"# Match things",
"if",
"(",
"genderConformity",
"and",
"factionConformity",
")",
":",
"return",
"HAIZ",
"elif",
"(",
"not",
"genderConformity",
"and",
"not",
"factionConformity",
")",
":",
"return",
"CHAIZ",
"else",
":",
"return",
"None"
] | Returns whether an object is in Haiz. | [
    "Returns",
    "whether",
"an",
"object",
"is",
"in",
"Haiz",
"."
] | python | train | 34.734694 |
amcat/amcatclient | demo_wikinews_scraper.py | https://github.com/amcat/amcatclient/blob/bda525f7ace0c26a09fa56d2baf7550f639e62ee/demo_wikinews_scraper.py#L118-L133 | def scrape_wikinews(conn, project, articleset, query):
"""
Scrape wikinews articles from the given query
@param conn: The AmcatAPI object
    @param project: The target project ID
    @param articleset: The target articleset ID
    @param query: The wikinews search query
"""
url = "http://en.wikinews.org/w/index.php?search={}&limit=50".format(query)
logging.info(url)
for page in get_pages(url):
urls = get_article_urls(page)
arts = list(get_articles(urls))
logging.info("Adding {} articles to set {}:{}"
.format(len(arts), project, articleset))
conn.create_articles(project=project, articleset=articleset,
json_data=arts) | [
"def",
"scrape_wikinews",
"(",
"conn",
",",
"project",
",",
"articleset",
",",
"query",
")",
":",
"url",
"=",
"\"http://en.wikinews.org/w/index.php?search={}&limit=50\"",
".",
"format",
"(",
"query",
")",
"logging",
".",
"info",
"(",
"url",
")",
"for",
"page",
"in",
"get_pages",
"(",
"url",
")",
":",
"urls",
"=",
"get_article_urls",
"(",
"page",
")",
"arts",
"=",
"list",
"(",
"get_articles",
"(",
"urls",
")",
")",
"logging",
".",
"info",
"(",
"\"Adding {} articles to set {}:{}\"",
".",
"format",
"(",
"len",
"(",
"arts",
")",
",",
"project",
",",
"articleset",
")",
")",
"conn",
".",
"create_articles",
"(",
"project",
"=",
"project",
",",
"articleset",
"=",
"articleset",
",",
"json_data",
"=",
"arts",
")"
] | Scrape wikinews articles from the given query
@param conn: The AmcatAPI object
    @param project: The target project ID
    @param articleset: The target articleset ID
    @param query: The wikinews search query | [
"Scrape",
"wikinews",
"articles",
"from",
"the",
"given",
"query"
] | python | train | 42.5 |
saltstack/salt | salt/returners/carbon_return.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/returners/carbon_return.py#L174-L181 | def _send_textmetrics(metrics):
'''
Format metrics for the carbon plaintext protocol
'''
data = [' '.join(map(six.text_type, metric)) for metric in metrics] + ['']
return '\n'.join(data) | [
"def",
"_send_textmetrics",
"(",
"metrics",
")",
":",
"data",
"=",
"[",
"' '",
".",
"join",
"(",
"map",
"(",
"six",
".",
"text_type",
",",
"metric",
")",
")",
"for",
"metric",
"in",
"metrics",
"]",
"+",
"[",
"''",
"]",
"return",
"'\\n'",
".",
"join",
"(",
"data",
")"
] | Format metrics for the carbon plaintext protocol | [
"Format",
"metrics",
"for",
"the",
"carbon",
"plaintext",
"protocol"
] | python | train | 25.125 |
pybel/pybel | src/pybel/manager/lookup_manager.py | https://github.com/pybel/pybel/blob/c8a7a1bdae4c475fa2a8c77f3a9a5f6d79556ca0/src/pybel/manager/lookup_manager.py#L47-L50 | def get_citation_by_reference(self, type: str, reference: str) -> Optional[Citation]:
"""Get a citation object by its type and reference."""
citation_hash = hash_citation(type=type, reference=reference)
return self.get_citation_by_hash(citation_hash) | [
"def",
"get_citation_by_reference",
"(",
"self",
",",
"type",
":",
"str",
",",
"reference",
":",
"str",
")",
"->",
"Optional",
"[",
"Citation",
"]",
":",
"citation_hash",
"=",
"hash_citation",
"(",
"type",
"=",
"type",
",",
"reference",
"=",
"reference",
")",
"return",
"self",
".",
"get_citation_by_hash",
"(",
"citation_hash",
")"
] | Get a citation object by its type and reference. | [
"Get",
"a",
"citation",
"object",
"by",
"its",
"type",
"and",
"reference",
"."
] | python | train | 67.75 |
tanghaibao/jcvi | jcvi/assembly/kmer.py | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/assembly/kmer.py#L279-L300 | def entropy(args):
"""
%prog entropy kmc_dump.out
kmc_dump.out contains two columns:
AAAAAAAAAAAGAAGAAAGAAA 34
"""
p = OptionParser(entropy.__doc__)
p.add_option("--threshold", default=0, type="int",
help="Complexity needs to be above")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
kmc_out, = args
fp = open(kmc_out)
for row in fp:
kmer, count = row.split()
score = entropy_score(kmer)
if score >= opts.threshold:
print(" ".join((kmer, count, "{:.2f}".format(score)))) | [
"def",
"entropy",
"(",
"args",
")",
":",
"p",
"=",
"OptionParser",
"(",
"entropy",
".",
"__doc__",
")",
"p",
".",
"add_option",
"(",
"\"--threshold\"",
",",
"default",
"=",
"0",
",",
"type",
"=",
"\"int\"",
",",
"help",
"=",
"\"Complexity needs to be above\"",
")",
"opts",
",",
"args",
"=",
"p",
".",
"parse_args",
"(",
"args",
")",
"if",
"len",
"(",
"args",
")",
"!=",
"1",
":",
"sys",
".",
"exit",
"(",
"not",
"p",
".",
"print_help",
"(",
")",
")",
"kmc_out",
",",
"=",
"args",
"fp",
"=",
"open",
"(",
"kmc_out",
")",
"for",
"row",
"in",
"fp",
":",
"kmer",
",",
"count",
"=",
"row",
".",
"split",
"(",
")",
"score",
"=",
"entropy_score",
"(",
"kmer",
")",
"if",
"score",
">=",
"opts",
".",
"threshold",
":",
"print",
"(",
"\" \"",
".",
"join",
"(",
"(",
"kmer",
",",
"count",
",",
"\"{:.2f}\"",
".",
"format",
"(",
"score",
")",
")",
")",
")"
] | %prog entropy kmc_dump.out
kmc_dump.out contains two columns:
AAAAAAAAAAAGAAGAAAGAAA 34 | [
"%prog",
"entropy",
"kmc_dump",
".",
"out"
] | python | train | 27 |
openstack/horizon | horizon/tabs/views.py | https://github.com/openstack/horizon/blob/5601ea9477323e599d9b766fcac1f8be742935b2/horizon/tabs/views.py#L81-L94 | def load_tabs(self):
"""Loads the tab group.
        It compiles the table instances for each table attached to
        any :class:`horizon.tabs.TableTab` instance in the tab group.
This step is necessary before processing any tab or table actions.
"""
tab_group = self.get_tabs(self.request, **self.kwargs)
tabs = tab_group.get_tabs()
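        # Collect the tables from every TableTab so table actions can be routed later.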
for tab in [t for t in tabs if issubclass(t.__class__, TableTab)]:
self.table_classes.extend(tab.table_classes)
for table in tab._tables.values():
self._table_dict[table._meta.name] = {'table': table,
'tab': tab} | [
"def",
"load_tabs",
"(",
"self",
")",
":",
"tab_group",
"=",
"self",
".",
"get_tabs",
"(",
"self",
".",
"request",
",",
"*",
"*",
"self",
".",
"kwargs",
")",
"tabs",
"=",
"tab_group",
".",
"get_tabs",
"(",
")",
"for",
"tab",
"in",
"[",
"t",
"for",
"t",
"in",
"tabs",
"if",
"issubclass",
"(",
"t",
".",
"__class__",
",",
"TableTab",
")",
"]",
":",
"self",
".",
"table_classes",
".",
"extend",
"(",
"tab",
".",
"table_classes",
")",
"for",
"table",
"in",
"tab",
".",
"_tables",
".",
"values",
"(",
")",
":",
"self",
".",
"_table_dict",
"[",
"table",
".",
"_meta",
".",
"name",
"]",
"=",
"{",
"'table'",
":",
"table",
",",
"'tab'",
":",
"tab",
"}"
] | Loads the tab group.
    It compiles the table instances for each table attached to
    any :class:`horizon.tabs.TableTab` instance in the tab group.
This step is necessary before processing any tab or table actions. | [
"Loads",
"the",
"tab",
"group",
"."
] | python | train | 48.5 |
zsimic/runez | src/runez/system.py | https://github.com/zsimic/runez/blob/14363b719a1aae1528859a501a22d075ce0abfcc/src/runez/system.py#L21-L66 | def abort(*args, **kwargs):
"""
Usage:
return abort("...") => will sys.exit() by default
return abort("...", fatal=True) => Will sys.exit()
# Not fatal, but will log/print message:
return abort("...", fatal=False) => Will return False
return abort("...", fatal=(False, None)) => Will return None
return abort("...", fatal=(False, -1)) => Will return -1
# Not fatal, will not log/print any message:
return abort("...", fatal=None) => Will return None
return abort("...", fatal=(None, None)) => Will return None
return abort("...", fatal=(None, -1)) => Will return -1
:param args: Args passed through for error reporting
:param kwargs: Args passed through for error reporting
    :return: the value of 'fatal' (or the second element when 'fatal' is a (fatal, return_value) pair) to signify failure to non-fatal callers
"""
code = kwargs.pop("code", 1)
logger = kwargs.pop("logger", LOG.error if code else LOG.info)
fatal = kwargs.pop("fatal", True)
return_value = fatal
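    # 'fatal' may be given as a (fatal, return_value) pair.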
if isinstance(fatal, tuple) and len(fatal) == 2:
fatal, return_value = fatal
if logger and fatal is not None and args:
if logging.root.handlers:
logger(*args, **kwargs)
else:
sys.stderr.write("%s\n" % formatted_string(*args))
if fatal:
if isinstance(fatal, type) and issubclass(fatal, BaseException):
raise fatal(code)
if AbortException is not None:
if isinstance(AbortException, type) and issubclass(AbortException, BaseException):
raise AbortException(code)
return AbortException(code)
return return_value | [
"def",
"abort",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"code",
"=",
"kwargs",
".",
"pop",
"(",
"\"code\"",
",",
"1",
")",
"logger",
"=",
"kwargs",
".",
"pop",
"(",
"\"logger\"",
",",
"LOG",
".",
"error",
"if",
"code",
"else",
"LOG",
".",
"info",
")",
"fatal",
"=",
"kwargs",
".",
"pop",
"(",
"\"fatal\"",
",",
"True",
")",
"return_value",
"=",
"fatal",
"if",
"isinstance",
"(",
"fatal",
",",
"tuple",
")",
"and",
"len",
"(",
"fatal",
")",
"==",
"2",
":",
"fatal",
",",
"return_value",
"=",
"fatal",
"if",
"logger",
"and",
"fatal",
"is",
"not",
"None",
"and",
"args",
":",
"if",
"logging",
".",
"root",
".",
"handlers",
":",
"logger",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"else",
":",
"sys",
".",
"stderr",
".",
"write",
"(",
"\"%s\\n\"",
"%",
"formatted_string",
"(",
"*",
"args",
")",
")",
"if",
"fatal",
":",
"if",
"isinstance",
"(",
"fatal",
",",
"type",
")",
"and",
"issubclass",
"(",
"fatal",
",",
"BaseException",
")",
":",
"raise",
"fatal",
"(",
"code",
")",
"if",
"AbortException",
"is",
"not",
"None",
":",
"if",
"isinstance",
"(",
"AbortException",
",",
"type",
")",
"and",
"issubclass",
"(",
"AbortException",
",",
"BaseException",
")",
":",
"raise",
"AbortException",
"(",
"code",
")",
"return",
"AbortException",
"(",
"code",
")",
"return",
"return_value"
] | Usage:
return abort("...") => will sys.exit() by default
return abort("...", fatal=True) => Will sys.exit()
# Not fatal, but will log/print message:
return abort("...", fatal=False) => Will return False
return abort("...", fatal=(False, None)) => Will return None
return abort("...", fatal=(False, -1)) => Will return -1
# Not fatal, will not log/print any message:
return abort("...", fatal=None) => Will return None
return abort("...", fatal=(None, None)) => Will return None
return abort("...", fatal=(None, -1)) => Will return -1
:param args: Args passed through for error reporting
:param kwargs: Args passed through for error reporting
:return: kwargs["return_value"] (default: -1) to signify failure to non-fatal callers | [
"Usage",
":",
"return",
"abort",
"(",
"...",
")",
"=",
">",
"will",
"sys",
".",
"exit",
"()",
"by",
"default",
"return",
"abort",
"(",
"...",
"fatal",
"=",
"True",
")",
"=",
">",
"Will",
"sys",
".",
"exit",
"()"
] | python | train | 35.543478 |
gwastro/pycbc | pycbc/waveform/bank.py | https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/waveform/bank.py#L42-L89 | def sigma_cached(self, psd):
""" Cache sigma calculate for use in tandem with the FilterBank class
"""
if not hasattr(self, '_sigmasq'):
from pycbc.opt import LimitedSizeDict
self._sigmasq = LimitedSizeDict(size_limit=2**5)
key = id(psd)
if not hasattr(psd, '_sigma_cached_key'):
psd._sigma_cached_key = {}
if key not in self._sigmasq or id(self) not in psd._sigma_cached_key:
psd._sigma_cached_key[id(self)] = True
# If possible, we precalculate the sigmasq vector for all possible waveforms
if pycbc.waveform.waveform_norm_exists(self.approximant):
if not hasattr(psd, 'sigmasq_vec'):
psd.sigmasq_vec = {}
if self.approximant not in psd.sigmasq_vec:
psd.sigmasq_vec[self.approximant] = pycbc.waveform.get_waveform_filter_norm(
self.approximant, psd, len(psd), psd.delta_f, self.f_lower)
if not hasattr(self, 'sigma_scale'):
                # Get an amplitude normalization (mass-dependent constant norm)
amp_norm = pycbc.waveform.get_template_amplitude_norm(
self.params, approximant=self.approximant)
amp_norm = 1 if amp_norm is None else amp_norm
self.sigma_scale = (DYN_RANGE_FAC * amp_norm) ** 2.0
self._sigmasq[key] = self.sigma_scale * \
psd.sigmasq_vec[self.approximant][self.end_idx-1]
else:
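            # Fall back to the direct computation: sigmasq = 4 * delta_f * sum(|htilde|^2 / psd) over the band.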
if not hasattr(self, 'sigma_view'):
from pycbc.filter.matchedfilter import get_cutoff_indices
N = (len(self) -1) * 2
kmin, kmax = get_cutoff_indices(
self.min_f_lower or self.f_lower, self.end_frequency,
self.delta_f, N)
self.sslice = slice(kmin, kmax)
self.sigma_view = self[self.sslice].squared_norm() * 4.0 * self.delta_f
if not hasattr(psd, 'invsqrt'):
psd.invsqrt = 1.0 / psd[self.sslice]
self._sigmasq[key] = self.sigma_view.inner(psd.invsqrt)
return self._sigmasq[key] | [
"def",
"sigma_cached",
"(",
"self",
",",
"psd",
")",
":",
"if",
"not",
"hasattr",
"(",
"self",
",",
"'_sigmasq'",
")",
":",
"from",
"pycbc",
".",
"opt",
"import",
"LimitedSizeDict",
"self",
".",
"_sigmasq",
"=",
"LimitedSizeDict",
"(",
"size_limit",
"=",
"2",
"**",
"5",
")",
"key",
"=",
"id",
"(",
"psd",
")",
"if",
"not",
"hasattr",
"(",
"psd",
",",
"'_sigma_cached_key'",
")",
":",
"psd",
".",
"_sigma_cached_key",
"=",
"{",
"}",
"if",
"key",
"not",
"in",
"self",
".",
"_sigmasq",
"or",
"id",
"(",
"self",
")",
"not",
"in",
"psd",
".",
"_sigma_cached_key",
":",
"psd",
".",
"_sigma_cached_key",
"[",
"id",
"(",
"self",
")",
"]",
"=",
"True",
"# If possible, we precalculate the sigmasq vector for all possible waveforms",
"if",
"pycbc",
".",
"waveform",
".",
"waveform_norm_exists",
"(",
"self",
".",
"approximant",
")",
":",
"if",
"not",
"hasattr",
"(",
"psd",
",",
"'sigmasq_vec'",
")",
":",
"psd",
".",
"sigmasq_vec",
"=",
"{",
"}",
"if",
"self",
".",
"approximant",
"not",
"in",
"psd",
".",
"sigmasq_vec",
":",
"psd",
".",
"sigmasq_vec",
"[",
"self",
".",
"approximant",
"]",
"=",
"pycbc",
".",
"waveform",
".",
"get_waveform_filter_norm",
"(",
"self",
".",
"approximant",
",",
"psd",
",",
"len",
"(",
"psd",
")",
",",
"psd",
".",
"delta_f",
",",
"self",
".",
"f_lower",
")",
"if",
"not",
"hasattr",
"(",
"self",
",",
"'sigma_scale'",
")",
":",
"# Get an amplitude normalization (mass dependant constant norm)",
"amp_norm",
"=",
"pycbc",
".",
"waveform",
".",
"get_template_amplitude_norm",
"(",
"self",
".",
"params",
",",
"approximant",
"=",
"self",
".",
"approximant",
")",
"amp_norm",
"=",
"1",
"if",
"amp_norm",
"is",
"None",
"else",
"amp_norm",
"self",
".",
"sigma_scale",
"=",
"(",
"DYN_RANGE_FAC",
"*",
"amp_norm",
")",
"**",
"2.0",
"self",
".",
"_sigmasq",
"[",
"key",
"]",
"=",
"self",
".",
"sigma_scale",
"*",
"psd",
".",
"sigmasq_vec",
"[",
"self",
".",
"approximant",
"]",
"[",
"self",
".",
"end_idx",
"-",
"1",
"]",
"else",
":",
"if",
"not",
"hasattr",
"(",
"self",
",",
"'sigma_view'",
")",
":",
"from",
"pycbc",
".",
"filter",
".",
"matchedfilter",
"import",
"get_cutoff_indices",
"N",
"=",
"(",
"len",
"(",
"self",
")",
"-",
"1",
")",
"*",
"2",
"kmin",
",",
"kmax",
"=",
"get_cutoff_indices",
"(",
"self",
".",
"min_f_lower",
"or",
"self",
".",
"f_lower",
",",
"self",
".",
"end_frequency",
",",
"self",
".",
"delta_f",
",",
"N",
")",
"self",
".",
"sslice",
"=",
"slice",
"(",
"kmin",
",",
"kmax",
")",
"self",
".",
"sigma_view",
"=",
"self",
"[",
"self",
".",
"sslice",
"]",
".",
"squared_norm",
"(",
")",
"*",
"4.0",
"*",
"self",
".",
"delta_f",
"if",
"not",
"hasattr",
"(",
"psd",
",",
"'invsqrt'",
")",
":",
"psd",
".",
"invsqrt",
"=",
"1.0",
"/",
"psd",
"[",
"self",
".",
"sslice",
"]",
"self",
".",
"_sigmasq",
"[",
"key",
"]",
"=",
"self",
".",
"sigma_view",
".",
"inner",
"(",
"psd",
".",
"invsqrt",
")",
"return",
"self",
".",
"_sigmasq",
"[",
"key",
"]"
] | Cache the sigma calculation for use in tandem with the FilterBank class | [
    "Cache",
    "the",
    "sigma",
    "calculation",
"for",
"use",
"in",
"tandem",
"with",
"the",
"FilterBank",
"class"
] | python | train | 43.854167 |
apple/turicreate | deps/src/boost_1_68_0/tools/build/src/build/virtual_target.py | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/build/virtual_target.py#L299-L303 | def depends (self, d):
""" Adds additional instances of 'VirtualTarget' that this
one depends on.
"""
        self.dependencies_ = sorted (unique (self.dependencies_ + d)) | [
"def",
"depends",
"(",
"self",
",",
"d",
")",
":",
"self",
".",
"dependencies_",
"=",
"unique",
"(",
"self",
".",
"dependencies_",
"+",
"d",
")",
".",
"sort",
"(",
")"
] | Adds additional instances of 'VirtualTarget' that this
one depends on. | [
"Adds",
"additional",
"instances",
"of",
"VirtualTarget",
"that",
"this",
"one",
"depends",
"on",
"."
] | python | train | 38.8 |
hydpy-dev/hydpy | hydpy/core/autodoctools.py | https://github.com/hydpy-dev/hydpy/blob/1bc6a82cf30786521d86b36e27900c6717d3348d/hydpy/core/autodoctools.py#L433-L511 | def add_module(self, module, cython=False):
"""Add the given module, its members, and their submembers.
The first examples are based on the site-package |numpy|: which
is passed to method |Substituter.add_module|:
>>> from hydpy.core.autodoctools import Substituter
>>> substituter = Substituter()
>>> import numpy
>>> substituter.add_module(numpy)
Firstly, the module itself is added:
>>> substituter.find('|numpy|')
|numpy| :mod:`~numpy`
Secondly, constants like |numpy.nan| are added:
>>> substituter.find('|numpy.nan|')
|numpy.nan| :const:`~numpy.nan`
Thirdly, functions like |numpy.clip| are added:
>>> substituter.find('|numpy.clip|')
|numpy.clip| :func:`~numpy.clip`
        Fourthly, classes like |numpy.ndarray| are added:
>>> substituter.find('|numpy.ndarray|')
|numpy.ndarray| :class:`~numpy.ndarray`
When adding Cython modules, the `cython` flag should be set |True|:
>>> from hydpy.cythons import pointerutils
>>> substituter.add_module(pointerutils, cython=True)
>>> substituter.find('set_pointer')
|PPDouble.set_pointer| \
:func:`~hydpy.cythons.autogen.pointerutils.PPDouble.set_pointer`
|pointerutils.PPDouble.set_pointer| \
:func:`~hydpy.cythons.autogen.pointerutils.PPDouble.set_pointer`
"""
name_module = module.__name__.split('.')[-1]
short = ('|%s|'
% name_module)
long = (':mod:`~%s`'
% module.__name__)
self._short2long[short] = long
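        # Register every qualifying member and, for classes, every qualifying submember.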
for (name_member, member) in vars(module).items():
if self.consider_member(
name_member, member, module):
role = self.get_role(member, cython)
short = ('|%s|'
% name_member)
medium = ('|%s.%s|'
% (name_module,
name_member))
long = (':%s:`~%s.%s`'
% (role,
module.__name__,
name_member))
self.add_substitution(short, medium, long, module)
if inspect.isclass(member):
for name_submember, submember in vars(member).items():
if self.consider_member(
name_submember, submember, module, member):
role = self.get_role(submember, cython)
short = ('|%s.%s|'
% (name_member,
name_submember))
medium = ('|%s.%s.%s|'
% (name_module,
name_member,
name_submember))
long = (':%s:`~%s.%s.%s`'
% (role,
module.__name__,
name_member,
name_submember))
self.add_substitution(short, medium, long, module) | [
"def",
"add_module",
"(",
"self",
",",
"module",
",",
"cython",
"=",
"False",
")",
":",
"name_module",
"=",
"module",
".",
"__name__",
".",
"split",
"(",
"'.'",
")",
"[",
"-",
"1",
"]",
"short",
"=",
"(",
"'|%s|'",
"%",
"name_module",
")",
"long",
"=",
"(",
"':mod:`~%s`'",
"%",
"module",
".",
"__name__",
")",
"self",
".",
"_short2long",
"[",
"short",
"]",
"=",
"long",
"for",
"(",
"name_member",
",",
"member",
")",
"in",
"vars",
"(",
"module",
")",
".",
"items",
"(",
")",
":",
"if",
"self",
".",
"consider_member",
"(",
"name_member",
",",
"member",
",",
"module",
")",
":",
"role",
"=",
"self",
".",
"get_role",
"(",
"member",
",",
"cython",
")",
"short",
"=",
"(",
"'|%s|'",
"%",
"name_member",
")",
"medium",
"=",
"(",
"'|%s.%s|'",
"%",
"(",
"name_module",
",",
"name_member",
")",
")",
"long",
"=",
"(",
"':%s:`~%s.%s`'",
"%",
"(",
"role",
",",
"module",
".",
"__name__",
",",
"name_member",
")",
")",
"self",
".",
"add_substitution",
"(",
"short",
",",
"medium",
",",
"long",
",",
"module",
")",
"if",
"inspect",
".",
"isclass",
"(",
"member",
")",
":",
"for",
"name_submember",
",",
"submember",
"in",
"vars",
"(",
"member",
")",
".",
"items",
"(",
")",
":",
"if",
"self",
".",
"consider_member",
"(",
"name_submember",
",",
"submember",
",",
"module",
",",
"member",
")",
":",
"role",
"=",
"self",
".",
"get_role",
"(",
"submember",
",",
"cython",
")",
"short",
"=",
"(",
"'|%s.%s|'",
"%",
"(",
"name_member",
",",
"name_submember",
")",
")",
"medium",
"=",
"(",
"'|%s.%s.%s|'",
"%",
"(",
"name_module",
",",
"name_member",
",",
"name_submember",
")",
")",
"long",
"=",
"(",
"':%s:`~%s.%s.%s`'",
"%",
"(",
"role",
",",
"module",
".",
"__name__",
",",
"name_member",
",",
"name_submember",
")",
")",
"self",
".",
"add_substitution",
"(",
"short",
",",
"medium",
",",
"long",
",",
"module",
")"
] | Add the given module, its members, and their submembers.
The first examples are based on the site-package |numpy|: which
is passed to method |Substituter.add_module|:
>>> from hydpy.core.autodoctools import Substituter
>>> substituter = Substituter()
>>> import numpy
>>> substituter.add_module(numpy)
Firstly, the module itself is added:
>>> substituter.find('|numpy|')
|numpy| :mod:`~numpy`
Secondly, constants like |numpy.nan| are added:
>>> substituter.find('|numpy.nan|')
|numpy.nan| :const:`~numpy.nan`
Thirdly, functions like |numpy.clip| are added:
>>> substituter.find('|numpy.clip|')
|numpy.clip| :func:`~numpy.clip`
Fourthly, clases line |numpy.ndarray| are added:
>>> substituter.find('|numpy.ndarray|')
|numpy.ndarray| :class:`~numpy.ndarray`
When adding Cython modules, the `cython` flag should be set |True|:
>>> from hydpy.cythons import pointerutils
>>> substituter.add_module(pointerutils, cython=True)
>>> substituter.find('set_pointer')
|PPDouble.set_pointer| \
:func:`~hydpy.cythons.autogen.pointerutils.PPDouble.set_pointer`
|pointerutils.PPDouble.set_pointer| \
:func:`~hydpy.cythons.autogen.pointerutils.PPDouble.set_pointer` | [
"Add",
"the",
"given",
"module",
"its",
"members",
"and",
"their",
"submembers",
"."
] | python | train | 40.481013 |
jazzband/django-analytical | analytical/templatetags/intercom.py | https://github.com/jazzband/django-analytical/blob/5487fd677bd47bc63fc2cf39597a0adc5d6c9ab3/analytical/templatetags/intercom.py#L40-L49 | def _hashable_bytes(data):
"""
Coerce strings to hashable bytes.
"""
if isinstance(data, bytes):
return data
elif isinstance(data, str):
return data.encode('ascii') # Fail on anything non-ASCII.
else:
raise TypeError(data) | [
"def",
"_hashable_bytes",
"(",
"data",
")",
":",
"if",
"isinstance",
"(",
"data",
",",
"bytes",
")",
":",
"return",
"data",
"elif",
"isinstance",
"(",
"data",
",",
"str",
")",
":",
"return",
"data",
".",
"encode",
"(",
"'ascii'",
")",
"# Fail on anything non-ASCII.",
"else",
":",
"raise",
"TypeError",
"(",
"data",
")"
] | Coerce strings to hashable bytes. | [
"Coerce",
"strings",
"to",
"hashable",
"bytes",
"."
] | python | valid | 26.2 |
hyperledger/sawtooth-core | validator/sawtooth_validator/execution/scheduler_parallel.py | https://github.com/hyperledger/sawtooth-core/blob/8cf473bc2207e51f02bd182d825158a57d72b098/validator/sawtooth_validator/execution/scheduler_parallel.py#L812-L848 | def _set_least_batch_id(self, txn_signature):
"""Set the first batch id that doesn't have all results.
Args:
txn_signature (str): The txn identifier of the transaction with
results being set.
"""
batch = self._batches_by_txn_id[txn_signature]
least_index = self._index_of_batch(
self._batches_by_id[self._least_batch_id_wo_results].batch)
current_index = self._index_of_batch(batch)
all_prior = False
if current_index <= least_index:
return
        # Check whether every batch from least_batch up to (but not
        # including) the current batch already has results.
if all(
all(t.header_signature in self._txn_results
for t in b.transactions)
for b in self._batches[least_index:current_index]):
all_prior = True
if not all_prior:
return
possible_least = self._batches[current_index].header_signature
# Find the first batch from the current batch on, that doesn't have
# all results.
for b in self._batches[current_index:]:
if not all(t.header_signature in self._txn_results
for t in b.transactions):
possible_least = b.header_signature
break
self._least_batch_id_wo_results = possible_least | [
"def",
"_set_least_batch_id",
"(",
"self",
",",
"txn_signature",
")",
":",
"batch",
"=",
"self",
".",
"_batches_by_txn_id",
"[",
"txn_signature",
"]",
"least_index",
"=",
"self",
".",
"_index_of_batch",
"(",
"self",
".",
"_batches_by_id",
"[",
"self",
".",
"_least_batch_id_wo_results",
"]",
".",
"batch",
")",
"current_index",
"=",
"self",
".",
"_index_of_batch",
"(",
"batch",
")",
"all_prior",
"=",
"False",
"if",
"current_index",
"<=",
"least_index",
":",
"return",
"# Test to see if all batches from the least_batch to",
"# the prior batch to the current batch have results.",
"if",
"all",
"(",
"all",
"(",
"t",
".",
"header_signature",
"in",
"self",
".",
"_txn_results",
"for",
"t",
"in",
"b",
".",
"transactions",
")",
"for",
"b",
"in",
"self",
".",
"_batches",
"[",
"least_index",
":",
"current_index",
"]",
")",
":",
"all_prior",
"=",
"True",
"if",
"not",
"all_prior",
":",
"return",
"possible_least",
"=",
"self",
".",
"_batches",
"[",
"current_index",
"]",
".",
"header_signature",
"# Find the first batch from the current batch on, that doesn't have",
"# all results.",
"for",
"b",
"in",
"self",
".",
"_batches",
"[",
"current_index",
":",
"]",
":",
"if",
"not",
"all",
"(",
"t",
".",
"header_signature",
"in",
"self",
".",
"_txn_results",
"for",
"t",
"in",
"b",
".",
"transactions",
")",
":",
"possible_least",
"=",
"b",
".",
"header_signature",
"break",
"self",
".",
"_least_batch_id_wo_results",
"=",
"possible_least"
] | Set the first batch id that doesn't have all results.
Args:
txn_signature (str): The txn identifier of the transaction with
results being set. | [
"Set",
"the",
"first",
"batch",
"id",
"that",
"doesn",
"t",
"have",
"all",
"results",
"."
] | python | train | 37.297297 |
mushkevych/scheduler | synergy/system/decorator.py | https://github.com/mushkevych/scheduler/blob/6740331360f49083c208085fb5a60ce80ebf418b/synergy/system/decorator.py#L32-L55 | def with_reconnect(func):
"""
Handle when AutoReconnect is raised from pymongo. This is the standard error
raised for everything from "host disconnected" to "couldn't connect to host"
and more.
The sleep handles the edge case when the state of a replica set changes, and
the cursor raises AutoReconnect because the master may have changed. It can
take some time for the replica set to stop raising this exception, and the
small sleep and iteration count gives us a couple of seconds before we fail
completely.
"""
from pymongo.errors import AutoReconnect
@functools.wraps(func)
def _reconnector(*args, **kwargs):
for _ in range(20):
try:
return func(*args, **kwargs)
except AutoReconnect:
time.sleep(0.250)
raise
return _reconnector | [
"def",
"with_reconnect",
"(",
"func",
")",
":",
"from",
"pymongo",
".",
"errors",
"import",
"AutoReconnect",
"@",
"functools",
".",
"wraps",
"(",
"func",
")",
"def",
"_reconnector",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"for",
"_",
"in",
"range",
"(",
"20",
")",
":",
"try",
":",
"return",
"func",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"except",
"AutoReconnect",
":",
"time",
".",
"sleep",
"(",
"0.250",
")",
"raise",
"return",
"_reconnector"
] | Handle when AutoReconnect is raised from pymongo. This is the standard error
raised for everything from "host disconnected" to "couldn't connect to host"
and more.
The sleep handles the edge case when the state of a replica set changes, and
the cursor raises AutoReconnect because the master may have changed. It can
take some time for the replica set to stop raising this exception, and the
small sleep and iteration count gives us a couple of seconds before we fail
completely. | [
"Handle",
"when",
"AutoReconnect",
"is",
"raised",
"from",
"pymongo",
".",
"This",
"is",
"the",
"standard",
"error",
"raised",
"for",
"everything",
"from",
"host",
"disconnected",
"to",
"couldn",
"t",
"connect",
"to",
"host",
"and",
"more",
"."
] | python | train | 35 |
basho/riak-python-client | riak/transports/http/transport.py | https://github.com/basho/riak-python-client/blob/91de13a16607cdf553d1a194e762734e3bec4231/riak/transports/http/transport.py#L75-L84 | def stats(self):
"""
Gets performance statistics and server information
"""
status, _, body = self._request('GET', self.stats_path(),
{'Accept': 'application/json'})
if status == 200:
return json.loads(bytes_to_str(body))
else:
return None | [
"def",
"stats",
"(",
"self",
")",
":",
"status",
",",
"_",
",",
"body",
"=",
"self",
".",
"_request",
"(",
"'GET'",
",",
"self",
".",
"stats_path",
"(",
")",
",",
"{",
"'Accept'",
":",
"'application/json'",
"}",
")",
"if",
"status",
"==",
"200",
":",
"return",
"json",
".",
"loads",
"(",
"bytes_to_str",
"(",
"body",
")",
")",
"else",
":",
"return",
"None"
] | Gets performance statistics and server information | [
"Gets",
"performance",
"statistics",
"and",
"server",
"information"
] | python | train | 34.2 |
PythonOptimizers/cygenja | cygenja/filters/type_filters.py | https://github.com/PythonOptimizers/cygenja/blob/a9ef91cdfa8452beeeec4f050f928b830379f91c/cygenja/filters/type_filters.py#L97-L120 | def cysparse_real_type_from_real_cysparse_complex_type(cysparse_type):
"""
Returns the **real** type for the real or imaginary part of a **real** complex type.
For instance:
COMPLEX128_t -> FLOAT64_t
Args:
cysparse:
"""
r_type = None
if cysparse_type in ['COMPLEX64_t']:
r_type = 'FLOAT32_t'
elif cysparse_type in ['COMPLEX128_t']:
r_type = 'FLOAT64_t'
elif cysparse_type in ['COMPLEX256_t']:
r_type = 'FLOAT128_t'
else:
raise TypeError("Not a recognized complex type")
return r_type | [
"def",
"cysparse_real_type_from_real_cysparse_complex_type",
"(",
"cysparse_type",
")",
":",
"r_type",
"=",
"None",
"if",
"cysparse_type",
"in",
"[",
"'COMPLEX64_t'",
"]",
":",
"r_type",
"=",
"'FLOAT32_t'",
"elif",
"cysparse_type",
"in",
"[",
"'COMPLEX128_t'",
"]",
":",
"r_type",
"=",
"'FLOAT64_t'",
"elif",
"cysparse_type",
"in",
"[",
"'COMPLEX256_t'",
"]",
":",
"r_type",
"=",
"'FLOAT128_t'",
"else",
":",
"raise",
"TypeError",
"(",
"\"Not a recognized complex type\"",
")",
"return",
"r_type"
] | Returns the **real** type for the real or imaginary part of a **real** complex type.
For instance:
COMPLEX128_t -> FLOAT64_t
Args:
cysparse: | [
"Returns",
"the",
"**",
"real",
"**",
"type",
"for",
"the",
"real",
"or",
"imaginary",
"part",
"of",
"a",
"**",
"real",
"**",
"complex",
"type",
"."
] | python | train | 23.25 |
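A quick sanity check of the mapping above, taken directly from the function's own branches:

    from cygenja.filters.type_filters import cysparse_real_type_from_real_cysparse_complex_type

    assert cysparse_real_type_from_real_cysparse_complex_type('COMPLEX64_t') == 'FLOAT32_t'
    assert cysparse_real_type_from_real_cysparse_complex_type('COMPLEX128_t') == 'FLOAT64_t'
    assert cysparse_real_type_from_real_cysparse_complex_type('COMPLEX256_t') == 'FLOAT128_t'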
openstack/networking-cisco | tools/saf_prepare_setup.py | https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/tools/saf_prepare_setup.py#L103-L148 | def get_mysql_credentials(cfg_file):
"""Get the credentials and database name from options in config file."""
try:
parser = ConfigParser.ConfigParser()
cfg_fp = open(cfg_file)
parser.readfp(cfg_fp)
cfg_fp.close()
except ConfigParser.NoOptionError:
cfg_fp.close()
print('Failed to find mysql connections credentials.')
sys.exit(1)
except IOError:
print('ERROR: Cannot open %s.', cfg_file)
sys.exit(1)
value = parser.get('dfa_mysql', 'connection')
try:
# Find location of pattern in connection parameter as shown below:
# http://username:password@host/databasename?characterset=encoding'
sobj = re.search(r"(://).*(@).*(/).*(\?)", value)
# The list parameter contains:
# indices[0], is the index of '://'
# indices[1], is the index of '@'
# indices[2], is the index of '/'
# indices[3], is the index of '?'
indices = [sobj.start(1), sobj.start(2), sobj.start(3), sobj.start(4)]
# Get the credentials
cred = value[indices[0] + 3:indices[1]].split(':')
# Get the host name
host = value[indices[1] + 1:indices[2]]
# Get the database name
db_name = value[indices[2] + 1:indices[3]]
# Get the character encoding
charset = value[indices[3] + 1:].split('=')[1]
return cred[0], cred[1], host, db_name, charset
except (ValueError, IndexError, AttributeError):
print('Failed to find mysql connections credentials.')
sys.exit(1) | [
"def",
"get_mysql_credentials",
"(",
"cfg_file",
")",
":",
"try",
":",
"parser",
"=",
"ConfigParser",
".",
"ConfigParser",
"(",
")",
"cfg_fp",
"=",
"open",
"(",
"cfg_file",
")",
"parser",
".",
"readfp",
"(",
"cfg_fp",
")",
"cfg_fp",
".",
"close",
"(",
")",
"except",
"ConfigParser",
".",
"NoOptionError",
":",
"cfg_fp",
".",
"close",
"(",
")",
"print",
"(",
"'Failed to find mysql connections credentials.'",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"except",
"IOError",
":",
"print",
"(",
"'ERROR: Cannot open %s.'",
",",
"cfg_file",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"value",
"=",
"parser",
".",
"get",
"(",
"'dfa_mysql'",
",",
"'connection'",
")",
"try",
":",
"# Find location of pattern in connection parameter as shown below:",
"# http://username:password@host/databasename?characterset=encoding'",
"sobj",
"=",
"re",
".",
"search",
"(",
"r\"(://).*(@).*(/).*(\\?)\"",
",",
"value",
")",
"# The list parameter contains:",
"# indices[0], is the index of '://'",
"# indices[1], is the index of '@'",
"# indices[2], is the index of '/'",
"# indices[3], is the index of '?'",
"indices",
"=",
"[",
"sobj",
".",
"start",
"(",
"1",
")",
",",
"sobj",
".",
"start",
"(",
"2",
")",
",",
"sobj",
".",
"start",
"(",
"3",
")",
",",
"sobj",
".",
"start",
"(",
"4",
")",
"]",
"# Get the credentials",
"cred",
"=",
"value",
"[",
"indices",
"[",
"0",
"]",
"+",
"3",
":",
"indices",
"[",
"1",
"]",
"]",
".",
"split",
"(",
"':'",
")",
"# Get the host name",
"host",
"=",
"value",
"[",
"indices",
"[",
"1",
"]",
"+",
"1",
":",
"indices",
"[",
"2",
"]",
"]",
"# Get the database name",
"db_name",
"=",
"value",
"[",
"indices",
"[",
"2",
"]",
"+",
"1",
":",
"indices",
"[",
"3",
"]",
"]",
"# Get the character encoding",
"charset",
"=",
"value",
"[",
"indices",
"[",
"3",
"]",
"+",
"1",
":",
"]",
".",
"split",
"(",
"'='",
")",
"[",
"1",
"]",
"return",
"cred",
"[",
"0",
"]",
",",
"cred",
"[",
"1",
"]",
",",
"host",
",",
"db_name",
",",
"charset",
"except",
"(",
"ValueError",
",",
"IndexError",
",",
"AttributeError",
")",
":",
"print",
"(",
"'Failed to find mysql connections credentials.'",
")",
"sys",
".",
"exit",
"(",
"1",
")"
] | Get the credentials and database name from options in config file. | [
"Get",
"the",
"credentials",
"and",
"database",
"name",
"from",
"options",
"in",
"config",
"file",
"."
] | python | train | 33.456522 |
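A worked example of the slicing logic above on a made-up connection string, showing what each regex group index extracts:

    import re

    value = 'mysql://dfa_user:secret@10.0.0.5/dfa_db?charset=utf8'  # made-up URI
    sobj = re.search(r"(://).*(@).*(/).*(\?)", value)
    indices = [sobj.start(1), sobj.start(2), sobj.start(3), sobj.start(4)]

    user, password = value[indices[0] + 3:indices[1]].split(':')
    host = value[indices[1] + 1:indices[2]]
    db_name = value[indices[2] + 1:indices[3]]
    charset = value[indices[3] + 1:].split('=')[1]
    print(user, password, host, db_name, charset)
    # -> dfa_user secret 10.0.0.5 dfa_db utf8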
aleju/imgaug | imgaug/imgaug.py | https://github.com/aleju/imgaug/blob/786be74aa855513840113ea523c5df495dc6a8af/imgaug/imgaug.py#L1768-L1792 | def show_grid(images, rows=None, cols=None):
"""
Converts the input images to a grid image and shows it in a new window.
dtype support::
minimum of (
:func:`imgaug.imgaug.draw_grid`,
:func:`imgaug.imgaug.imshow`
)
Parameters
----------
images : (N,H,W,3) ndarray or iterable of (H,W,3) array
See :func:`imgaug.draw_grid`.
rows : None or int, optional
See :func:`imgaug.draw_grid`.
cols : None or int, optional
See :func:`imgaug.draw_grid`.
"""
grid = draw_grid(images, rows=rows, cols=cols)
imshow(grid) | [
"def",
"show_grid",
"(",
"images",
",",
"rows",
"=",
"None",
",",
"cols",
"=",
"None",
")",
":",
"grid",
"=",
"draw_grid",
"(",
"images",
",",
"rows",
"=",
"rows",
",",
"cols",
"=",
"cols",
")",
"imshow",
"(",
"grid",
")"
] | Converts the input images to a grid image and shows it in a new window.
dtype support::
minimum of (
:func:`imgaug.imgaug.draw_grid`,
:func:`imgaug.imgaug.imshow`
)
Parameters
----------
images : (N,H,W,3) ndarray or iterable of (H,W,3) array
See :func:`imgaug.draw_grid`.
rows : None or int, optional
See :func:`imgaug.draw_grid`.
cols : None or int, optional
See :func:`imgaug.draw_grid`. | [
"Converts",
"the",
"input",
"images",
"to",
"a",
"grid",
"image",
"and",
"shows",
"it",
"in",
"a",
"new",
"window",
"."
] | python | valid | 23.72 |
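A hedged usage sketch for show_grid (assumes imgaug is installed, that show_grid is exposed at the package level as the docstring's cross-references suggest, and that a display is available):

    import numpy as np
    import imgaug as ia

    # Four random RGB images arranged on a 2x2 grid in a new window.
    images = np.random.randint(0, 256, size=(4, 64, 64, 3), dtype=np.uint8)
    ia.show_grid(images, rows=2, cols=2)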
raamana/mrivis | mrivis/base.py | https://github.com/raamana/mrivis/blob/199ad096b8a1d825f69109e7218a81b2f1cec756/mrivis/base.py#L508-L529 | def _make_grid_of_axes(self,
bounding_rect=cfg.bounding_rect_default,
num_rows=cfg.num_rows_per_view_default,
num_cols=cfg.num_cols_grid_default,
axis_pad=cfg.axis_pad_default,
commn_annot=None,
**axis_kwargs):
"""Creates a grid of axes bounded within a given rectangle."""
axes_in_grid = list()
extents = self._compute_cell_extents_grid(bounding_rect=bounding_rect,
num_cols=num_cols,
num_rows=num_rows, axis_pad=axis_pad)
for cell_ext in extents:
ax_cell = self.fig.add_axes(cell_ext, frameon=False, visible=False,
**axis_kwargs)
if commn_annot is not None:
ax_cell.set_title(commn_annot)
ax_cell.set_axis_off()
axes_in_grid.append(ax_cell)
return axes_in_grid | [
"def",
"_make_grid_of_axes",
"(",
"self",
",",
"bounding_rect",
"=",
"cfg",
".",
"bounding_rect_default",
",",
"num_rows",
"=",
"cfg",
".",
"num_rows_per_view_default",
",",
"num_cols",
"=",
"cfg",
".",
"num_cols_grid_default",
",",
"axis_pad",
"=",
"cfg",
".",
"axis_pad_default",
",",
"commn_annot",
"=",
"None",
",",
"*",
"*",
"axis_kwargs",
")",
":",
"axes_in_grid",
"=",
"list",
"(",
")",
"extents",
"=",
"self",
".",
"_compute_cell_extents_grid",
"(",
"bounding_rect",
"=",
"bounding_rect",
",",
"num_cols",
"=",
"num_cols",
",",
"num_rows",
"=",
"num_rows",
",",
"axis_pad",
"=",
"axis_pad",
")",
"for",
"cell_ext",
"in",
"extents",
":",
"ax_cell",
"=",
"self",
".",
"fig",
".",
"add_axes",
"(",
"cell_ext",
",",
"frameon",
"=",
"False",
",",
"visible",
"=",
"False",
",",
"*",
"*",
"axis_kwargs",
")",
"if",
"commn_annot",
"is",
"not",
"None",
":",
"ax_cell",
".",
"set_title",
"(",
"commn_annot",
")",
"ax_cell",
".",
"set_axis_off",
"(",
")",
"axes_in_grid",
".",
"append",
"(",
"ax_cell",
")",
"return",
"axes_in_grid"
] | Creates a grid of axes bounded within a given rectangle. | [
"Creates",
"a",
"grid",
"of",
"axes",
"bounded",
"within",
"a",
"given",
"rectangle",
"."
] | python | train | 47.681818 |
gwastro/pycbc | pycbc/io/record.py | https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/io/record.py#L1069-L1100 | def from_kwargs(cls, **kwargs):
"""Creates a new instance of self from the given keyword arguments.
Each argument will correspond to a field in the returned array, with
the name of the field given by the keyword, and the value(s) whatever
the keyword was set to. Each keyword may be set to a single value or
a list of values. The number of values that each argument is set to
must be the same; this will be the size of the returned array.
Examples
--------
Create an array with fields 'mass1' and 'mass2':
>>> a = FieldArray.from_kwargs(mass1=[1.1, 3.], mass2=[2., 3.])
>>> a.fieldnames
('mass1', 'mass2')
>>> a.mass1, a.mass2
(array([ 1.1, 3. ]), array([ 2., 3.]))
Create an array with only a single element in it:
>>> a = FieldArray.from_kwargs(mass1=1.1, mass2=2.)
>>> a.mass1, a.mass2
(array([ 1.1]), array([ 2.]))
"""
arrays = []
names = []
for p,vals in kwargs.items():
if not isinstance(vals, numpy.ndarray):
if not isinstance(vals, list):
vals = [vals]
vals = numpy.array(vals)
arrays.append(vals)
names.append(p)
return cls.from_arrays(arrays, names=names) | [
"def",
"from_kwargs",
"(",
"cls",
",",
"*",
"*",
"kwargs",
")",
":",
"arrays",
"=",
"[",
"]",
"names",
"=",
"[",
"]",
"for",
"p",
",",
"vals",
"in",
"kwargs",
".",
"items",
"(",
")",
":",
"if",
"not",
"isinstance",
"(",
"vals",
",",
"numpy",
".",
"ndarray",
")",
":",
"if",
"not",
"isinstance",
"(",
"vals",
",",
"list",
")",
":",
"vals",
"=",
"[",
"vals",
"]",
"vals",
"=",
"numpy",
".",
"array",
"(",
"vals",
")",
"arrays",
".",
"append",
"(",
"vals",
")",
"names",
".",
"append",
"(",
"p",
")",
"return",
"cls",
".",
"from_arrays",
"(",
"arrays",
",",
"names",
"=",
"names",
")"
] | Creates a new instance of self from the given keyword arguments.
Each argument will correspond to a field in the returned array, with
the name of the field given by the keyword, and the value(s) whatever
the keyword was set to. Each keyword may be set to a single value or
a list of values. The number of values that each argument is set to
must be the same; this will be the size of the returned array.
Examples
--------
Create an array with fields 'mass1' and 'mass2':
>>> a = FieldArray.from_kwargs(mass1=[1.1, 3.], mass2=[2., 3.])
>>> a.fieldnames
('mass1', 'mass2')
>>> a.mass1, a.mass2
(array([ 1.1, 3. ]), array([ 2., 3.]))
Create an array with only a single element in it:
>>> a = FieldArray.from_kwargs(mass1=1.1, mass2=2.)
>>> a.mass1, a.mass2
(array([ 1.1]), array([ 2.])) | [
"Creates",
"a",
"new",
"instance",
"of",
"self",
"from",
"the",
"given",
"keyword",
"arguments",
".",
"Each",
"argument",
"will",
"correspond",
"to",
"a",
"field",
"in",
"the",
"returned",
"array",
"with",
"the",
"name",
"of",
"the",
"field",
"given",
"by",
"the",
"keyword",
"and",
"the",
"value",
"(",
"s",
")",
"whatever",
"the",
"keyword",
"was",
"set",
"to",
".",
"Each",
"keyword",
"may",
"be",
"set",
"to",
"a",
"single",
"value",
"or",
"a",
"list",
"of",
"values",
".",
"The",
"number",
"of",
"values",
"that",
"each",
"argument",
"is",
"set",
"to",
"must",
"be",
"the",
"same",
";",
"this",
"will",
"be",
"the",
"size",
"of",
"the",
"returned",
"array",
"."
] | python | train | 40.9375 |
bio2bel/bio2bel | src/bio2bel/cli.py | https://github.com/bio2bel/bio2bel/blob/d80762d891fa18b248709ff0b0f97ebb65ec64c2/src/bio2bel/cli.py#L262-L266 | def web(connection, host, port):
"""Run a combine web interface."""
from bio2bel.web.application import create_application
app = create_application(connection=connection)
app.run(host=host, port=port) | [
"def",
"web",
"(",
"connection",
",",
"host",
",",
"port",
")",
":",
"from",
"bio2bel",
".",
"web",
".",
"application",
"import",
"create_application",
"app",
"=",
"create_application",
"(",
"connection",
"=",
"connection",
")",
"app",
".",
"run",
"(",
"host",
"=",
"host",
",",
"port",
"=",
"port",
")"
] | Run a combined web interface. | [
"Run",
"a",
"combine",
"web",
"interface",
"."
] | python | valid | 42.4 |
pytroll/pyspectral | pyspectral/near_infrared_reflectance.py | https://github.com/pytroll/pyspectral/blob/fd296c0e0bdf5364fa180134a1292665d6bc50a3/pyspectral/near_infrared_reflectance.py#L184-L293 | def reflectance_from_tbs(self, sun_zenith, tb_near_ir, tb_thermal, **kwargs):
"""
        The reflectance calculated is without units and should be between 0 and 1.
Inputs:
sun_zenith: Sun zenith angle for every pixel - in degrees
tb_near_ir: The 3.7 (or 3.9 or equivalent) IR Tb's at every pixel
(Kelvin)
tb_thermal: The 10.8 (or 11 or 12 or equivalent) IR Tb's at every
pixel (Kelvin)
tb_ir_co2: The 13.4 micron channel (or similar - co2 absorption band)
brightness temperatures at every pixel. If None, no CO2
absorption correction will be applied.
"""
# Check for dask arrays
if hasattr(tb_near_ir, 'compute') or hasattr(tb_thermal, 'compute'):
compute = False
else:
compute = True
if hasattr(tb_near_ir, 'mask') or hasattr(tb_thermal, 'mask'):
is_masked = True
else:
is_masked = False
if np.isscalar(tb_near_ir):
tb_nir = np.array([tb_near_ir, ])
else:
tb_nir = np.asanyarray(tb_near_ir)
if np.isscalar(tb_thermal):
tb_therm = np.array([tb_thermal, ])
else:
tb_therm = np.asanyarray(tb_thermal)
if tb_therm.shape != tb_nir.shape:
errmsg = 'Dimensions do not match! {0} and {1}'.format(
str(tb_therm.shape), str(tb_nir.shape))
raise ValueError(errmsg)
tb_ir_co2 = kwargs.get('tb_ir_co2')
lut = kwargs.get('lut', self.lut)
if tb_ir_co2 is None:
co2corr = False
tbco2 = None
else:
co2corr = True
if np.isscalar(tb_ir_co2):
tbco2 = np.array([tb_ir_co2, ])
else:
tbco2 = np.asanyarray(tb_ir_co2)
if not self.rsr:
raise NotImplementedError("Reflectance calculations without "
"rsr not yet supported!")
# Assume rsr is in microns!!!
# FIXME!
self._rad3x_t11 = self.tb2radiance(tb_therm, lut=lut)['radiance']
thermal_emiss_one = self._rad3x_t11 * self.rsr_integral
l_nir = self.tb2radiance(tb_nir, lut=lut)['radiance'] * self.rsr_integral
if thermal_emiss_one.ravel().shape[0] < 10:
LOG.info('thermal_emiss_one = %s', str(thermal_emiss_one))
if l_nir.ravel().shape[0] < 10:
LOG.info('l_nir = %s', str(l_nir))
sunzmask = (sun_zenith < 0.0) | (sun_zenith > 88.0)
sunz = where(sunzmask, 88.0, sun_zenith)
mu0 = np.cos(np.deg2rad(sunz))
# mu0 = np.where(np.less(mu0, 0.1), 0.1, mu0)
self._rad3x = l_nir
self._solar_radiance = self.solar_flux * mu0 / np.pi
# CO2 correction to the 3.9 radiance, only if tbs of a co2 band around
# 13.4 micron is provided:
if co2corr:
self.derive_rad39_corr(tb_therm, tbco2)
LOG.info("CO2 correction applied...")
else:
self._rad3x_correction = 1.0
nomin = l_nir - thermal_emiss_one * self._rad3x_correction
denom = self._solar_radiance - thermal_emiss_one * self._rad3x_correction
data = nomin / denom
mask = (self._solar_radiance - thermal_emiss_one *
self._rad3x_correction) < EPSILON
logical_or(sunzmask, mask, out=mask)
logical_or(mask, np.isnan(tb_nir), out=mask)
self._r3x = where(mask, np.nan, data)
# Reflectances should be between 0 and 1, but values above 1 is
# perfectly possible and okay! (Multiply by 100 to get reflectances
# in percent)
if hasattr(self._r3x, 'compute') and compute:
res = self._r3x.compute()
else:
res = self._r3x
if is_masked:
res = np.ma.masked_array(res, mask=np.isnan(res))
return res | [
"def",
"reflectance_from_tbs",
"(",
"self",
",",
"sun_zenith",
",",
"tb_near_ir",
",",
"tb_thermal",
",",
"*",
"*",
"kwargs",
")",
":",
"# Check for dask arrays",
"if",
"hasattr",
"(",
"tb_near_ir",
",",
"'compute'",
")",
"or",
"hasattr",
"(",
"tb_thermal",
",",
"'compute'",
")",
":",
"compute",
"=",
"False",
"else",
":",
"compute",
"=",
"True",
"if",
"hasattr",
"(",
"tb_near_ir",
",",
"'mask'",
")",
"or",
"hasattr",
"(",
"tb_thermal",
",",
"'mask'",
")",
":",
"is_masked",
"=",
"True",
"else",
":",
"is_masked",
"=",
"False",
"if",
"np",
".",
"isscalar",
"(",
"tb_near_ir",
")",
":",
"tb_nir",
"=",
"np",
".",
"array",
"(",
"[",
"tb_near_ir",
",",
"]",
")",
"else",
":",
"tb_nir",
"=",
"np",
".",
"asanyarray",
"(",
"tb_near_ir",
")",
"if",
"np",
".",
"isscalar",
"(",
"tb_thermal",
")",
":",
"tb_therm",
"=",
"np",
".",
"array",
"(",
"[",
"tb_thermal",
",",
"]",
")",
"else",
":",
"tb_therm",
"=",
"np",
".",
"asanyarray",
"(",
"tb_thermal",
")",
"if",
"tb_therm",
".",
"shape",
"!=",
"tb_nir",
".",
"shape",
":",
"errmsg",
"=",
"'Dimensions do not match! {0} and {1}'",
".",
"format",
"(",
"str",
"(",
"tb_therm",
".",
"shape",
")",
",",
"str",
"(",
"tb_nir",
".",
"shape",
")",
")",
"raise",
"ValueError",
"(",
"errmsg",
")",
"tb_ir_co2",
"=",
"kwargs",
".",
"get",
"(",
"'tb_ir_co2'",
")",
"lut",
"=",
"kwargs",
".",
"get",
"(",
"'lut'",
",",
"self",
".",
"lut",
")",
"if",
"tb_ir_co2",
"is",
"None",
":",
"co2corr",
"=",
"False",
"tbco2",
"=",
"None",
"else",
":",
"co2corr",
"=",
"True",
"if",
"np",
".",
"isscalar",
"(",
"tb_ir_co2",
")",
":",
"tbco2",
"=",
"np",
".",
"array",
"(",
"[",
"tb_ir_co2",
",",
"]",
")",
"else",
":",
"tbco2",
"=",
"np",
".",
"asanyarray",
"(",
"tb_ir_co2",
")",
"if",
"not",
"self",
".",
"rsr",
":",
"raise",
"NotImplementedError",
"(",
"\"Reflectance calculations without \"",
"\"rsr not yet supported!\"",
")",
"# Assume rsr is in microns!!!",
"# FIXME!",
"self",
".",
"_rad3x_t11",
"=",
"self",
".",
"tb2radiance",
"(",
"tb_therm",
",",
"lut",
"=",
"lut",
")",
"[",
"'radiance'",
"]",
"thermal_emiss_one",
"=",
"self",
".",
"_rad3x_t11",
"*",
"self",
".",
"rsr_integral",
"l_nir",
"=",
"self",
".",
"tb2radiance",
"(",
"tb_nir",
",",
"lut",
"=",
"lut",
")",
"[",
"'radiance'",
"]",
"*",
"self",
".",
"rsr_integral",
"if",
"thermal_emiss_one",
".",
"ravel",
"(",
")",
".",
"shape",
"[",
"0",
"]",
"<",
"10",
":",
"LOG",
".",
"info",
"(",
"'thermal_emiss_one = %s'",
",",
"str",
"(",
"thermal_emiss_one",
")",
")",
"if",
"l_nir",
".",
"ravel",
"(",
")",
".",
"shape",
"[",
"0",
"]",
"<",
"10",
":",
"LOG",
".",
"info",
"(",
"'l_nir = %s'",
",",
"str",
"(",
"l_nir",
")",
")",
"sunzmask",
"=",
"(",
"sun_zenith",
"<",
"0.0",
")",
"|",
"(",
"sun_zenith",
">",
"88.0",
")",
"sunz",
"=",
"where",
"(",
"sunzmask",
",",
"88.0",
",",
"sun_zenith",
")",
"mu0",
"=",
"np",
".",
"cos",
"(",
"np",
".",
"deg2rad",
"(",
"sunz",
")",
")",
"# mu0 = np.where(np.less(mu0, 0.1), 0.1, mu0)",
"self",
".",
"_rad3x",
"=",
"l_nir",
"self",
".",
"_solar_radiance",
"=",
"self",
".",
"solar_flux",
"*",
"mu0",
"/",
"np",
".",
"pi",
"# CO2 correction to the 3.9 radiance, only if tbs of a co2 band around",
"# 13.4 micron is provided:",
"if",
"co2corr",
":",
"self",
".",
"derive_rad39_corr",
"(",
"tb_therm",
",",
"tbco2",
")",
"LOG",
".",
"info",
"(",
"\"CO2 correction applied...\"",
")",
"else",
":",
"self",
".",
"_rad3x_correction",
"=",
"1.0",
"nomin",
"=",
"l_nir",
"-",
"thermal_emiss_one",
"*",
"self",
".",
"_rad3x_correction",
"denom",
"=",
"self",
".",
"_solar_radiance",
"-",
"thermal_emiss_one",
"*",
"self",
".",
"_rad3x_correction",
"data",
"=",
"nomin",
"/",
"denom",
"mask",
"=",
"(",
"self",
".",
"_solar_radiance",
"-",
"thermal_emiss_one",
"*",
"self",
".",
"_rad3x_correction",
")",
"<",
"EPSILON",
"logical_or",
"(",
"sunzmask",
",",
"mask",
",",
"out",
"=",
"mask",
")",
"logical_or",
"(",
"mask",
",",
"np",
".",
"isnan",
"(",
"tb_nir",
")",
",",
"out",
"=",
"mask",
")",
"self",
".",
"_r3x",
"=",
"where",
"(",
"mask",
",",
"np",
".",
"nan",
",",
"data",
")",
"# Reflectances should be between 0 and 1, but values above 1 is",
"# perfectly possible and okay! (Multiply by 100 to get reflectances",
"# in percent)",
"if",
"hasattr",
"(",
"self",
".",
"_r3x",
",",
"'compute'",
")",
"and",
"compute",
":",
"res",
"=",
"self",
".",
"_r3x",
".",
"compute",
"(",
")",
"else",
":",
"res",
"=",
"self",
".",
"_r3x",
"if",
"is_masked",
":",
"res",
"=",
"np",
".",
"ma",
".",
"masked_array",
"(",
"res",
",",
"mask",
"=",
"np",
".",
"isnan",
"(",
"res",
")",
")",
"return",
"res"
] | The reflectance calculated is without units and should be between 0 and 1.
Inputs:
sun_zenith: Sun zenith angle for every pixel - in degrees
tb_near_ir: The 3.7 (or 3.9 or equivalent) IR Tb's at every pixel
(Kelvin)
tb_thermal: The 10.8 (or 11 or 12 or equivalent) IR Tb's at every
pixel (Kelvin)
tb_ir_co2: The 13.4 micron channel (or similar - co2 absorption band)
brightness temperatures at every pixel. If None, no CO2
absorption correction will be applied. | [
"The",
"relfectance",
"calculated",
"is",
"without",
"units",
"and",
"should",
"be",
"between",
"0",
"and",
"1",
"."
] | python | train | 35.145455 |
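A hedged end-to-end sketch of driving this method through pyspectral's public Calculator class (band choice and all numbers are illustrative, and the relative spectral response files pyspectral fetches on demand are assumed to be available):

    import numpy as np
    from pyspectral.near_infrared_reflectance import Calculator

    refl39 = Calculator('EOS-Aqua', 'modis', '20')       # MODIS band 20, ~3.8 um
    sunz = np.array([68.98, 68.97])                      # sun zenith angles (deg)
    tb39 = np.array([290.0, 295.0])                      # ~3.9 um Tb (K), made up
    tb11 = np.array([282.0, 285.0])                      # ~11 um Tb (K), made up
    r39 = refl39.reflectance_from_tbs(sunz, tb39, tb11)  # unitless, roughly 0..1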
dourvaris/nano-python | src/nano/rpc.py | https://github.com/dourvaris/nano-python/blob/f26b8bc895b997067780f925049a70e82c0c2479/src/nano/rpc.py#L2698-L2725 | def password_change(self, wallet, password):
"""
Changes the password for **wallet** to **password**
.. enable_control required
:param wallet: Wallet to change password for
:type wallet: str
:param password: Password to set
:type password: str
:raises: :py:exc:`nano.rpc.RPCException`
>>> rpc.password_change(
... wallet="000D1BAEC8EC208142C99059B393051BAC8380F9B5A2E6B2489A277D81789F3F",
... password="test"
... )
True
"""
wallet = self._process_value(wallet, 'wallet')
payload = {"wallet": wallet, "password": password}
resp = self.call('password_change', payload)
return resp['changed'] == '1' | [
"def",
"password_change",
"(",
"self",
",",
"wallet",
",",
"password",
")",
":",
"wallet",
"=",
"self",
".",
"_process_value",
"(",
"wallet",
",",
"'wallet'",
")",
"payload",
"=",
"{",
"\"wallet\"",
":",
"wallet",
",",
"\"password\"",
":",
"password",
"}",
"resp",
"=",
"self",
".",
"call",
"(",
"'password_change'",
",",
"payload",
")",
"return",
"resp",
"[",
"'changed'",
"]",
"==",
"'1'"
] | Changes the password for **wallet** to **password**
.. enable_control required
:param wallet: Wallet to change password for
:type wallet: str
:param password: Password to set
:type password: str
:raises: :py:exc:`nano.rpc.RPCException`
>>> rpc.password_change(
... wallet="000D1BAEC8EC208142C99059B393051BAC8380F9B5A2E6B2489A277D81789F3F",
... password="test"
... )
True | [
"Changes",
"the",
"password",
"for",
"**",
"wallet",
"**",
"to",
"**",
"password",
"**"
] | python | train | 26.071429 |
inasafe/inasafe | safe/impact_function/style.py | https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/safe/impact_function/style.py#L99-L220 | def generate_classified_legend(
analysis,
exposure,
hazard,
use_rounding,
debug_mode):
"""Generate an ordered python structure with the classified symbology.
:param analysis: The analysis layer.
:type analysis: QgsVectorLayer
:param exposure: The exposure layer.
:type exposure: QgsVectorLayer
:param hazard: The hazard layer.
:type hazard: QgsVectorLayer
    :param use_rounding: Boolean if we round numbers in the legend.
    :type use_rounding: bool
    :param debug_mode: Boolean if run in debug mode, to display the not-exposed class.
:type debug_mode: bool
:return: The ordered dictionary to use to build the classified style.
:rtype: OrderedDict
"""
# We need to read the analysis layer to get the number of features.
analysis_row = next(analysis.getFeatures())
# Let's style the hazard class in each layers.
hazard_classification = hazard.keywords['classification']
hazard_classification = definition(hazard_classification)
# Let's check if there is some thresholds:
thresholds = hazard.keywords.get('thresholds')
if thresholds:
hazard_unit = hazard.keywords.get('continuous_hazard_unit')
hazard_unit = definition(hazard_unit)['abbreviation']
else:
hazard_unit = None
exposure = exposure.keywords['exposure']
exposure_definitions = definition(exposure)
exposure_units = exposure_definitions['units']
exposure_unit = exposure_units[0]
coefficient = 1
# We check if can use a greater unit, such as kilometre for instance.
if len(exposure_units) > 1:
# We use only two units for now.
delta = coefficient_between_units(
exposure_units[1], exposure_units[0])
all_values_are_greater = True
# We check if all values are greater than the coefficient
for i, hazard_class in enumerate(hazard_classification['classes']):
field_name = hazard_count_field['field_name'] % hazard_class['key']
try:
value = analysis_row[field_name]
except KeyError:
value = 0
if 0 < value < delta:
# 0 is fine, we can still keep the second unit.
all_values_are_greater = False
if all_values_are_greater:
# If yes, we can use this unit.
exposure_unit = exposure_units[1]
coefficient = delta
classes = OrderedDict()
for i, hazard_class in enumerate(hazard_classification['classes']):
# Get the hazard class name.
field_name = hazard_count_field['field_name'] % hazard_class['key']
# Get the number of affected feature by this hazard class.
try:
value = analysis_row[field_name]
except KeyError:
# The field might not exist if no feature impacted in this hazard
# zone.
value = 0
value = format_number(
value,
use_rounding,
exposure_definitions['use_population_rounding'],
coefficient)
minimum = None
maximum = None
# Check if we need to add thresholds.
if thresholds:
if i == 0:
minimum = thresholds[hazard_class['key']][0]
elif i == len(hazard_classification['classes']) - 1:
maximum = thresholds[hazard_class['key']][1]
else:
minimum = thresholds[hazard_class['key']][0]
maximum = thresholds[hazard_class['key']][1]
label = _format_label(
hazard_class=hazard_class['name'],
value=value,
exposure_unit=exposure_unit['abbreviation'],
minimum=minimum,
maximum=maximum,
hazard_unit=hazard_unit)
classes[hazard_class['key']] = (hazard_class['color'], label)
if exposure_definitions['display_not_exposed'] or debug_mode:
classes[not_exposed_class['key']] = _add_not_exposed(
analysis_row,
use_rounding,
exposure_definitions['use_population_rounding'],
exposure_unit['abbreviation'],
coefficient)
return classes | [
"def",
"generate_classified_legend",
"(",
"analysis",
",",
"exposure",
",",
"hazard",
",",
"use_rounding",
",",
"debug_mode",
")",
":",
"# We need to read the analysis layer to get the number of features.",
"analysis_row",
"=",
"next",
"(",
"analysis",
".",
"getFeatures",
"(",
")",
")",
"# Let's style the hazard class in each layers.",
"hazard_classification",
"=",
"hazard",
".",
"keywords",
"[",
"'classification'",
"]",
"hazard_classification",
"=",
"definition",
"(",
"hazard_classification",
")",
"# Let's check if there is some thresholds:",
"thresholds",
"=",
"hazard",
".",
"keywords",
".",
"get",
"(",
"'thresholds'",
")",
"if",
"thresholds",
":",
"hazard_unit",
"=",
"hazard",
".",
"keywords",
".",
"get",
"(",
"'continuous_hazard_unit'",
")",
"hazard_unit",
"=",
"definition",
"(",
"hazard_unit",
")",
"[",
"'abbreviation'",
"]",
"else",
":",
"hazard_unit",
"=",
"None",
"exposure",
"=",
"exposure",
".",
"keywords",
"[",
"'exposure'",
"]",
"exposure_definitions",
"=",
"definition",
"(",
"exposure",
")",
"exposure_units",
"=",
"exposure_definitions",
"[",
"'units'",
"]",
"exposure_unit",
"=",
"exposure_units",
"[",
"0",
"]",
"coefficient",
"=",
"1",
"# We check if can use a greater unit, such as kilometre for instance.",
"if",
"len",
"(",
"exposure_units",
")",
">",
"1",
":",
"# We use only two units for now.",
"delta",
"=",
"coefficient_between_units",
"(",
"exposure_units",
"[",
"1",
"]",
",",
"exposure_units",
"[",
"0",
"]",
")",
"all_values_are_greater",
"=",
"True",
"# We check if all values are greater than the coefficient",
"for",
"i",
",",
"hazard_class",
"in",
"enumerate",
"(",
"hazard_classification",
"[",
"'classes'",
"]",
")",
":",
"field_name",
"=",
"hazard_count_field",
"[",
"'field_name'",
"]",
"%",
"hazard_class",
"[",
"'key'",
"]",
"try",
":",
"value",
"=",
"analysis_row",
"[",
"field_name",
"]",
"except",
"KeyError",
":",
"value",
"=",
"0",
"if",
"0",
"<",
"value",
"<",
"delta",
":",
"# 0 is fine, we can still keep the second unit.",
"all_values_are_greater",
"=",
"False",
"if",
"all_values_are_greater",
":",
"# If yes, we can use this unit.",
"exposure_unit",
"=",
"exposure_units",
"[",
"1",
"]",
"coefficient",
"=",
"delta",
"classes",
"=",
"OrderedDict",
"(",
")",
"for",
"i",
",",
"hazard_class",
"in",
"enumerate",
"(",
"hazard_classification",
"[",
"'classes'",
"]",
")",
":",
"# Get the hazard class name.",
"field_name",
"=",
"hazard_count_field",
"[",
"'field_name'",
"]",
"%",
"hazard_class",
"[",
"'key'",
"]",
"# Get the number of affected feature by this hazard class.",
"try",
":",
"value",
"=",
"analysis_row",
"[",
"field_name",
"]",
"except",
"KeyError",
":",
"# The field might not exist if no feature impacted in this hazard",
"# zone.",
"value",
"=",
"0",
"value",
"=",
"format_number",
"(",
"value",
",",
"use_rounding",
",",
"exposure_definitions",
"[",
"'use_population_rounding'",
"]",
",",
"coefficient",
")",
"minimum",
"=",
"None",
"maximum",
"=",
"None",
"# Check if we need to add thresholds.",
"if",
"thresholds",
":",
"if",
"i",
"==",
"0",
":",
"minimum",
"=",
"thresholds",
"[",
"hazard_class",
"[",
"'key'",
"]",
"]",
"[",
"0",
"]",
"elif",
"i",
"==",
"len",
"(",
"hazard_classification",
"[",
"'classes'",
"]",
")",
"-",
"1",
":",
"maximum",
"=",
"thresholds",
"[",
"hazard_class",
"[",
"'key'",
"]",
"]",
"[",
"1",
"]",
"else",
":",
"minimum",
"=",
"thresholds",
"[",
"hazard_class",
"[",
"'key'",
"]",
"]",
"[",
"0",
"]",
"maximum",
"=",
"thresholds",
"[",
"hazard_class",
"[",
"'key'",
"]",
"]",
"[",
"1",
"]",
"label",
"=",
"_format_label",
"(",
"hazard_class",
"=",
"hazard_class",
"[",
"'name'",
"]",
",",
"value",
"=",
"value",
",",
"exposure_unit",
"=",
"exposure_unit",
"[",
"'abbreviation'",
"]",
",",
"minimum",
"=",
"minimum",
",",
"maximum",
"=",
"maximum",
",",
"hazard_unit",
"=",
"hazard_unit",
")",
"classes",
"[",
"hazard_class",
"[",
"'key'",
"]",
"]",
"=",
"(",
"hazard_class",
"[",
"'color'",
"]",
",",
"label",
")",
"if",
"exposure_definitions",
"[",
"'display_not_exposed'",
"]",
"or",
"debug_mode",
":",
"classes",
"[",
"not_exposed_class",
"[",
"'key'",
"]",
"]",
"=",
"_add_not_exposed",
"(",
"analysis_row",
",",
"use_rounding",
",",
"exposure_definitions",
"[",
"'use_population_rounding'",
"]",
",",
"exposure_unit",
"[",
"'abbreviation'",
"]",
",",
"coefficient",
")",
"return",
"classes"
] | Generate an ordered python structure with the classified symbology.
:param analysis: The analysis layer.
:type analysis: QgsVectorLayer
:param exposure: The exposure layer.
:type exposure: QgsVectorLayer
:param hazard: The hazard layer.
:type hazard: QgsVectorLayer
    :param use_rounding: Boolean if we round numbers in the legend.
    :type use_rounding: bool
    :param debug_mode: Boolean if run in debug mode, to display the not-exposed class.
:type debug_mode: bool
:return: The ordered dictionary to use to build the classified style.
:rtype: OrderedDict | [
"Generate",
"an",
"ordered",
"python",
"structure",
"with",
"the",
"classified",
"symbology",
"."
] | python | train | 33.606557 |
jobovy/galpy | galpy/potential/KuzminKutuzovStaeckelPotential.py | https://github.com/jobovy/galpy/blob/9c5b9fe65d58835624dffe432be282060918ee08/galpy/potential/KuzminKutuzovStaeckelPotential.py#L70-L87 | def _evaluate(self,R,z,phi=0.,t=0.):
"""
NAME:
_evaluate
PURPOSE:
evaluate the potential at R,z
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
Phi(R,z)
HISTORY:
2015-02-15 - Written - Trick (MPIA)
"""
l,n = bovy_coords.Rz_to_lambdanu(R,z,ac=self._ac,Delta=self._Delta)
return -1./(nu.sqrt(l) + nu.sqrt(n)) | [
"def",
"_evaluate",
"(",
"self",
",",
"R",
",",
"z",
",",
"phi",
"=",
"0.",
",",
"t",
"=",
"0.",
")",
":",
"l",
",",
"n",
"=",
"bovy_coords",
".",
"Rz_to_lambdanu",
"(",
"R",
",",
"z",
",",
"ac",
"=",
"self",
".",
"_ac",
",",
"Delta",
"=",
"self",
".",
"_Delta",
")",
"return",
"-",
"1.",
"/",
"(",
"nu",
".",
"sqrt",
"(",
"l",
")",
"+",
"nu",
".",
"sqrt",
"(",
"n",
")",
")"
] | NAME:
_evaluate
PURPOSE:
evaluate the potential at R,z
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
Phi(R,z)
HISTORY:
2015-02-15 - Written - Trick (MPIA) | [
"NAME",
":",
"_evaluate",
"PURPOSE",
":",
"evaluate",
"the",
"potential",
"at",
"R",
"z",
"INPUT",
":",
"R",
"-",
"Galactocentric",
"cylindrical",
"radius",
"z",
"-",
"vertical",
"height",
"phi",
"-",
"azimuth",
"t",
"-",
"time",
"OUTPUT",
":",
"Phi",
"(",
"R",
"z",
")",
"HISTORY",
":",
"2015",
"-",
"02",
"-",
"15",
"-",
"Written",
"-",
"Trick",
"(",
"MPIA",
")"
] | python | train | 28.055556 |
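For reference, the one-line expression returned above is the Kuzmin-Kutuzov Staeckel potential written in prolate confocal coordinates (a sketch; galpy applies the amplitude normalization elsewhere, so the code returns the GM = 1 form):

    \Phi(\lambda, \nu) = -\frac{GM}{\sqrt{\lambda} + \sqrt{\nu}}

where (lambda, nu) are obtained from (R, z) via Rz_to_lambdanu for the axis ratio a/c and focal distance Delta stored on the instance.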
numenta/htmresearch | htmresearch/frameworks/grid_cell_learning/DynamicCAN.py | https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/htmresearch/frameworks/grid_cell_learning/DynamicCAN.py#L1569-L1604 | def plotActivation(self, position=None, time=None, velocity=None):
"""
Plot the activation of the current cell populations. Assumes that
two axes have already been created, ax1 and ax2. If done in a Jupyter
notebook, this plotting will overwrite the old plot.
:param position: The current location of the animal
:param time: The current time in the simulation
:param velocity: The current velocity of the animal
"""
self.ax1.clear()
y = self.activations["n"] + self.activations["s"] + self.activations["e"] + \
self.activations["w"]
self.ax1.matshow(y.reshape(self.dimensions))
self.ax2.clear()
self.ax2.matshow(self.activationsI.reshape(self.dimensions))
self.ax3.clear()
self.ax3.matshow(self.activationHistoryI.reshape(self.dimensions))
titleString = ""
if time is not None:
titleString += "Time = {}".format(str(time))
if velocity is not None:
titleString += " Velocity = {}".format(str(velocity)[:4])
if position is not None:
titleString += " Position = {}".format(str(position)[:4])
plt.suptitle(titleString)
self.ax1.set_xlabel("Excitatory activity")
self.ax2.set_xlabel("Inhibitory activity")
self.ax3.set_xlabel("Boosting activity")
plt.tight_layout()
self.fig.canvas.draw() | [
"def",
"plotActivation",
"(",
"self",
",",
"position",
"=",
"None",
",",
"time",
"=",
"None",
",",
"velocity",
"=",
"None",
")",
":",
"self",
".",
"ax1",
".",
"clear",
"(",
")",
"y",
"=",
"self",
".",
"activations",
"[",
"\"n\"",
"]",
"+",
"self",
".",
"activations",
"[",
"\"s\"",
"]",
"+",
"self",
".",
"activations",
"[",
"\"e\"",
"]",
"+",
"self",
".",
"activations",
"[",
"\"w\"",
"]",
"self",
".",
"ax1",
".",
"matshow",
"(",
"y",
".",
"reshape",
"(",
"self",
".",
"dimensions",
")",
")",
"self",
".",
"ax2",
".",
"clear",
"(",
")",
"self",
".",
"ax2",
".",
"matshow",
"(",
"self",
".",
"activationsI",
".",
"reshape",
"(",
"self",
".",
"dimensions",
")",
")",
"self",
".",
"ax3",
".",
"clear",
"(",
")",
"self",
".",
"ax3",
".",
"matshow",
"(",
"self",
".",
"activationHistoryI",
".",
"reshape",
"(",
"self",
".",
"dimensions",
")",
")",
"titleString",
"=",
"\"\"",
"if",
"time",
"is",
"not",
"None",
":",
"titleString",
"+=",
"\"Time = {}\"",
".",
"format",
"(",
"str",
"(",
"time",
")",
")",
"if",
"velocity",
"is",
"not",
"None",
":",
"titleString",
"+=",
"\" Velocity = {}\"",
".",
"format",
"(",
"str",
"(",
"velocity",
")",
"[",
":",
"4",
"]",
")",
"if",
"position",
"is",
"not",
"None",
":",
"titleString",
"+=",
"\" Position = {}\"",
".",
"format",
"(",
"str",
"(",
"position",
")",
"[",
":",
"4",
"]",
")",
"plt",
".",
"suptitle",
"(",
"titleString",
")",
"self",
".",
"ax1",
".",
"set_xlabel",
"(",
"\"Excitatory activity\"",
")",
"self",
".",
"ax2",
".",
"set_xlabel",
"(",
"\"Inhibitory activity\"",
")",
"self",
".",
"ax3",
".",
"set_xlabel",
"(",
"\"Boosting activity\"",
")",
"plt",
".",
"tight_layout",
"(",
")",
"self",
".",
"fig",
".",
"canvas",
".",
"draw",
"(",
")"
] | Plot the activation of the current cell populations. Assumes that
two axes have already been created, ax1 and ax2. If done in a Jupyter
notebook, this plotting will overwrite the old plot.
:param position: The current location of the animal
:param time: The current time in the simulation
:param velocity: The current velocity of the animal | [
"Plot",
"the",
"activation",
"of",
"the",
"current",
"cell",
"populations",
".",
"Assumes",
"that",
"two",
"axes",
"have",
"already",
"been",
"created",
"ax1",
"and",
"ax2",
".",
"If",
"done",
"in",
"a",
"Jupyter",
"notebook",
"this",
"plotting",
"will",
"overwrite",
"the",
"old",
"plot",
".",
":",
"param",
"position",
":",
"The",
"current",
"location",
"of",
"the",
"animal",
":",
"param",
"time",
":",
"The",
"current",
"time",
"in",
"the",
"simulation",
":",
"param",
"velocity",
":",
"The",
"current",
"velocity",
"of",
"the",
"animal"
] | python | train | 35.666667 |
pytroll/satpy | satpy/readers/modis_l1b.py | https://github.com/pytroll/satpy/blob/1f21d20ac686b745fb0da9b4030d139893e066dd/satpy/readers/modis_l1b.py#L245-L356 | def get_dataset(self, key, info):
"""Read data from file and return the corresponding projectables."""
datadict = {
1000: ['EV_250_Aggr1km_RefSB',
'EV_500_Aggr1km_RefSB',
'EV_1KM_RefSB',
'EV_1KM_Emissive'],
500: ['EV_250_Aggr500_RefSB',
'EV_500_RefSB'],
250: ['EV_250_RefSB']}
platform_name = self.metadata['INVENTORYMETADATA']['ASSOCIATEDPLATFORMINSTRUMENTSENSOR'][
'ASSOCIATEDPLATFORMINSTRUMENTSENSORCONTAINER']['ASSOCIATEDPLATFORMSHORTNAME']['VALUE']
info.update({'platform_name': 'EOS-' + platform_name})
info.update({'sensor': 'modis'})
if self.resolution != key.resolution:
return
datasets = datadict[self.resolution]
for dataset in datasets:
subdata = self.sd.select(dataset)
var_attrs = subdata.attributes()
band_names = var_attrs["band_names"].split(",")
# get the relative indices of the desired channel
try:
index = band_names.index(key.name)
except ValueError:
continue
uncertainty = self.sd.select(dataset + "_Uncert_Indexes")
array = xr.DataArray(from_sds(subdata, chunks=CHUNK_SIZE)[index, :, :],
dims=['y', 'x']).astype(np.float32)
valid_range = var_attrs['valid_range']
# Fill values:
# Data Value Meaning
# 65535 Fill Value (includes reflective band data at night mode
# and completely missing L1A scans)
# 65534 L1A DN is missing within a scan
# 65533 Detector is saturated
# 65532 Cannot compute zero point DN, e.g., SV is saturated
# 65531 Detector is dead (see comments below)
# 65530 RSB dn** below the minimum of the scaling range
# 65529 TEB radiance or RSB dn** exceeds the maximum of the
# scaling range
# 65528 Aggregation algorithm failure
# 65527 Rotation of Earth view Sector from nominal science
# collection position
# 65526 Calibration coefficient b1 could not be computed
# 65525 Subframe is dead
# 65524 Both sides of the PCLW electronics on simultaneously
# 65501 - 65523 (reserved for future use)
# 65500 NAD closed upper limit
array = array.where(array >= np.float32(valid_range[0]))
array = array.where(array <= np.float32(valid_range[1]))
array = array.where(from_sds(uncertainty, chunks=CHUNK_SIZE)[index, :, :] < 15)
if key.calibration == 'brightness_temperature':
projectable = calibrate_bt(array, var_attrs, index, key.name)
info.setdefault('units', 'K')
info.setdefault('standard_name', 'toa_brightness_temperature')
elif key.calibration == 'reflectance':
projectable = calibrate_refl(array, var_attrs, index)
info.setdefault('units', '%')
info.setdefault('standard_name',
'toa_bidirectional_reflectance')
elif key.calibration == 'radiance':
projectable = calibrate_radiance(array, var_attrs, index)
info.setdefault('units', var_attrs.get('radiance_units'))
info.setdefault('standard_name',
'toa_outgoing_radiance_per_unit_wavelength')
elif key.calibration == 'counts':
projectable = calibrate_counts(array, var_attrs, index)
info.setdefault('units', 'counts')
info.setdefault('standard_name', 'counts') # made up
else:
raise ValueError("Unknown calibration for "
"key: {}".format(key))
projectable.attrs = info
# if ((platform_name == 'Aqua' and key.name in ["6", "27", "36"]) or
# (platform_name == 'Terra' and key.name in ["29"])):
# height, width = projectable.shape
# row_indices = projectable.mask.sum(1) == width
# if row_indices.sum() != height:
# projectable.mask[row_indices, :] = True
# Get the orbit number
# if not satscene.orbit:
# mda = self.data.attributes()["CoreMetadata.0"]
# orbit_idx = mda.index("ORBITNUMBER")
# satscene.orbit = mda[orbit_idx + 111:orbit_idx + 116]
# Trimming out dead sensor lines (detectors) on terra:
# (in addition channel 27, 30, 34, 35, and 36 are nosiy)
# if satscene.satname == "terra":
# for band in ["29"]:
# if not satscene[band].is_loaded() or satscene[band].data.mask.all():
# continue
# width = satscene[band].data.shape[1]
# height = satscene[band].data.shape[0]
# indices = satscene[band].data.mask.sum(1) < width
# if indices.sum() == height:
# continue
# satscene[band] = satscene[band].data[indices, :]
# satscene[band].area = geometry.SwathDefinition(
# lons=satscene[band].area.lons[indices, :],
# lats=satscene[band].area.lats[indices, :])
return projectable | [
"def",
"get_dataset",
"(",
"self",
",",
"key",
",",
"info",
")",
":",
"datadict",
"=",
"{",
"1000",
":",
"[",
"'EV_250_Aggr1km_RefSB'",
",",
"'EV_500_Aggr1km_RefSB'",
",",
"'EV_1KM_RefSB'",
",",
"'EV_1KM_Emissive'",
"]",
",",
"500",
":",
"[",
"'EV_250_Aggr500_RefSB'",
",",
"'EV_500_RefSB'",
"]",
",",
"250",
":",
"[",
"'EV_250_RefSB'",
"]",
"}",
"platform_name",
"=",
"self",
".",
"metadata",
"[",
"'INVENTORYMETADATA'",
"]",
"[",
"'ASSOCIATEDPLATFORMINSTRUMENTSENSOR'",
"]",
"[",
"'ASSOCIATEDPLATFORMINSTRUMENTSENSORCONTAINER'",
"]",
"[",
"'ASSOCIATEDPLATFORMSHORTNAME'",
"]",
"[",
"'VALUE'",
"]",
"info",
".",
"update",
"(",
"{",
"'platform_name'",
":",
"'EOS-'",
"+",
"platform_name",
"}",
")",
"info",
".",
"update",
"(",
"{",
"'sensor'",
":",
"'modis'",
"}",
")",
"if",
"self",
".",
"resolution",
"!=",
"key",
".",
"resolution",
":",
"return",
"datasets",
"=",
"datadict",
"[",
"self",
".",
"resolution",
"]",
"for",
"dataset",
"in",
"datasets",
":",
"subdata",
"=",
"self",
".",
"sd",
".",
"select",
"(",
"dataset",
")",
"var_attrs",
"=",
"subdata",
".",
"attributes",
"(",
")",
"band_names",
"=",
"var_attrs",
"[",
"\"band_names\"",
"]",
".",
"split",
"(",
"\",\"",
")",
"# get the relative indices of the desired channel",
"try",
":",
"index",
"=",
"band_names",
".",
"index",
"(",
"key",
".",
"name",
")",
"except",
"ValueError",
":",
"continue",
"uncertainty",
"=",
"self",
".",
"sd",
".",
"select",
"(",
"dataset",
"+",
"\"_Uncert_Indexes\"",
")",
"array",
"=",
"xr",
".",
"DataArray",
"(",
"from_sds",
"(",
"subdata",
",",
"chunks",
"=",
"CHUNK_SIZE",
")",
"[",
"index",
",",
":",
",",
":",
"]",
",",
"dims",
"=",
"[",
"'y'",
",",
"'x'",
"]",
")",
".",
"astype",
"(",
"np",
".",
"float32",
")",
"valid_range",
"=",
"var_attrs",
"[",
"'valid_range'",
"]",
"# Fill values:",
"# Data Value Meaning",
"# 65535 Fill Value (includes reflective band data at night mode",
"# and completely missing L1A scans)",
"# 65534 L1A DN is missing within a scan",
"# 65533 Detector is saturated",
"# 65532 Cannot compute zero point DN, e.g., SV is saturated",
"# 65531 Detector is dead (see comments below)",
"# 65530 RSB dn** below the minimum of the scaling range",
"# 65529 TEB radiance or RSB dn** exceeds the maximum of the",
"# scaling range",
"# 65528 Aggregation algorithm failure",
"# 65527 Rotation of Earth view Sector from nominal science",
"# collection position",
"# 65526 Calibration coefficient b1 could not be computed",
"# 65525 Subframe is dead",
"# 65524 Both sides of the PCLW electronics on simultaneously",
"# 65501 - 65523 (reserved for future use)",
"# 65500 NAD closed upper limit",
"array",
"=",
"array",
".",
"where",
"(",
"array",
">=",
"np",
".",
"float32",
"(",
"valid_range",
"[",
"0",
"]",
")",
")",
"array",
"=",
"array",
".",
"where",
"(",
"array",
"<=",
"np",
".",
"float32",
"(",
"valid_range",
"[",
"1",
"]",
")",
")",
"array",
"=",
"array",
".",
"where",
"(",
"from_sds",
"(",
"uncertainty",
",",
"chunks",
"=",
"CHUNK_SIZE",
")",
"[",
"index",
",",
":",
",",
":",
"]",
"<",
"15",
")",
"if",
"key",
".",
"calibration",
"==",
"'brightness_temperature'",
":",
"projectable",
"=",
"calibrate_bt",
"(",
"array",
",",
"var_attrs",
",",
"index",
",",
"key",
".",
"name",
")",
"info",
".",
"setdefault",
"(",
"'units'",
",",
"'K'",
")",
"info",
".",
"setdefault",
"(",
"'standard_name'",
",",
"'toa_brightness_temperature'",
")",
"elif",
"key",
".",
"calibration",
"==",
"'reflectance'",
":",
"projectable",
"=",
"calibrate_refl",
"(",
"array",
",",
"var_attrs",
",",
"index",
")",
"info",
".",
"setdefault",
"(",
"'units'",
",",
"'%'",
")",
"info",
".",
"setdefault",
"(",
"'standard_name'",
",",
"'toa_bidirectional_reflectance'",
")",
"elif",
"key",
".",
"calibration",
"==",
"'radiance'",
":",
"projectable",
"=",
"calibrate_radiance",
"(",
"array",
",",
"var_attrs",
",",
"index",
")",
"info",
".",
"setdefault",
"(",
"'units'",
",",
"var_attrs",
".",
"get",
"(",
"'radiance_units'",
")",
")",
"info",
".",
"setdefault",
"(",
"'standard_name'",
",",
"'toa_outgoing_radiance_per_unit_wavelength'",
")",
"elif",
"key",
".",
"calibration",
"==",
"'counts'",
":",
"projectable",
"=",
"calibrate_counts",
"(",
"array",
",",
"var_attrs",
",",
"index",
")",
"info",
".",
"setdefault",
"(",
"'units'",
",",
"'counts'",
")",
"info",
".",
"setdefault",
"(",
"'standard_name'",
",",
"'counts'",
")",
"# made up",
"else",
":",
"raise",
"ValueError",
"(",
"\"Unknown calibration for \"",
"\"key: {}\"",
".",
"format",
"(",
"key",
")",
")",
"projectable",
".",
"attrs",
"=",
"info",
"# if ((platform_name == 'Aqua' and key.name in [\"6\", \"27\", \"36\"]) or",
"# (platform_name == 'Terra' and key.name in [\"29\"])):",
"# height, width = projectable.shape",
"# row_indices = projectable.mask.sum(1) == width",
"# if row_indices.sum() != height:",
"# projectable.mask[row_indices, :] = True",
"# Get the orbit number",
"# if not satscene.orbit:",
"# mda = self.data.attributes()[\"CoreMetadata.0\"]",
"# orbit_idx = mda.index(\"ORBITNUMBER\")",
"# satscene.orbit = mda[orbit_idx + 111:orbit_idx + 116]",
"# Trimming out dead sensor lines (detectors) on terra:",
"# (in addition channel 27, 30, 34, 35, and 36 are nosiy)",
"# if satscene.satname == \"terra\":",
"# for band in [\"29\"]:",
"# if not satscene[band].is_loaded() or satscene[band].data.mask.all():",
"# continue",
"# width = satscene[band].data.shape[1]",
"# height = satscene[band].data.shape[0]",
"# indices = satscene[band].data.mask.sum(1) < width",
"# if indices.sum() == height:",
"# continue",
"# satscene[band] = satscene[band].data[indices, :]",
"# satscene[band].area = geometry.SwathDefinition(",
"# lons=satscene[band].area.lons[indices, :],",
"# lats=satscene[band].area.lats[indices, :])",
"return",
"projectable"
] | Read data from file and return the corresponding projectables. | [
"Read",
"data",
"from",
"file",
"and",
"return",
"the",
"corresponding",
"projectables",
"."
] | python | train | 48.669643 |
dmlc/gluon-nlp | src/gluonnlp/model/attention_cell.py | https://github.com/dmlc/gluon-nlp/blob/4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba/src/gluonnlp/model/attention_cell.py#L99-L120 | def _read_by_weight(self, F, att_weights, value):
"""Read from the value matrix given the attention weights.
Parameters
----------
F : symbol or ndarray
att_weights : Symbol or NDArray
Attention weights.
For single-head attention,
Shape (batch_size, query_length, memory_length).
For multi-head attention,
Shape (batch_size, num_heads, query_length, memory_length).
value : Symbol or NDArray
Value of the memory. Shape (batch_size, memory_length, total_value_dim)
Returns
-------
context_vec: Symbol or NDArray
Shape (batch_size, query_length, context_vec_dim)
"""
output = F.batch_dot(att_weights, value)
return output | [
"def",
"_read_by_weight",
"(",
"self",
",",
"F",
",",
"att_weights",
",",
"value",
")",
":",
"output",
"=",
"F",
".",
"batch_dot",
"(",
"att_weights",
",",
"value",
")",
"return",
"output"
] | Read from the value matrix given the attention weights.
Parameters
----------
F : symbol or ndarray
att_weights : Symbol or NDArray
Attention weights.
For single-head attention,
Shape (batch_size, query_length, memory_length).
For multi-head attention,
Shape (batch_size, num_heads, query_length, memory_length).
value : Symbol or NDArray
Value of the memory. Shape (batch_size, memory_length, total_value_dim)
Returns
-------
context_vec: Symbol or NDArray
Shape (batch_size, query_length, context_vec_dim) | [
"Read",
"from",
"the",
"value",
"matrix",
"given",
"the",
"attention",
"weights",
"."
] | python | train | 35.818182 |
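The batched read above is an ordinary per-batch matrix product; a plain-NumPy shape sketch (mxnet's F.batch_dot behaves like @ for these ranks):

    import numpy as np

    batch, q_len, mem_len, dim = 2, 3, 5, 4
    att_weights = np.random.rand(batch, q_len, mem_len)
    value = np.random.rand(batch, mem_len, dim)
    context = att_weights @ value        # shape (batch, q_len, dim)
    assert context.shape == (batch, q_len, dim)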
geometalab/pyGeoTile | pygeotile/tile.py | https://github.com/geometalab/pyGeoTile/blob/b1f44271698f5fc4d18c2add935797ed43254aa6/pygeotile/tile.py#L56-L60 | def for_meters(cls, meter_x, meter_y, zoom):
"""Creates a tile from X Y meters in Spherical Mercator EPSG:900913"""
point = Point.from_meters(meter_x=meter_x, meter_y=meter_y)
pixel_x, pixel_y = point.pixels(zoom=zoom)
return cls.for_pixels(pixel_x=pixel_x, pixel_y=pixel_y, zoom=zoom) | [
"def",
"for_meters",
"(",
"cls",
",",
"meter_x",
",",
"meter_y",
",",
"zoom",
")",
":",
"point",
"=",
"Point",
".",
"from_meters",
"(",
"meter_x",
"=",
"meter_x",
",",
"meter_y",
"=",
"meter_y",
")",
"pixel_x",
",",
"pixel_y",
"=",
"point",
".",
"pixels",
"(",
"zoom",
"=",
"zoom",
")",
"return",
"cls",
".",
"for_pixels",
"(",
"pixel_x",
"=",
"pixel_x",
",",
"pixel_y",
"=",
"pixel_y",
",",
"zoom",
"=",
"zoom",
")"
] | Creates a tile from X Y meters in Spherical Mercator EPSG:900913 | [
"Creates",
"a",
"tile",
"from",
"X",
"Y",
"meters",
"in",
"Spherical",
"Mercator",
"EPSG",
":",
"900913"
] | python | train | 62.6 |
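A hedged usage sketch (the meter coordinates below are illustrative EPSG:900913 values; the tms property is assumed from pyGeoTile's public API):

    from pygeotile.tile import Tile

    tile = Tile.for_meters(meter_x=-9757148.0, meter_y=5138517.0, zoom=12)
    print(tile.tms)   # TMS (x, y) indices of the containing tile at zoom 12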
biocore/deblur | deblur/workflow.py | https://github.com/biocore/deblur/blob/4b4badccdbac8fe9d8f8b3f1349f3700e31b5d7b/deblur/workflow.py#L496-L529 | def multiple_sequence_alignment(seqs_fp, threads=1):
"""Perform multiple sequence alignment on FASTA file using MAFFT.
Parameters
----------
seqs_fp: string
filepath to FASTA file for multiple sequence alignment
threads: integer, optional
number of threads to use. 0 to use all threads
Returns
-------
msa_fp : str
name of output alignment file or None if error encountered
"""
logger = logging.getLogger(__name__)
logger.info('multiple_sequence_alignment seqs file %s' % seqs_fp)
# for mafft we use -1 to denote all threads and not 0
if threads == 0:
threads = -1
if stat(seqs_fp).st_size == 0:
logger.warning('msa failed. file %s has no reads' % seqs_fp)
return None
msa_fp = seqs_fp + '.msa'
params = ['mafft', '--quiet', '--preservecase', '--parttree', '--auto',
'--thread', str(threads), seqs_fp]
sout, serr, res = _system_call(params, stdoutfilename=msa_fp)
if not res == 0:
logger.info('msa failed for file %s (maybe only 1 read?)' % seqs_fp)
logger.debug('stderr : %s' % serr)
return None
return msa_fp | [
"def",
"multiple_sequence_alignment",
"(",
"seqs_fp",
",",
"threads",
"=",
"1",
")",
":",
"logger",
"=",
"logging",
".",
"getLogger",
"(",
"__name__",
")",
"logger",
".",
"info",
"(",
"'multiple_sequence_alignment seqs file %s'",
"%",
"seqs_fp",
")",
"# for mafft we use -1 to denote all threads and not 0",
"if",
"threads",
"==",
"0",
":",
"threads",
"=",
"-",
"1",
"if",
"stat",
"(",
"seqs_fp",
")",
".",
"st_size",
"==",
"0",
":",
"logger",
".",
"warning",
"(",
"'msa failed. file %s has no reads'",
"%",
"seqs_fp",
")",
"return",
"None",
"msa_fp",
"=",
"seqs_fp",
"+",
"'.msa'",
"params",
"=",
"[",
"'mafft'",
",",
"'--quiet'",
",",
"'--preservecase'",
",",
"'--parttree'",
",",
"'--auto'",
",",
"'--thread'",
",",
"str",
"(",
"threads",
")",
",",
"seqs_fp",
"]",
"sout",
",",
"serr",
",",
"res",
"=",
"_system_call",
"(",
"params",
",",
"stdoutfilename",
"=",
"msa_fp",
")",
"if",
"not",
"res",
"==",
"0",
":",
"logger",
".",
"info",
"(",
"'msa failed for file %s (maybe only 1 read?)'",
"%",
"seqs_fp",
")",
"logger",
".",
"debug",
"(",
"'stderr : %s'",
"%",
"serr",
")",
"return",
"None",
"return",
"msa_fp"
] | Perform multiple sequence alignment on FASTA file using MAFFT.
Parameters
----------
seqs_fp: string
filepath to FASTA file for multiple sequence alignment
threads: integer, optional
number of threads to use. 0 to use all threads
Returns
-------
msa_fp : str
name of output alignment file or None if error encountered | [
"Perform",
"multiple",
"sequence",
"alignment",
"on",
"FASTA",
"file",
"using",
"MAFFT",
"."
] | python | train | 33.617647 |
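A hedged usage sketch (requires the external mafft binary on PATH; the FASTA path below is hypothetical):

    from deblur.workflow import multiple_sequence_alignment

    msa_fp = multiple_sequence_alignment('/tmp/seqs.fasta', threads=1)
    if msa_fp is None:
        print('alignment failed (empty input or mafft error)')
    else:
        print('alignment written to', msa_fp)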
weld-project/weld | python/grizzly/grizzly/seriesweld.py | https://github.com/weld-project/weld/blob/8ddd6db6b28878bef0892da44b1d2002b564389c/python/grizzly/grizzly/seriesweld.py#L215-L236 | def contains(self, string):
"""Summary
Returns:
TYPE: Description
"""
# Check that self.weld_type is a string type
vectype = self.weld_type
if isinstance(vectype, WeldVec):
elem_type = vectype.elemType
if isinstance(elem_type, WeldChar):
return SeriesWeld(
grizzly_impl.contains(
self.expr,
elem_type,
string
),
WeldBit(),
self.df,
self.column_name
)
        raise Exception("Cannot call contains on non string type")
"def",
"contains",
"(",
"self",
",",
"string",
")",
":",
"# Check that self.weld_type is a string type",
"vectype",
"=",
"self",
".",
"weld_type",
"if",
"isinstance",
"(",
"vectype",
",",
"WeldVec",
")",
":",
"elem_type",
"=",
"vectype",
".",
"elemType",
"if",
"isinstance",
"(",
"elem_type",
",",
"WeldChar",
")",
":",
"return",
"SeriesWeld",
"(",
"grizzly_impl",
".",
"contains",
"(",
"self",
".",
"expr",
",",
"elem_type",
",",
"string",
")",
",",
"WeldBit",
"(",
")",
",",
"self",
".",
"df",
",",
"self",
".",
"column_name",
")",
"raise",
"Exception",
"(",
"\"Cannot call to_lower on non string type\"",
")"
] | Summary
Returns:
TYPE: Description | [
"Summary"
] | python | train | 30.954545 |
apache/incubator-superset | superset/utils/core.py | https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/utils/core.py#L81-L89 | def flasher(msg, severity=None):
"""Flask's flash if available, logging call if not"""
try:
flash(msg, severity)
except RuntimeError:
if severity == 'danger':
logging.error(msg)
else:
logging.info(msg) | [
"def",
"flasher",
"(",
"msg",
",",
"severity",
"=",
"None",
")",
":",
"try",
":",
"flash",
"(",
"msg",
",",
"severity",
")",
"except",
"RuntimeError",
":",
"if",
"severity",
"==",
"'danger'",
":",
"logging",
".",
"error",
"(",
"msg",
")",
"else",
":",
"logging",
".",
"info",
"(",
"msg",
")"
] | Flask's flash if available, logging call if not | [
"Flask",
"s",
"flash",
"if",
"available",
"logging",
"call",
"if",
"not"
] | python | train | 28.111111 |
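Usage sketch: inside a Flask request context the call flashes to the UI; outside one (for example a CLI task), the RuntimeError from flash() is caught and the message is logged instead:

    from superset.utils.core import flasher

    flasher('Query saved', severity='info')      # flash or logging.info
    flasher('Query failed', severity='danger')   # flash or logging.error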
spacetelescope/stsci.tools | lib/stsci/tools/configobj.py | https://github.com/spacetelescope/stsci.tools/blob/9a022503ad24ca54ce83331482dfa3ff6de9f403/lib/stsci/tools/configobj.py#L381-L412 | def _fetch(self, key):
"""Helper function to fetch values from owning section.
Returns a 2-tuple: the value, and the section where it was found.
"""
# switch off interpolation before we try and fetch anything !
save_interp = self.section.main.interpolation
self.section.main.interpolation = False
# Start at section that "owns" this InterpolationEngine
current_section = self.section
while True:
# try the current section first
val = current_section.get(key)
if val is not None and not isinstance(val, Section):
break
# try "DEFAULT" next
val = current_section.get('DEFAULT', {}).get(key)
if val is not None and not isinstance(val, Section):
break
# move up to parent and try again
# top-level's parent is itself
if current_section.parent is current_section:
# reached top level, time to give up
break
current_section = current_section.parent
# restore interpolation to previous value before returning
self.section.main.interpolation = save_interp
if val is None:
raise MissingInterpolationOption(key)
return val, current_section | [
"def",
"_fetch",
"(",
"self",
",",
"key",
")",
":",
"# switch off interpolation before we try and fetch anything !",
"save_interp",
"=",
"self",
".",
"section",
".",
"main",
".",
"interpolation",
"self",
".",
"section",
".",
"main",
".",
"interpolation",
"=",
"False",
"# Start at section that \"owns\" this InterpolationEngine",
"current_section",
"=",
"self",
".",
"section",
"while",
"True",
":",
"# try the current section first",
"val",
"=",
"current_section",
".",
"get",
"(",
"key",
")",
"if",
"val",
"is",
"not",
"None",
"and",
"not",
"isinstance",
"(",
"val",
",",
"Section",
")",
":",
"break",
"# try \"DEFAULT\" next",
"val",
"=",
"current_section",
".",
"get",
"(",
"'DEFAULT'",
",",
"{",
"}",
")",
".",
"get",
"(",
"key",
")",
"if",
"val",
"is",
"not",
"None",
"and",
"not",
"isinstance",
"(",
"val",
",",
"Section",
")",
":",
"break",
"# move up to parent and try again",
"# top-level's parent is itself",
"if",
"current_section",
".",
"parent",
"is",
"current_section",
":",
"# reached top level, time to give up",
"break",
"current_section",
"=",
"current_section",
".",
"parent",
"# restore interpolation to previous value before returning",
"self",
".",
"section",
".",
"main",
".",
"interpolation",
"=",
"save_interp",
"if",
"val",
"is",
"None",
":",
"raise",
"MissingInterpolationOption",
"(",
"key",
")",
"return",
"val",
",",
"current_section"
] | Helper function to fetch values from owning section.
Returns a 2-tuple: the value, and the section where it was found. | [
"Helper",
"function",
"to",
"fetch",
"values",
"from",
"owning",
"section",
"."
] | python | train | 40.65625 |
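_fetch is internal, but the lookup order it implements (current section, then a DEFAULT section, then each parent) is exactly what drives ConfigObj interpolation. A minimal sketch, assuming the default 'ConfigParser'-style %(key)s interpolation:

from configobj import ConfigObj

lines = [
    '[DEFAULT]',
    'home = /srv/app',
    '[paths]',
    'logs = %(home)s/logs',   # 'home' is resolved via the DEFAULT section
]
cfg = ConfigObj(lines, interpolation='ConfigParser')
print(cfg['paths']['logs'])   # -> /srv/app/logs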
appknox/google-chartwrapper | GChartWrapper/GChart.py | https://github.com/appknox/google-chartwrapper/blob/3769aecbef6c83b6cd93ee72ece478ffe433ac57/GChartWrapper/GChart.py#L550-L555 | def url(self):
"""
Returns the rendered URL of the chart
"""
self.render()
return self._apiurl + '&'.join(self._parts()).replace(' ','+') | [
"def",
"url",
"(",
"self",
")",
":",
"self",
".",
"render",
"(",
")",
"return",
"self",
".",
"_apiurl",
"+",
"'&'",
".",
"join",
"(",
"self",
".",
"_parts",
"(",
")",
")",
".",
"replace",
"(",
"' '",
",",
"'+'",
")"
] | Returns the rendered URL of the chart | [
"Returns",
"the",
"rendered",
"URL",
"of",
"the",
"chart"
] | python | test | 30 |
matthew-brett/delocate | delocate/tools.py | https://github.com/matthew-brett/delocate/blob/ed48de15fce31c3f52f1a9f32cae1b02fc55aa60/delocate/tools.py#L271-L287 | def set_install_id(filename, install_id):
""" Set install id for library named in `filename`
Parameters
----------
filename : str
filename of library
install_id : str
install id for library `filename`
Raises
------
InstallNameError if `filename` has no install id
"""
if get_install_id(filename) is None:
raise InstallNameError('{0} has no install id'.format(filename))
back_tick(['install_name_tool', '-id', install_id, filename]) | [
"def",
"set_install_id",
"(",
"filename",
",",
"install_id",
")",
":",
"if",
"get_install_id",
"(",
"filename",
")",
"is",
"None",
":",
"raise",
"InstallNameError",
"(",
"'{0} has no install id'",
".",
"format",
"(",
"filename",
")",
")",
"back_tick",
"(",
"[",
"'install_name_tool'",
",",
"'-id'",
",",
"install_id",
",",
"filename",
"]",
")"
] | Set install id for library named in `filename`
Parameters
----------
filename : str
filename of library
install_id : str
install id for library `filename`
Raises
------
InstallNameError if `filename` has no install id | [
"Set",
"install",
"id",
"for",
"library",
"named",
"in",
"filename"
] | python | train | 28.352941 |
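A hedged usage sketch — the dylib path and install id below are hypothetical; under the hood this shells out to macOS's install_name_tool:

from delocate.tools import get_install_id, set_install_id

lib = 'build/libexample.dylib'   # hypothetical library path
set_install_id(lib, '@rpath/libexample.dylib')
assert get_install_id(lib) == '@rpath/libexample.dylib'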
wkentaro/fcn | fcn/trainer.py | https://github.com/wkentaro/fcn/blob/a29e167b67b11418a06566ad1ddbbc6949575e05/fcn/trainer.py#L166-L223 | def train(self):
"""Train the network using the training dataset.
Parameters
----------
None
Returns
-------
None
"""
self.stamp_start = time.time()
for iteration, batch in tqdm.tqdm(enumerate(self.iter_train),
desc='train', total=self.max_iter,
ncols=80):
self.epoch = self.iter_train.epoch
self.iteration = iteration
############
# validate #
############
if self.interval_validate and \
self.iteration % self.interval_validate == 0:
self.validate()
#########
# train #
#########
batch = map(datasets.transform_lsvrc2012_vgg16, batch)
in_vars = utils.batch_to_vars(batch, device=self.device)
self.model.zerograds()
loss = self.model(*in_vars)
if loss is not None:
loss.backward()
self.optimizer.update()
lbl_true = zip(*batch)[1]
lbl_pred = chainer.functions.argmax(self.model.score, axis=1)
lbl_pred = chainer.cuda.to_cpu(lbl_pred.data)
acc = utils.label_accuracy_score(
lbl_true, lbl_pred, self.model.n_class)
self._write_log(**{
'epoch': self.epoch,
'iteration': self.iteration,
'elapsed_time': time.time() - self.stamp_start,
'train/loss': float(loss.data),
'train/acc': acc[0],
'train/acc_cls': acc[1],
'train/mean_iu': acc[2],
'train/fwavacc': acc[3],
})
if iteration >= self.max_iter:
self._save_model()
break | [
"def",
"train",
"(",
"self",
")",
":",
"self",
".",
"stamp_start",
"=",
"time",
".",
"time",
"(",
")",
"for",
"iteration",
",",
"batch",
"in",
"tqdm",
".",
"tqdm",
"(",
"enumerate",
"(",
"self",
".",
"iter_train",
")",
",",
"desc",
"=",
"'train'",
",",
"total",
"=",
"self",
".",
"max_iter",
",",
"ncols",
"=",
"80",
")",
":",
"self",
".",
"epoch",
"=",
"self",
".",
"iter_train",
".",
"epoch",
"self",
".",
"iteration",
"=",
"iteration",
"############",
"# validate #",
"############",
"if",
"self",
".",
"interval_validate",
"and",
"self",
".",
"iteration",
"%",
"self",
".",
"interval_validate",
"==",
"0",
":",
"self",
".",
"validate",
"(",
")",
"#########",
"# train #",
"#########",
"batch",
"=",
"map",
"(",
"datasets",
".",
"transform_lsvrc2012_vgg16",
",",
"batch",
")",
"in_vars",
"=",
"utils",
".",
"batch_to_vars",
"(",
"batch",
",",
"device",
"=",
"self",
".",
"device",
")",
"self",
".",
"model",
".",
"zerograds",
"(",
")",
"loss",
"=",
"self",
".",
"model",
"(",
"*",
"in_vars",
")",
"if",
"loss",
"is",
"not",
"None",
":",
"loss",
".",
"backward",
"(",
")",
"self",
".",
"optimizer",
".",
"update",
"(",
")",
"lbl_true",
"=",
"zip",
"(",
"*",
"batch",
")",
"[",
"1",
"]",
"lbl_pred",
"=",
"chainer",
".",
"functions",
".",
"argmax",
"(",
"self",
".",
"model",
".",
"score",
",",
"axis",
"=",
"1",
")",
"lbl_pred",
"=",
"chainer",
".",
"cuda",
".",
"to_cpu",
"(",
"lbl_pred",
".",
"data",
")",
"acc",
"=",
"utils",
".",
"label_accuracy_score",
"(",
"lbl_true",
",",
"lbl_pred",
",",
"self",
".",
"model",
".",
"n_class",
")",
"self",
".",
"_write_log",
"(",
"*",
"*",
"{",
"'epoch'",
":",
"self",
".",
"epoch",
",",
"'iteration'",
":",
"self",
".",
"iteration",
",",
"'elapsed_time'",
":",
"time",
".",
"time",
"(",
")",
"-",
"self",
".",
"stamp_start",
",",
"'train/loss'",
":",
"float",
"(",
"loss",
".",
"data",
")",
",",
"'train/acc'",
":",
"acc",
"[",
"0",
"]",
",",
"'train/acc_cls'",
":",
"acc",
"[",
"1",
"]",
",",
"'train/mean_iu'",
":",
"acc",
"[",
"2",
"]",
",",
"'train/fwavacc'",
":",
"acc",
"[",
"3",
"]",
",",
"}",
")",
"if",
"iteration",
">=",
"self",
".",
"max_iter",
":",
"self",
".",
"_save_model",
"(",
")",
"break"
] | Train the network using the training dataset.
Parameters
----------
None
Returns
-------
None | [
"Train",
"the",
"network",
"using",
"the",
"training",
"dataset",
"."
] | python | train | 32.586207 |
CLARIAH/grlc | src/gquery.py | https://github.com/CLARIAH/grlc/blob/f5664e34f039010c00ef8ebb69917c05e8ce75d7/src/gquery.py#L105-L180 | def get_parameters(rq, variables, endpoint, query_metadata, auth=None):
"""
?_name The variable specifies the API mandatory parameter name. The value is incorporated in the query as plain literal.
?__name The parameter name is optional.
?_name_iri The variable is substituted with the parameter value as a IRI (also: number or literal).
?_name_en The parameter value is considered as literal with the language 'en' (e.g., en,it,es, etc.).
?_name_integer The parameter value is considered as literal and the XSD datatype 'integer' is added during substitution.
?_name_prefix_datatype The parameter value is considered as literal and the datatype 'prefix:datatype' is added during substitution. The prefix must be specified according to the SPARQL syntax.
"""
# variables = translateQuery(Query.parseString(rq, parseAll=True)).algebra['_vars']
## Aggregates
internal_matcher = re.compile("__agg_\d+__")
## Basil-style variables
variable_matcher = re.compile(
"(?P<required>[_]{1,2})(?P<name>[^_]+)_?(?P<type>[a-zA-Z0-9]+)?_?(?P<userdefined>[a-zA-Z0-9]+)?.*$")
parameters = {}
for v in variables:
if internal_matcher.match(v):
continue
match = variable_matcher.match(v)
# TODO: currently only one parameter per triple pattern is supported
if match:
vname = match.group('name')
vrequired = True if match.group('required') == '_' else False
vtype = 'string'
# All these can be None
vcodes = get_enumeration(rq, vname, endpoint, query_metadata, auth)
vdefault = get_defaults(rq, vname, query_metadata)
vlang = None
vdatatype = None
vformat = None
mtype = match.group('type')
muserdefined = match.group('userdefined')
if mtype in ['number', 'literal', 'string']:
vtype = mtype
elif mtype in ['iri']: # TODO: proper form validation of input parameter uris
vtype = 'string'
vformat = 'iri'
elif mtype:
vtype = 'string'
if mtype in static.XSD_DATATYPES:
vdatatype = 'xsd:{}'.format(mtype)
elif len(mtype) == 2:
vlang = mtype
elif muserdefined:
vdatatype = '{}:{}'.format(mtype, muserdefined)
parameters[vname] = {
'original': '?{}'.format(v),
'required': vrequired,
'name': vname,
'type': vtype
}
# Possibly None parameter attributes
if vcodes is not None:
parameters[vname]['enum'] = sorted(vcodes)
if vlang is not None:
parameters[vname]['lang'] = vlang
if vdatatype is not None:
parameters[vname]['datatype'] = vdatatype
if vformat is not None:
parameters[vname]['format'] = vformat
if vdefault is not None:
parameters[vname]['default'] = vdefault
glogger.info('Finished parsing the following parameters: {}'.format(parameters))
return parameters | [
"def",
"get_parameters",
"(",
"rq",
",",
"variables",
",",
"endpoint",
",",
"query_metadata",
",",
"auth",
"=",
"None",
")",
":",
"# variables = translateQuery(Query.parseString(rq, parseAll=True)).algebra['_vars']",
"## Aggregates",
"internal_matcher",
"=",
"re",
".",
"compile",
"(",
"\"__agg_\\d+__\"",
")",
"## Basil-style variables",
"variable_matcher",
"=",
"re",
".",
"compile",
"(",
"\"(?P<required>[_]{1,2})(?P<name>[^_]+)_?(?P<type>[a-zA-Z0-9]+)?_?(?P<userdefined>[a-zA-Z0-9]+)?.*$\"",
")",
"parameters",
"=",
"{",
"}",
"for",
"v",
"in",
"variables",
":",
"if",
"internal_matcher",
".",
"match",
"(",
"v",
")",
":",
"continue",
"match",
"=",
"variable_matcher",
".",
"match",
"(",
"v",
")",
"# TODO: currently only one parameter per triple pattern is supported",
"if",
"match",
":",
"vname",
"=",
"match",
".",
"group",
"(",
"'name'",
")",
"vrequired",
"=",
"True",
"if",
"match",
".",
"group",
"(",
"'required'",
")",
"==",
"'_'",
"else",
"False",
"vtype",
"=",
"'string'",
"# All these can be None",
"vcodes",
"=",
"get_enumeration",
"(",
"rq",
",",
"vname",
",",
"endpoint",
",",
"query_metadata",
",",
"auth",
")",
"vdefault",
"=",
"get_defaults",
"(",
"rq",
",",
"vname",
",",
"query_metadata",
")",
"vlang",
"=",
"None",
"vdatatype",
"=",
"None",
"vformat",
"=",
"None",
"mtype",
"=",
"match",
".",
"group",
"(",
"'type'",
")",
"muserdefined",
"=",
"match",
".",
"group",
"(",
"'userdefined'",
")",
"if",
"mtype",
"in",
"[",
"'number'",
",",
"'literal'",
",",
"'string'",
"]",
":",
"vtype",
"=",
"mtype",
"elif",
"mtype",
"in",
"[",
"'iri'",
"]",
":",
"# TODO: proper form validation of input parameter uris",
"vtype",
"=",
"'string'",
"vformat",
"=",
"'iri'",
"elif",
"mtype",
":",
"vtype",
"=",
"'string'",
"if",
"mtype",
"in",
"static",
".",
"XSD_DATATYPES",
":",
"vdatatype",
"=",
"'xsd:{}'",
".",
"format",
"(",
"mtype",
")",
"elif",
"len",
"(",
"mtype",
")",
"==",
"2",
":",
"vlang",
"=",
"mtype",
"elif",
"muserdefined",
":",
"vdatatype",
"=",
"'{}:{}'",
".",
"format",
"(",
"mtype",
",",
"muserdefined",
")",
"parameters",
"[",
"vname",
"]",
"=",
"{",
"'original'",
":",
"'?{}'",
".",
"format",
"(",
"v",
")",
",",
"'required'",
":",
"vrequired",
",",
"'name'",
":",
"vname",
",",
"'type'",
":",
"vtype",
"}",
"# Possibly None parameter attributes",
"if",
"vcodes",
"is",
"not",
"None",
":",
"parameters",
"[",
"vname",
"]",
"[",
"'enum'",
"]",
"=",
"sorted",
"(",
"vcodes",
")",
"if",
"vlang",
"is",
"not",
"None",
":",
"parameters",
"[",
"vname",
"]",
"[",
"'lang'",
"]",
"=",
"vlang",
"if",
"vdatatype",
"is",
"not",
"None",
":",
"parameters",
"[",
"vname",
"]",
"[",
"'datatype'",
"]",
"=",
"vdatatype",
"if",
"vformat",
"is",
"not",
"None",
":",
"parameters",
"[",
"vname",
"]",
"[",
"'format'",
"]",
"=",
"vformat",
"if",
"vdefault",
"is",
"not",
"None",
":",
"parameters",
"[",
"vname",
"]",
"[",
"'default'",
"]",
"=",
"vdefault",
"glogger",
".",
"info",
"(",
"'Finished parsing the following parameters: {}'",
".",
"format",
"(",
"parameters",
")",
")",
"return",
"parameters"
] | ?_name The variable specifies the API mandatory parameter name. The value is incorporated in the query as plain literal.
?__name The parameter name is optional.
?_name_iri The variable is substituted with the parameter value as a IRI (also: number or literal).
?_name_en The parameter value is considered as literal with the language 'en' (e.g., en,it,es, etc.).
?_name_integer The parameter value is considered as literal and the XSD datatype 'integer' is added during substitution.
?_name_prefix_datatype The parameter value is considered as literal and the datatype 'prefix:datatype' is added during substitution. The prefix must be specified according to the SPARQL syntax. | [
"?_name",
"The",
"variable",
"specifies",
"the",
"API",
"mandatory",
"parameter",
"name",
".",
"The",
"value",
"is",
"incorporated",
"in",
"the",
"query",
"as",
"plain",
"literal",
".",
"?__name",
"The",
"parameter",
"name",
"is",
"optional",
".",
"?_name_iri",
"The",
"variable",
"is",
"substituted",
"with",
"the",
"parameter",
"value",
"as",
"a",
"IRI",
"(",
"also",
":",
"number",
"or",
"literal",
")",
".",
"?_name_en",
"The",
"parameter",
"value",
"is",
"considered",
"as",
"literal",
"with",
"the",
"language",
"en",
"(",
"e",
".",
"g",
".",
"en",
"it",
"es",
"etc",
".",
")",
".",
"?_name_integer",
"The",
"parameter",
"value",
"is",
"considered",
"as",
"literal",
"and",
"the",
"XSD",
"datatype",
"integer",
"is",
"added",
"during",
"substitution",
".",
"?_name_prefix_datatype",
"The",
"parameter",
"value",
"is",
"considered",
"as",
"literal",
"and",
"the",
"datatype",
"prefix",
":",
"datatype",
"is",
"added",
"during",
"substitution",
".",
"The",
"prefix",
"must",
"be",
"specified",
"according",
"to",
"the",
"SPARQL",
"syntax",
"."
] | python | train | 42.210526 |
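The Basil-style regex above carries the whole naming convention; a self-contained sketch showing how some illustrative variable names decompose (the names themselves are made up):

import re

variable_matcher = re.compile(
    "(?P<required>[_]{1,2})(?P<name>[^_]+)_?"
    "(?P<type>[a-zA-Z0-9]+)?_?(?P<userdefined>[a-zA-Z0-9]+)?.*$")

for v in ['_genre', '__genre', '_subject_iri', '_label_en',
          '_year_integer', '_code_xsd_token']:
    m = variable_matcher.match(v)
    required = 'required' if m.group('required') == '_' else 'optional'
    print(v, '->', m.group('name'), required,
          m.group('type'), m.group('userdefined'))

# _genre           -> genre   required None    None
# __genre          -> genre   optional None    None
# _subject_iri     -> subject required iri     None   (substituted as an IRI)
# _label_en        -> label   required en      None   (language-tagged literal)
# _year_integer    -> year    required integer None   (xsd:integer literal)
# _code_xsd_token  -> code    required xsd     token  (datatype xsd:token)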
inveniosoftware/invenio-records-ui | invenio_records_ui/views.py | https://github.com/inveniosoftware/invenio-records-ui/blob/ae92367978f2e1e96634685bd296f0fd92b4da54/invenio_records_ui/views.py#L48-L84 | def create_blueprint(endpoints):
"""Create Invenio-Records-UI blueprint.
The factory installs one URL route per endpoint defined, and adds an
error handler for rendering tombstones.
:param endpoints: Dictionary of endpoints to be installed. See usage
documentation for further details.
:returns: The initialized blueprint.
"""
blueprint = Blueprint(
'invenio_records_ui',
__name__,
url_prefix='',
template_folder='templates',
static_folder='static',
)
@blueprint.errorhandler(PIDDeletedError)
def tombstone_errorhandler(error):
return render_template(
current_app.config['RECORDS_UI_TOMBSTONE_TEMPLATE'],
pid=error.pid,
record=error.record or {},
), 410
@blueprint.context_processor
def inject_export_formats():
return dict(
export_formats=(
current_app.extensions['invenio-records-ui'].export_formats)
)
for endpoint, options in (endpoints or {}).items():
blueprint.add_url_rule(**create_url_rule(endpoint, **options))
return blueprint | [
"def",
"create_blueprint",
"(",
"endpoints",
")",
":",
"blueprint",
"=",
"Blueprint",
"(",
"'invenio_records_ui'",
",",
"__name__",
",",
"url_prefix",
"=",
"''",
",",
"template_folder",
"=",
"'templates'",
",",
"static_folder",
"=",
"'static'",
",",
")",
"@",
"blueprint",
".",
"errorhandler",
"(",
"PIDDeletedError",
")",
"def",
"tombstone_errorhandler",
"(",
"error",
")",
":",
"return",
"render_template",
"(",
"current_app",
".",
"config",
"[",
"'RECORDS_UI_TOMBSTONE_TEMPLATE'",
"]",
",",
"pid",
"=",
"error",
".",
"pid",
",",
"record",
"=",
"error",
".",
"record",
"or",
"{",
"}",
",",
")",
",",
"410",
"@",
"blueprint",
".",
"context_processor",
"def",
"inject_export_formats",
"(",
")",
":",
"return",
"dict",
"(",
"export_formats",
"=",
"(",
"current_app",
".",
"extensions",
"[",
"'invenio-records-ui'",
"]",
".",
"export_formats",
")",
")",
"for",
"endpoint",
",",
"options",
"in",
"(",
"endpoints",
"or",
"{",
"}",
")",
".",
"items",
"(",
")",
":",
"blueprint",
".",
"add_url_rule",
"(",
"*",
"*",
"create_url_rule",
"(",
"endpoint",
",",
"*",
"*",
"options",
")",
")",
"return",
"blueprint"
] | Create Invenio-Records-UI blueprint.
The factory installs one URL route per endpoint defined, and adds an
error handler for rendering tombstones.
:param endpoints: Dictionary of endpoints to be installed. See usage
documentation for further details.
:returns: The initialized blueprint. | [
"Create",
"Invenio",
"-",
"Records",
"-",
"UI",
"blueprint",
"."
] | python | test | 30.216216 |
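A hedged sketch of the endpoints dict the factory consumes; the keys shown (pid_type, route, template) mirror Invenio-Records-UI's documented endpoint options, but this is illustrative rather than exhaustive:

from invenio_records_ui.views import create_blueprint  # import path assumed

endpoints = {
    'recid': {
        'pid_type': 'recid',
        'route': '/records/<pid_value>',
        'template': 'invenio_records_ui/detail.html',
    },
}
blueprint = create_blueprint(endpoints)
app.register_blueprint(blueprint)   # 'app' is an existing Flask application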
nameko/nameko | nameko/rpc.py | https://github.com/nameko/nameko/blob/88d7e5211de4fcc1c34cd7f84d7c77f0619c5f5d/nameko/rpc.py#L70-L80 | def stop(self):
""" Stop the RpcConsumer.
The RpcConsumer ordinarily unregisters from the QueueConsumer when the
last Rpc subclass unregisters from it. If no providers were registered,
we should unregister from the QueueConsumer as soon as we're asked
to stop.
"""
if not self._providers_registered:
self.queue_consumer.unregister_provider(self)
self._unregistered_from_queue_consumer.send(True) | [
"def",
"stop",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"_providers_registered",
":",
"self",
".",
"queue_consumer",
".",
"unregister_provider",
"(",
"self",
")",
"self",
".",
"_unregistered_from_queue_consumer",
".",
"send",
"(",
"True",
")"
] | Stop the RpcConsumer.
The RpcConsumer ordinarily unregisters from the QueueConsumer when the
last Rpc subclass unregisters from it. If no providers were registered,
we should unregister from the QueueConsumer as soon as we're asked
to stop. | [
"Stop",
"the",
"RpcConsumer",
"."
] | python | train | 42.181818 |
shmir/PyTrafficGenerator | trafficgenerator/tgn_object.py | https://github.com/shmir/PyTrafficGenerator/blob/382e5d549c83404af2a6571fe19c9e71df8bac14/trafficgenerator/tgn_object.py#L189-L199 | def get_object_or_child_by_type(self, *types):
""" Get object if child already been read or get child.
Use this method for fast access to objects in case of static configurations.
:param types: requested object types.
:return: all children of the specified types.
"""
objects = self.get_objects_or_children_by_type(*types)
return objects[0] if any(objects) else None | [
"def",
"get_object_or_child_by_type",
"(",
"self",
",",
"*",
"types",
")",
":",
"objects",
"=",
"self",
".",
"get_objects_or_children_by_type",
"(",
"*",
"types",
")",
"return",
"objects",
"[",
"0",
"]",
"if",
"any",
"(",
"objects",
")",
"else",
"None"
] | Get object if child already been read or get child.
Use this method for fast access to objects in case of static configurations.
:param types: requested object types.
:return: all children of the specified types. | [
"Get",
"object",
"if",
"child",
"already",
"been",
"read",
"or",
"get",
"child",
"."
] | python | train | 37.727273 |
sods/ods | pods/assesser.py | https://github.com/sods/ods/blob/3995c659f25a0a640f6009ed7fcc2559ce659b1d/pods/assesser.py#L289-L292 | def answer(part, module='mlai2014.json'):
"""Returns the answers to the lab classes."""
marks = json.load(open(os.path.join(data_directory, module), 'rb'))
return marks['Lab ' + str(part+1)] | [
"def",
"answer",
"(",
"part",
",",
"module",
"=",
"'mlai2014.json'",
")",
":",
"marks",
"=",
"json",
".",
"load",
"(",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"data_directory",
",",
"module",
")",
",",
"'rb'",
")",
")",
"return",
"marks",
"[",
"'Lab '",
"+",
"str",
"(",
"part",
"+",
"1",
")",
"]"
] | Returns the answers to the lab classes. | [
"Returns",
"the",
"answers",
"to",
"the",
"lab",
"classes",
"."
] | python | train | 50 |
geophysics-ubonn/reda | lib/reda/importers/legacy/eit160.py | https://github.com/geophysics-ubonn/reda/blob/46a939729e40c7c4723315c03679c40761152e9e/lib/reda/importers/legacy/eit160.py#L61-L145 | def import_medusa_data(mat_filename, config_file):
"""Import measurement data (a .mat file) of the FZJ EIT160 system. This
data format is identified as 'FZJ-EZ-2017'.
Parameters
----------
mat_filename: string
filename to the .mat data file. Note that only MNU0 single-potentials
are supported!
config_file: string
filename for configuration file. The configuration file contains N rows
with 4 columns each (a, b, m, n)
Returns
-------
dfn : pandas.DataFrame
    the constructed four-point measurements
df_md : pandas.DataFrame
    measurement metadata from the .mat file
"""
df_emd, df_md = _read_mat_mnu0(mat_filename)
# 'configs' can be a numpy array or a filename
if not isinstance(config_file, np.ndarray):
configs = np.loadtxt(config_file).astype(int)
else:
configs = config_file
# construct four-point measurements via superposition
print('constructing four-point measurements')
quadpole_list = []
if df_emd is not None:
index = 0
for Ar, Br, M, N in configs:
# print('constructing', Ar, Br, M, N)
# the order of A and B doesn't concern us
A = np.min((Ar, Br))
B = np.max((Ar, Br))
# first choice: correct ordering
query_M = df_emd.query('a=={0} and b=={1} and p=={2}'.format(
A, B, M
))
query_N = df_emd.query('a=={0} and b=={1} and p=={2}'.format(
A, B, N
))
if query_M.size == 0 or query_N.size == 0:
continue
index += 1
# keep these columns as they are (no subtracting)
keep_cols = [
'datetime',
'frequency',
'a', 'b',
'Zg1', 'Zg2', 'Zg3',
'Is',
'Il',
'Zg',
'Iab',
]
df4 = pd.DataFrame()
diff_cols = ['Zt', ]
df4[keep_cols] = query_M[keep_cols]
for col in diff_cols:
df4[col] = query_M[col].values - query_N[col].values
df4['m'] = query_M['p'].values
df4['n'] = query_N['p'].values
quadpole_list.append(df4)
if quadpole_list:
dfn = pd.concat(quadpole_list)
Rsign = np.sign(dfn['Zt'].real)
dfn['r'] = Rsign * np.abs(dfn['Zt'])
dfn['Vmn'] = dfn['r'] * dfn['Iab']
dfn['rpha'] = np.arctan2(
np.imag(dfn['Zt'].values),
np.real(dfn['Zt'].values)
) * 1e3
else:
dfn = pd.DataFrame()
return dfn, df_md | [
"def",
"import_medusa_data",
"(",
"mat_filename",
",",
"config_file",
")",
":",
"df_emd",
",",
"df_md",
"=",
"_read_mat_mnu0",
"(",
"mat_filename",
")",
"# 'configs' can be a numpy array or a filename",
"if",
"not",
"isinstance",
"(",
"config_file",
",",
"np",
".",
"ndarray",
")",
":",
"configs",
"=",
"np",
".",
"loadtxt",
"(",
"config_file",
")",
".",
"astype",
"(",
"int",
")",
"else",
":",
"configs",
"=",
"config_file",
"# construct four-point measurements via superposition",
"print",
"(",
"'constructing four-point measurements'",
")",
"quadpole_list",
"=",
"[",
"]",
"if",
"df_emd",
"is",
"not",
"None",
":",
"index",
"=",
"0",
"for",
"Ar",
",",
"Br",
",",
"M",
",",
"N",
"in",
"configs",
":",
"# print('constructing', Ar, Br, M, N)",
"# the order of A and B doesn't concern us",
"A",
"=",
"np",
".",
"min",
"(",
"(",
"Ar",
",",
"Br",
")",
")",
"B",
"=",
"np",
".",
"max",
"(",
"(",
"Ar",
",",
"Br",
")",
")",
"# first choice: correct ordering",
"query_M",
"=",
"df_emd",
".",
"query",
"(",
"'a=={0} and b=={1} and p=={2}'",
".",
"format",
"(",
"A",
",",
"B",
",",
"M",
")",
")",
"query_N",
"=",
"df_emd",
".",
"query",
"(",
"'a=={0} and b=={1} and p=={2}'",
".",
"format",
"(",
"A",
",",
"B",
",",
"N",
")",
")",
"if",
"query_M",
".",
"size",
"==",
"0",
"or",
"query_N",
".",
"size",
"==",
"0",
":",
"continue",
"index",
"+=",
"1",
"# keep these columns as they are (no subtracting)",
"keep_cols",
"=",
"[",
"'datetime'",
",",
"'frequency'",
",",
"'a'",
",",
"'b'",
",",
"'Zg1'",
",",
"'Zg2'",
",",
"'Zg3'",
",",
"'Is'",
",",
"'Il'",
",",
"'Zg'",
",",
"'Iab'",
",",
"]",
"df4",
"=",
"pd",
".",
"DataFrame",
"(",
")",
"diff_cols",
"=",
"[",
"'Zt'",
",",
"]",
"df4",
"[",
"keep_cols",
"]",
"=",
"query_M",
"[",
"keep_cols",
"]",
"for",
"col",
"in",
"diff_cols",
":",
"df4",
"[",
"col",
"]",
"=",
"query_M",
"[",
"col",
"]",
".",
"values",
"-",
"query_N",
"[",
"col",
"]",
".",
"values",
"df4",
"[",
"'m'",
"]",
"=",
"query_M",
"[",
"'p'",
"]",
".",
"values",
"df4",
"[",
"'n'",
"]",
"=",
"query_N",
"[",
"'p'",
"]",
".",
"values",
"quadpole_list",
".",
"append",
"(",
"df4",
")",
"if",
"quadpole_list",
":",
"dfn",
"=",
"pd",
".",
"concat",
"(",
"quadpole_list",
")",
"Rsign",
"=",
"np",
".",
"sign",
"(",
"dfn",
"[",
"'Zt'",
"]",
".",
"real",
")",
"dfn",
"[",
"'r'",
"]",
"=",
"Rsign",
"*",
"np",
".",
"abs",
"(",
"dfn",
"[",
"'Zt'",
"]",
")",
"dfn",
"[",
"'Vmn'",
"]",
"=",
"dfn",
"[",
"'r'",
"]",
"*",
"dfn",
"[",
"'Iab'",
"]",
"dfn",
"[",
"'rpha'",
"]",
"=",
"np",
".",
"arctan2",
"(",
"np",
".",
"imag",
"(",
"dfn",
"[",
"'Zt'",
"]",
".",
"values",
")",
",",
"np",
".",
"real",
"(",
"dfn",
"[",
"'Zt'",
"]",
".",
"values",
")",
")",
"*",
"1e3",
"else",
":",
"dfn",
"=",
"pd",
".",
"DataFrame",
"(",
")",
"return",
"dfn",
",",
"df_md"
] | Import measurement data (a .mat file) of the FZJ EIT160 system. This
data format is identified as 'FZJ-EZ-2017'.
Parameters
----------
mat_filename: string
filename to the .mat data file. Note that only MNU0 single-potentials
are supported!
config_file: string
filename for configuration file. The configuration file contains N rows
with 4 columns each (a, b, m, n)
Returns
-------
dfn : pandas.DataFrame
    the constructed four-point measurements
df_md : pandas.DataFrame
    measurement metadata from the .mat file | [
"Import",
"measurement",
"data",
"(",
"a",
".",
"mat",
"file",
")",
"of",
"the",
"FZJ",
"EIT160",
"system",
".",
"This",
"data",
"format",
"is",
"identified",
"as",
"FZJ",
"-",
"EZ",
"-",
"2017",
"."
] | python | train | 29 |
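A hedged usage sketch — the file name and electrode configuration are made up; configs may equally be a path to a whitespace-separated text file:

import numpy as np
from reda.importers.legacy.eit160 import import_medusa_data  # import path assumed

configs = np.array([   # a, b, m, n electrode numbers per measurement
    [1, 2, 3, 4],
    [2, 3, 4, 5],
])
dfn, df_md = import_medusa_data('eit160_measurement.mat', configs)
print(dfn[['a', 'b', 'm', 'n', 'r', 'rpha']].head())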
mozilla/moz-sql-parser | moz_sql_parser/formatting.py | https://github.com/mozilla/moz-sql-parser/blob/35fcc69b8f73b48e1fd48025cae1e174d57c3921/moz_sql_parser/formatting.py#L39-L51 | def escape(identifier, ansi_quotes, should_quote):
"""
Escape identifiers.
ANSI uses double quotes, but many databases use back quotes.
"""
if not should_quote(identifier):
return identifier
quote = '"' if ansi_quotes else '`'
identifier = identifier.replace(quote, 2*quote)
return '{0}{1}{2}'.format(quote, identifier, quote) | [
"def",
"escape",
"(",
"identifier",
",",
"ansi_quotes",
",",
"should_quote",
")",
":",
"if",
"not",
"should_quote",
"(",
"identifier",
")",
":",
"return",
"identifier",
"quote",
"=",
"'\"'",
"if",
"ansi_quotes",
"else",
"'`'",
"identifier",
"=",
"identifier",
".",
"replace",
"(",
"quote",
",",
"2",
"*",
"quote",
")",
"return",
"'{0}{1}{2}'",
".",
"format",
"(",
"quote",
",",
"identifier",
",",
"quote",
")"
] | Escape identifiers.
ANSI uses double quotes, but many databases use back quotes. | [
"Escape",
"identifiers",
"."
] | python | train | 27.461538 |
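A self-contained sketch of escape in action — embedded quote characters are doubled, per the SQL escaping rule:

from moz_sql_parser.formatting import escape  # import path assumed

quote_all = lambda identifier: True   # hypothetical should_quote policy

print(escape('select', ansi_quotes=True, should_quote=quote_all))   # "select"
print(escape('a"b', ansi_quotes=True, should_quote=quote_all))      # "a""b"
print(escape('col', ansi_quotes=False, should_quote=quote_all))     # `col`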
wummel/linkchecker | linkcheck/network/iputil.py | https://github.com/wummel/linkchecker/blob/c2ce810c3fb00b895a841a7be6b2e78c64e7b042/linkcheck/network/iputil.py#L104-L111 | def is_valid_ipv4 (ip):
"""
Return True if given ip is a valid IPv4 address.
"""
if not _ipv4_re.match(ip):
return False
a, b, c, d = [int(i) for i in ip.split(".")]
return a <= 255 and b <= 255 and c <= 255 and d <= 255 | [
"def",
"is_valid_ipv4",
"(",
"ip",
")",
":",
"if",
"not",
"_ipv4_re",
".",
"match",
"(",
"ip",
")",
":",
"return",
"False",
"a",
",",
"b",
",",
"c",
",",
"d",
"=",
"[",
"int",
"(",
"i",
")",
"for",
"i",
"in",
"ip",
".",
"split",
"(",
"\".\"",
")",
"]",
"return",
"a",
"<=",
"255",
"and",
"b",
"<=",
"255",
"and",
"c",
"<=",
"255",
"and",
"d",
"<=",
"255"
] | Return True if given ip is a valid IPv4 address. | [
"Return",
"True",
"if",
"given",
"ip",
"is",
"a",
"valid",
"IPv4",
"address",
"."
] | python | train | 30.625 |
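A quick sketch of the boundary behaviour, assuming the module-level _ipv4_re requires four dot-separated digit groups:

from linkcheck.network.iputil import is_valid_ipv4  # import path assumed

print(is_valid_ipv4('192.168.0.1'))   # True
print(is_valid_ipv4('256.1.1.1'))     # False -- octet above 255
print(is_valid_ipv4('1.2.3'))         # False -- regex rejects three octets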
Miserlou/Zappa | zappa/core.py | https://github.com/Miserlou/Zappa/blob/3ccf7490a8d8b8fa74a61ee39bf44234f3567739/zappa/core.py#L2261-L2282 | def get_api_id(self, lambda_name):
"""
Given a lambda_name, return the API id.
"""
try:
response = self.cf_client.describe_stack_resource(StackName=lambda_name,
LogicalResourceId='Api')
return response['StackResourceDetail'].get('PhysicalResourceId', None)
except: # pragma: no cover
try:
# Try the old method (project was probably made on an older, non CF version)
response = self.apigateway_client.get_rest_apis(limit=500)
for item in response['items']:
if item['name'] == lambda_name:
return item['id']
logger.exception('Could not get API ID.')
return None
except: # pragma: no cover
# We don't even have an API deployed. That's okay!
return None | [
"def",
"get_api_id",
"(",
"self",
",",
"lambda_name",
")",
":",
"try",
":",
"response",
"=",
"self",
".",
"cf_client",
".",
"describe_stack_resource",
"(",
"StackName",
"=",
"lambda_name",
",",
"LogicalResourceId",
"=",
"'Api'",
")",
"return",
"response",
"[",
"'StackResourceDetail'",
"]",
".",
"get",
"(",
"'PhysicalResourceId'",
",",
"None",
")",
"except",
":",
"# pragma: no cover",
"try",
":",
"# Try the old method (project was probably made on an older, non CF version)",
"response",
"=",
"self",
".",
"apigateway_client",
".",
"get_rest_apis",
"(",
"limit",
"=",
"500",
")",
"for",
"item",
"in",
"response",
"[",
"'items'",
"]",
":",
"if",
"item",
"[",
"'name'",
"]",
"==",
"lambda_name",
":",
"return",
"item",
"[",
"'id'",
"]",
"logger",
".",
"exception",
"(",
"'Could not get API ID.'",
")",
"return",
"None",
"except",
":",
"# pragma: no cover",
"# We don't even have an API deployed. That's okay!",
"return",
"None"
] | Given a lambda_name, return the API id. | [
"Given",
"a",
"lambda_name",
"return",
"the",
"API",
"id",
"."
] | python | train | 42.545455 |
StorjOld/heartbeat | heartbeat/Merkle/Merkle.py | https://github.com/StorjOld/heartbeat/blob/4d54f2011f1e9f688073d4347bc51bb7bd682718/heartbeat/Merkle/Merkle.py#L194-L207 | def fromdict(dict):
"""Takes a dictionary as an argument and returns a new State object
from the dictionary.
:param dict: the dictionary to convert
"""
index = dict['index']
seed = hb_decode(dict['seed'])
n = dict['n']
root = hb_decode(dict['root'])
hmac = hb_decode(dict['hmac'])
timestamp = dict['timestamp']
self = State(index, seed, n, root, hmac, timestamp)
return self | [
"def",
"fromdict",
"(",
"dict",
")",
":",
"index",
"=",
"dict",
"[",
"'index'",
"]",
"seed",
"=",
"hb_decode",
"(",
"dict",
"[",
"'seed'",
"]",
")",
"n",
"=",
"dict",
"[",
"'n'",
"]",
"root",
"=",
"hb_decode",
"(",
"dict",
"[",
"'root'",
"]",
")",
"hmac",
"=",
"hb_decode",
"(",
"dict",
"[",
"'hmac'",
"]",
")",
"timestamp",
"=",
"dict",
"[",
"'timestamp'",
"]",
"self",
"=",
"State",
"(",
"index",
",",
"seed",
",",
"n",
",",
"root",
",",
"hmac",
",",
"timestamp",
")",
"return",
"self"
] | Takes a dictionary as an argument and returns a new State object
from the dictionary.
:param dict: the dictionary to convert | [
"Takes",
"a",
"dictionary",
"as",
"an",
"argument",
"and",
"returns",
"a",
"new",
"State",
"object",
"from",
"the",
"dictionary",
"."
] | python | train | 32.714286 |
SteveMcGrath/pySecurityCenter | securitycenter/sc4.py | https://github.com/SteveMcGrath/pySecurityCenter/blob/f0b10b1bcd4fd23a8d4d09ca6774cdf5e1cfd880/securitycenter/sc4.py#L951-L968 | def group_add(self, name, restrict, repos, lces=[], assets=[], queries=[],
policies=[], dashboards=[], credentials=[], description=''):
'''group_add name, restrict, repos
'''
return self.raw_query('group', 'add', data={
'lces': [{'id': i} for i in lces],
'assets': [{'id': i} for i in assets],
'queries': [{'id': i} for i in queries],
'policies': [{'id': i} for i in policies],
'dashboardTabs': [{'id': i} for i in dashboards],
'credentials': [{'id': i} for i in credentials],
'repositories': [{'id': i} for i in repos],
'definingAssets': [{'id': i} for i in restrict],
'name': name,
'description': description,
'users': [],
'context': ''
}) | [
"def",
"group_add",
"(",
"self",
",",
"name",
",",
"restrict",
",",
"repos",
",",
"lces",
"=",
"[",
"]",
",",
"assets",
"=",
"[",
"]",
",",
"queries",
"=",
"[",
"]",
",",
"policies",
"=",
"[",
"]",
",",
"dashboards",
"=",
"[",
"]",
",",
"credentials",
"=",
"[",
"]",
",",
"description",
"=",
"''",
")",
":",
"return",
"self",
".",
"raw_query",
"(",
"'group'",
",",
"'add'",
",",
"data",
"=",
"{",
"'lces'",
":",
"[",
"{",
"'id'",
":",
"i",
"}",
"for",
"i",
"in",
"lces",
"]",
",",
"'assets'",
":",
"[",
"{",
"'id'",
":",
"i",
"}",
"for",
"i",
"in",
"assets",
"]",
",",
"'queries'",
":",
"[",
"{",
"'id'",
":",
"i",
"}",
"for",
"i",
"in",
"queries",
"]",
",",
"'policies'",
":",
"[",
"{",
"'id'",
":",
"i",
"}",
"for",
"i",
"in",
"policies",
"]",
",",
"'dashboardTabs'",
":",
"[",
"{",
"'id'",
":",
"i",
"}",
"for",
"i",
"in",
"dashboards",
"]",
",",
"'credentials'",
":",
"[",
"{",
"'id'",
":",
"i",
"}",
"for",
"i",
"in",
"credentials",
"]",
",",
"'repositories'",
":",
"[",
"{",
"'id'",
":",
"i",
"}",
"for",
"i",
"in",
"repos",
"]",
",",
"'definingAssets'",
":",
"[",
"{",
"'id'",
":",
"i",
"}",
"for",
"i",
"in",
"restrict",
"]",
",",
"'name'",
":",
"name",
",",
"'description'",
":",
"description",
",",
"'users'",
":",
"[",
"]",
",",
"'context'",
":",
"''",
"}",
")"
] | group_add name, restrict, repos | [
"group_add",
"name",
"restrict",
"repos"
] | python | train | 45.444444 |
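A hedged call sketch — 'sc' stands in for an authenticated SecurityCenter 4 session object, and the ids are fabricated:

sc.group_add('Analysts', restrict=[1], repos=[1, 2],
             description='read-only analyst group')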
RedHatInsights/insights-core | insights/client/auto_config.py | https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/client/auto_config.py#L27-L50 | def verify_connectivity(config):
"""
Verify connectivity to satellite server
"""
logger.debug("Verifying Connectivity")
ic = InsightsConnection(config)
try:
branch_info = ic.get_branch_info()
except requests.ConnectionError as e:
logger.debug(e)
logger.debug("Failed to connect to satellite")
return False
except LookupError as e:
logger.debug(e)
logger.debug("Failed to parse response from satellite")
return False
try:
remote_leaf = branch_info['remote_leaf']
return remote_leaf
except LookupError as e:
logger.debug(e)
logger.debug("Failed to find accurate branch_info")
return False | [
"def",
"verify_connectivity",
"(",
"config",
")",
":",
"logger",
".",
"debug",
"(",
"\"Verifying Connectivity\"",
")",
"ic",
"=",
"InsightsConnection",
"(",
"config",
")",
"try",
":",
"branch_info",
"=",
"ic",
".",
"get_branch_info",
"(",
")",
"except",
"requests",
".",
"ConnectionError",
"as",
"e",
":",
"logger",
".",
"debug",
"(",
"e",
")",
"logger",
".",
"debug",
"(",
"\"Failed to connect to satellite\"",
")",
"return",
"False",
"except",
"LookupError",
"as",
"e",
":",
"logger",
".",
"debug",
"(",
"e",
")",
"logger",
".",
"debug",
"(",
"\"Failed to parse response from satellite\"",
")",
"return",
"False",
"try",
":",
"remote_leaf",
"=",
"branch_info",
"[",
"'remote_leaf'",
"]",
"return",
"remote_leaf",
"except",
"LookupError",
"as",
"e",
":",
"logger",
".",
"debug",
"(",
"e",
")",
"logger",
".",
"debug",
"(",
"\"Failed to find accurate branch_info\"",
")",
"return",
"False"
] | Verify connectivity to satellite server | [
"Verify",
"connectivity",
"to",
"satellite",
"server"
] | python | train | 29.166667 |
astraw/stdeb | stdeb/transport.py | https://github.com/astraw/stdeb/blob/493ab88e8a60be053b1baef81fb39b45e17ceef5/stdeb/transport.py#L66-L88 | def parse_response(self, resp):
"""
Parse the xmlrpc response.
"""
p, u = self.getparser()
if hasattr(resp,'text'):
# modern requests will do this for us
text = resp.text # this is unicode(py2)/str(py3)
else:
encoding = requests.utils.get_encoding_from_headers(resp.headers)
if encoding is None:
encoding='utf-8' # FIXME: what to do here?
if sys.version_info[0]==2:
text = unicode(resp.content, encoding, errors='replace')
else:
assert sys.version_info[0]==3
text = str(resp.content, encoding, errors='replace')
p.feed(text)
p.close()
return u.close() | [
"def",
"parse_response",
"(",
"self",
",",
"resp",
")",
":",
"p",
",",
"u",
"=",
"self",
".",
"getparser",
"(",
")",
"if",
"hasattr",
"(",
"resp",
",",
"'text'",
")",
":",
"# modern requests will do this for us",
"text",
"=",
"resp",
".",
"text",
"# this is unicode(py2)/str(py3)",
"else",
":",
"encoding",
"=",
"requests",
".",
"utils",
".",
"get_encoding_from_headers",
"(",
"resp",
".",
"headers",
")",
"if",
"encoding",
"is",
"None",
":",
"encoding",
"=",
"'utf-8'",
"# FIXME: what to do here?",
"if",
"sys",
".",
"version_info",
"[",
"0",
"]",
"==",
"2",
":",
"text",
"=",
"unicode",
"(",
"resp",
".",
"content",
",",
"encoding",
",",
"errors",
"=",
"'replace'",
")",
"else",
":",
"assert",
"sys",
".",
"version_info",
"[",
"0",
"]",
"==",
"3",
"text",
"=",
"str",
"(",
"resp",
".",
"content",
",",
"encoding",
",",
"errors",
"=",
"'replace'",
")",
"p",
".",
"feed",
"(",
"text",
")",
"p",
".",
"close",
"(",
")",
"return",
"u",
".",
"close",
"(",
")"
] | Parse the xmlrpc response. | [
"Parse",
"the",
"xmlrpc",
"response",
"."
] | python | train | 32.173913 |
vburenin/xjpath | xjpath/xjpath.py | https://github.com/vburenin/xjpath/blob/98a19fd6e6d0bcdc5ecbd3651ffa8915f06d7d44/xjpath/xjpath.py#L267-L287 | def validate_path(xj_path):
"""Validates XJ path.
:param str xj_path: XJ Path
:raise: XJPathError if validation fails.
"""
if not isinstance(xj_path, str):
raise XJPathError('XJPath must be a string')
for path in split(xj_path, '.'):
if path == '*':
continue
if path.startswith('@'):
if path == '@first' or path == '@last':
continue
try:
int(path[1:])
except ValueError:
raise XJPathError('Array index must be either integer or '
'@first or @last') | [
"def",
"validate_path",
"(",
"xj_path",
")",
":",
"if",
"not",
"isinstance",
"(",
"xj_path",
",",
"str",
")",
":",
"raise",
"XJPathError",
"(",
"'XJPath must be a string'",
")",
"for",
"path",
"in",
"split",
"(",
"xj_path",
",",
"'.'",
")",
":",
"if",
"path",
"==",
"'*'",
":",
"continue",
"if",
"path",
".",
"startswith",
"(",
"'@'",
")",
":",
"if",
"path",
"==",
"'@first'",
"or",
"path",
"==",
"'@last'",
":",
"continue",
"try",
":",
"int",
"(",
"path",
"[",
"1",
":",
"]",
")",
"except",
"ValueError",
":",
"raise",
"XJPathError",
"(",
"'Array index must be either integer or '",
"'@first or @last'",
")"
] | Validates XJ path.
:param str xj_path: XJ Path
:raise: XJPathError if validation fails. | [
"Validates",
"XJ",
"path",
"."
] | python | train | 29 |
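A short sketch of what validates and what raises (path strings are illustrative):

from xjpath.xjpath import validate_path, XJPathError  # import path assumed

validate_path('users.*.name')      # OK: wildcard segment
validate_path('users.@0.name')     # OK: integer array index
validate_path('users.@last.name')  # OK: @first/@last keywords
try:
    validate_path('users.@foo')    # neither an integer nor @first/@last
except XJPathError as exc:
    print(exc)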
GoogleCloudPlatform/appengine-mapreduce | python/src/mapreduce/input_readers.py | https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/input_readers.py#L1476-L1494 | def next(self):
"""Returns the next input from this input reader as (ZipInfo, opener) tuple.
Returns:
The next input from this input reader, in the form of a 2-tuple.
The first element of the tuple is a zipfile.ZipInfo object.
The second element of the tuple is a zero-argument function that, when
called, returns the complete body of the file.
"""
if not self._zip:
self._zip = zipfile.ZipFile(self._reader(self._blob_key))
# Get a list of entries, reversed so we can pop entries off in order
self._entries = self._zip.infolist()[self._start_index:self._end_index]
self._entries.reverse()
if not self._entries:
raise StopIteration()
entry = self._entries.pop()
self._start_index += 1
return (entry, lambda: self._read(entry)) | [
"def",
"next",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"_zip",
":",
"self",
".",
"_zip",
"=",
"zipfile",
".",
"ZipFile",
"(",
"self",
".",
"_reader",
"(",
"self",
".",
"_blob_key",
")",
")",
"# Get a list of entries, reversed so we can pop entries off in order",
"self",
".",
"_entries",
"=",
"self",
".",
"_zip",
".",
"infolist",
"(",
")",
"[",
"self",
".",
"_start_index",
":",
"self",
".",
"_end_index",
"]",
"self",
".",
"_entries",
".",
"reverse",
"(",
")",
"if",
"not",
"self",
".",
"_entries",
":",
"raise",
"StopIteration",
"(",
")",
"entry",
"=",
"self",
".",
"_entries",
".",
"pop",
"(",
")",
"self",
".",
"_start_index",
"+=",
"1",
"return",
"(",
"entry",
",",
"lambda",
":",
"self",
".",
"_read",
"(",
"entry",
")",
")"
] | Returns the next input from this input reader as (ZipInfo, opener) tuple.
Returns:
The next input from this input reader, in the form of a 2-tuple.
The first element of the tuple is a zipfile.ZipInfo object.
The second element of the tuple is a zero-argument function that, when
called, returns the complete body of the file. | [
"Returns",
"the",
"next",
"input",
"from",
"this",
"input",
"reader",
"as",
"(",
"ZipInfo",
"opener",
")",
"tuple",
"."
] | python | train | 41.842105 |
wandb/client | wandb/vendor/prompt_toolkit/document.py | https://github.com/wandb/client/blob/7d08954ed5674fee223cd85ed0d8518fe47266b2/wandb/vendor/prompt_toolkit/document.py#L680-L707 | def find_enclosing_bracket_left(self, left_ch, right_ch, start_pos=None):
"""
Find the left bracket enclosing current position. Return the relative
position to the cursor position.
When `start_pos` is given, don't look past the position.
"""
if self.current_char == left_ch:
return 0
if start_pos is None:
start_pos = 0
else:
start_pos = max(0, start_pos)
stack = 1
# Look backward.
for i in range(self.cursor_position - 1, start_pos - 1, -1):
c = self.text[i]
if c == right_ch:
stack += 1
elif c == left_ch:
stack -= 1
if stack == 0:
return i - self.cursor_position | [
"def",
"find_enclosing_bracket_left",
"(",
"self",
",",
"left_ch",
",",
"right_ch",
",",
"start_pos",
"=",
"None",
")",
":",
"if",
"self",
".",
"current_char",
"==",
"left_ch",
":",
"return",
"0",
"if",
"start_pos",
"is",
"None",
":",
"start_pos",
"=",
"0",
"else",
":",
"start_pos",
"=",
"max",
"(",
"0",
",",
"start_pos",
")",
"stack",
"=",
"1",
"# Look backward.",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"cursor_position",
"-",
"1",
",",
"start_pos",
"-",
"1",
",",
"-",
"1",
")",
":",
"c",
"=",
"self",
".",
"text",
"[",
"i",
"]",
"if",
"c",
"==",
"right_ch",
":",
"stack",
"+=",
"1",
"elif",
"c",
"==",
"left_ch",
":",
"stack",
"-=",
"1",
"if",
"stack",
"==",
"0",
":",
"return",
"i",
"-",
"self",
".",
"cursor_position"
] | Find the left bracket enclosing current position. Return the relative
position to the cursor position.
When `start_pos` is given, don't look past the position. | [
"Find",
"the",
"left",
"bracket",
"enclosing",
"current",
"position",
".",
"Return",
"the",
"relative",
"position",
"to",
"the",
"cursor",
"position",
"."
] | python | train | 27.321429 |
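A hedged sketch against prompt_toolkit's Document API (this record is wandb's vendored copy, but the call shape is assumed identical); offsets hand-checked against the logic above:

from prompt_toolkit.document import Document

doc = Document('(abc)', cursor_position=3)        # cursor on 'c'
print(doc.find_enclosing_bracket_left('(', ')'))  # -3

doc = Document('(a(b)c)', cursor_position=5)      # cursor on 'c'
print(doc.find_enclosing_bracket_left('(', ')'))  # -5: the matched (b) pair is skipped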
DataBiosphere/dsub | dsub/providers/google_base.py | https://github.com/DataBiosphere/dsub/blob/443ce31daa6023dc2fd65ef2051796e19d18d5a7/dsub/providers/google_base.py#L439-L466 | def cancel(batch_fn, cancel_fn, ops):
"""Cancel operations.
Args:
batch_fn: API-specific batch function.
cancel_fn: API-specific cancel function.
ops: A list of operations to cancel.
Returns:
A list of operations canceled and a list of error messages.
"""
# Canceling many operations one-by-one can be slow.
# The Pipelines API doesn't directly support a list of operations to cancel,
# but the requests can be performed in batch.
canceled_ops = []
error_messages = []
max_batch = 256
total_ops = len(ops)
for first_op in range(0, total_ops, max_batch):
batch_canceled, batch_messages = _cancel_batch(
batch_fn, cancel_fn, ops[first_op:first_op + max_batch])
canceled_ops.extend(batch_canceled)
error_messages.extend(batch_messages)
return canceled_ops, error_messages | [
"def",
"cancel",
"(",
"batch_fn",
",",
"cancel_fn",
",",
"ops",
")",
":",
"# Canceling many operations one-by-one can be slow.",
"# The Pipelines API doesn't directly support a list of operations to cancel,",
"# but the requests can be performed in batch.",
"canceled_ops",
"=",
"[",
"]",
"error_messages",
"=",
"[",
"]",
"max_batch",
"=",
"256",
"total_ops",
"=",
"len",
"(",
"ops",
")",
"for",
"first_op",
"in",
"range",
"(",
"0",
",",
"total_ops",
",",
"max_batch",
")",
":",
"batch_canceled",
",",
"batch_messages",
"=",
"_cancel_batch",
"(",
"batch_fn",
",",
"cancel_fn",
",",
"ops",
"[",
"first_op",
":",
"first_op",
"+",
"max_batch",
"]",
")",
"canceled_ops",
".",
"extend",
"(",
"batch_canceled",
")",
"error_messages",
".",
"extend",
"(",
"batch_messages",
")",
"return",
"canceled_ops",
",",
"error_messages"
] | Cancel operations.
Args:
batch_fn: API-specific batch function.
cancel_fn: API-specific cancel function.
ops: A list of operations to cancel.
Returns:
A list of operations canceled and a list of error messages. | [
"Cancel",
"operations",
"."
] | python | valid | 28.892857 |
chrisspen/burlap | burlap/vm.py | https://github.com/chrisspen/burlap/blob/a92b0a8e5206850bb777c74af8421ea8b33779bd/burlap/vm.py#L548-L594 | def get_or_create(name=None, group=None, config=None, extra=0, verbose=0, backend_opts=None):
"""
Creates a virtual machine instance.
"""
require('vm_type', 'vm_group')
backend_opts = backend_opts or {}
verbose = int(verbose)
extra = int(extra)
if config:
config_fn = common.find_template(config)
config = yaml.load(open(config_fn))
env.update(config)
env.vm_type = (env.vm_type or '').lower()
assert env.vm_type, 'No VM type specified.'
group = group or env.vm_group
assert group, 'No VM group specified.'
ret = exists(name=name, group=group)
if not extra and ret:
if verbose:
print('VM %s:%s exists.' % (name, group))
return ret
today = datetime.date.today()
release = int('%i%02i%02i' % (today.year, today.month, today.day))
if not name:
existing_instances = list_instances(
group=group,
release=release,
verbose=verbose)
name = env.vm_name_template.format(index=len(existing_instances)+1)
if env.vm_type == EC2:
return get_or_create_ec2_instance(
name=name,
group=group,
release=release,
verbose=verbose,
backend_opts=backend_opts)
else:
raise NotImplementedError | [
"def",
"get_or_create",
"(",
"name",
"=",
"None",
",",
"group",
"=",
"None",
",",
"config",
"=",
"None",
",",
"extra",
"=",
"0",
",",
"verbose",
"=",
"0",
",",
"backend_opts",
"=",
"None",
")",
":",
"require",
"(",
"'vm_type'",
",",
"'vm_group'",
")",
"backend_opts",
"=",
"backend_opts",
"or",
"{",
"}",
"verbose",
"=",
"int",
"(",
"verbose",
")",
"extra",
"=",
"int",
"(",
"extra",
")",
"if",
"config",
":",
"config_fn",
"=",
"common",
".",
"find_template",
"(",
"config",
")",
"config",
"=",
"yaml",
".",
"load",
"(",
"open",
"(",
"config_fn",
")",
")",
"env",
".",
"update",
"(",
"config",
")",
"env",
".",
"vm_type",
"=",
"(",
"env",
".",
"vm_type",
"or",
"''",
")",
".",
"lower",
"(",
")",
"assert",
"env",
".",
"vm_type",
",",
"'No VM type specified.'",
"group",
"=",
"group",
"or",
"env",
".",
"vm_group",
"assert",
"group",
",",
"'No VM group specified.'",
"ret",
"=",
"exists",
"(",
"name",
"=",
"name",
",",
"group",
"=",
"group",
")",
"if",
"not",
"extra",
"and",
"ret",
":",
"if",
"verbose",
":",
"print",
"(",
"'VM %s:%s exists.'",
"%",
"(",
"name",
",",
"group",
")",
")",
"return",
"ret",
"today",
"=",
"datetime",
".",
"date",
".",
"today",
"(",
")",
"release",
"=",
"int",
"(",
"'%i%02i%02i'",
"%",
"(",
"today",
".",
"year",
",",
"today",
".",
"month",
",",
"today",
".",
"day",
")",
")",
"if",
"not",
"name",
":",
"existing_instances",
"=",
"list_instances",
"(",
"group",
"=",
"group",
",",
"release",
"=",
"release",
",",
"verbose",
"=",
"verbose",
")",
"name",
"=",
"env",
".",
"vm_name_template",
".",
"format",
"(",
"index",
"=",
"len",
"(",
"existing_instances",
")",
"+",
"1",
")",
"if",
"env",
".",
"vm_type",
"==",
"EC2",
":",
"return",
"get_or_create_ec2_instance",
"(",
"name",
"=",
"name",
",",
"group",
"=",
"group",
",",
"release",
"=",
"release",
",",
"verbose",
"=",
"verbose",
",",
"backend_opts",
"=",
"backend_opts",
")",
"else",
":",
"raise",
"NotImplementedError"
] | Creates a virtual machine instance. | [
"Creates",
"a",
"virtual",
"machine",
"instance",
"."
] | python | valid | 27.361702 |
CalebBell/thermo | thermo/chemical.py | https://github.com/CalebBell/thermo/blob/3857ed023a3e64fd3039a32d53576c24990ef1c3/thermo/chemical.py#L2576-L2600 | def SG(self):
r'''Specific gravity of the chemical, [dimensionless].
For gas-phase conditions, this is calculated at 15.6 °C (60 °F) and 1
atm for the chemical and the reference fluid, air.
For liquid and solid phase conditions, this is calculated based on a
reference fluid of water at 4°C at 1 atm, but with the liquid or
solid chemical's density at the currently specified conditions.
Examples
--------
>>> Chemical('MTBE').SG
0.7428160596603596
'''
phase = self.phase
if phase == 'l':
return self.SGl
elif phase == 's':
return self.SGs
elif phase == 'g':
return self.SGg
rho = self.rho
if rho is not None:
return SG(rho)
return None | [
"def",
"SG",
"(",
"self",
")",
":",
"phase",
"=",
"self",
".",
"phase",
"if",
"phase",
"==",
"'l'",
":",
"return",
"self",
".",
"SGl",
"elif",
"phase",
"==",
"'s'",
":",
"return",
"self",
".",
"SGs",
"elif",
"phase",
"==",
"'g'",
":",
"return",
"self",
".",
"SGg",
"rho",
"=",
"self",
".",
"rho",
"if",
"rho",
"is",
"not",
"None",
":",
"return",
"SG",
"(",
"rho",
")",
"return",
"None"
] | r'''Specific gravity of the chemical, [dimensionless].
For gas-phase conditions, this is calculated at 15.6 °C (60 °F) and 1
atm for the chemical and the reference fluid, air.
For liquid and solid phase conditions, this is calculated based on a
reference fluid of water at 4°C at 1 atm, but with the liquid or
solid chemical's density at the currently specified conditions.
Examples
--------
>>> Chemical('MTBE').SG
0.7428160596603596 | [
"r",
"Specific",
"gravity",
"of",
"the",
"chemical",
"[",
"dimensionless",
"]",
".",
"For",
"gas",
"-",
"phase",
"conditions",
"this",
"is",
"calculated",
"at",
"15",
".",
"6",
"°C",
"(",
"60",
"°F",
")",
"and",
"1",
"atm",
"for",
"the",
"chemical",
"and",
"the",
"reference",
"fluid",
"air",
".",
"For",
"liquid",
"and",
"solid",
"phase",
"conditions",
"this",
"is",
"calculated",
"based",
"on",
"a",
"reference",
"fluid",
"of",
"water",
"at",
"4°C",
"at",
"1",
"atm",
"but",
"the",
"with",
"the",
"liquid",
"or",
"solid",
"chemical",
"s",
"density",
"at",
"the",
"currently",
"specified",
"conditions",
"."
] | python | valid | 32.92 |
Kane610/deconz | pydeconz/light.py | https://github.com/Kane610/deconz/blob/8a9498dbbc8c168d4a081173ad6c3b1e17fffdf6/pydeconz/light.py#L74-L88 | def xy(self):
"""CIE xy color space coordinates as array [x, y] of real values (0..1)."""
if self._xy != (None, None):
self._x, self._y = self._xy
if self._x is not None and self._y is not None:
x = self._x
if self._x > 1:
x = self._x / 65555
y = self._y
if self._y > 1:
y = self._y / 65555
return (x, y)
return None | [
"def",
"xy",
"(",
"self",
")",
":",
"if",
"self",
".",
"_xy",
"!=",
"(",
"None",
",",
"None",
")",
":",
"self",
".",
"_x",
",",
"self",
".",
"_y",
"=",
"self",
".",
"_xy",
"if",
"self",
".",
"_x",
"is",
"not",
"None",
"and",
"self",
".",
"_y",
"is",
"not",
"None",
":",
"x",
"=",
"self",
".",
"_x",
"if",
"self",
".",
"_x",
">",
"1",
":",
"x",
"=",
"self",
".",
"_x",
"/",
"65555",
"y",
"=",
"self",
".",
"_y",
"if",
"self",
".",
"_y",
">",
"1",
":",
"y",
"=",
"self",
".",
"_y",
"/",
"65555",
"return",
"(",
"x",
",",
"y",
")",
"return",
"None"
] | CIE xy color space coordinates as array [x, y] of real values (0..1). | [
"CIE",
"xy",
"color",
"space",
"coordinates",
"as",
"array",
"[",
"x",
"y",
"]",
"of",
"real",
"values",
"(",
"0",
"..",
"1",
")",
"."
] | python | train | 29.333333 |
Azure/azure-uamqp-python | uamqp/client.py | https://github.com/Azure/azure-uamqp-python/blob/b67e4fcaf2e8a337636947523570239c10a58ae2/uamqp/client.py#L894-L914 | def _client_run(self):
"""MessageReceiver Link is now open - start receiving messages.
Will return True if operation successful and client can remain open for
further work.
:rtype: bool
"""
self._connection.work()
now = self._counter.get_current_ms()
if self._last_activity_timestamp and not self._was_message_received:
# If no messages are coming through, back off a little to keep CPU use low.
time.sleep(0.05)
if self._timeout > 0:
timespan = now - self._last_activity_timestamp
if timespan >= self._timeout:
_logger.info("Timeout reached, closing receiver.")
self._shutdown = True
else:
self._last_activity_timestamp = now
self._was_message_received = False
return True | [
"def",
"_client_run",
"(",
"self",
")",
":",
"self",
".",
"_connection",
".",
"work",
"(",
")",
"now",
"=",
"self",
".",
"_counter",
".",
"get_current_ms",
"(",
")",
"if",
"self",
".",
"_last_activity_timestamp",
"and",
"not",
"self",
".",
"_was_message_received",
":",
"# If no messages are coming through, back off a little to keep CPU use low.",
"time",
".",
"sleep",
"(",
"0.05",
")",
"if",
"self",
".",
"_timeout",
">",
"0",
":",
"timespan",
"=",
"now",
"-",
"self",
".",
"_last_activity_timestamp",
"if",
"timespan",
">=",
"self",
".",
"_timeout",
":",
"_logger",
".",
"info",
"(",
"\"Timeout reached, closing receiver.\"",
")",
"self",
".",
"_shutdown",
"=",
"True",
"else",
":",
"self",
".",
"_last_activity_timestamp",
"=",
"now",
"self",
".",
"_was_message_received",
"=",
"False",
"return",
"True"
] | MessageReceiver Link is now open - start receiving messages.
Will return True if operation successful and client can remain open for
further work.
:rtype: bool | [
"MessageReceiver",
"Link",
"is",
"now",
"open",
"-",
"start",
"receiving",
"messages",
".",
"Will",
"return",
"True",
"if",
"operation",
"successful",
"and",
"client",
"can",
"remain",
"open",
"for",
"further",
"work",
"."
] | python | train | 41.047619 |
spyder-ide/spyder-kernels | spyder_kernels/customize/spydercustomize.py | https://github.com/spyder-ide/spyder-kernels/blob/2c5b36cdb797b8aba77bc406ca96f5e079c4aaca/spyder_kernels/customize/spydercustomize.py#L424-L433 | def user_return(self, frame, return_value):
"""This function is called when a return trap is set here."""
# This is useful when debugging in an active interpreter (otherwise,
# the debugger will stop before reaching the target file)
if self._wait_for_mainpyfile:
if (self.mainpyfile != self.canonic(frame.f_code.co_filename)
or frame.f_lineno<= 0):
return
self._wait_for_mainpyfile = 0
self._old_Pdb_user_return(frame, return_value) | [
"def",
"user_return",
"(",
"self",
",",
"frame",
",",
"return_value",
")",
":",
"# This is useful when debugging in an active interpreter (otherwise,",
"# the debugger will stop before reaching the target file)",
"if",
"self",
".",
"_wait_for_mainpyfile",
":",
"if",
"(",
"self",
".",
"mainpyfile",
"!=",
"self",
".",
"canonic",
"(",
"frame",
".",
"f_code",
".",
"co_filename",
")",
"or",
"frame",
".",
"f_lineno",
"<=",
"0",
")",
":",
"return",
"self",
".",
"_wait_for_mainpyfile",
"=",
"0",
"self",
".",
"_old_Pdb_user_return",
"(",
"frame",
",",
"return_value",
")"
] | This function is called when a return trap is set here. | [
"This",
"function",
"is",
"called",
"when",
"a",
"return",
"trap",
"is",
"set",
"here",
"."
] | python | train | 48.3 |
marvin-ai/marvin-python-toolbox | marvin_python_toolbox/common/data.py | https://github.com/marvin-ai/marvin-python-toolbox/blob/7c95cb2f9698b989150ab94c1285f3a9eaaba423/marvin_python_toolbox/common/data.py#L47-L65 | def get_data_path(cls):
"""
Read data path from the following sources in order of priority:
1. Environment variable
If not found raises an exception
:return: str - datapath
"""
marvin_path = os.environ.get(cls._key)
if not marvin_path:
raise InvalidConfigException('Data path not set!')
is_path_created = check_path(marvin_path, create=True)
if not is_path_created:
raise InvalidConfigException('Data path does not exist!')
return marvin_path | [
"def",
"get_data_path",
"(",
"cls",
")",
":",
"marvin_path",
"=",
"os",
".",
"environ",
".",
"get",
"(",
"cls",
".",
"_key",
")",
"if",
"not",
"marvin_path",
":",
"raise",
"InvalidConfigException",
"(",
"'Data path not set!'",
")",
"is_path_created",
"=",
"check_path",
"(",
"marvin_path",
",",
"create",
"=",
"True",
")",
"if",
"not",
"is_path_created",
":",
"raise",
"InvalidConfigException",
"(",
"'Data path does not exist!'",
")",
"return",
"marvin_path"
] | Read data path from the following sources in order of priority:
1. Environment variable
If not found raises an exception
:return: str - datapath | [
"Read",
"data",
"path",
"from",
"the",
"following",
"sources",
"in",
"order",
"of",
"priority",
":"
] | python | train | 28.473684 |
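A hedged sketch — both the class name and the environment-variable key (cls._key) are assumed from the marvin toolbox, not confirmed by this record:

import os
from marvin_python_toolbox.common.data import MarvinData  # class name assumed

os.environ['MARVIN_DATA_PATH'] = '/tmp/marvin-data'       # key name assumed
print(MarvinData.get_data_path())                         # /tmp/marvin-data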
meejah/txtorcon | txtorcon/socks.py | https://github.com/meejah/txtorcon/blob/14053b95adf0b4bd9dd9c317bece912a26578a93/txtorcon/socks.py#L184-L218 | def _parse_request_reply(self):
"waiting for a reply to our request"
# we need at least 6 bytes of data: 4 for the "header", such
# as it is, and 2 more if it's DOMAINNAME (for the size) or 4
# or 16 more if it's an IPv4/6 address reply. plus there's 2
# bytes on the end for the bound port.
if len(self._data) < 8:
return
msg = self._data[:4]
# not changing self._data yet, in case we've not got
# enough bytes so far.
(version, reply, _, typ) = struct.unpack('BBBB', msg)
if version != 5:
self.reply_error(SocksError(
"Expected version 5, got {}".format(version)))
return
if reply != self.SUCCEEDED:
self.reply_error(_create_socks_error(reply))
return
reply_dispatcher = {
self.REPLY_IPV4: self._parse_ipv4_reply,
self.REPLY_HOST: self._parse_domain_name_reply,
self.REPLY_IPV6: self._parse_ipv6_reply,
}
try:
method = reply_dispatcher[typ]
except KeyError:
self.reply_error(SocksError(
"Unexpected response type {}".format(typ)))
return
method() | [
"def",
"_parse_request_reply",
"(",
"self",
")",
":",
"# we need at least 6 bytes of data: 4 for the \"header\", such",
"# as it is, and 2 more if it's DOMAINNAME (for the size) or 4",
"# or 16 more if it's an IPv4/6 address reply. plus there's 2",
"# bytes on the end for the bound port.",
"if",
"len",
"(",
"self",
".",
"_data",
")",
"<",
"8",
":",
"return",
"msg",
"=",
"self",
".",
"_data",
"[",
":",
"4",
"]",
"# not changing self._data yet, in case we've not got",
"# enough bytes so far.",
"(",
"version",
",",
"reply",
",",
"_",
",",
"typ",
")",
"=",
"struct",
".",
"unpack",
"(",
"'BBBB'",
",",
"msg",
")",
"if",
"version",
"!=",
"5",
":",
"self",
".",
"reply_error",
"(",
"SocksError",
"(",
"\"Expected version 5, got {}\"",
".",
"format",
"(",
"version",
")",
")",
")",
"return",
"if",
"reply",
"!=",
"self",
".",
"SUCCEEDED",
":",
"self",
".",
"reply_error",
"(",
"_create_socks_error",
"(",
"reply",
")",
")",
"return",
"reply_dispatcher",
"=",
"{",
"self",
".",
"REPLY_IPV4",
":",
"self",
".",
"_parse_ipv4_reply",
",",
"self",
".",
"REPLY_HOST",
":",
"self",
".",
"_parse_domain_name_reply",
",",
"self",
".",
"REPLY_IPV6",
":",
"self",
".",
"_parse_ipv6_reply",
",",
"}",
"try",
":",
"method",
"=",
"reply_dispatcher",
"[",
"typ",
"]",
"except",
"KeyError",
":",
"self",
".",
"reply_error",
"(",
"SocksError",
"(",
"\"Unexpected response type {}\"",
".",
"format",
"(",
"typ",
")",
")",
")",
"return",
"method",
"(",
")"
] | waiting for a reply to our request | [
"waiting",
"for",
"a",
"reply",
"to",
"our",
"request"
] | python | train | 34.8 |
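The four-byte header this record unpacks follows RFC 1928 (VER, REP, RSV, ATYP). A self-contained sketch of the same parsing step using only the standard library; the reply bytes are fabricated for illustration:

```python
import struct

# A successful SOCKS5 reply: version 5, REP=0 (succeeded), reserved byte,
# ATYP=1 (IPv4), then 4 address bytes and a 2-byte bound port.
data = b"\x05\x00\x00\x01" + b"\x7f\x00\x00\x01" + b"\x00\x50"

version, reply, _, atyp = struct.unpack("BBBB", data[:4])
assert version == 5  # anything else raises SocksError in the record
assert reply == 0    # 0 == SUCCEEDED per RFC 1928

if atyp == 1:  # REPLY_IPV4: dispatch target would be _parse_ipv4_reply
    host = ".".join(str(b) for b in data[4:8])
    (port,) = struct.unpack("!H", data[8:10])
    print(host, port)  # -> 127.0.0.1 80
```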
aiogram/aiogram | aiogram/utils/markdown.py | https://github.com/aiogram/aiogram/blob/2af930149ce2482547721e2c8755c10307295e48/aiogram/utils/markdown.py#L67-L75 | def bold(*content, sep=' '):
"""
Make bold text (Markdown)
:param content:
:param sep:
:return:
"""
return _md(_join(*content, sep=sep), symbols=MD_SYMBOLS[0]) | [
"def",
"bold",
"(",
"*",
"content",
",",
"sep",
"=",
"' '",
")",
":",
"return",
"_md",
"(",
"_join",
"(",
"*",
"content",
",",
"sep",
"=",
"sep",
")",
",",
"symbols",
"=",
"MD_SYMBOLS",
"[",
"0",
"]",
")"
] | Make bold text (Markdown)
:param content:
:param sep:
:return: | [
"Make",
"bold",
"text",
"(",
"Markdown",
")"
] | python | train | 20 |
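A usage sketch for the helper above; the import path matches the record's module. With `MD_SYMBOLS[0]` being the Markdown bold markers, arguments are joined with `sep` and wrapped accordingly:

```python
from aiogram.utils.markdown import bold

print(bold("hello", "world"))         # -> *hello world*
print(bold("a", "b", "c", sep=", "))  # -> *a, b, c*
```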
ejeschke/ginga | ginga/cmap.py | https://github.com/ejeschke/ginga/blob/a78c893ec6f37a837de851947e9bb4625c597915/ginga/cmap.py#L13279-L13285 | def matplotlib_to_ginga_cmap(cm, name=None):
"""Convert matplotlib colormap to Ginga's."""
if name is None:
name = cm.name
arr = cm(np.arange(0, min_cmap_len) / np.float(min_cmap_len - 1))
clst = arr[:, 0:3]
return ColorMap(name, clst) | [
"def",
"matplotlib_to_ginga_cmap",
"(",
"cm",
",",
"name",
"=",
"None",
")",
":",
"if",
"name",
"is",
"None",
":",
"name",
"=",
"cm",
".",
"name",
"arr",
"=",
"cm",
"(",
"np",
".",
"arange",
"(",
"0",
",",
"min_cmap_len",
")",
"/",
"np",
".",
"float",
"(",
"min_cmap_len",
"-",
"1",
")",
")",
"clst",
"=",
"arr",
"[",
":",
",",
"0",
":",
"3",
"]",
"return",
"ColorMap",
"(",
"name",
",",
"clst",
")"
] | Convert matplotlib colormap to Ginga's. | [
"Convert",
"matplotlib",
"colormap",
"to",
"Ginga",
"s",
"."
] | python | train | 36.714286 |
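The conversion samples a matplotlib colormap at `min_cmap_len` evenly spaced points and keeps only the RGB columns, dropping alpha. A standalone sketch of that sampling step; 256 is an assumed value for `min_cmap_len`, which the record imports from its own module:

```python
import numpy as np
import matplotlib.pyplot as plt

min_cmap_len = 256  # assumed; ginga defines the real constant

cmap = plt.get_cmap("viridis")
# Sample the colormap on [0, 1] and drop the alpha channel, as the record does.
# (The record's np.float is the numpy alias removed in NumPy 1.24; the builtin
# float used here behaves the same.)
arr = cmap(np.arange(0, min_cmap_len) / float(min_cmap_len - 1))
clst = arr[:, 0:3]
print(clst.shape)  # -> (256, 3): one RGB triple per colormap entry
```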
slok/prometheus-python | examples/memory_cpu_usage_example.py | https://github.com/slok/prometheus-python/blob/51c6de3cdcd4e36eae6e1643b136f486b57a18cd/examples/memory_cpu_usage_example.py#L20-L50 | def gather_data(registry):
"""Gathers the metrics"""
# Get the host name of the machine
host = socket.gethostname()
# Create our collectors
ram_metric = Gauge("memory_usage_bytes", "Memory usage in bytes.",
{'host': host})
cpu_metric = Gauge("cpu_usage_percent", "CPU usage percent.",
{'host': host})
# register the metric collectors
registry.register(ram_metric)
registry.register(cpu_metric)
# Start gathering metrics every second
while True:
time.sleep(1)
# Add ram metrics
ram = psutil.virtual_memory()
swap = psutil.swap_memory()
ram_metric.set({'type': "virtual", }, ram.used)
ram_metric.set({'type': "virtual", 'status': "cached"}, ram.cached)
ram_metric.set({'type': "swap"}, swap.used)
# Add cpu metrics
for c, p in enumerate(psutil.cpu_percent(interval=1, percpu=True)):
cpu_metric.set({'core': c}, p) | [
"def",
"gather_data",
"(",
"registry",
")",
":",
"# Get the host name of the machine",
"host",
"=",
"socket",
".",
"gethostname",
"(",
")",
"# Create our collectors",
"ram_metric",
"=",
"Gauge",
"(",
"\"memory_usage_bytes\"",
",",
"\"Memory usage in bytes.\"",
",",
"{",
"'host'",
":",
"host",
"}",
")",
"cpu_metric",
"=",
"Gauge",
"(",
"\"cpu_usage_percent\"",
",",
"\"CPU usage percent.\"",
",",
"{",
"'host'",
":",
"host",
"}",
")",
"# register the metric collectors",
"registry",
".",
"register",
"(",
"ram_metric",
")",
"registry",
".",
"register",
"(",
"cpu_metric",
")",
"# Start gathering metrics every second",
"while",
"True",
":",
"time",
".",
"sleep",
"(",
"1",
")",
"# Add ram metrics",
"ram",
"=",
"psutil",
".",
"virtual_memory",
"(",
")",
"swap",
"=",
"psutil",
".",
"swap_memory",
"(",
")",
"ram_metric",
".",
"set",
"(",
"{",
"'type'",
":",
"\"virtual\"",
",",
"}",
",",
"ram",
".",
"used",
")",
"ram_metric",
".",
"set",
"(",
"{",
"'type'",
":",
"\"virtual\"",
",",
"'status'",
":",
"\"cached\"",
"}",
",",
"ram",
".",
"cached",
")",
"ram_metric",
".",
"set",
"(",
"{",
"'type'",
":",
"\"swap\"",
"}",
",",
"swap",
".",
"used",
")",
"# Add cpu metrics",
"for",
"c",
",",
"p",
"in",
"enumerate",
"(",
"psutil",
".",
"cpu_percent",
"(",
"interval",
"=",
"1",
",",
"percpu",
"=",
"True",
")",
")",
":",
"cpu_metric",
".",
"set",
"(",
"{",
"'core'",
":",
"c",
"}",
",",
"p",
")"
] | Gathers the metrics | [
"Gathers",
"the",
"metrics"
] | python | train | 31.032258 |
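The `Gauge` and registry types come from this repository's own client library, but the measurement half is plain `psutil`. A reduced sketch of one collection pass over the same values (omitting the Linux-only `ram.cached` field):

```python
import socket

import psutil

host = socket.gethostname()

# One pass over the values the record feeds into its gauges.
ram = psutil.virtual_memory()
swap = psutil.swap_memory()
samples = [
    ("memory_usage_bytes", {"host": host, "type": "virtual"}, ram.used),
    ("memory_usage_bytes", {"host": host, "type": "swap"}, swap.used),
]
# Per-core CPU percentages; interval=1 blocks for one second to measure.
for core, pct in enumerate(psutil.cpu_percent(interval=1, percpu=True)):
    samples.append(("cpu_usage_percent", {"host": host, "core": core}, pct))

for name, labels, value in samples:
    print(name, labels, value)
```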
cs50/check50 | check50/flask.py | https://github.com/cs50/check50/blob/42c1f0c36baa6a24f69742d74551a9ea7a5ceb33/check50/flask.py#L90-L114 | def status(self, code=None):
"""Check status code in response returned by application.
If ``code`` is not None, assert that ``code`` is returned by application,
else simply return the status code.
:param code: ``code`` to assert that application returns
:type code: int
Example usage::
check50.flask.app("application.py").status(200)
status = check50.flask.app("application.py").get("/").status()
if status != 200:
raise check50.Failure(f"expected status code 200, but got {status}")
"""
if code is None:
return self.response.status_code
log(_("checking that status code {} is returned...").format(code))
if code != self.response.status_code:
raise Failure(_("expected status code {}, but got {}").format(
code, self.response.status_code))
return self | [
"def",
"status",
"(",
"self",
",",
"code",
"=",
"None",
")",
":",
"if",
"code",
"is",
"None",
":",
"return",
"self",
".",
"response",
".",
"status_code",
"log",
"(",
"_",
"(",
"\"checking that status code {} is returned...\"",
")",
".",
"format",
"(",
"code",
")",
")",
"if",
"code",
"!=",
"self",
".",
"response",
".",
"status_code",
":",
"raise",
"Failure",
"(",
"_",
"(",
"\"expected status code {}, but got {}\"",
")",
".",
"format",
"(",
"code",
",",
"self",
".",
"response",
".",
"status_code",
")",
")",
"return",
"self"
] | Check status code in response returned by application.
If ``code`` is not None, assert that ``code`` is returned by application,
else simply return the status code.
:param code: ``code`` to assert that application returns
:type code: int
Example usage::
check50.flask.app("application.py").status(200)
status = check50.flask.app("application.py").get("/").status()
if status != 200:
raise check50.Failure(f"expected status code 200, but got {status}") | [
"Check",
"status",
"code",
"in",
"response",
"returned",
"by",
"application",
".",
"If",
"code",
"is",
"not",
"None",
"assert",
"that",
"code",
"is",
"returned",
"by",
"application",
"else",
"simply",
"return",
"the",
"status",
"code",
"."
] | python | train | 36.52 |
brocade/pynos | pynos/versions/ver_6/ver_6_0_1/yang/brocade_interface_ext.py | https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_interface_ext.py#L493-L510 | def get_ip_interface_output_interface_ip_address_ipv4(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_ip_interface = ET.Element("get_ip_interface")
config = get_ip_interface
output = ET.SubElement(get_ip_interface, "output")
interface = ET.SubElement(output, "interface")
interface_type_key = ET.SubElement(interface, "interface-type")
interface_type_key.text = kwargs.pop('interface_type')
interface_name_key = ET.SubElement(interface, "interface-name")
interface_name_key.text = kwargs.pop('interface_name')
ip_address = ET.SubElement(interface, "ip-address")
ipv4 = ET.SubElement(ip_address, "ipv4")
ipv4.text = kwargs.pop('ipv4')
callback = kwargs.pop('callback', self._callback)
return callback(config) | [
"def",
"get_ip_interface_output_interface_ip_address_ipv4",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"config",
"=",
"ET",
".",
"Element",
"(",
"\"config\"",
")",
"get_ip_interface",
"=",
"ET",
".",
"Element",
"(",
"\"get_ip_interface\"",
")",
"config",
"=",
"get_ip_interface",
"output",
"=",
"ET",
".",
"SubElement",
"(",
"get_ip_interface",
",",
"\"output\"",
")",
"interface",
"=",
"ET",
".",
"SubElement",
"(",
"output",
",",
"\"interface\"",
")",
"interface_type_key",
"=",
"ET",
".",
"SubElement",
"(",
"interface",
",",
"\"interface-type\"",
")",
"interface_type_key",
".",
"text",
"=",
"kwargs",
".",
"pop",
"(",
"'interface_type'",
")",
"interface_name_key",
"=",
"ET",
".",
"SubElement",
"(",
"interface",
",",
"\"interface-name\"",
")",
"interface_name_key",
".",
"text",
"=",
"kwargs",
".",
"pop",
"(",
"'interface_name'",
")",
"ip_address",
"=",
"ET",
".",
"SubElement",
"(",
"interface",
",",
"\"ip-address\"",
")",
"ipv4",
"=",
"ET",
".",
"SubElement",
"(",
"ip_address",
",",
"\"ipv4\"",
")",
"ipv4",
".",
"text",
"=",
"kwargs",
".",
"pop",
"(",
"'ipv4'",
")",
"callback",
"=",
"kwargs",
".",
"pop",
"(",
"'callback'",
",",
"self",
".",
"_callback",
")",
"return",
"callback",
"(",
"config",
")"
] | Auto Generated Code | [
"Auto",
"Generated",
"Code"
] | python | train | 47.166667 |
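The auto-generated method assembles a NETCONF-style request with `xml.etree.ElementTree` and hands it to a callback. A standalone sketch of the same element construction that renders the document instead of sending it; element and attribute names are copied from the record, while the interface and address values are made up:

```python
import xml.etree.ElementTree as ET

config = ET.Element("get_ip_interface")
output = ET.SubElement(config, "output")
interface = ET.SubElement(output, "interface")
ET.SubElement(interface, "interface-type").text = "tengigabitethernet"  # illustrative
ET.SubElement(interface, "interface-name").text = "1/0/1"               # illustrative
ip_address = ET.SubElement(interface, "ip-address")
ET.SubElement(ip_address, "ipv4").text = "10.0.0.1/24"                  # illustrative

# The record passes `config` to self._callback; here we just render it.
print(ET.tostring(config, encoding="unicode"))
```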
kislyuk/aegea | aegea/instance_ctl.py | https://github.com/kislyuk/aegea/blob/94957e9dba036eae3052e2662c208b259c08399a/aegea/instance_ctl.py#L40-L43 | def rename(args):
"""Supply two names: Existing instance name or ID, and new name to assign to the instance."""
old_name, new_name = args.names
add_tags(resources.ec2.Instance(resolve_instance_id(old_name)), Name=new_name, dry_run=args.dry_run) | [
"def",
"rename",
"(",
"args",
")",
":",
"old_name",
",",
"new_name",
"=",
"args",
".",
"names",
"add_tags",
"(",
"resources",
".",
"ec2",
".",
"Instance",
"(",
"resolve_instance_id",
"(",
"old_name",
")",
")",
",",
"Name",
"=",
"new_name",
",",
"dry_run",
"=",
"args",
".",
"dry_run",
")"
] | Supply two names: Existing instance name or ID, and new name to assign to the instance. | [
"Supply",
"two",
"names",
":",
"Existing",
"instance",
"name",
"or",
"ID",
"and",
"new",
"name",
"to",
"assign",
"to",
"the",
"instance",
"."
] | python | train | 63.25 |
nilp0inter/cpe | cpe/cpelang2_3.py | https://github.com/nilp0inter/cpe/blob/670d947472a7652af5149324977b50f9a7af9bcf/cpe/cpelang2_3.py#L81-L114 | def _check_fact_ref_eval(cls, cpel_dom):
"""
Returns the result (True, False, Error) of performing the specified
        check, unless the check isn't supported, in which case it returns
False. Error is a catch-all for all results other than True and
False.
:param string cpel_dom: XML infoset for the check_fact_ref element.
:returns: result of performing the specified check
:rtype: boolean or error
"""
CHECK_SYSTEM = "check-system"
CHECK_LOCATION = "check-location"
CHECK_ID = "check-id"
checksystemID = cpel_dom.getAttribute(CHECK_SYSTEM)
if (checksystemID == "http://oval.mitre.org/XMLSchema/ovaldefinitions-5"):
# Perform an OVAL check.
# First attribute is the URI of an OVAL definitions file.
# Second attribute is an OVAL definition ID.
return CPELanguage2_3._ovalcheck(cpel_dom.getAttribute(CHECK_LOCATION),
cpel_dom.getAttribute(CHECK_ID))
if (checksystemID == "http://scap.nist.gov/schema/ocil/2"):
# Perform an OCIL check.
# First attribute is the URI of an OCIL questionnaire file.
# Second attribute is OCIL questionnaire ID.
return CPELanguage2_3._ocilcheck(cpel_dom.getAttribute(CHECK_LOCATION),
cpel_dom.getAttribute(CHECK_ID))
# Can add additional check systems here, with each returning a
# True, False, or Error value
return False | [
"def",
"_check_fact_ref_eval",
"(",
"cls",
",",
"cpel_dom",
")",
":",
"CHECK_SYSTEM",
"=",
"\"check-system\"",
"CHECK_LOCATION",
"=",
"\"check-location\"",
"CHECK_ID",
"=",
"\"check-id\"",
"checksystemID",
"=",
"cpel_dom",
".",
"getAttribute",
"(",
"CHECK_SYSTEM",
")",
"if",
"(",
"checksystemID",
"==",
"\"http://oval.mitre.org/XMLSchema/ovaldefinitions-5\"",
")",
":",
"# Perform an OVAL check.",
"# First attribute is the URI of an OVAL definitions file.",
"# Second attribute is an OVAL definition ID.",
"return",
"CPELanguage2_3",
".",
"_ovalcheck",
"(",
"cpel_dom",
".",
"getAttribute",
"(",
"CHECK_LOCATION",
")",
",",
"cpel_dom",
".",
"getAttribute",
"(",
"CHECK_ID",
")",
")",
"if",
"(",
"checksystemID",
"==",
"\"http://scap.nist.gov/schema/ocil/2\"",
")",
":",
"# Perform an OCIL check.",
"# First attribute is the URI of an OCIL questionnaire file.",
"# Second attribute is OCIL questionnaire ID.",
"return",
"CPELanguage2_3",
".",
"_ocilcheck",
"(",
"cpel_dom",
".",
"getAttribute",
"(",
"CHECK_LOCATION",
")",
",",
"cpel_dom",
".",
"getAttribute",
"(",
"CHECK_ID",
")",
")",
"# Can add additional check systems here, with each returning a",
"# True, False, or Error value",
"return",
"False"
] | Returns the result (True, False, Error) of performing the specified
check, unless the check isn't supported, in which case it returns
False. Error is a catch-all for all results other than True and
False.
:param string cpel_dom: XML infoset for the check_fact_ref element.
:returns: result of performing the specified check
:rtype: boolean or error | [
"Returns",
"the",
"result",
"(",
"True",
"False",
"Error",
")",
"of",
"performing",
"the",
"specified",
"check",
"unless",
"the",
"check",
"isnt",
"supported",
"in",
"which",
"case",
"it",
"returns",
"False",
".",
"Error",
"is",
"a",
"catch",
"-",
"all",
"for",
"all",
"results",
"other",
"than",
"True",
"and",
"False",
"."
] | python | train | 45.470588 |
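The `cpel_dom` argument is a DOM element, so the attribute reads use the standard `xml.dom.minidom` API. A minimal sketch of pulling the three attributes the dispatch relies on; the attribute names follow the record, and the URI/ID values are illustrative:

```python
from xml.dom.minidom import parseString

xml = (
    '<check-fact-ref '
    'check-system="http://oval.mitre.org/XMLSchema/ovaldefinitions-5" '
    'check-location="definitions.xml" check-id="oval:example:def:1"/>'
)
el = parseString(xml).documentElement

# The record dispatches on check-system and forwards the other two
# attributes to the matching _ovalcheck/_ocilcheck helper.
print(el.getAttribute("check-system"))
print(el.getAttribute("check-location"))
print(el.getAttribute("check-id"))
```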
hyperledger/indy-plenum | common/serializers/msgpack_serializer.py | https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/common/serializers/msgpack_serializer.py#L31-L40 | def deserialize(self, data, fields=None):
"""
Deserializes msgpack bytes to OrderedDict (in the same sorted order as for serialize)
:param data: the data in bytes
:return: sorted OrderedDict
"""
# TODO: it can be that we returned data by `get_lines`, that is already deserialized
if not isinstance(data, (bytes, bytearray)):
return data
return msgpack.unpackb(data, encoding='utf-8', object_pairs_hook=decode_to_sorted) | [
"def",
"deserialize",
"(",
"self",
",",
"data",
",",
"fields",
"=",
"None",
")",
":",
"# TODO: it can be that we returned data by `get_lines`, that is already deserialized",
"if",
"not",
"isinstance",
"(",
"data",
",",
"(",
"bytes",
",",
"bytearray",
")",
")",
":",
"return",
"data",
"return",
"msgpack",
".",
"unpackb",
"(",
"data",
",",
"encoding",
"=",
"'utf-8'",
",",
"object_pairs_hook",
"=",
"decode_to_sorted",
")"
] | Deserializes msgpack bytes to OrderedDict (in the same sorted order as for serialize)
:param data: the data in bytes
:return: sorted OrderedDict | [
"Deserializes",
"msgpack",
"bytes",
"to",
"OrderedDict",
"(",
"in",
"the",
"same",
"sorted",
"order",
"as",
"for",
"serialize",
")",
":",
"param",
"data",
":",
"the",
"data",
"in",
"bytes",
":",
"return",
":",
"sorted",
"OrderedDict"
] | python | train | 48.6 |
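A round-trip sketch of the serializer's contract using `msgpack` directly. The record's `encoding='utf-8'` keyword was removed from newer msgpack releases in favour of `raw=False`, which the sketch uses; the project's `decode_to_sorted` hook is replaced with the stock `OrderedDict` pairs hook for illustration:

```python
from collections import OrderedDict

import msgpack

payload = OrderedDict([("b", 2), ("a", 1)])

# Serialize; use_bin_type keeps str and bytes distinct on the wire.
data = msgpack.packb(payload, use_bin_type=True)

# Deserialize back into an ordered mapping, standing in for the record's
# object_pairs_hook=decode_to_sorted (which additionally sorts the keys).
restored = msgpack.unpackb(data, raw=False, object_pairs_hook=OrderedDict)
print(restored)  # -> OrderedDict([('b', 2), ('a', 1)])
```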