repo (string, 7-54 chars) | path (string, 4-192 chars) | url (string, 87-284 chars) | code (string, 78-104k chars) | docstring (string, 1-46.9k chars) | language (string, 1 class) | partition (string, 3 values) |
---|---|---|---|---|---|---|
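Each row below pairs one Python function (the `code` column) with its documentation string (the `docstring` column), keyed by repository, file path, and source URL; the `partition` column marks the data split. A minimal sketch of consuming such a dump, assuming it has been exported to a JSON-Lines file (the `code_docstring_corpus.jsonl` filename is a placeholder, not part of the dataset):

```python
import json

# Stream records from a JSON-Lines export of the table below.
# Each line holds one row with the columns listed in the header above.
with open("code_docstring_corpus.jsonl", "r", encoding="utf-8") as handle:
    for line in handle:
        row = json.loads(line)
        if row["partition"] != "train":
            continue  # keep only the training split
        # 'code' is the full function source; 'docstring' is its description.
        summary = row["docstring"].splitlines()[0] if row["docstring"] else ""
        print(row["repo"], row["path"], summary, sep=" | ")
```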
LuqueDaniel/pybooru | pybooru/api_moebooru.py | https://github.com/LuqueDaniel/pybooru/blob/60cd5254684d293b308f0b11b8f4ac2dce101479/pybooru/api_moebooru.py#L516-L526 | def pool_create(self, name, description, is_public):
"""Function to create a pool (Require login) (UNTESTED).
Parameters:
name (str): The name.
description (str): A description of the pool.
is_public (int): 1 or 0, whether or not the pool is public.
"""
params = {'pool[name]': name, 'pool[description]': description,
'pool[is_public]': is_public}
return self._get('pool/create', params, method='POST')
| Function to create a pool (Require login) (UNTESTED).
Parameters:
name (str): The name.
description (str): A description of the pool.
is_public (int): 1 or 0, whether or not the pool is public.
| python | train |
tensorflow/datasets | tensorflow_datasets/translate/wmt.py | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/translate/wmt.py#L838-L858 | def _parse_tmx(path):
"""Generates examples from TMX file."""
def _get_tuv_lang(tuv):
for k, v in tuv.items():
if k.endswith("}lang"):
return v
raise AssertionError("Language not found in `tuv` attributes.")
def _get_tuv_seg(tuv):
segs = tuv.findall("seg")
assert len(segs) == 1, "Invalid number of segments: %d" % len(segs)
return segs[0].text
with tf.io.gfile.GFile(path) as f:
for _, elem in ElementTree.iterparse(f):
if elem.tag == "tu":
yield {
_get_tuv_lang(tuv):
_get_tuv_seg(tuv) for tuv in elem.iterfind("tuv")
}
elem.clear()
| Generates examples from TMX file.
| python | train |
dpkp/kafka-python | kafka/producer/kafka.py | https://github.com/dpkp/kafka-python/blob/f6a8a38937688ea2cc5dc13d3d1039493be5c9b5/kafka/producer/kafka.py#L612-L637 | def flush(self, timeout=None):
"""
Invoking this method makes all buffered records immediately available
to send (even if linger_ms is greater than 0) and blocks on the
completion of the requests associated with these records. The
post-condition of :meth:`~kafka.KafkaProducer.flush` is that any
previously sent record will have completed
(e.g. Future.is_done() == True). A request is considered completed when
either it is successfully acknowledged according to the 'acks'
configuration for the producer, or it results in an error.
Other threads can continue sending messages while one thread is blocked
waiting for a flush call to complete; however, no guarantee is made
about the completion of messages sent after the flush call begins.
Arguments:
timeout (float, optional): timeout in seconds to wait for completion.
Raises:
KafkaTimeoutError: failure to flush buffered records within the
provided timeout
"""
log.debug("Flushing accumulated records in producer.") # trace
self._accumulator.begin_flush()
self._sender.wakeup()
self._accumulator.await_flush_completion(timeout=timeout)
| Invoking this method makes all buffered records immediately available
to send (even if linger_ms is greater than 0) and blocks on the
completion of the requests associated with these records. The
post-condition of :meth:`~kafka.KafkaProducer.flush` is that any
previously sent record will have completed
(e.g. Future.is_done() == True). A request is considered completed when
either it is successfully acknowledged according to the 'acks'
configuration for the producer, or it results in an error.
Other threads can continue sending messages while one thread is blocked
waiting for a flush call to complete; however, no guarantee is made
about the completion of messages sent after the flush call begins.
Arguments:
timeout (float, optional): timeout in seconds to wait for completion.
Raises:
KafkaTimeoutError: failure to flush buffered records within the
provided timeout
| python | train |
CenturyLinkCloud/clc-python-sdk | src/clc/APIv1/queue.py | https://github.com/CenturyLinkCloud/clc-python-sdk/blob/f4dba40c627cb08dd4b7d0d277e8d67578010b05/src/clc/APIv1/queue.py#L18-L26 | def List(type='All'):
"""List of Queued requests and their current status details.
https://t3n.zendesk.com/entries/20350251-List-Queue-Requests
:param type: list items in the queue filtered by status (All, Pending, Complete, Error)
"""
r = clc.v1.API.Call('post','Queue/ListQueueRequests',{'ItemStatusType': Queue.item_status_type_map[type] })
if int(r['StatusCode']) == 0: return(r['Requests'])
| List of Queued requests and their current status details.
https://t3n.zendesk.com/entries/20350251-List-Queue-Requests
:param type: list items in the queue filtered by status (All, Pending, Complete, Error)
| python | train |
project-ncl/pnc-cli | pnc_cli/buildconfigurations.py | https://github.com/project-ncl/pnc-cli/blob/3dc149bf84928f60a8044ac50b58bbaddd451902/pnc_cli/buildconfigurations.py#L367-L373 | def add_dependency(id=None, name=None, dependency_id=None, dependency_name=None):
"""
Add an existing BuildConfiguration as a dependency to another BuildConfiguration.
"""
data = add_dependency_raw(id, name, dependency_id, dependency_name)
if data:
return utils.format_json_list(data)
| Add an existing BuildConfiguration as a dependency to another BuildConfiguration.
| python | train |
arista-eosplus/pyeapi | pyeapi/api/vrrp.py | https://github.com/arista-eosplus/pyeapi/blob/96a74faef1fe3bd79c4e900aed29c9956a0587d6/pyeapi/api/vrrp.py#L354-L379 | def create(self, interface, vrid, **kwargs):
"""Creates a vrrp instance from an interface
Note:
This method will attempt to create a vrrp in the node's
operational config. If the vrrp already exists on the
interface, then this method will set the properties of
the existing vrrp to those that have been passed in, if
possible.
Args:
interface (string): The interface to configure.
vrid (integer): The vrid number for the vrrp to be created.
kwargs (dict): A dictionary specifying the properties to
be applied to the new vrrp instance. See library
documentation for available keys and values.
Returns:
True if the vrrp could be created otherwise False (see Node)
"""
if 'enable' not in kwargs:
kwargs['enable'] = False
return self._vrrp_set(interface, vrid, **kwargs)
| Creates a vrrp instance from an interface
Note:
This method will attempt to create a vrrp in the node's
operational config. If the vrrp already exists on the
interface, then this method will set the properties of
the existing vrrp to those that have been passed in, if
possible.
Args:
interface (string): The interface to configure.
vrid (integer): The vrid number for the vrrp to be created.
kwargs (dict): A dictionary specifying the properties to
be applied to the new vrrp instance. See library
documentation for available keys and values.
Returns:
True if the vrrp could be created otherwise False (see Node)
| python | train |
Brazelton-Lab/bio_utils | bio_utils/iterators/b6.py | https://github.com/Brazelton-Lab/bio_utils/blob/5a7ddf13ee0bf4baaaeb6b2b99e01bf74aa132b7/bio_utils/iterators/b6.py#L97-L125 | def write(self, default: bool=False):
"""Restore B6/M8 entry to original format
Args:
default (bool): output entry in default BLAST+ B6 format
Returns:
str: properly formatted string containing the B6/M8 entry
"""
none_type = type(None)
if default: # Default order of format specifiers
ordered_vals = ['query', 'subject', 'identity', 'length',
'mismatches', 'gaps', 'query_start', 'query_end',
'subject_start', 'subject_end', 'evalue',
'bitscore']
else: # Original order of B6 entry format specifiers
try:
ordered_vals = [self.custom_fs[i] if i in self.custom_fs
else getattr(self, i) for i in self.fs_order]
except TypeError:
ordered_vals = [getattr(self, i) for i in self.fs_order]
# Format entry for writing
fstr = "\t".join(['-' if type(i) == none_type else str(i) for i in
ordered_vals])
return '{}{}'.format(fstr, os.linesep)
| Restore B6/M8 entry to original format
Args:
default (bool): output entry in default BLAST+ B6 format
Returns:
str: properly formatted string containing the B6/M8 entry
| python | train |
housecanary/hc-api-python | housecanary/apiclient.py | https://github.com/housecanary/hc-api-python/blob/2bb9e2208b34e8617575de45934357ee33b8531c/housecanary/apiclient.py#L218-L250 | def _convert_to_identifier_json(self, address_data):
"""Convert input address data into json format"""
if isinstance(address_data, str):
# allow just passing a slug string.
return {"slug": address_data}
if isinstance(address_data, tuple) and len(address_data) > 0:
address_json = {"address": address_data[0]}
if len(address_data) > 1:
address_json["zipcode"] = address_data[1]
if len(address_data) > 2:
address_json["meta"] = address_data[2]
return address_json
if isinstance(address_data, dict):
allowed_keys = ["address", "zipcode", "unit", "city", "state", "slug", "meta",
"client_value", "client_value_sqft"]
# ensure the dict does not contain any unallowed keys
for key in address_data:
if key not in allowed_keys:
msg = "Key in address input not allowed: " + key
raise housecanary.exceptions.InvalidInputException(msg)
# ensure it contains an "address" key
if "address" in address_data or "slug" in address_data:
return address_data
# if we made it here, the input was not valid.
msg = ("Input is invalid. Must be a list of (address, zipcode) tuples, or a dict or list"
" of dicts with each item containing at least an 'address' or 'slug' key.")
raise housecanary.exceptions.InvalidInputException((msg))
| Convert input address data into json format
| python | train |
pvlib/pvlib-python | pvlib/solarposition.py | https://github.com/pvlib/pvlib-python/blob/2e844a595b820b43d1170269781fa66bd0ccc8a3/pvlib/solarposition.py#L1313-L1355 | def hour_angle(times, longitude, equation_of_time):
"""
Hour angle in local solar time. Zero at local solar noon.
Parameters
----------
times : :class:`pandas.DatetimeIndex`
Corresponding timestamps, must be localized to the timezone for the
``longitude``.
longitude : numeric
Longitude in degrees
equation_of_time : numeric
Equation of time in minutes.
Returns
-------
hour_angle : numeric
Hour angle in local solar time in degrees.
References
----------
[1] J. A. Duffie and W. A. Beckman, "Solar Engineering of Thermal
Processes, 3rd Edition" pp. 13, J. Wiley and Sons, New York (2006)
[2] J. H. Seinfeld and S. N. Pandis, "Atmospheric Chemistry and Physics"
p. 132, J. Wiley (1998)
[3] Daryl R. Myers, "Solar Radiation: Practical Modeling for Renewable
Energy Applications", p. 5 CRC Press (2013)
See Also
--------
equation_of_time_Spencer71
equation_of_time_pvcdrom
"""
naive_times = times.tz_localize(None) # naive but still localized
# hours - timezone = (times - normalized_times) - (naive_times - times)
hrs_minus_tzs = 1 / NS_PER_HR * (
2 * times.astype(np.int64) - times.normalize().astype(np.int64) -
naive_times.astype(np.int64))
# ensure array return instead of a version-dependent pandas <T>Index
return np.asarray(
15. * (hrs_minus_tzs - 12.) + longitude + equation_of_time / 4.)
| Hour angle in local solar time. Zero at local solar noon.
Parameters
----------
times : :class:`pandas.DatetimeIndex`
Corresponding timestamps, must be localized to the timezone for the
``longitude``.
longitude : numeric
Longitude in degrees
equation_of_time : numeric
Equation of time in minutes.
Returns
-------
hour_angle : numeric
Hour angle in local solar time in degrees.
References
----------
[1] J. A. Duffie and W. A. Beckman, "Solar Engineering of Thermal
Processes, 3rd Edition" pp. 13, J. Wiley and Sons, New York (2006)
[2] J. H. Seinfeld and S. N. Pandis, "Atmospheric Chemistry and Physics"
p. 132, J. Wiley (1998)
[3] Daryl R. Myers, "Solar Radiation: Practical Modeling for Renewable
Energy Applications", p. 5 CRC Press (2013)
See Also
--------
equation_of_time_Spencer71
equation_of_time_pvcdrom
| python | train |
shaunduncan/nosqlite | nosqlite.py | https://github.com/shaunduncan/nosqlite/blob/3033c029b7c8290c66a8b36dc512e560505d4c85/nosqlite.py#L133-L149 | def update(self, document):
"""
Updates a document stored in this collection. If the document does not
already have an '_id' value, it will be created
"""
if '_id' not in document:
return self.insert(document)
# Update the stored document, removing the id
copy = document.copy()
del copy['_id']
self.db.execute("""
update %s set data = ? where id = ?
""" % self.name, (json.dumps(copy), document['_id']))
return document
already have an '_id' value, it will be created | [
"Updates",
"a",
"document",
"stored",
"in",
"this",
"collection",
".",
"If",
"the",
"document",
"does",
"not",
"already",
"have",
"an",
"_id",
"value",
"it",
"will",
"be",
"created"
]
| python | train |
Kane610/axis | axis/rtsp.py | https://github.com/Kane610/axis/blob/b2b44ce595c7b722b5e13eabcab7b91f048e1808/axis/rtsp.py#L231-L239 | def SETUP(self):
"""Set up stream transport."""
message = "SETUP " + self.session.control_url + " RTSP/1.0\r\n"
message += self.sequence
message += self.authentication
message += self.user_agent
message += self.transport
message += '\r\n'
return message
| Set up stream transport.
| python | train |
csparpa/pyowm | pyowm/weatherapi25/parsers/observationlistparser.py | https://github.com/csparpa/pyowm/blob/cdd59eb72f32f7238624ceef9b2e2329a5ebd472/pyowm/weatherapi25/parsers/observationlistparser.py#L20-L64 | def parse_JSON(self, JSON_string):
"""
Parses a list of *Observation* instances out of raw JSON data. Only
certain properties of the data are used: if these properties are not
found or cannot be parsed, an error is issued.
:param JSON_string: a raw JSON string
:type JSON_string: str
:returns: a list of *Observation* instances or ``None`` if no data is
available
:raises: *ParseResponseError* if it is impossible to find or parse the
data needed to build the result, *APIResponseError* if the OWM API
returns a HTTP status error
"""
if JSON_string is None:
raise ParseResponseError('JSON data is None')
d = json.loads(JSON_string)
observation_parser = ObservationParser()
if 'cod' in d:
# Check if server returned errors: this check overcomes the lack of use
# of HTTP error status codes by the OWM API 2.5. This mechanism is
# supposed to be deprecated as soon as the API fully adopts HTTP for
# conveying errors to the clients
if d['cod'] == "200" or d['cod'] == 200:
pass
else:
if d['cod'] == "404" or d['cod'] == 404:
print("OWM API: data not found - response payload: " + json.dumps(d))
return None
else:
raise APIResponseError("OWM API: error - response payload: " + json.dumps(d), str(d['cod']))
# Handle the case when no results are found
if 'count' in d and d['count'] == "0":
return []
if 'cnt' in d and d['cnt'] == 0:
return []
if 'list' in d:
return [observation_parser.parse_JSON(json.dumps(item)) \
for item in d['list']]
# no way out..
raise ParseResponseError(''.join([__name__,
': impossible to read JSON data']))
| Parses a list of *Observation* instances out of raw JSON data. Only
certain properties of the data are used: if these properties are not
found or cannot be parsed, an error is issued.
:param JSON_string: a raw JSON string
:type JSON_string: str
:returns: a list of *Observation* instances or ``None`` if no data is
available
:raises: *ParseResponseError* if it is impossible to find or parse the
data needed to build the result, *APIResponseError* if the OWM API
returns a HTTP status error
| python | train |
spoqa/tsukkomi | tsukkomi/typed.py | https://github.com/spoqa/tsukkomi/blob/c67bd28a5211cdd11f8ac81f109c915f3b780445/tsukkomi/typed.py#L109-L142 | def check_tuple(data: typing.Tuple,
hint: typing.Union[type, typing.TypingMeta]) -> bool:
"""Check argument type & return type of :class:`typing.Tuple`. since it
raises check :class:`typing.Tuple` using `isinstance`, so compare in
diffrent way
:param data: tuple given as a argument
:param hint: assumed type of given ``data``
"""
if not isinstance(data, tuple):
raise TypeError(
'expected {}, not {}'.format(
typing._type_repr(hint),
'None' if data is None else '{}: {!r}'.format(
typing._type_repr(type(data)),
data
)
)
)
tuple_param = hint.__tuple_params__
if len(data) != len(tuple_param):
raise TypeError('expected tuple size is {}, not {}: '
'{!r}'.format(len(tuple_param), len(data), data))
zipped = itertools.zip_longest(data, tuple_param)
for i, (v, t) in enumerate(zipped):
_, correct = check_type(v, t)
if not correct:
raise TypeError(
'{0}th item `{1}` in tuple must be {2!r}, not: {3!r}'.format(
i, v, t, v
)
)
return hint, True
| Check argument type & return type of :class:`typing.Tuple`. since it
raises check :class:`typing.Tuple` using `isinstance`, so compare in
diffrent way
:param data: tuple given as a argument
:param hint: assumed type of given ``data``
| python | train |
etcher-be/epab | epab/cmd/_chglog.py | https://github.com/etcher-be/epab/blob/024cde74d058281aa66e6e4b7b71dccbe803b1c1/epab/cmd/_chglog.py#L53-L63 | def temporary_tag(tag):
"""
Temporarily tags the repo
"""
if tag:
CTX.repo.tag(tag)
try:
yield
finally:
if tag:
CTX.repo.remove_tag(tag)
| Temporarily tags the repo
| python | train |
elliterate/capybara.py | capybara/session.py | https://github.com/elliterate/capybara.py/blob/0c6ae449cc37e4445ec3cd6af95674533beedc6c/capybara/session.py#L532-L546 | def dismiss_confirm(self, text=None, wait=None):
"""
Execute the wrapped code, dismissing a confirm.
Args:
text (str | RegexObject, optional): Text to match against the text in the modal.
wait (int | float, optional): Maximum time to wait for the modal to appear after
executing the wrapped code.
Raises:
ModalNotFound: If a modal dialog hasn't been found.
"""
with self.driver.dismiss_modal("confirm", text=text, wait=wait):
yield
| Execute the wrapped code, dismissing a confirm.
Args:
text (str | RegexObject, optional): Text to match against the text in the modal.
wait (int | float, optional): Maximum time to wait for the modal to appear after
executing the wrapped code.
Raises:
ModalNotFound: If a modal dialog hasn't been found.
| python | test |
thespacedoctor/fundamentals | fundamentals/mysql/readquery.py | https://github.com/thespacedoctor/fundamentals/blob/1d2c007ac74442ec2eabde771cfcacdb9c1ab382/fundamentals/mysql/readquery.py#L19-L79 | def readquery(
sqlQuery,
dbConn,
log,
quiet=False):
"""Given a mysql query, read the data from the database and return the results as a list of dictionaries (database rows)
**Key Arguments:**
- ``log`` -- the logger.
- ``sqlQuery`` -- the MySQL command to execute
- ``dbConn`` -- the db connection
- ``quiet`` -- ignore mysql warnings and errors and move on. Be careful when setting this to true - damaging errors can easily be missed. Default *False*.
**Return:**
- ``rows`` -- the rows returned by the sql query
**Usage:**
.. code-block:: python
from fundamentals.mysql import readquery
rows = readquery(
log=log,
sqlQuery=sqlQuery,
dbConn=dbConn,
quiet=False
)
"""
log.debug('starting the ``readquery`` function')
import pymysql
import warnings
warnings.filterwarnings('error', category=pymysql.Warning)
rows = []
try:
cursor = dbConn.cursor(pymysql.cursors.DictCursor)
except Exception as e:
log.error('could not create the database cursor: %s' % (e, ))
raise IOError('could not create the database cursor: %s' % (e, ))
# EXECUTE THE SQL COMMAND
cursor.execute(sqlQuery)
rows = cursor.fetchall()
try:
cursor.execute(sqlQuery)
rows = cursor.fetchall()
except Exception as e:
sqlQuery = sqlQuery[:1000]
if quiet == False:
log.warning(
'MySQL raised an error - read command not executed.\n' + str(e) + '\nHere is the sqlQuery\n\t%(sqlQuery)s' % locals())
raise e
# CLOSE THE CURSOR
try:
cursor.close()
except Exception as e:
log.warning('could not close the db cursor ' + str(e) + '\n')
log.debug('completed the ``readquery`` function')
return rows
| Given a mysql query, read the data from the database and return the results as a list of dictionaries (database rows)
**Key Arguments:**
- ``log`` -- the logger.
- ``sqlQuery`` -- the MySQL command to execute
- ``dbConn`` -- the db connection
- ``quiet`` -- ignore mysql warnings and errors and move on. Be careful when setting this to true - damaging errors can easily be missed. Default *False*.
**Return:**
- ``rows`` -- the rows returned by the sql query
**Usage:**
.. code-block:: python
from fundamentals.mysql import readquery
rows = readquery(
log=log,
sqlQuery=sqlQuery,
dbConn=dbConn,
quiet=False
)
| python | train |
apple/turicreate | src/unity/python/turicreate/toolkits/_tree_model_mixin.py | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/_tree_model_mixin.py#L122-L189 | def _extract_features_with_missing(self, dataset, tree_id = 0,
missing_value_action = 'auto'):
"""
Extract features along with all the missing features associated with
a dataset.
Parameters
----------
dataset: bool
Dataset on which to make predictions.
missing_value_action: str, optional
Action to perform when missing values are encountered. This can be
one of:
- 'auto': Choose a model dependent missing value policy.
- 'impute': Proceed with evaluation by filling in the missing
values with the mean of the training data. Missing
values are also imputed if an entire column of data is
missing during evaluation.
- 'none': Treat missing value as is. Model must be able to handle
missing value.
- 'error' : Do not proceed with prediction and terminate with
an error message.
Returns
-------
out : SFrame
A table with two columns:
- leaf_id : Leaf id of the corresponding tree.
- missing_features : A list of missing feature, index pairs
"""
# Extract the features from only one tree.
sf = dataset
sf['leaf_id'] = self.extract_features(dataset, missing_value_action)\
.vector_slice(tree_id)\
.astype(int)
tree = self._get_tree(tree_id)
type_map = dict(zip(dataset.column_names(), dataset.column_types()))
def get_missing_features(row):
x = row['leaf_id']
path = tree.get_prediction_path(x)
missing_id = [] # List of "missing_id" children.
# For each node in the prediction path.
for p in path:
fname = p['feature']
idx = p['index']
f = row[fname]
if type_map[fname] in [int, float]:
if f is None:
missing_id.append(p['child_id'])
elif type_map[fname] in [dict]:
if f is None:
missing_id.append(p['child_id'])
if idx not in f:
missing_id.append(p['child_id'])
else:
pass
return missing_id
sf['missing_id'] = sf.apply(get_missing_features, list)
return sf[['leaf_id', 'missing_id']]
| Extract features along with all the missing features associated with
a dataset.
Parameters
----------
dataset: bool
Dataset on which to make predictions.
missing_value_action: str, optional
Action to perform when missing values are encountered. This can be
one of:
- 'auto': Choose a model dependent missing value policy.
- 'impute': Proceed with evaluation by filling in the missing
values with the mean of the training data. Missing
values are also imputed if an entire column of data is
missing during evaluation.
- 'none': Treat missing value as is. Model must be able to handle
missing value.
- 'error' : Do not proceed with prediction and terminate with
an error message.
Returns
-------
out : SFrame
A table with two columns:
- leaf_id : Leaf id of the corresponding tree.
- missing_features : A list of missing feature, index pairs
| python | train |
eddiejessup/agaro | agaro/runner.py | https://github.com/eddiejessup/agaro/blob/b2feb45d6129d749088c70b3e9290af7ca7c7d33/agaro/runner.py#L105-L111 | def is_snapshot_time(self, output_every=None, t_output_every=None):
"""Determine whether or not the model's iteration number is one
where the runner is expected to make an output snapshot.
"""
if t_output_every is not None:
output_every = int(round(t_output_every // self.model.dt))
return not self.model.i % output_every
| Determine whether or not the model's iteration number is one
where the runner is expected to make an output snapshot.
| python | train |
Esri/ArcREST | src/arcrest/common/symbology.py | https://github.com/Esri/ArcREST/blob/ab240fde2b0200f61d4a5f6df033516e53f2f416/src/arcrest/common/symbology.py#L157-L182 | def value(self):
"""returns the object as dictionary"""
if self._outline is None:
return {
"type" : "esriSMS",
"style" : self._style,
"color" : self._color.value,
"size" : self._size,
"angle" : self._angle,
"xoffset" : self._xoffset,
"yoffset" : self._yoffset
}
else:
return {
"type" : "esriSMS",
"style" : self._style,
"color" : self._color.value,
"size" : self._size,
"angle" : self._angle,
"xoffset" : self._xoffset,
"yoffset" : self._yoffset,
"outline" : {
"width" : self._outline['width'],
"color" : self._color.value
}
}
| returns the object as dictionary
| python | train |
Cue/scales | src/greplin/scales/util.py | https://github.com/Cue/scales/blob/0aced26eb050ceb98ee9d5d6cdca8db448666986/src/greplin/scales/util.py#L85-L93 | def disconnect(self):
"""Disconnect from the Graphite server if connected."""
if self.sock is not None:
try:
self.sock.close()
except socket.error:
pass
finally:
self.sock = None
| Disconnect from the Graphite server if connected.
| python | train |
dcos/shakedown | shakedown/dcos/task.py | https://github.com/dcos/shakedown/blob/e2f9e2382788dbcd29bd18aa058b76e7c3b83b3e/shakedown/dcos/task.py#L118-L120 | def wait_for_task(service, task, timeout_sec=120):
"""Waits for a task which was launched to be launched"""
return time_wait(lambda: task_predicate(service, task), timeout_seconds=timeout_sec)
| Waits for a task which was launched to be launched
| python | train |
Robpol86/libnl | libnl/linux_private/genetlink.py | https://github.com/Robpol86/libnl/blob/274e9fdaa39822d06ef70b799ed4a95937a4d923/libnl/linux_private/genetlink.py#L71-L73 | def reserved(self, value):
"""Reserved setter."""
self.bytearray[self._get_slicers(2)] = bytearray(c_uint16(value or 0))
| Reserved setter.
| python | train |
KelSolaar/Umbra | umbra/ui/widgets/basic_QPlainTextEdit.py | https://github.com/KelSolaar/Umbra/blob/66f45f08d9d723787f1191989f8b0dda84b412ce/umbra/ui/widgets/basic_QPlainTextEdit.py#L612-L637 | def duplicate_lines(self):
"""
Duplicates the document lines under cursor.
:return: Method success.
:rtype: bool
"""
cursor = self.textCursor()
self.__select_text_under_cursor_blocks(cursor)
text = cursor.selectedText()
cursor.setPosition(cursor.block().next().position())
cursor.position() == cursor.document().firstBlock().position() and cursor.setPosition(
cursor.document().lastBlock().position())
start_position = cursor.position()
cursor.insertText(text)
end_position = cursor.position()
cursor.insertText(QChar(QChar.ParagraphSeparator))
cursor.setPosition(start_position, QTextCursor.MoveAnchor)
cursor.setPosition(end_position, QTextCursor.KeepAnchor)
self.setTextCursor(cursor)
return True
| Duplicates the document lines under cursor.
:return: Method success.
:rtype: bool
| python | train |
mitsei/dlkit | dlkit/records/assessment/qti/numeric_response_records.py | https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/records/assessment/qti/numeric_response_records.py#L905-L931 | def _init_metadata(self):
"""stub"""
DecimalValuesFormRecord._init_metadata(self)
IntegerValuesFormRecord._init_metadata(self)
TextAnswerFormRecord._init_metadata(self)
super(MultiLanguageCalculationInteractionFeedbackAndFilesAnswerFormRecord, self)._init_metadata()
self._tolerance_mode_metadata = {
'element_id': Id(self.my_osid_object_form._authority,
self.my_osid_object_form._namespace,
'tolerance_mode'),
'element_label': 'tolerance_mode',
'instructions': 'enter the tolerance mode',
'required': True,
'read_only': False,
'linked': False,
'array': False,
'default_string_values': [{
'text': '',
'languageTypeId': str(DEFAULT_LANGUAGE_TYPE),
'scriptTypeId': str(DEFAULT_SCRIPT_TYPE),
'formatTypeId': str(DEFAULT_FORMAT_TYPE),
}],
'syntax': 'STRING',
'minimum_string_length': 0,
'maximum_string_length': 1024,
'string_set': []
}
| stub
| python | train |
UCBerkeleySETI/blimpy | blimpy/waterfall.py | https://github.com/UCBerkeleySETI/blimpy/blob/b8822d3e3e911944370d84371a91fa0c29e9772e/blimpy/waterfall.py#L509-L539 | def grab_data(self, f_start=None, f_stop=None,t_start=None, t_stop=None, if_id=0):
""" Extract a portion of data by frequency range.
Args:
f_start (float): start frequency in MHz
f_stop (float): stop frequency in MHz
if_id (int): IF input identification (req. when multiple IFs in file)
Returns:
(freqs, data) (np.arrays): frequency axis in MHz and data subset
"""
self.freqs = self.populate_freqs()
self.timestamps = self.populate_timestamps()
if f_start is None:
f_start = self.freqs[0]
if f_stop is None:
f_stop = self.freqs[-1]
i0 = np.argmin(np.abs(self.freqs - f_start))
i1 = np.argmin(np.abs(self.freqs - f_stop))
if i0 < i1:
plot_f = self.freqs[i0:i1 + 1]
plot_data = np.squeeze(self.data[t_start:t_stop, ..., i0:i1 + 1])
else:
plot_f = self.freqs[i1:i0 + 1]
plot_data = np.squeeze(self.data[t_start:t_stop, ..., i1:i0 + 1])
return plot_f, plot_data
| Extract a portion of data by frequency range.
Args:
f_start (float): start frequency in MHz
f_stop (float): stop frequency in MHz
if_id (int): IF input identification (req. when multiple IFs in file)
Returns:
(freqs, data) (np.arrays): frequency axis in MHz and data subset
| python | test |
kontron/python-ipmi | pyipmi/interfaces/rmcp.py | https://github.com/kontron/python-ipmi/blob/ce46da47a37dd683615f32d04a10eda069aa569a/pyipmi/interfaces/rmcp.py#L541-L555 | def send_and_receive_raw(self, target, lun, netfn, raw_bytes):
"""Interface function to send and receive raw message.
target: IPMI target
lun: logical unit number
netfn: network function
raw_bytes: RAW bytes as bytestring
Returns the IPMI message response bytestring.
"""
return self._send_and_receive(target=target,
lun=lun,
netfn=netfn,
cmdid=array('B', raw_bytes)[0],
payload=raw_bytes[1:])
| Interface function to send and receive raw message.
target: IPMI target
lun: logical unit number
netfn: network function
raw_bytes: RAW bytes as bytestring
Returns the IPMI message response bytestring.
| python | train |
elastic/elasticsearch-dsl-py | elasticsearch_dsl/index.py | https://github.com/elastic/elasticsearch-dsl-py/blob/874b52472fc47b601de0e5fa0e4300e21aff0085/elasticsearch_dsl/index.py#L454-L461 | def get_settings(self, using=None, **kwargs):
"""
Retrieve settings for the index.
Any additional keyword arguments will be passed to
``Elasticsearch.indices.get_settings`` unchanged.
"""
return self._get_connection(using).indices.get_settings(index=self._name, **kwargs)
| Retrieve settings for the index.
Any additional keyword arguments will be passed to
``Elasticsearch.indices.get_settings`` unchanged.
| python | train |
openstack/horizon | openstack_dashboard/api/keystone.py | https://github.com/openstack/horizon/blob/5601ea9477323e599d9b766fcac1f8be742935b2/openstack_dashboard/api/keystone.py#L671-L696 | def get_project_groups_roles(request, project):
"""Gets the groups roles in a given project.
:param request: the request entity containing the login user information
:param project: the project to filter the groups roles. It accepts both
project object resource or project ID
:returns group_roles: a dictionary mapping the groups and their roles in
given project
"""
groups_roles = collections.defaultdict(list)
project_role_assignments = role_assignments_list(request,
project=project)
for role_assignment in project_role_assignments:
if not hasattr(role_assignment, 'group'):
continue
group_id = role_assignment.group['id']
role_id = role_assignment.role['id']
# filter by project_id
if ('project' in role_assignment.scope and
role_assignment.scope['project']['id'] == project):
groups_roles[group_id].append(role_id)
return groups_roles
| Gets the groups roles in a given project.
:param request: the request entity containing the login user information
:param project: the project to filter the groups roles. It accepts both
project object resource or project ID
:returns group_roles: a dictionary mapping the groups and their roles in
given project
| python | train |
google/grr | grr/core/grr_response_core/lib/communicator.py | https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/core/grr_response_core/lib/communicator.py#L242-L263 | def VerifyCipherSignature(self, remote_public_key):
"""Verifies the signature on the encrypted cipher block.
This method returns True if the signature verifies correctly with
the key given.
Args:
remote_public_key: The remote public key.
Returns:
None
Raises:
rdf_crypto.VerificationError: A signature and a key were both given but
verification fails.
"""
if self.cipher_metadata.signature and remote_public_key:
stats_collector_instance.Get().IncrementCounter("grr_rsa_operations")
remote_public_key.Verify(self.serialized_cipher,
self.cipher_metadata.signature)
return True | [
"def",
"VerifyCipherSignature",
"(",
"self",
",",
"remote_public_key",
")",
":",
"if",
"self",
".",
"cipher_metadata",
".",
"signature",
"and",
"remote_public_key",
":",
"stats_collector_instance",
".",
"Get",
"(",
")",
".",
"IncrementCounter",
"(",
"\"grr_rsa_operations\"",
")",
"remote_public_key",
".",
"Verify",
"(",
"self",
".",
"serialized_cipher",
",",
"self",
".",
"cipher_metadata",
".",
"signature",
")",
"return",
"True"
]
| Verifies the signature on the encrypted cipher block.
This method returns True if the signature verifies correctly with
the key given.
Args:
remote_public_key: The remote public key.
Returns:
None
Raises:
rdf_crypto.VerificationError: A signature and a key were both given but
verification fails. | [
"Verifies",
"the",
"signature",
"on",
"the",
"encrypted",
"cipher",
"block",
"."
]
| python | train |
PSPC-SPAC-buyandsell/von_agent | von_agent/agent/holder_prover.py | https://github.com/PSPC-SPAC-buyandsell/von_agent/blob/0b1c17cca3bd178b6e6974af84dbac1dfce5cf45/von_agent/agent/holder_prover.py#L690-L738 | async def get_box_ids_json(self) -> str:
"""
Return json object on lists of all unique box identifiers for credentials in wallet:
schema identifiers, credential definition identifiers, and revocation registry identifiers; e.g.,
::
{
"schema_id": [
"R17v42T4pk...:2:tombstone:1.2",
"9cHbp54C8n...:2:business:2.0",
...
],
"cred_def_id": [
"R17v42T4pk...:3:CL:19:0",
"9cHbp54C8n...:3:CL:37:0",
...
]
"rev_reg_id": [
"R17v42T4pk...:4:R17v42T4pk...:3:CL:19:0:CL_ACCUM:0",
"R17v42T4pk...:4:R17v42T4pk...:3:CL:19:0:CL_ACCUM:1",
"9cHbp54C8n...:4:9cHbp54C8n...:3:CL:37:0:CL_ACCUM:0",
"9cHbp54C8n...:4:9cHbp54C8n...:3:CL:37:0:CL_ACCUM:1",
"9cHbp54C8n...:4:9cHbp54C8n...:3:CL:37:0:CL_ACCUM:2",
...
]
}
:return: tuple of sets for schema ids, cred def ids, rev reg ids
"""
LOGGER.debug('HolderProver.get_box_ids_json >>>')
s_ids = set()
cd_ids = set()
rr_ids = set()
for cred in json.loads(await self.get_creds_display_coarse()):
s_ids.add(cred['schema_id'])
cd_ids.add(cred['cred_def_id'])
if cred['rev_reg_id']:
rr_ids.add(cred['rev_reg_id'])
rv = json.dumps({
'schema_id': list(s_ids),
'cred_def_id': list(cd_ids),
'rev_reg_id': list(rr_ids)
})
LOGGER.debug('HolderProver.get_box_ids_json <<< %s', rv)
return rv | [
"async",
"def",
"get_box_ids_json",
"(",
"self",
")",
"->",
"str",
":",
"LOGGER",
".",
"debug",
"(",
"'HolderProver.get_box_ids_json >>>'",
")",
"s_ids",
"=",
"set",
"(",
")",
"cd_ids",
"=",
"set",
"(",
")",
"rr_ids",
"=",
"set",
"(",
")",
"for",
"cred",
"in",
"json",
".",
"loads",
"(",
"await",
"self",
".",
"get_creds_display_coarse",
"(",
")",
")",
":",
"s_ids",
".",
"add",
"(",
"cred",
"[",
"'schema_id'",
"]",
")",
"cd_ids",
".",
"add",
"(",
"cred",
"[",
"'cred_def_id'",
"]",
")",
"if",
"cred",
"[",
"'rev_reg_id'",
"]",
":",
"rr_ids",
".",
"add",
"(",
"cred",
"[",
"'rev_reg_id'",
"]",
")",
"rv",
"=",
"json",
".",
"dumps",
"(",
"{",
"'schema_id'",
":",
"list",
"(",
"s_ids",
")",
",",
"'cred_def_id'",
":",
"list",
"(",
"cd_ids",
")",
",",
"'rev_reg_id'",
":",
"list",
"(",
"rr_ids",
")",
"}",
")",
"LOGGER",
".",
"debug",
"(",
"'HolderProver.get_box_ids_json <<< %s'",
",",
"rv",
")",
"return",
"rv"
]
| Return json object on lists of all unique box identifiers for credentials in wallet:
schema identifiers, credential definition identifiers, and revocation registry identifiers; e.g.,
::
{
"schema_id": [
"R17v42T4pk...:2:tombstone:1.2",
"9cHbp54C8n...:2:business:2.0",
...
],
"cred_def_id": [
"R17v42T4pk...:3:CL:19:0",
"9cHbp54C8n...:3:CL:37:0",
...
]
"rev_reg_id": [
"R17v42T4pk...:4:R17v42T4pk...:3:CL:19:0:CL_ACCUM:0",
"R17v42T4pk...:4:R17v42T4pk...:3:CL:19:0:CL_ACCUM:1",
"9cHbp54C8n...:4:9cHbp54C8n...:3:CL:37:0:CL_ACCUM:0",
"9cHbp54C8n...:4:9cHbp54C8n...:3:CL:37:0:CL_ACCUM:1",
"9cHbp54C8n...:4:9cHbp54C8n...:3:CL:37:0:CL_ACCUM:2",
...
]
}
:return: tuple of sets for schema ids, cred def ids, rev reg ids | [
"Return",
"json",
"object",
"on",
"lists",
"of",
"all",
"unique",
"box",
"identifiers",
"for",
"credentials",
"in",
"wallet",
":",
"schema",
"identifiers",
"credential",
"definition",
"identifiers",
"and",
"revocation",
"registry",
"identifiers",
";",
"e",
".",
"g",
"."
]
| python | train |
dgomes/pymediaroom | pymediaroom/notify.py | https://github.com/dgomes/pymediaroom/blob/f4f2686c8d5622dd5ae1bcdd76900ba35e148529/pymediaroom/notify.py#L80-L85 | def timeshift(self):
"""Return if the stream is a timeshift."""
if self.tune and self.tune.get('@src'):
return True if self.tune.get('@src').startswith('timeshift') else False
else:
raise PyMediaroomError("No information in <node> about @src") | [
"def",
"timeshift",
"(",
"self",
")",
":",
"if",
"self",
".",
"tune",
"and",
"self",
".",
"tune",
".",
"get",
"(",
"'@src'",
")",
":",
"return",
"True",
"if",
"self",
".",
"tune",
".",
"get",
"(",
"'@src'",
")",
".",
"startswith",
"(",
"'timeshift'",
")",
"else",
"False",
"else",
":",
"raise",
"PyMediaroomError",
"(",
"\"No information in <node> about @src\"",
")"
]
| Return if the stream is a timeshift. | [
"Return",
"if",
"the",
"stream",
"is",
"a",
"timeshift",
"."
]
| python | train |
dslackw/slpkg | slpkg/downloader.py | https://github.com/dslackw/slpkg/blob/dd2e08a80e944d337d157b992167ba631a4343de/slpkg/downloader.py#L85-L89 | def _make_tarfile(self, output_filename, source_dir):
"""Create .tar.gz file
"""
with tarfile.open(output_filename, "w:gz") as tar:
tar.add(source_dir, arcname=os.path.basename(source_dir)) | [
"def",
"_make_tarfile",
"(",
"self",
",",
"output_filename",
",",
"source_dir",
")",
":",
"with",
"tarfile",
".",
"open",
"(",
"output_filename",
",",
"\"w:gz\"",
")",
"as",
"tar",
":",
"tar",
".",
"add",
"(",
"source_dir",
",",
"arcname",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"source_dir",
")",
")"
]
| Create .tar.gz file | [
"Create",
".",
"tar",
".",
"gz",
"file"
]
| python | train |
pymoca/pymoca | src/pymoca/backends/xml/model.py | https://github.com/pymoca/pymoca/blob/14b5eb7425e96689de6cc5c10f400895d586a978/src/pymoca/backends/xml/model.py#L102-L108 | def create_function_f_J(self):
"""Jacobian for state integration"""
return ca.Function(
'J',
[self.t, self.x, self.y, self.m, self.p, self.c, self.ng, self.nu],
[ca.jacobian(self.f_x_rhs, self.x)],
['t', 'x', 'y', 'm', 'p', 'c', 'ng', 'nu'], ['J'], self.func_opt) | [
"def",
"create_function_f_J",
"(",
"self",
")",
":",
"return",
"ca",
".",
"Function",
"(",
"'J'",
",",
"[",
"self",
".",
"t",
",",
"self",
".",
"x",
",",
"self",
".",
"y",
",",
"self",
".",
"m",
",",
"self",
".",
"p",
",",
"self",
".",
"c",
",",
"self",
".",
"ng",
",",
"self",
".",
"nu",
"]",
",",
"[",
"ca",
".",
"jacobian",
"(",
"self",
".",
"f_x_rhs",
",",
"self",
".",
"x",
")",
"]",
",",
"[",
"'t'",
",",
"'x'",
",",
"'y'",
",",
"'m'",
",",
"'p'",
",",
"'c'",
",",
"'ng'",
",",
"'nu'",
"]",
",",
"[",
"'J'",
"]",
",",
"self",
".",
"func_opt",
")"
]
| Jacobian for state integration | [
"Jacobian",
"for",
"state",
"integration"
]
| python | train |
boris-savic/python-mbills | python_mbills/api.py | https://github.com/boris-savic/python-mbills/blob/a2147810c8c54b9242d9bcc2218622f1e19f9ac3/python_mbills/api.py#L101-L121 | def fetch_transaction_status(self, transaction_id):
"""
Get the transaction current status.
:param transaction_id:
:return:
"""
url = "%s%s%s/status" % (self.api_endpoint, constants.TRANSACTION_STATUS_ENDPOINT, transaction_id)
username = self.base.get_username()
password = self.base.get_password(username=username, request_url=url)
response = requests.get(url, auth=HTTPBasicAuth(username=username, password=password))
if response.status_code == 404:
raise TransactionDoesNotExist('Wrong transaction ID!')
if not self.base.verify_response(response.json()):
raise SignatureValidationException('Server signature verification has failed')
return response.json() | [
"def",
"fetch_transaction_status",
"(",
"self",
",",
"transaction_id",
")",
":",
"url",
"=",
"\"%s%s%s/status\"",
"%",
"(",
"self",
".",
"api_endpoint",
",",
"constants",
".",
"TRANSACTION_STATUS_ENDPOINT",
",",
"transaction_id",
")",
"username",
"=",
"self",
".",
"base",
".",
"get_username",
"(",
")",
"password",
"=",
"self",
".",
"base",
".",
"get_password",
"(",
"username",
"=",
"username",
",",
"request_url",
"=",
"url",
")",
"response",
"=",
"requests",
".",
"get",
"(",
"url",
",",
"auth",
"=",
"HTTPBasicAuth",
"(",
"username",
"=",
"username",
",",
"password",
"=",
"password",
")",
")",
"if",
"response",
".",
"status_code",
"==",
"404",
":",
"raise",
"TransactionDoesNotExist",
"(",
"'Wrong transaction ID!'",
")",
"if",
"not",
"self",
".",
"base",
".",
"verify_response",
"(",
"response",
".",
"json",
"(",
")",
")",
":",
"raise",
"SignatureValidationException",
"(",
"'Server signature verification has failed'",
")",
"return",
"response",
".",
"json",
"(",
")"
]
| Get the transaction current status.
:param transaction_id:
:return: | [
"Get",
"the",
"transaction",
"current",
"status",
"."
]
| python | train |
jpscaletti/solution | solution/_compat.py | https://github.com/jpscaletti/solution/blob/eabafd8e695bbb0209242e002dbcc05ffb327f43/solution/_compat.py#L47-L51 | def itervalues(d, **kw):
"""Return an iterator over the values of a dictionary."""
if not PY2:
return iter(d.values(**kw))
return d.itervalues(**kw) | [
"def",
"itervalues",
"(",
"d",
",",
"*",
"*",
"kw",
")",
":",
"if",
"not",
"PY2",
":",
"return",
"iter",
"(",
"d",
".",
"values",
"(",
"*",
"*",
"kw",
")",
")",
"return",
"d",
".",
"itervalues",
"(",
"*",
"*",
"kw",
")"
]
| Return an iterator over the values of a dictionary. | [
"Return",
"an",
"iterator",
"over",
"the",
"values",
"of",
"a",
"dictionary",
"."
]
| python | train |
tensorflow/tensor2tensor | tensor2tensor/models/video/basic_deterministic_params.py | https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/video/basic_deterministic_params.py#L110-L117 | def next_frame_ae_tiny():
"""Conv autoencoder, tiny set for testing."""
hparams = next_frame_tiny()
hparams.bottom["inputs"] = modalities.video_bitwise_bottom
hparams.top["inputs"] = modalities.video_top
hparams.batch_size = 8
hparams.dropout = 0.4
return hparams | [
"def",
"next_frame_ae_tiny",
"(",
")",
":",
"hparams",
"=",
"next_frame_tiny",
"(",
")",
"hparams",
".",
"bottom",
"[",
"\"inputs\"",
"]",
"=",
"modalities",
".",
"video_bitwise_bottom",
"hparams",
".",
"top",
"[",
"\"inputs\"",
"]",
"=",
"modalities",
".",
"video_top",
"hparams",
".",
"batch_size",
"=",
"8",
"hparams",
".",
"dropout",
"=",
"0.4",
"return",
"hparams"
]
| Conv autoencoder, tiny set for testing. | [
"Conv",
"autoencoder",
"tiny",
"set",
"for",
"testing",
"."
]
| python | train |
kata198/indexedredis | IndexedRedis/__init__.py | https://github.com/kata198/indexedredis/blob/f9c85adcf5218dac25acb06eedc63fc2950816fa/IndexedRedis/__init__.py#L515-L536 | def hasUnsavedChanges(self, cascadeObjects=False):
'''
hasUnsavedChanges - Check if any unsaved changes are present in this model, or if it has never been saved.
@param cascadeObjects <bool> default False, if True will check if any foreign linked objects themselves have unsaved changes (recursively).
Otherwise, will just check if the pk has changed.
@return <bool> - True if any fields have changed since last fetch, or if never saved. Otherwise, False
'''
if not self._id or not self._origData:
return True
for thisField in self.FIELDS:
thisVal = object.__getattribute__(self, thisField)
if self._origData.get(thisField, '') != thisVal:
return True
if cascadeObjects is True and issubclass(thisField.__class__, IRForeignLinkFieldBase):
if thisVal.objHasUnsavedChanges():
return True
return False | [
"def",
"hasUnsavedChanges",
"(",
"self",
",",
"cascadeObjects",
"=",
"False",
")",
":",
"if",
"not",
"self",
".",
"_id",
"or",
"not",
"self",
".",
"_origData",
":",
"return",
"True",
"for",
"thisField",
"in",
"self",
".",
"FIELDS",
":",
"thisVal",
"=",
"object",
".",
"__getattribute__",
"(",
"self",
",",
"thisField",
")",
"if",
"self",
".",
"_origData",
".",
"get",
"(",
"thisField",
",",
"''",
")",
"!=",
"thisVal",
":",
"return",
"True",
"if",
"cascadeObjects",
"is",
"True",
"and",
"issubclass",
"(",
"thisField",
".",
"__class__",
",",
"IRForeignLinkFieldBase",
")",
":",
"if",
"thisVal",
".",
"objHasUnsavedChanges",
"(",
")",
":",
"return",
"True",
"return",
"False"
]
| hasUnsavedChanges - Check if any unsaved changes are present in this model, or if it has never been saved.
@param cascadeObjects <bool> default False, if True will check if any foreign linked objects themselves have unsaved changes (recursively).
Otherwise, will just check if the pk has changed.
@return <bool> - True if any fields have changed since last fetch, or if never saved. Otherwise, False | [
"hasUnsavedChanges",
"-",
"Check",
"if",
"any",
"unsaved",
"changes",
"are",
"present",
"in",
"this",
"model",
"or",
"if",
"it",
"has",
"never",
"been",
"saved",
"."
]
| python | valid |
saltstack/salt | salt/states/boto3_elasticache.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/boto3_elasticache.py#L106-L161 | def _diff_cache_cluster(current, desired):
'''
If you need to enhance what modify_cache_cluster() considers when deciding what is to be
(or can be) updated, add it to 'modifiable' below. It's a dict mapping the param as used
in modify_cache_cluster() to that in describe_cache_clusters(). Any data fiddlery that
needs to be done to make the mappings meaningful should be done in the munging section
below as well.
This function will ONLY touch settings that are explicitly called out in 'desired' - any
settings which might have previously been changed from their 'default' values will not be
changed back simply by leaving them out of 'desired'. This is both intentional, and
much, much easier to code :)
'''
### The data formats are annoyingly (and as far as I can tell, unnecessarily)
### different - we have to munge to a common format to compare...
if current.get('SecurityGroups') is not None:
current['SecurityGroupIds'] = [s['SecurityGroupId'] for s in current['SecurityGroups']]
if current.get('CacheSecurityGroups') is not None:
current['CacheSecurityGroupNames'] = [c['CacheSecurityGroupName'] for c in current['CacheSecurityGroups']]
if current.get('NotificationConfiguration') is not None:
current['NotificationTopicArn'] = current['NotificationConfiguration']['TopicArn']
current['NotificationTopicStatus'] = current['NotificationConfiguration']['TopicStatus']
if current.get('CacheParameterGroup') is not None:
current['CacheParameterGroupName'] = current['CacheParameterGroup']['CacheParameterGroupName']
modifiable = {
'AutoMinorVersionUpgrade': 'AutoMinorVersionUpgrade',
'AZMode': 'AZMode',
'CacheNodeType': 'CacheNodeType',
'CacheNodeIdsToRemove': None,
'CacheParameterGroupName': 'CacheParameterGroupName',
'CacheSecurityGroupNames': 'CacheSecurityGroupNames',
'EngineVersion': 'EngineVersion',
'NewAvailabilityZones': None,
'NotificationTopicArn': 'NotificationTopicArn',
'NotificationTopicStatus': 'NotificationTopicStatus',
'NumCacheNodes': 'NumCacheNodes',
'PreferredMaintenanceWindow': 'PreferredMaintenanceWindow',
'SecurityGroupIds': 'SecurityGroupIds',
'SnapshotRetentionLimit': 'SnapshotRetentionLimit',
'SnapshotWindow': 'SnapshotWindow'
}
need_update = {}
for m, o in modifiable.items():
if m in desired:
if not o:
# Always pass these through - let AWS do the math...
need_update[m] = desired[m]
else:
if m in current:
# Equivalence testing works fine for current simple type comparisons
# This might need enhancement if more complex structures enter the picture
if current[m] != desired[m]:
need_update[m] = desired[m]
return need_update | [
"def",
"_diff_cache_cluster",
"(",
"current",
",",
"desired",
")",
":",
"### The data formats are annoyingly (and as far as I can can tell, unnecessarily)",
"### different - we have to munge to a common format to compare...",
"if",
"current",
".",
"get",
"(",
"'SecurityGroups'",
")",
"is",
"not",
"None",
":",
"current",
"[",
"'SecurityGroupIds'",
"]",
"=",
"[",
"s",
"[",
"'SecurityGroupId'",
"]",
"for",
"s",
"in",
"current",
"[",
"'SecurityGroups'",
"]",
"]",
"if",
"current",
".",
"get",
"(",
"'CacheSecurityGroups'",
")",
"is",
"not",
"None",
":",
"current",
"[",
"'CacheSecurityGroupNames'",
"]",
"=",
"[",
"c",
"[",
"'CacheSecurityGroupName'",
"]",
"for",
"c",
"in",
"current",
"[",
"'CacheSecurityGroups'",
"]",
"]",
"if",
"current",
".",
"get",
"(",
"'NotificationConfiguration'",
")",
"is",
"not",
"None",
":",
"current",
"[",
"'NotificationTopicArn'",
"]",
"=",
"current",
"[",
"'NotificationConfiguration'",
"]",
"[",
"'TopicArn'",
"]",
"current",
"[",
"'NotificationTopicStatus'",
"]",
"=",
"current",
"[",
"'NotificationConfiguration'",
"]",
"[",
"'TopicStatus'",
"]",
"if",
"current",
".",
"get",
"(",
"'CacheParameterGroup'",
")",
"is",
"not",
"None",
":",
"current",
"[",
"'CacheParameterGroupName'",
"]",
"=",
"current",
"[",
"'CacheParameterGroup'",
"]",
"[",
"'CacheParameterGroupName'",
"]",
"modifiable",
"=",
"{",
"'AutoMinorVersionUpgrade'",
":",
"'AutoMinorVersionUpgrade'",
",",
"'AZMode'",
":",
"'AZMode'",
",",
"'CacheNodeType'",
":",
"'CacheNodeType'",
",",
"'CacheNodeIdsToRemove'",
":",
"None",
",",
"'CacheParameterGroupName'",
":",
"'CacheParameterGroupName'",
",",
"'CacheSecurityGroupNames'",
":",
"'CacheSecurityGroupNames'",
",",
"'EngineVersion'",
":",
"'EngineVersion'",
",",
"'NewAvailabilityZones'",
":",
"None",
",",
"'NotificationTopicArn'",
":",
"'NotificationTopicArn'",
",",
"'NotificationTopicStatus'",
":",
"'NotificationTopicStatus'",
",",
"'NumCacheNodes'",
":",
"'NumCacheNodes'",
",",
"'PreferredMaintenanceWindow'",
":",
"'PreferredMaintenanceWindow'",
",",
"'SecurityGroupIds'",
":",
"'SecurityGroupIds'",
",",
"'SnapshotRetentionLimit'",
":",
"'SnapshotRetentionLimit'",
",",
"'SnapshotWindow'",
":",
"'SnapshotWindow'",
"}",
"need_update",
"=",
"{",
"}",
"for",
"m",
",",
"o",
"in",
"modifiable",
".",
"items",
"(",
")",
":",
"if",
"m",
"in",
"desired",
":",
"if",
"not",
"o",
":",
"# Always pass these through - let AWS do the math...",
"need_update",
"[",
"m",
"]",
"=",
"desired",
"[",
"m",
"]",
"else",
":",
"if",
"m",
"in",
"current",
":",
"# Equivalence testing works fine for current simple type comparisons",
"# This might need enhancement if more complex structures enter the picture",
"if",
"current",
"[",
"m",
"]",
"!=",
"desired",
"[",
"m",
"]",
":",
"need_update",
"[",
"m",
"]",
"=",
"desired",
"[",
"m",
"]",
"return",
"need_update"
]
| If you need to enhance what modify_cache_cluster() considers when deciding what is to be
(or can be) updated, add it to 'modifiable' below. It's a dict mapping the param as used
in modify_cache_cluster() to that in describe_cache_clusters(). Any data fiddlery that
needs to be done to make the mappings meaningful should be done in the munging section
below as well.
This function will ONLY touch settings that are explicitly called out in 'desired' - any
settings which might have previously been changed from their 'default' values will not be
changed back simply by leaving them out of 'desired'. This is both intentional, and
much, much easier to code :) | [
"If",
"you",
"need",
"to",
"enhance",
"what",
"modify_cache_cluster",
"()",
"considers",
"when",
"deciding",
"what",
"is",
"to",
"be",
"(",
"or",
"can",
"be",
")",
"updated",
"add",
"it",
"to",
"modifiable",
"below",
".",
"It",
"s",
"a",
"dict",
"mapping",
"the",
"param",
"as",
"used",
"in",
"modify_cache_cluster",
"()",
"to",
"that",
"in",
"describe_cache_clusters",
"()",
".",
"Any",
"data",
"fiddlery",
"that",
"needs",
"to",
"be",
"done",
"to",
"make",
"the",
"mappings",
"meaningful",
"should",
"be",
"done",
"in",
"the",
"munging",
"section",
"below",
"as",
"well",
"."
]
| python | train |
napalm-automation/napalm-ios | napalm_ios/ios.py | https://github.com/napalm-automation/napalm-ios/blob/7bbbc6a4d9f70a5b8cf32b7c7072a7ab437ddb81/napalm_ios/ios.py#L436-L443 | def discard_config(self):
"""Set candidate_cfg to current running-config. Erase the merge_cfg file."""
discard_candidate = 'copy running-config {}'.format(self._gen_full_path(self.candidate_cfg))
discard_merge = 'copy null: {}'.format(self._gen_full_path(self.merge_cfg))
self._disable_confirm()
self.device.send_command_expect(discard_candidate)
self.device.send_command_expect(discard_merge)
self._enable_confirm() | [
"def",
"discard_config",
"(",
"self",
")",
":",
"discard_candidate",
"=",
"'copy running-config {}'",
".",
"format",
"(",
"self",
".",
"_gen_full_path",
"(",
"self",
".",
"candidate_cfg",
")",
")",
"discard_merge",
"=",
"'copy null: {}'",
".",
"format",
"(",
"self",
".",
"_gen_full_path",
"(",
"self",
".",
"merge_cfg",
")",
")",
"self",
".",
"_disable_confirm",
"(",
")",
"self",
".",
"device",
".",
"send_command_expect",
"(",
"discard_candidate",
")",
"self",
".",
"device",
".",
"send_command_expect",
"(",
"discard_merge",
")",
"self",
".",
"_enable_confirm",
"(",
")"
]
| Set candidate_cfg to current running-config. Erase the merge_cfg file. | [
"Set",
"candidate_cfg",
"to",
"current",
"running",
"-",
"config",
".",
"Erase",
"the",
"merge_cfg",
"file",
"."
]
| python | train |
MediaFire/mediafire-python-open-sdk | mediafire/client.py | https://github.com/MediaFire/mediafire-python-open-sdk/blob/8f1f23db1b16f16e026f5c6777aec32d00baa05f/mediafire/client.py#L282-L327 | def create_folder(self, uri, recursive=False):
"""Create folder.
uri -- MediaFire URI
Keyword arguments:
recursive -- set to True to create intermediate folders.
"""
logger.info("Creating %s", uri)
# check that folder exists already
try:
resource = self.get_resource_by_uri(uri)
if isinstance(resource, Folder):
return resource
else:
raise NotAFolderError(uri)
except ResourceNotFoundError:
pass
location = self._parse_uri(uri)
folder_name = posixpath.basename(location)
parent_uri = 'mf://' + posixpath.dirname(location)
try:
parent_node = self.get_resource_by_uri(parent_uri)
if not isinstance(parent_node, Folder):
raise NotAFolderError(parent_uri)
parent_key = parent_node['folderkey']
except ResourceNotFoundError:
if recursive:
result = self.create_folder(parent_uri, recursive=True)
parent_key = result['folderkey']
else:
raise
# We specify exact location, so don't allow duplicates
result = self.api.folder_create(
folder_name, parent_key=parent_key, action_on_duplicate='skip')
logger.info("Created folder '%s' [mf:%s]",
result['name'], result['folder_key'])
return self.get_resource_by_key(result['folder_key']) | [
"def",
"create_folder",
"(",
"self",
",",
"uri",
",",
"recursive",
"=",
"False",
")",
":",
"logger",
".",
"info",
"(",
"\"Creating %s\"",
",",
"uri",
")",
"# check that folder exists already",
"try",
":",
"resource",
"=",
"self",
".",
"get_resource_by_uri",
"(",
"uri",
")",
"if",
"isinstance",
"(",
"resource",
",",
"Folder",
")",
":",
"return",
"resource",
"else",
":",
"raise",
"NotAFolderError",
"(",
"uri",
")",
"except",
"ResourceNotFoundError",
":",
"pass",
"location",
"=",
"self",
".",
"_parse_uri",
"(",
"uri",
")",
"folder_name",
"=",
"posixpath",
".",
"basename",
"(",
"location",
")",
"parent_uri",
"=",
"'mf://'",
"+",
"posixpath",
".",
"dirname",
"(",
"location",
")",
"try",
":",
"parent_node",
"=",
"self",
".",
"get_resource_by_uri",
"(",
"parent_uri",
")",
"if",
"not",
"isinstance",
"(",
"parent_node",
",",
"Folder",
")",
":",
"raise",
"NotAFolderError",
"(",
"parent_uri",
")",
"parent_key",
"=",
"parent_node",
"[",
"'folderkey'",
"]",
"except",
"ResourceNotFoundError",
":",
"if",
"recursive",
":",
"result",
"=",
"self",
".",
"create_folder",
"(",
"parent_uri",
",",
"recursive",
"=",
"True",
")",
"parent_key",
"=",
"result",
"[",
"'folderkey'",
"]",
"else",
":",
"raise",
"# We specify exact location, so don't allow duplicates",
"result",
"=",
"self",
".",
"api",
".",
"folder_create",
"(",
"folder_name",
",",
"parent_key",
"=",
"parent_key",
",",
"action_on_duplicate",
"=",
"'skip'",
")",
"logger",
".",
"info",
"(",
"\"Created folder '%s' [mf:%s]\"",
",",
"result",
"[",
"'name'",
"]",
",",
"result",
"[",
"'folder_key'",
"]",
")",
"return",
"self",
".",
"get_resource_by_key",
"(",
"result",
"[",
"'folder_key'",
"]",
")"
]
| Create folder.
uri -- MediaFire URI
Keyword arguments:
recursive -- set to True to create intermediate folders. | [
"Create",
"folder",
"."
]
| python | train |
csparpa/pyowm | pyowm/pollutionapi30/ozone.py | https://github.com/csparpa/pyowm/blob/cdd59eb72f32f7238624ceef9b2e2329a5ebd472/pyowm/pollutionapi30/ozone.py#L144-L162 | def _to_DOM(self):
"""
Dumps object data to a fully traversable DOM representation of the
object.
:returns: a ``xml.etree.Element`` object
"""
root_node = ET.Element("ozone")
reference_time_node = ET.SubElement(root_node, "reference_time")
reference_time_node.text = str(self._reference_time)
reception_time_node = ET.SubElement(root_node, "reception_time")
reception_time_node.text = str(self._reception_time)
interval_node = ET.SubElement(root_node, "interval")
interval_node.text = str(self._interval)
value_node = ET.SubElement(root_node, "value")
value_node.text = str(self.du_value)
root_node.append(self._location._to_DOM())
return root_node | [
"def",
"_to_DOM",
"(",
"self",
")",
":",
"root_node",
"=",
"ET",
".",
"Element",
"(",
"\"ozone\"",
")",
"reference_time_node",
"=",
"ET",
".",
"SubElement",
"(",
"root_node",
",",
"\"reference_time\"",
")",
"reference_time_node",
".",
"text",
"=",
"str",
"(",
"self",
".",
"_reference_time",
")",
"reception_time_node",
"=",
"ET",
".",
"SubElement",
"(",
"root_node",
",",
"\"reception_time\"",
")",
"reception_time_node",
".",
"text",
"=",
"str",
"(",
"self",
".",
"_reception_time",
")",
"interval_node",
"=",
"ET",
".",
"SubElement",
"(",
"root_node",
",",
"\"interval\"",
")",
"interval_node",
".",
"text",
"=",
"str",
"(",
"self",
".",
"_interval",
")",
"value_node",
"=",
"ET",
".",
"SubElement",
"(",
"root_node",
",",
"\"value\"",
")",
"value_node",
".",
"text",
"=",
"str",
"(",
"self",
".",
"du_value",
")",
"root_node",
".",
"append",
"(",
"self",
".",
"_location",
".",
"_to_DOM",
"(",
")",
")",
"return",
"root_node"
]
| Dumps object data to a fully traversable DOM representation of the
object.
:returns: a ``xml.etree.Element`` object | [
"Dumps",
"object",
"data",
"to",
"a",
"fully",
"traversable",
"DOM",
"representation",
"of",
"the",
"object",
"."
]
| python | train |
edeposit/marcxml_parser | src/marcxml_parser/query.py | https://github.com/edeposit/marcxml_parser/blob/6d1c77c61fc2827b71f1b3d5aa3332d7f5807820/src/marcxml_parser/query.py#L385-L403 | def get_corporations(self, roles=["dst"]):
"""
Args:
roles (list, optional): Specify which types of corporations you
need. Set to ``["any"]`` for any role, ``["dst"]`` for
distributors, etc..
Note:
See http://www.loc.gov/marc/relators/relaterm.html for details.
Returns:
list: :class:`.Corporation` objects specified by roles parameter.
"""
corporations = self._parse_corporations("110", "a", roles)
corporations += self._parse_corporations("610", "a", roles)
corporations += self._parse_corporations("710", "a", roles)
corporations += self._parse_corporations("810", "a", roles)
return corporations | [
"def",
"get_corporations",
"(",
"self",
",",
"roles",
"=",
"[",
"\"dst\"",
"]",
")",
":",
"corporations",
"=",
"self",
".",
"_parse_corporations",
"(",
"\"110\"",
",",
"\"a\"",
",",
"roles",
")",
"corporations",
"+=",
"self",
".",
"_parse_corporations",
"(",
"\"610\"",
",",
"\"a\"",
",",
"roles",
")",
"corporations",
"+=",
"self",
".",
"_parse_corporations",
"(",
"\"710\"",
",",
"\"a\"",
",",
"roles",
")",
"corporations",
"+=",
"self",
".",
"_parse_corporations",
"(",
"\"810\"",
",",
"\"a\"",
",",
"roles",
")",
"return",
"corporations"
]
| Args:
roles (list, optional): Specify which types of corporations you
need. Set to ``["any"]`` for any role, ``["dst"]`` for
distributors, etc..
Note:
See http://www.loc.gov/marc/relators/relaterm.html for details.
Returns:
list: :class:`.Corporation` objects specified by roles parameter. | [
"Args",
":",
"roles",
"(",
"list",
"optional",
")",
":",
"Specify",
"which",
"types",
"of",
"corporations",
"you",
"need",
".",
"Set",
"to",
"[",
"any",
"]",
"for",
"any",
"role",
"[",
"dst",
"]",
"for",
"distributors",
"etc",
".."
]
| python | valid |
dhermes/bezier | src/bezier/curve.py | https://github.com/dhermes/bezier/blob/4f941f82637a8e70a5b159a9203132192e23406b/src/bezier/curve.py#L276-L315 | def subdivide(self):
r"""Split the curve :math:`B(s)` into a left and right half.
Takes the interval :math:`\left[0, 1\right]` and splits the curve into
:math:`B_1 = B\left(\left[0, \frac{1}{2}\right]\right)` and
:math:`B_2 = B\left(\left[\frac{1}{2}, 1\right]\right)`. In
order to do this, also reparameterizes the curve, hence the resulting
left and right halves have new nodes.
.. image:: ../../images/curve_subdivide.png
:align: center
.. doctest:: curve-subdivide
:options: +NORMALIZE_WHITESPACE
>>> nodes = np.asfortranarray([
... [0.0, 1.25, 2.0],
... [0.0, 3.0 , 1.0],
... ])
>>> curve = bezier.Curve(nodes, degree=2)
>>> left, right = curve.subdivide()
>>> left.nodes
array([[0. , 0.625, 1.125],
[0. , 1.5 , 1.75 ]])
>>> right.nodes
array([[1.125, 1.625, 2. ],
[1.75 , 2. , 1. ]])
.. testcleanup:: curve-subdivide
import make_images
make_images.curve_subdivide(curve, left, right)
Returns:
Tuple[Curve, Curve]: The left and right sub-curves.
"""
left_nodes, right_nodes = _curve_helpers.subdivide_nodes(self._nodes)
left = Curve(left_nodes, self._degree, _copy=False)
right = Curve(right_nodes, self._degree, _copy=False)
return left, right | [
"def",
"subdivide",
"(",
"self",
")",
":",
"left_nodes",
",",
"right_nodes",
"=",
"_curve_helpers",
".",
"subdivide_nodes",
"(",
"self",
".",
"_nodes",
")",
"left",
"=",
"Curve",
"(",
"left_nodes",
",",
"self",
".",
"_degree",
",",
"_copy",
"=",
"False",
")",
"right",
"=",
"Curve",
"(",
"right_nodes",
",",
"self",
".",
"_degree",
",",
"_copy",
"=",
"False",
")",
"return",
"left",
",",
"right"
]
| r"""Split the curve :math:`B(s)` into a left and right half.
Takes the interval :math:`\left[0, 1\right]` and splits the curve into
:math:`B_1 = B\left(\left[0, \frac{1}{2}\right]\right)` and
:math:`B_2 = B\left(\left[\frac{1}{2}, 1\right]\right)`. In
order to do this, also reparameterizes the curve, hence the resulting
left and right halves have new nodes.
.. image:: ../../images/curve_subdivide.png
:align: center
.. doctest:: curve-subdivide
:options: +NORMALIZE_WHITESPACE
>>> nodes = np.asfortranarray([
... [0.0, 1.25, 2.0],
... [0.0, 3.0 , 1.0],
... ])
>>> curve = bezier.Curve(nodes, degree=2)
>>> left, right = curve.subdivide()
>>> left.nodes
array([[0. , 0.625, 1.125],
[0. , 1.5 , 1.75 ]])
>>> right.nodes
array([[1.125, 1.625, 2. ],
[1.75 , 2. , 1. ]])
.. testcleanup:: curve-subdivide
import make_images
make_images.curve_subdivide(curve, left, right)
Returns:
Tuple[Curve, Curve]: The left and right sub-curves. | [
"r",
"Split",
"the",
"curve",
":",
"math",
":",
"B",
"(",
"s",
")",
"into",
"a",
"left",
"and",
"right",
"half",
"."
]
| python | train |
geophysics-ubonn/reda | lib/reda/utils/geometric_factors.py | https://github.com/geophysics-ubonn/reda/blob/46a939729e40c7c4723315c03679c40761152e9e/lib/reda/utils/geometric_factors.py#L29-L74 | def compute_K_numerical(dataframe, settings=None, keep_dir=None):
"""Use a finite-element modeling code to infer geometric factors for meshes
with topography or irregular electrode spacings.
Parameters
----------
dataframe : pandas.DataFrame
the data frame that contains the data
settings : dict
The settings required to compute the geometric factors. See examples
down below for more information in the required content.
keep_dir : path
if not None, copy modeling dir here
Returns
-------
K : :class:`numpy.ndarray`
K factors (are also directly written to the dataframe)
Examples
--------
::
settings = {
'rho': 100,
'elem': 'elem.dat',
'elec': 'elec.dat',
'sink_node': '100',
'2D': False,
}
"""
inversion_code = reda.rcParams.get('geom_factor.inversion_code', 'crtomo')
if inversion_code == 'crtomo':
import reda.utils.geom_fac_crtomo as geom_fac_crtomo
if keep_dir is not None:
keep_dir = os.path.abspath(keep_dir)
K = geom_fac_crtomo.compute_K(
dataframe, settings, keep_dir)
else:
raise Exception(
'Inversion code {0} not implemented for K computation'.format(
inversion_code
))
return K | [
"def",
"compute_K_numerical",
"(",
"dataframe",
",",
"settings",
"=",
"None",
",",
"keep_dir",
"=",
"None",
")",
":",
"inversion_code",
"=",
"reda",
".",
"rcParams",
".",
"get",
"(",
"'geom_factor.inversion_code'",
",",
"'crtomo'",
")",
"if",
"inversion_code",
"==",
"'crtomo'",
":",
"import",
"reda",
".",
"utils",
".",
"geom_fac_crtomo",
"as",
"geom_fac_crtomo",
"if",
"keep_dir",
"is",
"not",
"None",
":",
"keep_dir",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"keep_dir",
")",
"K",
"=",
"geom_fac_crtomo",
".",
"compute_K",
"(",
"dataframe",
",",
"settings",
",",
"keep_dir",
")",
"else",
":",
"raise",
"Exception",
"(",
"'Inversion code {0} not implemented for K computation'",
".",
"format",
"(",
"inversion_code",
")",
")",
"return",
"K"
]
| Use a finite-element modeling code to infer geometric factors for meshes
with topography or irregular electrode spacings.
Parameters
----------
dataframe : pandas.DataFrame
the data frame that contains the data
settings : dict
The settings required to compute the geometric factors. See examples
down below for more information in the required content.
keep_dir : path
if not None, copy modeling dir here
Returns
-------
K : :class:`numpy.ndarray`
K factors (are also directly written to the dataframe)
Examples
--------
::
settings = {
'rho': 100,
'elem': 'elem.dat',
'elec': 'elec.dat',
'sink_node': '100',
'2D': False,
} | [
"Use",
"a",
"finite",
"-",
"element",
"modeling",
"code",
"to",
"infer",
"geometric",
"factors",
"for",
"meshes",
"with",
"topography",
"or",
"irregular",
"electrode",
"spacings",
"."
]
| python | train |
koenedaele/skosprovider | skosprovider/registry.py | https://github.com/koenedaele/skosprovider/blob/7304a37953978ca8227febc2d3cc2b2be178f215/skosprovider/registry.py#L37-L55 | def register_provider(self, provider):
'''
Register a :class:`skosprovider.providers.VocabularyProvider`.
:param skosprovider.providers.VocabularyProvider provider: The provider
to register.
:raises RegistryException: A provider with this id or uri has already
been registered.
'''
if provider.get_vocabulary_id() in self.providers:
raise RegistryException(
'A provider with this id has already been registered.'
)
self.providers[provider.get_vocabulary_id()] = provider
if provider.concept_scheme.uri in self.concept_scheme_uri_map:
raise RegistryException(
'A provider with URI %s has already been registered.' % provider.concept_scheme.uri
)
self.concept_scheme_uri_map[provider.concept_scheme.uri] = provider.get_vocabulary_id() | [
"def",
"register_provider",
"(",
"self",
",",
"provider",
")",
":",
"if",
"provider",
".",
"get_vocabulary_id",
"(",
")",
"in",
"self",
".",
"providers",
":",
"raise",
"RegistryException",
"(",
"'A provider with this id has already been registered.'",
")",
"self",
".",
"providers",
"[",
"provider",
".",
"get_vocabulary_id",
"(",
")",
"]",
"=",
"provider",
"if",
"provider",
".",
"concept_scheme",
".",
"uri",
"in",
"self",
".",
"concept_scheme_uri_map",
":",
"raise",
"RegistryException",
"(",
"'A provider with URI %s has already been registered.'",
"%",
"provider",
".",
"concept_scheme",
".",
"uri",
")",
"self",
".",
"concept_scheme_uri_map",
"[",
"provider",
".",
"concept_scheme",
".",
"uri",
"]",
"=",
"provider",
".",
"get_vocabulary_id",
"(",
")"
]
| Register a :class:`skosprovider.providers.VocabularyProvider`.
:param skosprovider.providers.VocabularyProvider provider: The provider
to register.
:raises RegistryException: A provider with this id or uri has already
been registered. | [
"Register",
"a",
":",
"class",
":",
"skosprovider",
".",
"providers",
".",
"VocabularyProvider",
"."
]
| python | valid |
h2non/paco | paco/curry.py | https://github.com/h2non/paco/blob/1e5ef4df317e7cbbcefdf67d8dee28ce90538f3d/paco/curry.py#L8-L143 | def curry(arity_or_fn=None, ignore_kwargs=False, evaluator=None, *args, **kw):
"""
Creates a function that accepts one or more arguments of a function and
either invokes func returning its result if at least arity number of
arguments have been provided, or returns a function that accepts the
remaining function arguments until the function arity is satisfied.
This function is overloaded: you can pass a function or coroutine function
as first argument or an `int` indicating the explicit function arity.
Function arity can be inferred via function signature or explicitly
passed via `arity_or_fn` param.
You can optionally ignore keyword-based arguments as well by passing the
`ignore_kwargs` param with `True` value.
This function can be used as decorator.
Arguments:
arity_or_fn (int|function|coroutinefunction): function arity to curry
or function to curry.
ignore_kwargs (bool): ignore keyword arguments as arity to satisfy
during curry.
evaluator (function): use a custom arity evaluator function.
*args (mixed): mixed variadic arguments for partial function
application.
*kwargs (mixed): keyword variadic arguments for partial function
application.
Raises:
TypeError: if function is not a function or a coroutine function.
Returns:
function or coroutinefunction: function will be returned until all the
function arity is satisfied, where a coroutine function will be
returned instead.
Usage::
# Function signature inferred function arity
@paco.curry
async def task(x, y, z=0):
return x * y + z
await task(4)(4)(z=8)
# => 24
# User defined function arity
@paco.curry(4)
async def task(x, y, *args, **kw):
return x * y + args[0] * args[1]
await task(4)(4)(8)(8)
# => 80
# Ignore keyword arguments from arity
@paco.curry(ignore_kwargs=True)
async def task(x, y, z=0):
return x * y
await task(4)(4)
# => 16
"""
def isvalidarg(x):
return all([
x.kind != x.VAR_KEYWORD,
x.kind != x.VAR_POSITIONAL,
any([
not ignore_kwargs,
ignore_kwargs and x.default == x.empty
])
])
def params(fn):
return inspect.signature(fn).parameters.values()
def infer_arity(fn):
return len([x for x in params(fn) if isvalidarg(x)])
def merge_args(acc, args, kw):
_args, _kw = acc
_args = _args + args
_kw = _kw or {}
_kw.update(kw)
return _args, _kw
def currier(arity, acc, fn, *args, **kw):
"""
Function either continues currying of the arguments
or executes function if desired arguments have been collected.
If function curried is variadic then execution without arguments
will finish currying and trigger the function
"""
# Merge call arguments with accumulated ones
_args, _kw = merge_args(acc, args, kw)
# Get current function call accumulated arity
current_arity = len(args)
# Count keyword params as arity to satisfy, if required
if not ignore_kwargs:
current_arity += len(kw)
# Decrease function arity to satisfy
arity -= current_arity
# Use user-defined custom arity evaluator strategy, if present
currify = evaluator and evaluator(acc, fn)
# If arity is not satisfied, return recursive partial function
if currify is not False and arity > 0:
return functools.partial(currier, arity, (_args, _kw), fn)
# If arity is satisfied, instantiate coroutine and return it
return fn(*_args, **_kw)
def wrapper(fn, *args, **kw):
if not iscallable(fn):
raise TypeError('paco: first argument must a coroutine function, '
'a function or a method.')
# Infer function arity, if required
arity = (arity_or_fn if isinstance(arity_or_fn, int)
else infer_arity(fn))
# Wraps function as coroutine function, if needed.
fn = wraps(fn) if isfunc(fn) else fn
# Otherwise return recursive currier function
return currier(arity, (args, kw), fn, *args, **kw) if arity > 0 else fn
# Return currier function or decorator wrapper
return (wrapper(arity_or_fn, *args, **kw)
if iscallable(arity_or_fn)
else wrapper) | [
"def",
"curry",
"(",
"arity_or_fn",
"=",
"None",
",",
"ignore_kwargs",
"=",
"False",
",",
"evaluator",
"=",
"None",
",",
"*",
"args",
",",
"*",
"*",
"kw",
")",
":",
"def",
"isvalidarg",
"(",
"x",
")",
":",
"return",
"all",
"(",
"[",
"x",
".",
"kind",
"!=",
"x",
".",
"VAR_KEYWORD",
",",
"x",
".",
"kind",
"!=",
"x",
".",
"VAR_POSITIONAL",
",",
"any",
"(",
"[",
"not",
"ignore_kwargs",
",",
"ignore_kwargs",
"and",
"x",
".",
"default",
"==",
"x",
".",
"empty",
"]",
")",
"]",
")",
"def",
"params",
"(",
"fn",
")",
":",
"return",
"inspect",
".",
"signature",
"(",
"fn",
")",
".",
"parameters",
".",
"values",
"(",
")",
"def",
"infer_arity",
"(",
"fn",
")",
":",
"return",
"len",
"(",
"[",
"x",
"for",
"x",
"in",
"params",
"(",
"fn",
")",
"if",
"isvalidarg",
"(",
"x",
")",
"]",
")",
"def",
"merge_args",
"(",
"acc",
",",
"args",
",",
"kw",
")",
":",
"_args",
",",
"_kw",
"=",
"acc",
"_args",
"=",
"_args",
"+",
"args",
"_kw",
"=",
"_kw",
"or",
"{",
"}",
"_kw",
".",
"update",
"(",
"kw",
")",
"return",
"_args",
",",
"_kw",
"def",
"currier",
"(",
"arity",
",",
"acc",
",",
"fn",
",",
"*",
"args",
",",
"*",
"*",
"kw",
")",
":",
"\"\"\"\n Function either continues curring of the arguments\n or executes function if desired arguments have being collected.\n If function curried is variadic then execution without arguments\n will finish curring and trigger the function\n \"\"\"",
"# Merge call arguments with accumulated ones",
"_args",
",",
"_kw",
"=",
"merge_args",
"(",
"acc",
",",
"args",
",",
"kw",
")",
"# Get current function call accumulated arity",
"current_arity",
"=",
"len",
"(",
"args",
")",
"# Count keyword params as arity to satisfy, if required",
"if",
"not",
"ignore_kwargs",
":",
"current_arity",
"+=",
"len",
"(",
"kw",
")",
"# Decrease function arity to satisfy",
"arity",
"-=",
"current_arity",
"# Use user-defined custom arity evaluator strategy, if present",
"currify",
"=",
"evaluator",
"and",
"evaluator",
"(",
"acc",
",",
"fn",
")",
"# If arity is not satisfied, return recursive partial function",
"if",
"currify",
"is",
"not",
"False",
"and",
"arity",
">",
"0",
":",
"return",
"functools",
".",
"partial",
"(",
"currier",
",",
"arity",
",",
"(",
"_args",
",",
"_kw",
")",
",",
"fn",
")",
"# If arity is satisfied, instanciate coroutine and return it",
"return",
"fn",
"(",
"*",
"_args",
",",
"*",
"*",
"_kw",
")",
"def",
"wrapper",
"(",
"fn",
",",
"*",
"args",
",",
"*",
"*",
"kw",
")",
":",
"if",
"not",
"iscallable",
"(",
"fn",
")",
":",
"raise",
"TypeError",
"(",
"'paco: first argument must a coroutine function, '",
"'a function or a method.'",
")",
"# Infer function arity, if required",
"arity",
"=",
"(",
"arity_or_fn",
"if",
"isinstance",
"(",
"arity_or_fn",
",",
"int",
")",
"else",
"infer_arity",
"(",
"fn",
")",
")",
"# Wraps function as coroutine function, if needed.",
"fn",
"=",
"wraps",
"(",
"fn",
")",
"if",
"isfunc",
"(",
"fn",
")",
"else",
"fn",
"# Otherwise return recursive currier function",
"return",
"currier",
"(",
"arity",
",",
"(",
"args",
",",
"kw",
")",
",",
"fn",
",",
"*",
"args",
",",
"*",
"*",
"kw",
")",
"if",
"arity",
">",
"0",
"else",
"fn",
"# Return currier function or decorator wrapper",
"return",
"(",
"wrapper",
"(",
"arity_or_fn",
",",
"*",
"args",
",",
"*",
"*",
"kw",
")",
"if",
"iscallable",
"(",
"arity_or_fn",
")",
"else",
"wrapper",
")"
]
| Creates a function that accepts one or more arguments of a function and
either invokes func returning its result if at least arity number of
arguments have been provided, or returns a function that accepts the
remaining function arguments until the function arity is satisfied.
This function is overloaded: you can pass a function or coroutine function
as first argument or an `int` indicating the explicit function arity.
Function arity can be inferred via function signature or explicitly
passed via `arity_or_fn` param.
You can optionally ignore keyword-based arguments as well by passing the
`ignore_kwargs` param with `True` value.
This function can be used as decorator.
Arguments:
arity_or_fn (int|function|coroutinefunction): function arity to curry
or function to curry.
ignore_kwargs (bool): ignore keyword arguments as arity to satisfy
during curry.
evaluator (function): use a custom arity evaluator function.
*args (mixed): mixed variadic arguments for partial function
application.
*kwargs (mixed): keyword variadic arguments for partial function
application.
Raises:
TypeError: if function is not a function or a coroutine function.
Returns:
function or coroutinefunction: function will be returned until all the
function arity is satisfied, where a coroutine function will be
returned instead.
Usage::
# Function signature inferred function arity
@paco.curry
async def task(x, y, z=0):
return x * y + z
await task(4)(4)(z=8)
# => 24
# User defined function arity
@paco.curry(4)
async def task(x, y, *args, **kw):
return x * y + args[0] * args[1]
await task(4)(4)(8)(8)
# => 80
# Ignore keyword arguments from arity
@paco.curry(ignore_kwargs=True)
async def task(x, y, z=0):
return x * y
await task(4)(4)
# => 16 | [
"Creates",
"a",
"function",
"that",
"accepts",
"one",
"or",
"more",
"arguments",
"of",
"a",
"function",
"and",
"either",
"invokes",
"func",
"returning",
"its",
"result",
"if",
"at",
"least",
"arity",
"number",
"of",
"arguments",
"have",
"been",
"provided",
"or",
"returns",
"a",
"function",
"that",
"accepts",
"the",
"remaining",
"function",
"arguments",
"until",
"the",
"function",
"arity",
"is",
"satisfied",
"."
]
| python | train |
jbloomlab/phydms | phydmslib/models.py | https://github.com/jbloomlab/phydms/blob/9cdebc10bafbe543c552d79486c7f950780ed3c0/phydmslib/models.py#L1281-L1295 | def _update_phi(self):
"""Compute `phi`, `dphi_dbeta`, and `eta` from `g` and `frxy`."""
self.phi = self._compute_empirical_phi(self.beta)
_checkParam('phi', self.phi, self.PARAMLIMITS, self.PARAMTYPES)
self._eta_from_phi()
dbeta = 1.0e-3
self.dphi_dbeta = scipy.misc.derivative(self._compute_empirical_phi,
self.beta, dx=dbeta, n=1, order=5)
dphi_dbeta_halfdx = scipy.misc.derivative(self._compute_empirical_phi,
self.beta, dx=dbeta / 2, n=1, order=5)
assert scipy.allclose(self.dphi_dbeta, dphi_dbeta_halfdx, atol=1e-5,
rtol=1e-4), ("The numerical derivative dphi_dbeta differs "
"considerably in value for step dbeta = {0} and a step "
"half that size, giving values of {1} and {2}.").format(
dbeta, self.dphi_dbeta, dphi_dbeta_halfdx) | [
"def",
"_update_phi",
"(",
"self",
")",
":",
"self",
".",
"phi",
"=",
"self",
".",
"_compute_empirical_phi",
"(",
"self",
".",
"beta",
")",
"_checkParam",
"(",
"'phi'",
",",
"self",
".",
"phi",
",",
"self",
".",
"PARAMLIMITS",
",",
"self",
".",
"PARAMTYPES",
")",
"self",
".",
"_eta_from_phi",
"(",
")",
"dbeta",
"=",
"1.0e-3",
"self",
".",
"dphi_dbeta",
"=",
"scipy",
".",
"misc",
".",
"derivative",
"(",
"self",
".",
"_compute_empirical_phi",
",",
"self",
".",
"beta",
",",
"dx",
"=",
"dbeta",
",",
"n",
"=",
"1",
",",
"order",
"=",
"5",
")",
"dphi_dbeta_halfdx",
"=",
"scipy",
".",
"misc",
".",
"derivative",
"(",
"self",
".",
"_compute_empirical_phi",
",",
"self",
".",
"beta",
",",
"dx",
"=",
"dbeta",
"/",
"2",
",",
"n",
"=",
"1",
",",
"order",
"=",
"5",
")",
"assert",
"scipy",
".",
"allclose",
"(",
"self",
".",
"dphi_dbeta",
",",
"dphi_dbeta_halfdx",
",",
"atol",
"=",
"1e-5",
",",
"rtol",
"=",
"1e-4",
")",
",",
"(",
"\"The numerical derivative dphi_dbeta differs \"",
"\"considerably in value for step dbeta = {0} and a step \"",
"\"half that size, giving values of {1} and {2}.\"",
")",
".",
"format",
"(",
"dbeta",
",",
"self",
".",
"dphi_dbeta",
",",
"dphi_dbeta_halfdx",
")"
]
| Compute `phi`, `dphi_dbeta`, and `eta` from `g` and `frxy`. | [
"Compute",
"phi",
"dphi_dbeta",
"and",
"eta",
"from",
"g",
"and",
"frxy",
"."
]
| python | train |
IdentityPython/pysaml2 | src/saml2/attribute_converter.py | https://github.com/IdentityPython/pysaml2/blob/d3aa78eeb7d37c12688f783cb4db1c7263a14ad6/src/saml2/attribute_converter.py#L335-L358 | def fro(self, statement):
""" Get the attributes and the attribute values.
:param statement: The AttributeStatement.
:return: A dictionary containing attributes and values
"""
if not self.name_format:
return self.fail_safe_fro(statement)
result = {}
for attribute in statement.attribute:
if attribute.name_format and self.name_format and \
attribute.name_format != self.name_format:
continue
try:
(key, val) = self.ava_from(attribute)
except (KeyError, AttributeError):
pass
else:
result[key] = val
return result | [
"def",
"fro",
"(",
"self",
",",
"statement",
")",
":",
"if",
"not",
"self",
".",
"name_format",
":",
"return",
"self",
".",
"fail_safe_fro",
"(",
"statement",
")",
"result",
"=",
"{",
"}",
"for",
"attribute",
"in",
"statement",
".",
"attribute",
":",
"if",
"attribute",
".",
"name_format",
"and",
"self",
".",
"name_format",
"and",
"attribute",
".",
"name_format",
"!=",
"self",
".",
"name_format",
":",
"continue",
"try",
":",
"(",
"key",
",",
"val",
")",
"=",
"self",
".",
"ava_from",
"(",
"attribute",
")",
"except",
"(",
"KeyError",
",",
"AttributeError",
")",
":",
"pass",
"else",
":",
"result",
"[",
"key",
"]",
"=",
"val",
"return",
"result"
]
| Get the attributes and the attribute values.
:param statement: The AttributeStatement.
:return: A dictionary containing attributes and values | [
"Get",
"the",
"attributes",
"and",
"the",
"attribute",
"values",
"."
]
| python | train |
MAVENSDC/cdflib | cdflib/cdfwrite.py | https://github.com/MAVENSDC/cdflib/blob/d237c60e5db67db0f92d96054209c25c4042465c/cdflib/cdfwrite.py#L2304-L2312 | def _update_offset_value(self, f, offset, size, value):
'''
Writes "value" into location "offset" in file "f".
'''
f.seek(offset, 0)
if (size == 8):
f.write(struct.pack('>q', value))
else:
f.write(struct.pack('>i', value)) | [
"def",
"_update_offset_value",
"(",
"self",
",",
"f",
",",
"offset",
",",
"size",
",",
"value",
")",
":",
"f",
".",
"seek",
"(",
"offset",
",",
"0",
")",
"if",
"(",
"size",
"==",
"8",
")",
":",
"f",
".",
"write",
"(",
"struct",
".",
"pack",
"(",
"'>q'",
",",
"value",
")",
")",
"else",
":",
"f",
".",
"write",
"(",
"struct",
".",
"pack",
"(",
"'>i'",
",",
"value",
")",
")"
]
| Writes "value" into location "offset" in file "f". | [
"Writes",
"value",
"into",
"location",
"offset",
"in",
"file",
"f",
"."
]
| python | train |
belbio/bel | bel/lang/ast.py | https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/lang/ast.py#L358-L380 | def subcomponents(self, subcomponents):
"""Generate subcomponents of the BEL subject or object
These subcomponents are used for matching parts of a BEL
subject or Object in the Edgestore.
Args:
AST
subcomponents: Pass an empty list to start a new subcomponents request
Returns:
List[str]: subcomponents of BEL subject or object
"""
for arg in self.args:
if arg.__class__.__name__ == "Function":
subcomponents.append(arg.to_string())
if arg.function_type == "primary":
arg.subcomponents(subcomponents)
else:
subcomponents.append(arg.to_string())
return subcomponents | [
"def",
"subcomponents",
"(",
"self",
",",
"subcomponents",
")",
":",
"for",
"arg",
"in",
"self",
".",
"args",
":",
"if",
"arg",
".",
"__class__",
".",
"__name__",
"==",
"\"Function\"",
":",
"subcomponents",
".",
"append",
"(",
"arg",
".",
"to_string",
"(",
")",
")",
"if",
"arg",
".",
"function_type",
"==",
"\"primary\"",
":",
"arg",
".",
"subcomponents",
"(",
"subcomponents",
")",
"else",
":",
"subcomponents",
".",
"append",
"(",
"arg",
".",
"to_string",
"(",
")",
")",
"return",
"subcomponents"
]
| Generate subcomponents of the BEL subject or object
These subcomponents are used for matching parts of a BEL
subject or Object in the Edgestore.
Args:
AST
subcomponents: Pass an empty list to start a new subcomponents request
Returns:
List[str]: subcomponents of BEL subject or object | [
"Generate",
"subcomponents",
"of",
"the",
"BEL",
"subject",
"or",
"object"
]
| python | train |
tensorflow/datasets | tensorflow_datasets/core/tf_compat.py | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/tf_compat.py#L135-L152 | def _patch_for_tf1_13(tf):
"""Monkey patch tf 1.13 so tfds can use it."""
if not hasattr(tf.io.gfile, "GFile"):
tf.io.gfile.GFile = tf.gfile.GFile
if not hasattr(tf, "nest"):
tf.nest = tf.contrib.framework.nest
if not hasattr(tf.compat, "v2"):
tf.compat.v2 = types.ModuleType("tf.compat.v2")
tf.compat.v2.data = types.ModuleType("tf.compat.v2.data")
from tensorflow.python.data.ops import dataset_ops
tf.compat.v2.data.Dataset = dataset_ops.DatasetV2
if not hasattr(tf.compat.v2.data.Dataset, "output_shapes"):
from tensorflow.python.data.ops import dataset_ops
if hasattr(dataset_ops, "get_legacy_output_shapes"):
tf.compat.v2.data.Dataset.output_shapes = property(
dataset_ops.get_legacy_output_shapes)
tf.compat.v2.data.Dataset.output_types = property(
dataset_ops.get_legacy_output_types) | [
"def",
"_patch_for_tf1_13",
"(",
"tf",
")",
":",
"if",
"not",
"hasattr",
"(",
"tf",
".",
"io",
".",
"gfile",
",",
"\"GFile\"",
")",
":",
"tf",
".",
"io",
".",
"gfile",
".",
"GFile",
"=",
"tf",
".",
"gfile",
".",
"GFile",
"if",
"not",
"hasattr",
"(",
"tf",
",",
"\"nest\"",
")",
":",
"tf",
".",
"nest",
"=",
"tf",
".",
"contrib",
".",
"framework",
".",
"nest",
"if",
"not",
"hasattr",
"(",
"tf",
".",
"compat",
",",
"\"v2\"",
")",
":",
"tf",
".",
"compat",
".",
"v2",
"=",
"types",
".",
"ModuleType",
"(",
"\"tf.compat.v2\"",
")",
"tf",
".",
"compat",
".",
"v2",
".",
"data",
"=",
"types",
".",
"ModuleType",
"(",
"\"tf.compat.v2.data\"",
")",
"from",
"tensorflow",
".",
"python",
".",
"data",
".",
"ops",
"import",
"dataset_ops",
"tf",
".",
"compat",
".",
"v2",
".",
"data",
".",
"Dataset",
"=",
"dataset_ops",
".",
"DatasetV2",
"if",
"not",
"hasattr",
"(",
"tf",
".",
"compat",
".",
"v2",
".",
"data",
".",
"Dataset",
",",
"\"output_shapes\"",
")",
":",
"from",
"tensorflow",
".",
"python",
".",
"data",
".",
"ops",
"import",
"dataset_ops",
"if",
"hasattr",
"(",
"dataset_ops",
",",
"\"get_legacy_output_shapes\"",
")",
":",
"tf",
".",
"compat",
".",
"v2",
".",
"data",
".",
"Dataset",
".",
"output_shapes",
"=",
"property",
"(",
"dataset_ops",
".",
"get_legacy_output_shapes",
")",
"tf",
".",
"compat",
".",
"v2",
".",
"data",
".",
"Dataset",
".",
"output_types",
"=",
"property",
"(",
"dataset_ops",
".",
"get_legacy_output_types",
")"
]
| Monkey patch tf 1.13 so tfds can use it. | [
"Monkey",
"patch",
"tf",
"1",
".",
"13",
"so",
"tfds",
"can",
"use",
"it",
"."
]
| python | train |
ynop/audiomate | audiomate/corpus/io/mailabs.py | https://github.com/ynop/audiomate/blob/61727920b23a708293c3d526fa3000d4de9c6c21/audiomate/corpus/io/mailabs.py#L132-L151 | def load_speaker(corpus, path):
"""
Create a speaker instance for the given path.
"""
base_path, speaker_name = os.path.split(path)
base_path, gender_desc = os.path.split(base_path)
base_path, __ = os.path.split(base_path)
base_path, tag = os.path.split(base_path)
gender = issuers.Gender.UNKNOWN
if gender_desc == 'male':
gender = issuers.Gender.MALE
elif gender_desc == 'female':
gender = issuers.Gender.FEMALE
speaker = issuers.Speaker(speaker_name, gender=gender)
corpus.import_issuers(speaker)
return speaker | [
"def",
"load_speaker",
"(",
"corpus",
",",
"path",
")",
":",
"base_path",
",",
"speaker_name",
"=",
"os",
".",
"path",
".",
"split",
"(",
"path",
")",
"base_path",
",",
"gender_desc",
"=",
"os",
".",
"path",
".",
"split",
"(",
"base_path",
")",
"base_path",
",",
"__",
"=",
"os",
".",
"path",
".",
"split",
"(",
"base_path",
")",
"base_path",
",",
"tag",
"=",
"os",
".",
"path",
".",
"split",
"(",
"base_path",
")",
"gender",
"=",
"issuers",
".",
"Gender",
".",
"UNKNOWN",
"if",
"gender_desc",
"==",
"'male'",
":",
"gender",
"=",
"issuers",
".",
"Gender",
".",
"MALE",
"elif",
"gender_desc",
"==",
"'female'",
":",
"gender",
"=",
"issuers",
".",
"Gender",
".",
"FEMALE",
"speaker",
"=",
"issuers",
".",
"Speaker",
"(",
"speaker_name",
",",
"gender",
"=",
"gender",
")",
"corpus",
".",
"import_issuers",
"(",
"speaker",
")",
"return",
"speaker"
]
| Create a speaker instance for the given path. | [
"Create",
"a",
"speaker",
"instance",
"for",
"the",
"given",
"path",
"."
]
| python | train |
mvn23/pyotgw | pyotgw/pyotgw.py | https://github.com/mvn23/pyotgw/blob/7612378ef4332b250176505af33e7536d6c9da78/pyotgw/pyotgw.py#L458-L500 | async def set_gpio_mode(self, gpio_id, mode, timeout=OTGW_DEFAULT_TIMEOUT):
"""
Configure the functions of the two GPIO pins of the gateway.
The following functions are available:
0 No function, default for both ports on a freshly flashed chip
1 Ground - A permanently low output (0V). Could be used for a
power LED
2 Vcc - A permanently high output (5V). Can be used as a
short-proof power supply for some external circuitry used
by the other GPIO port
3 LED E - An additional LED if you want to present more than 4
LED functions
4 LED F - An additional LED if you want to present more than 5
LED functions
5 Home - Set thermostat to setback temperature when pulled low
6 Away - Set thermostat to setback temperature when pulled high
7 DS1820 (GPIO port B only) - Data line for a DS18S20 or
DS18B20 temperature sensor used to measure the outside
temperature. A 4k7 resistor should be connected between
GPIO port B and Vcc
Return the new mode for the specified gpio, or None on
failure.
This method is a coroutine
"""
if gpio_id in "AB" and mode in range(8):
if mode == 7 and gpio_id != "B":
return None
cmd = globals().get("OTGW_CMD_GPIO_{}".format(gpio_id))
status = {}
ret = await self._wait_for_cmd(cmd, mode, timeout)
if ret is None:
return
ret = int(ret)
var = globals().get("OTGW_GPIO_{}".format(gpio_id))
status[var] = ret
self._update_status(status)
asyncio.ensure_future(
self._poll_gpio(self._protocol.status.get(OTGW_GPIO_A)
or self._protocol.status.get(OTGW_GPIO_B)))
return ret | [
"async",
"def",
"set_gpio_mode",
"(",
"self",
",",
"gpio_id",
",",
"mode",
",",
"timeout",
"=",
"OTGW_DEFAULT_TIMEOUT",
")",
":",
"if",
"gpio_id",
"in",
"\"AB\"",
"and",
"mode",
"in",
"range",
"(",
"8",
")",
":",
"if",
"mode",
"==",
"7",
"and",
"gpio_id",
"!=",
"\"B\"",
":",
"return",
"None",
"cmd",
"=",
"globals",
"(",
")",
".",
"get",
"(",
"\"OTGW_CMD_GPIO_{}\"",
".",
"format",
"(",
"gpio_id",
")",
")",
"status",
"=",
"{",
"}",
"ret",
"=",
"await",
"self",
".",
"_wait_for_cmd",
"(",
"cmd",
",",
"mode",
",",
"timeout",
")",
"if",
"ret",
"is",
"None",
":",
"return",
"ret",
"=",
"int",
"(",
"ret",
")",
"var",
"=",
"globals",
"(",
")",
".",
"get",
"(",
"\"OTGW_GPIO_{}\"",
".",
"format",
"(",
"gpio_id",
")",
")",
"status",
"[",
"var",
"]",
"=",
"ret",
"self",
".",
"_update_status",
"(",
"status",
")",
"asyncio",
".",
"ensure_future",
"(",
"self",
".",
"_poll_gpio",
"(",
"self",
".",
"_protocol",
".",
"status",
".",
"get",
"(",
"OTGW_GPIO_A",
")",
"or",
"self",
".",
"_protocol",
".",
"status",
".",
"get",
"(",
"OTGW_GPIO_B",
")",
")",
")",
"return",
"ret"
]
| Configure the functions of the two GPIO pins of the gateway.
The following functions are available:
0 No function, default for both ports on a freshly flashed chip
1 Ground - A permanently low output (0V). Could be used for a
power LED
2 Vcc - A permanently high output (5V). Can be used as a
short-proof power supply for some external circuitry used
by the other GPIO port
3 LED E - An additional LED if you want to present more than 4
LED functions
4 LED F - An additional LED if you want to present more than 5
LED functions
5 Home - Set thermostat to setback temperature when pulled low
6 Away - Set thermostat to setback temperature when pulled high
7 DS1820 (GPIO port B only) - Data line for a DS18S20 or
DS18B20 temperature sensor used to measure the outside
temperature. A 4k7 resistor should be connected between
GPIO port B and Vcc
Return the new mode for the specified gpio, or None on
failure.
This method is a coroutine | [
"Configure",
"the",
"functions",
"of",
"the",
"two",
"GPIO",
"pins",
"of",
"the",
"gateway",
".",
"The",
"following",
"functions",
"are",
"available",
":"
]
| python | train |
RudolfCardinal/pythonlib | cardinal_pythonlib/psychiatry/drugs.py | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/psychiatry/drugs.py#L1324-L1334 | def drug_name_to_generic(drug_name: str,
unknown_to_default: bool = False,
default: str = None,
include_categories: bool = False) -> str:
"""
Converts a drug name to the name of its generic equivalent.
"""
drug = get_drug(drug_name, include_categories=include_categories)
if drug is not None:
return drug.generic_name
return default if unknown_to_default else drug_name | [
"def",
"drug_name_to_generic",
"(",
"drug_name",
":",
"str",
",",
"unknown_to_default",
":",
"bool",
"=",
"False",
",",
"default",
":",
"str",
"=",
"None",
",",
"include_categories",
":",
"bool",
"=",
"False",
")",
"->",
"str",
":",
"drug",
"=",
"get_drug",
"(",
"drug_name",
",",
"include_categories",
"=",
"include_categories",
")",
"if",
"drug",
"is",
"not",
"None",
":",
"return",
"drug",
".",
"generic_name",
"return",
"default",
"if",
"unknown_to_default",
"else",
"drug_name"
]
| Converts a drug name to the name of its generic equivalent. | [
"Converts",
"a",
"drug",
"name",
"to",
"the",
"name",
"of",
"its",
"generic",
"equivalent",
"."
]
| python | train |
SKA-ScienceDataProcessor/integration-prototype | sip/execution_control/docker_api/sip_docker_swarm/docker_swarm_client.py | https://github.com/SKA-ScienceDataProcessor/integration-prototype/blob/8c8006de6ad71dcd44114b0338780738079c87d4/sip/execution_control/docker_api/sip_docker_swarm/docker_swarm_client.py#L178-L191 | def delete_all_services(self):
"""Removes/stops a service.
Only the manager nodes can delete a service
"""
# Raise an exception if we are not a manager
if not self._manager:
raise RuntimeError('Services can only be deleted '
'on swarm manager nodes')
service_list = self.get_service_list()
for services in service_list:
# Remove all the services
self._api_client.remove_service(services) | [
"def",
"delete_all_services",
"(",
"self",
")",
":",
"# Raise an exception if we are not a manager",
"if",
"not",
"self",
".",
"_manager",
":",
"raise",
"RuntimeError",
"(",
"'Services can only be deleted '",
"'on swarm manager nodes'",
")",
"service_list",
"=",
"self",
".",
"get_service_list",
"(",
")",
"for",
"services",
"in",
"service_list",
":",
"# Remove all the services",
"self",
".",
"_api_client",
".",
"remove_service",
"(",
"services",
")"
]
| Removes/stops a service.
Only the manager nodes can delete a service | [
"Removes",
"/",
"stops",
"a",
"service",
"."
]
| python | train |
aiidateam/aiida-nwchem | aiida_nwchem/tools/dbexporters/tcod_plugins/nwcpymatgen.py | https://github.com/aiidateam/aiida-nwchem/blob/21034e7f8ea8249948065c28030f4b572a6ecf05/aiida_nwchem/tools/dbexporters/tcod_plugins/nwcpymatgen.py#L46-L55 | def get_atom_type_symbol(cls,calc,**kwargs):
"""
Returns a list of atom types. Each atom site MUST occur only
once in this list. List MUST be sorted.
"""
parameters = calc.out.output
dictionary = parameters.get_dict()
if 'basis_set' not in dictionary.keys():
return None
return sorted(dictionary['basis_set'].keys()) | [
"def",
"get_atom_type_symbol",
"(",
"cls",
",",
"calc",
",",
"*",
"*",
"kwargs",
")",
":",
"parameters",
"=",
"calc",
".",
"out",
".",
"output",
"dictionary",
"=",
"parameters",
".",
"get_dict",
"(",
")",
"if",
"'basis_set'",
"not",
"in",
"dictionary",
".",
"keys",
"(",
")",
":",
"return",
"None",
"return",
"sorted",
"(",
"dictionary",
"[",
"'basis_set'",
"]",
".",
"keys",
"(",
")",
")"
]
| Returns a list of atom types. Each atom site MUST occur only
once in this list. List MUST be sorted. | [
"Returns",
"a",
"list",
"of",
"atom",
"types",
".",
"Each",
"atom",
"site",
"MUST",
"occur",
"only",
"once",
"in",
"this",
"list",
".",
"List",
"MUST",
"be",
"sorted",
"."
]
| python | train |
pydata/xarray | xarray/core/indexing.py | https://github.com/pydata/xarray/blob/6d93a95d05bdbfc33fff24064f67d29dd891ab58/xarray/core/indexing.py#L716-L740 | def _combine_indexers(old_key, shape, new_key):
""" Combine two indexers.
Parameters
----------
old_key: ExplicitIndexer
The first indexer for the original array
shape: tuple of ints
Shape of the original array to be indexed by old_key
new_key:
The second indexer for indexing original[old_key]
"""
if not isinstance(old_key, VectorizedIndexer):
old_key = _outer_to_vectorized_indexer(old_key, shape)
if len(old_key.tuple) == 0:
return new_key
new_shape = np.broadcast(*old_key.tuple).shape
if isinstance(new_key, VectorizedIndexer):
new_key = _arrayize_vectorized_indexer(new_key, new_shape)
else:
new_key = _outer_to_vectorized_indexer(new_key, new_shape)
return VectorizedIndexer(tuple(o[new_key.tuple] for o in
np.broadcast_arrays(*old_key.tuple))) | [
"def",
"_combine_indexers",
"(",
"old_key",
",",
"shape",
",",
"new_key",
")",
":",
"if",
"not",
"isinstance",
"(",
"old_key",
",",
"VectorizedIndexer",
")",
":",
"old_key",
"=",
"_outer_to_vectorized_indexer",
"(",
"old_key",
",",
"shape",
")",
"if",
"len",
"(",
"old_key",
".",
"tuple",
")",
"==",
"0",
":",
"return",
"new_key",
"new_shape",
"=",
"np",
".",
"broadcast",
"(",
"*",
"old_key",
".",
"tuple",
")",
".",
"shape",
"if",
"isinstance",
"(",
"new_key",
",",
"VectorizedIndexer",
")",
":",
"new_key",
"=",
"_arrayize_vectorized_indexer",
"(",
"new_key",
",",
"new_shape",
")",
"else",
":",
"new_key",
"=",
"_outer_to_vectorized_indexer",
"(",
"new_key",
",",
"new_shape",
")",
"return",
"VectorizedIndexer",
"(",
"tuple",
"(",
"o",
"[",
"new_key",
".",
"tuple",
"]",
"for",
"o",
"in",
"np",
".",
"broadcast_arrays",
"(",
"*",
"old_key",
".",
"tuple",
")",
")",
")"
]
| Combine two indexers.
Parameters
----------
old_key: ExplicitIndexer
The first indexer for the original array
shape: tuple of ints
Shape of the original array to be indexed by old_key
new_key:
The second indexer for indexing original[old_key] | [
"Combine",
"two",
"indexers",
"."
]
| python | train |
saltstack/salt | salt/modules/state.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/state.py#L411-L422 | def _check_queue(queue, kwargs):
'''
Utility function to queue the state run if requested
and to check for conflicts in currently running states
'''
if queue:
_wait(kwargs.get('__pub_jid'))
else:
conflict = running(concurrent=kwargs.get('concurrent', False))
if conflict:
__context__['retcode'] = salt.defaults.exitcodes.EX_STATE_COMPILER_ERROR
return conflict | [
"def",
"_check_queue",
"(",
"queue",
",",
"kwargs",
")",
":",
"if",
"queue",
":",
"_wait",
"(",
"kwargs",
".",
"get",
"(",
"'__pub_jid'",
")",
")",
"else",
":",
"conflict",
"=",
"running",
"(",
"concurrent",
"=",
"kwargs",
".",
"get",
"(",
"'concurrent'",
",",
"False",
")",
")",
"if",
"conflict",
":",
"__context__",
"[",
"'retcode'",
"]",
"=",
"salt",
".",
"defaults",
".",
"exitcodes",
".",
"EX_STATE_COMPILER_ERROR",
"return",
"conflict"
]
| Utility function to queue the state run if requested
and to check for conflicts in currently running states | [
"Utility",
"function",
"to",
"queue",
"the",
"state",
"run",
"if",
"requested",
"and",
"to",
"check",
"for",
"conflicts",
"in",
"currently",
"running",
"states"
]
| python | train |
pickleshare/pickleshare | pickleshare.py | https://github.com/pickleshare/pickleshare/blob/f7950a9a359774c0190abde8da729b1810bdf3f4/pickleshare.py#L137-L154 | def hget(self, hashroot, key, default = _sentinel, fast_only = True):
""" hashed get """
hroot = self.root / hashroot
hfile = hroot / gethashfile(key)
d = self.get(hfile, _sentinel )
#print "got dict",d,"from",hfile
if d is _sentinel:
if fast_only:
if default is _sentinel:
raise KeyError(key)
return default
# slow mode ok, works even after hcompress()
d = self.hdict(hashroot)
return d.get(key, default) | [
"def",
"hget",
"(",
"self",
",",
"hashroot",
",",
"key",
",",
"default",
"=",
"_sentinel",
",",
"fast_only",
"=",
"True",
")",
":",
"hroot",
"=",
"self",
".",
"root",
"/",
"hashroot",
"hfile",
"=",
"hroot",
"/",
"gethashfile",
"(",
"key",
")",
"d",
"=",
"self",
".",
"get",
"(",
"hfile",
",",
"_sentinel",
")",
"#print \"got dict\",d,\"from\",hfile",
"if",
"d",
"is",
"_sentinel",
":",
"if",
"fast_only",
":",
"if",
"default",
"is",
"_sentinel",
":",
"raise",
"KeyError",
"(",
"key",
")",
"return",
"default",
"# slow mode ok, works even after hcompress()",
"d",
"=",
"self",
".",
"hdict",
"(",
"hashroot",
")",
"return",
"d",
".",
"get",
"(",
"key",
",",
"default",
")"
]
| hashed get | [
"hashed",
"get"
]
| python | train |
arne-cl/discoursegraphs | src/discoursegraphs/discoursegraph.py | https://github.com/arne-cl/discoursegraphs/blob/842f0068a3190be2c75905754521b176b25a54fb/src/discoursegraphs/discoursegraph.py#L1257-L1295 | def select_edges_by(docgraph, layer=None, edge_type=None, data=False):
"""
get all edges with the given edge type and layer.
Parameters
----------
docgraph : DiscourseDocumentGraph
document graph from which the nodes will be extracted
layer : str
name of the layer
edge_type : str
Type of the edges to be extracted (Edge types are defined in the
Enum ``EdgeTypes``).
data : bool
If True, results will include edge attributes.
Returns
-------
edges : generator of str
a container/list of edges (represented as (source node ID, target
node ID) tuples). If data is True, edges are represented as
(source node ID, target node ID, edge attribute dict) tuples.
"""
edge_type_eval = "edge_attribs['edge_type'] == '{}'".format(edge_type)
layer_eval = "'{}' in edge_attribs['layers']".format(layer)
if layer is not None:
if edge_type is not None:
return select_edges(docgraph, data=data,
conditions=[edge_type_eval, layer_eval])
else: # filter by layer, but not by edge type
return select_edges(docgraph, conditions=[layer_eval], data=data)
else: # don't filter layers
if edge_type is not None: # filter by edge type, but not by layer
return select_edges(docgraph, data=data,
conditions=[edge_type_eval])
else: # neither layer, nor edge type is filtered
return docgraph.edges_iter(data=data) | [
"def",
"select_edges_by",
"(",
"docgraph",
",",
"layer",
"=",
"None",
",",
"edge_type",
"=",
"None",
",",
"data",
"=",
"False",
")",
":",
"edge_type_eval",
"=",
"\"edge_attribs['edge_type'] == '{}'\"",
".",
"format",
"(",
"edge_type",
")",
"layer_eval",
"=",
"\"'{}' in edge_attribs['layers']\"",
".",
"format",
"(",
"layer",
")",
"if",
"layer",
"is",
"not",
"None",
":",
"if",
"edge_type",
"is",
"not",
"None",
":",
"return",
"select_edges",
"(",
"docgraph",
",",
"data",
"=",
"data",
",",
"conditions",
"=",
"[",
"edge_type_eval",
",",
"layer_eval",
"]",
")",
"else",
":",
"# filter by layer, but not by edge type",
"return",
"select_edges",
"(",
"docgraph",
",",
"conditions",
"=",
"[",
"layer_eval",
"]",
",",
"data",
"=",
"data",
")",
"else",
":",
"# don't filter layers",
"if",
"edge_type",
"is",
"not",
"None",
":",
"# filter by edge type, but not by layer",
"return",
"select_edges",
"(",
"docgraph",
",",
"data",
"=",
"data",
",",
"conditions",
"=",
"[",
"edge_type_eval",
"]",
")",
"else",
":",
"# neither layer, nor edge type is filtered",
"return",
"docgraph",
".",
"edges_iter",
"(",
"data",
"=",
"data",
")"
]
| get all edges with the given edge type and layer.
Parameters
----------
docgraph : DiscourseDocumentGraph
document graph from which the nodes will be extracted
layer : str
name of the layer
edge_type : str
Type of the edges to be extracted (Edge types are defined in the
Enum ``EdgeTypes``).
data : bool
If True, results will include edge attributes.
Returns
-------
edges : generator of str
a container/list of edges (represented as (source node ID, target
node ID) tuples). If data is True, edges are represented as
(source node ID, target node ID, edge attribute dict) tuples. | [
"get",
"all",
"edges",
"with",
"the",
"given",
"edge",
"type",
"and",
"layer",
"."
]
| python | train |
vtkiorg/vtki | vtki/common.py | https://github.com/vtkiorg/vtki/blob/5ccad7ae6d64a03e9594c9c7474c8aab3eb22dd1/vtki/common.py#L403-L413 | def rotate_z(self, angle):
"""
Rotates mesh about the z-axis.
Parameters
----------
angle : float
Angle in degrees to rotate about the z-axis.
"""
axis_rotation(self.points, angle, inplace=True, axis='z') | [
"def",
"rotate_z",
"(",
"self",
",",
"angle",
")",
":",
"axis_rotation",
"(",
"self",
".",
"points",
",",
"angle",
",",
"inplace",
"=",
"True",
",",
"axis",
"=",
"'z'",
")"
]
| Rotates mesh about the z-axis.
Parameters
----------
angle : float
Angle in degrees to rotate about the z-axis. | [
"Rotates",
"mesh",
"about",
"the",
"z",
"-",
"axis",
"."
]
| python | train |
spyder-ide/spyder | spyder/widgets/mixins.py | https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/widgets/mixins.py#L375-L392 | def get_position(self, subject):
"""Get offset in character for the given subject from the start of
text edit area"""
cursor = self.textCursor()
if subject == 'cursor':
pass
elif subject == 'sol':
cursor.movePosition(QTextCursor.StartOfBlock)
elif subject == 'eol':
cursor.movePosition(QTextCursor.EndOfBlock)
elif subject == 'eof':
cursor.movePosition(QTextCursor.End)
elif subject == 'sof':
cursor.movePosition(QTextCursor.Start)
else:
# Assuming that input argument was already a position
return subject
return cursor.position() | [
"def",
"get_position",
"(",
"self",
",",
"subject",
")",
":",
"cursor",
"=",
"self",
".",
"textCursor",
"(",
")",
"if",
"subject",
"==",
"'cursor'",
":",
"pass",
"elif",
"subject",
"==",
"'sol'",
":",
"cursor",
".",
"movePosition",
"(",
"QTextCursor",
".",
"StartOfBlock",
")",
"elif",
"subject",
"==",
"'eol'",
":",
"cursor",
".",
"movePosition",
"(",
"QTextCursor",
".",
"EndOfBlock",
")",
"elif",
"subject",
"==",
"'eof'",
":",
"cursor",
".",
"movePosition",
"(",
"QTextCursor",
".",
"End",
")",
"elif",
"subject",
"==",
"'sof'",
":",
"cursor",
".",
"movePosition",
"(",
"QTextCursor",
".",
"Start",
")",
"else",
":",
"# Assuming that input argument was already a position\r",
"return",
"subject",
"return",
"cursor",
".",
"position",
"(",
")"
]
| Get offset in character for the given subject from the start of
text edit area | [
"Get",
"offset",
"in",
"character",
"for",
"the",
"given",
"subject",
"from",
"the",
"start",
"of",
"text",
"edit",
"area"
]
| python | train |
Pytwitcher/pytwitcherapi | src/pytwitcherapi/models.py | https://github.com/Pytwitcher/pytwitcherapi/blob/d53ac5ad5ca113ecb7da542e8cdcbbf8c762b336/src/pytwitcherapi/models.py#L375-L390 | def wrap_json(cls, json):
"""Create a User instance for the given json
:param json: the dict with the information of the user
:type json: :class:`dict` | None
:returns: the new user instance
:rtype: :class:`User`
:raises: None
"""
u = User(usertype=json['type'],
name=json['name'],
logo=json['logo'],
twitchid=json['_id'],
displayname=json['display_name'],
bio=json['bio'])
return u | [
"def",
"wrap_json",
"(",
"cls",
",",
"json",
")",
":",
"u",
"=",
"User",
"(",
"usertype",
"=",
"json",
"[",
"'type'",
"]",
",",
"name",
"=",
"json",
"[",
"'name'",
"]",
",",
"logo",
"=",
"json",
"[",
"'logo'",
"]",
",",
"twitchid",
"=",
"json",
"[",
"'_id'",
"]",
",",
"displayname",
"=",
"json",
"[",
"'display_name'",
"]",
",",
"bio",
"=",
"json",
"[",
"'bio'",
"]",
")",
"return",
"u"
]
| Create a User instance for the given json
:param json: the dict with the information of the user
:type json: :class:`dict` | None
:returns: the new user instance
:rtype: :class:`User`
:raises: None | [
"Create",
"a",
"User",
"instance",
"for",
"the",
"given",
"json"
]
| python | train |
boriel/zxbasic | zxbparser.py | https://github.com/boriel/zxbasic/blob/23b28db10e41117805bdb3c0f78543590853b132/zxbparser.py#L2226-L2229 | def p_expr_mod_expr(p):
""" expr : expr MOD expr
"""
p[0] = make_binary(p.lineno(2), 'MOD', p[1], p[3], lambda x, y: x % y) | [
"def",
"p_expr_mod_expr",
"(",
"p",
")",
":",
"p",
"[",
"0",
"]",
"=",
"make_binary",
"(",
"p",
".",
"lineno",
"(",
"2",
")",
",",
"'MOD'",
",",
"p",
"[",
"1",
"]",
",",
"p",
"[",
"3",
"]",
",",
"lambda",
"x",
",",
"y",
":",
"x",
"%",
"y",
")"
]
| expr : expr MOD expr | [
"expr",
":",
"expr",
"MOD",
"expr"
]
| python | train |
CitrineInformatics/python-citrination-client | citrination_client/models/client.py | https://github.com/CitrineInformatics/python-citrination-client/blob/409984fc65ce101a620f069263f155303492465c/citrination_client/models/client.py#L117-L126 | def _data_analysis(self, data_view_id):
"""
Data analysis endpoint.
:param data_view_id: The model identifier (id number for data views)
:type data_view_id: str
:return: dictionary containing information about the data, e.g. dCorr and tsne
"""
failure_message = "Error while retrieving data analysis for data view {}".format(data_view_id)
return self._get_success_json(self._get(routes.data_analysis(data_view_id), failure_message=failure_message)) | [
"def",
"_data_analysis",
"(",
"self",
",",
"data_view_id",
")",
":",
"failure_message",
"=",
"\"Error while retrieving data analysis for data view {}\"",
".",
"format",
"(",
"data_view_id",
")",
"return",
"self",
".",
"_get_success_json",
"(",
"self",
".",
"_get",
"(",
"routes",
".",
"data_analysis",
"(",
"data_view_id",
")",
",",
"failure_message",
"=",
"failure_message",
")",
")"
]
| Data analysis endpoint.
:param data_view_id: The model identifier (id number for data views)
:type data_view_id: str
:return: dictionary containing information about the data, e.g. dCorr and tsne | [
"Data",
"analysis",
"endpoint",
"."
]
| python | valid |
swimlane/swimlane-python | swimlane/core/fields/base/field.py | https://github.com/swimlane/swimlane-python/blob/588fc503a76799bcdb5aecdf2f64a6ee05e3922d/swimlane/core/fields/base/field.py#L57-L68 | def get_report(self, value):
"""Return provided field Python value formatted for use in report filter"""
if self.multiselect:
value = value or []
children = []
for child in value:
children.append(self.cast_to_report(child))
return children
return self.cast_to_report(value) | [
"def",
"get_report",
"(",
"self",
",",
"value",
")",
":",
"if",
"self",
".",
"multiselect",
":",
"value",
"=",
"value",
"or",
"[",
"]",
"children",
"=",
"[",
"]",
"for",
"child",
"in",
"value",
":",
"children",
".",
"append",
"(",
"self",
".",
"cast_to_report",
"(",
"child",
")",
")",
"return",
"children",
"return",
"self",
".",
"cast_to_report",
"(",
"value",
")"
]
| Return provided field Python value formatted for use in report filter | [
"Return",
"provided",
"field",
"Python",
"value",
"formatted",
"for",
"use",
"in",
"report",
"filter"
]
| python | train |
pywbem/pywbem | pywbem/mof_compiler.py | https://github.com/pywbem/pywbem/blob/e54ecb82c2211e289a268567443d60fdd489f1e4/pywbem/mof_compiler.py#L1128-L1136 | def p_parameter_3(p):
"""parameter_3 : objectRef parameterName
| objectRef parameterName array
"""
args = {}
if len(p) == 4:
args['is_array'] = True
args['array_size'] = p[3]
p[0] = CIMParameter(p[2], 'reference', reference_class=p[1], **args) | [
"def",
"p_parameter_3",
"(",
"p",
")",
":",
"args",
"=",
"{",
"}",
"if",
"len",
"(",
"p",
")",
"==",
"4",
":",
"args",
"[",
"'is_array'",
"]",
"=",
"True",
"args",
"[",
"'array_size'",
"]",
"=",
"p",
"[",
"3",
"]",
"p",
"[",
"0",
"]",
"=",
"CIMParameter",
"(",
"p",
"[",
"2",
"]",
",",
"'reference'",
",",
"reference_class",
"=",
"p",
"[",
"1",
"]",
",",
"*",
"*",
"args",
")"
]
| parameter_3 : objectRef parameterName
| objectRef parameterName array | [
"parameter_3",
":",
"objectRef",
"parameterName",
"|",
"objectRef",
"parameterName",
"array"
]
| python | train |
tkf/rash | rash/record.py | https://github.com/tkf/rash/blob/585da418ec37dd138f1a4277718b6f507e9536a2/rash/record.py#L86-L99 | def generate_session_id(data):
"""
Generate session ID based on HOST, TTY, PID [#]_ and start time.
:type data: dict
:rtype: str
.. [#] PID of the shell, i.e., PPID of this Python process.
"""
host = data['environ']['HOST']
tty = data['environ'].get('TTY') or 'NO_TTY'
return ':'.join(map(str, [
host, tty, os.getppid(), data['start']])) | [
"def",
"generate_session_id",
"(",
"data",
")",
":",
"host",
"=",
"data",
"[",
"'environ'",
"]",
"[",
"'HOST'",
"]",
"tty",
"=",
"data",
"[",
"'environ'",
"]",
".",
"get",
"(",
"'TTY'",
")",
"or",
"'NO_TTY'",
"return",
"':'",
".",
"join",
"(",
"map",
"(",
"str",
",",
"[",
"host",
",",
"tty",
",",
"os",
".",
"getppid",
"(",
")",
",",
"data",
"[",
"'start'",
"]",
"]",
")",
")"
]
| Generate session ID based on HOST, TTY, PID [#]_ and start time.
:type data: dict
:rtype: str
.. [#] PID of the shell, i.e., PPID of this Python process. | [
"Generate",
"session",
"ID",
"based",
"on",
"HOST",
"TTY",
"PID",
"[",
"#",
"]",
"_",
"and",
"start",
"time",
"."
]
| python | train |
gabfl/dbschema | src/schema_change.py | https://github.com/gabfl/dbschema/blob/37722e6654e9f0374fac5518ebdca22f4c39f92f/src/schema_change.py#L221-L242 | def get_migrations_applied(engine, connection):
""" Get list of migrations already applied """
try:
# Get cursor based on engine
if engine == 'postgresql':
cursor = connection.cursor(
cursor_factory=psycopg2.extras.RealDictCursor)
else:
cursor = connection.cursor()
sql = "SELECT id, name, date FROM migrations_applied"
cursor.execute(sql)
rows = cursor.fetchall()
# print (rows);
return rows
except psycopg2.ProgrammingError:
raise RuntimeError(
'The table `migrations_applied` is missing. Please refer to the project documentation at https://github.com/gabfl/dbschema.')
except pymysql.err.ProgrammingError:
raise RuntimeError(
'The table `migrations_applied` is missing. Please refer to the project documentation at https://github.com/gabfl/dbschema.') | [
"def",
"get_migrations_applied",
"(",
"engine",
",",
"connection",
")",
":",
"try",
":",
"# Get cursor based on engine",
"if",
"engine",
"==",
"'postgresql'",
":",
"cursor",
"=",
"connection",
".",
"cursor",
"(",
"cursor_factory",
"=",
"psycopg2",
".",
"extras",
".",
"RealDictCursor",
")",
"else",
":",
"cursor",
"=",
"connection",
".",
"cursor",
"(",
")",
"sql",
"=",
"\"SELECT id, name, date FROM migrations_applied\"",
"cursor",
".",
"execute",
"(",
"sql",
")",
"rows",
"=",
"cursor",
".",
"fetchall",
"(",
")",
"# print (rows);",
"return",
"rows",
"except",
"psycopg2",
".",
"ProgrammingError",
":",
"raise",
"RuntimeError",
"(",
"'The table `migrations_applied` is missing. Please refer to the project documentation at https://github.com/gabfl/dbschema.'",
")",
"except",
"pymysql",
".",
"err",
".",
"ProgrammingError",
":",
"raise",
"RuntimeError",
"(",
"'The table `migrations_applied` is missing. Please refer to the project documentation at https://github.com/gabfl/dbschema.'",
")"
]
| Get list of migrations already applied | [
"Get",
"list",
"of",
"migrations",
"already",
"applied"
]
| python | train |
quantopian/zipline | zipline/utils/argcheck.py | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/utils/argcheck.py#L131-L140 | def _expect_extra(expected, present, exc_unexpected, exc_missing, exc_args):
"""
Checks for the presence of an extra to the argument list. Raises exceptions
if this is unexpected or if it is missing and expected.
"""
if present:
if not expected:
raise exc_unexpected(*exc_args)
elif expected and expected is not Argument.ignore:
raise exc_missing(*exc_args) | [
"def",
"_expect_extra",
"(",
"expected",
",",
"present",
",",
"exc_unexpected",
",",
"exc_missing",
",",
"exc_args",
")",
":",
"if",
"present",
":",
"if",
"not",
"expected",
":",
"raise",
"exc_unexpected",
"(",
"*",
"exc_args",
")",
"elif",
"expected",
"and",
"expected",
"is",
"not",
"Argument",
".",
"ignore",
":",
"raise",
"exc_missing",
"(",
"*",
"exc_args",
")"
]
| Checks for the presence of an extra to the argument list. Raises exceptions
if this is unexpected or if it is missing and expected. | [
"Checks",
"for",
"the",
"presence",
"of",
"an",
"extra",
"to",
"the",
"argument",
"list",
".",
"Raises",
"expections",
"if",
"this",
"is",
"unexpected",
"or",
"if",
"it",
"is",
"missing",
"and",
"expected",
"."
]
| python | train |
sernst/cauldron | cauldron/runner/markdown_file.py | https://github.com/sernst/cauldron/blob/4086aec9c038c402ea212c79fe8bd0d27104f9cf/cauldron/runner/markdown_file.py#L6-L32 | def run(
project: 'projects.Project',
step: 'projects.ProjectStep'
) -> dict:
"""
Runs the markdown file and renders the contents to the notebook display
:param project:
:param step:
:return:
A run response dictionary containing
"""
with open(step.source_path, 'r') as f:
code = f.read()
try:
cauldron.display.markdown(code, **project.shared.fetch(None))
return {'success': True}
except Exception as err:
return dict(
success=False,
html_message=templating.render_template(
'markdown-error.html',
error=err
)
) | [
"def",
"run",
"(",
"project",
":",
"'projects.Project'",
",",
"step",
":",
"'projects.ProjectStep'",
")",
"->",
"dict",
":",
"with",
"open",
"(",
"step",
".",
"source_path",
",",
"'r'",
")",
"as",
"f",
":",
"code",
"=",
"f",
".",
"read",
"(",
")",
"try",
":",
"cauldron",
".",
"display",
".",
"markdown",
"(",
"code",
",",
"*",
"*",
"project",
".",
"shared",
".",
"fetch",
"(",
"None",
")",
")",
"return",
"{",
"'success'",
":",
"True",
"}",
"except",
"Exception",
"as",
"err",
":",
"return",
"dict",
"(",
"success",
"=",
"False",
",",
"html_message",
"=",
"templating",
".",
"render_template",
"(",
"'markdown-error.html'",
",",
"error",
"=",
"err",
")",
")"
]
| Runs the markdown file and renders the contents to the notebook display
:param project:
:param step:
:return:
A run response dictionary containing | [
"Runs",
"the",
"markdown",
"file",
"and",
"renders",
"the",
"contents",
"to",
"the",
"notebook",
"display"
]
| python | train |
SoftwareDefinedBuildings/XBOS | apps/Data_quality_analysis/Clean_Data.py | https://github.com/SoftwareDefinedBuildings/XBOS/blob/c12d4fb14518ea3ae98c471c28e0710fdf74dd25/apps/Data_quality_analysis/Clean_Data.py#L907-L946 | def count_if(self, data, condition, val, output="number"):
""" Count the number of values that match the condition.
Parameters
----------
data : pd.DataFrame()
Input dataframe.
condition : str
Condition to match.
val : float
Value to compare against.
output : str
String indicating the output of function (number or percent)
Returns
-------
int/float
Count of values that match the condition (int or float)
"""
if condition == "=":
count = self._find_equal_to_values(data,val).sum()
elif condition == ">":
count = self._find_greater_than_values(data,val).sum()
elif condition == "<":
count = self._find_less_than_values(data,val).sum()
elif condition == ">=":
count = self._find_greater_than_or_equal_to_values(data,val).sum()
elif condition == "<=":
count = self._find_less_than_or_equal_to_values(data,val).sum()
elif condition == "!=":
count = self._find_different_from_values(data,val).sum()
if output == "number":
return count
elif output == "percent":
return count/data.shape[0]*1.0*100
return count | [
"def",
"count_if",
"(",
"self",
",",
"data",
",",
"condition",
",",
"val",
",",
"output",
"=",
"\"number\"",
")",
":",
"if",
"condition",
"==",
"\"=\"",
":",
"count",
"=",
"self",
".",
"_find_equal_to_values",
"(",
"data",
",",
"val",
")",
".",
"sum",
"(",
")",
"elif",
"condition",
"==",
"\">\"",
":",
"count",
"=",
"self",
".",
"_find_greater_than_values",
"(",
"data",
",",
"val",
")",
".",
"sum",
"(",
")",
"elif",
"condition",
"==",
"\"<\"",
":",
"count",
"=",
"self",
".",
"_find_less_than_values",
"(",
"data",
",",
"val",
")",
".",
"sum",
"(",
")",
"elif",
"condition",
"==",
"\">=\"",
":",
"count",
"=",
"self",
".",
"_find_greater_than_or_equal_to_values",
"(",
"data",
",",
"val",
")",
".",
"sum",
"(",
")",
"elif",
"condition",
"==",
"\"<=\"",
":",
"count",
"=",
"self",
".",
"_find_less_than_or_equal_to_values",
"(",
"data",
",",
"val",
")",
".",
"sum",
"(",
")",
"elif",
"condition",
"==",
"\"!=\"",
":",
"count",
"=",
"self",
".",
"_find_different_from_values",
"(",
"data",
",",
"val",
")",
".",
"sum",
"(",
")",
"if",
"output",
"==",
"\"number\"",
":",
"return",
"count",
"elif",
"output",
"==",
"\"percent\"",
":",
"return",
"count",
"/",
"data",
".",
"shape",
"[",
"0",
"]",
"*",
"1.0",
"*",
"100",
"return",
"count"
]
| Count the number of values that match the condition.
Parameters
----------
data : pd.DataFrame()
Input dataframe.
condition : str
Condition to match.
val : float
Value to compare against.
output : str
String indicating the output of function (number or percent)
Returns
-------
int/float
Count of values that match the condition (int or float) | [
"Count",
"the",
"number",
"of",
"values",
"that",
"match",
"the",
"condition",
".",
"Parameters",
"----------",
"data",
":",
"pd",
".",
"DataFrame",
"()",
"Input",
"dataframe",
".",
"condition",
":",
"str",
"Condition",
"to",
"match",
".",
"val",
":",
"float",
"Value",
"to",
"compare",
"against",
".",
"output",
":",
"str",
"Sting",
"indicating",
"the",
"output",
"of",
"function",
"(",
"number",
"or",
"percent",
")"
]
| python | train |
flowersteam/explauto | explauto/sensorimotor_model/inverse/cma.py | https://github.com/flowersteam/explauto/blob/cf0f81ecb9f6412f7276a95bd27359000e1e26b6/explauto/sensorimotor_model/inverse/cma.py#L8411-L8423 | def fun_as_arg(self, x, *args):
"""``fun_as_arg(x, fun, *more_args)`` calls ``fun(x, *more_args)``.
Use case::
fmin(cma.fun_as_arg, args=(fun,), gradf=grad_numerical)
calls fun_as_args(x, args) and grad_numerical(x, fun, args=args)
"""
fun = args[0]
more_args = args[1:] if len(args) > 1 else ()
return fun(x, *more_args) | [
"def",
"fun_as_arg",
"(",
"self",
",",
"x",
",",
"*",
"args",
")",
":",
"fun",
"=",
"args",
"[",
"0",
"]",
"more_args",
"=",
"args",
"[",
"1",
":",
"]",
"if",
"len",
"(",
"args",
")",
">",
"1",
"else",
"(",
")",
"return",
"fun",
"(",
"x",
",",
"*",
"more_args",
")"
]
| ``fun_as_arg(x, fun, *more_args)`` calls ``fun(x, *more_args)``.
Use case::
fmin(cma.fun_as_arg, args=(fun,), gradf=grad_numerical)
calls fun_as_args(x, args) and grad_numerical(x, fun, args=args) | [
"fun_as_arg",
"(",
"x",
"fun",
"*",
"more_args",
")",
"calls",
"fun",
"(",
"x",
"*",
"more_args",
")",
"."
]
| python | train |
CalebBell/thermo | thermo/chemical.py | https://github.com/CalebBell/thermo/blob/3857ed023a3e64fd3039a32d53576c24990ef1c3/thermo/chemical.py#L1314-L1330 | def legal_status(self):
r'''Dictionary of legal status indicators for the chemical.
Examples
--------
>>> pprint(Chemical('benzene').legal_status)
{'DSL': 'LISTED',
'EINECS': 'LISTED',
'NLP': 'UNLISTED',
'SPIN': 'LISTED',
'TSCA': 'LISTED'}
'''
if self.__legal_status:
return self.__legal_status
else:
self.__legal_status = legal_status(self.CAS, Method='COMBINED')
return self.__legal_status | [
"def",
"legal_status",
"(",
"self",
")",
":",
"if",
"self",
".",
"__legal_status",
":",
"return",
"self",
".",
"__legal_status",
"else",
":",
"self",
".",
"__legal_status",
"=",
"legal_status",
"(",
"self",
".",
"CAS",
",",
"Method",
"=",
"'COMBINED'",
")",
"return",
"self",
".",
"__legal_status"
]
| r'''Dictionary of legal status indicators for the chemical.
Examples
--------
>>> pprint(Chemical('benzene').legal_status)
{'DSL': 'LISTED',
'EINECS': 'LISTED',
'NLP': 'UNLISTED',
'SPIN': 'LISTED',
'TSCA': 'LISTED'} | [
"r",
"Dictionary",
"of",
"legal",
"status",
"indicators",
"for",
"the",
"chemical",
"."
]
| python | valid |
senaite/senaite.core | bika/lims/api/__init__.py | https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/api/__init__.py#L968-L981 | def get_group(group_or_groupname):
"""Return Plone Group
:param group_or_groupname: Plone group or the name of the group
:type groupname: GroupData/str
:returns: Plone GroupData
"""
if not group_or_groupname:
return None
if hasattr(group_or_groupname, "_getGroup"):
return group_or_groupname
gtool = get_tool("portal_groups")
return gtool.getGroupById(group_or_groupname) | [
"def",
"get_group",
"(",
"group_or_groupname",
")",
":",
"if",
"not",
"group_or_groupname",
":",
"return",
"None",
"if",
"hasattr",
"(",
"group_or_groupname",
",",
"\"_getGroup\"",
")",
":",
"return",
"group_or_groupname",
"gtool",
"=",
"get_tool",
"(",
"\"portal_groups\"",
")",
"return",
"gtool",
".",
"getGroupById",
"(",
"group_or_groupname",
")"
]
| Return Plone Group
:param group_or_groupname: Plone group or the name of the group
:type groupname: GroupData/str
:returns: Plone GroupData | [
"Return",
"Plone",
"Group"
]
| python | train |
cmcqueen/simplerandom | python/python2/simplerandom/random/_random_py.py | https://github.com/cmcqueen/simplerandom/blob/3f19ffdfeaa8256986adf7173f08c1c719164d01/python/python2/simplerandom/random/_random_py.py#L65-L68 | def setbpf(self, bpf):
"""Set number of bits per float output"""
self._bpf = min(bpf, self.BPF)
self._rng_n = int((self._bpf + self.RNG_RANGE_BITS - 1) / self.RNG_RANGE_BITS) | [
"def",
"setbpf",
"(",
"self",
",",
"bpf",
")",
":",
"self",
".",
"_bpf",
"=",
"min",
"(",
"bpf",
",",
"self",
".",
"BPF",
")",
"self",
".",
"_rng_n",
"=",
"int",
"(",
"(",
"self",
".",
"_bpf",
"+",
"self",
".",
"RNG_RANGE_BITS",
"-",
"1",
")",
"/",
"self",
".",
"RNG_RANGE_BITS",
")"
]
| Set number of bits per float output | [
"Set",
"number",
"of",
"bits",
"per",
"float",
"output"
]
| python | train |
StanfordBioinformatics/loom | client/loomengine/playbooks/gce.py | https://github.com/StanfordBioinformatics/loom/blob/db2031a1a87124fee1aeb7414a668c03d774a698/client/loomengine/playbooks/gce.py#L198-L254 | def get_config(self):
"""
Reads the settings from the gce.ini file.
Populates a SafeConfigParser object with defaults and
attempts to read an .ini-style configuration from the filename
specified in GCE_INI_PATH. If the environment variable is
not present, the filename defaults to gce.ini in the current
working directory.
"""
gce_ini_default_path = os.path.join(
os.path.dirname(os.path.realpath(__file__)), "gce.ini")
gce_ini_path = os.environ.get('GCE_INI_PATH', gce_ini_default_path)
# Create a ConfigParser.
# This provides empty defaults to each key, so that environment
# variable configuration (as opposed to INI configuration) is able
# to work.
config = ConfigParser.SafeConfigParser(defaults={
'gce_service_account_email_address': '',
'gce_service_account_pem_file_path': '',
'gce_project_id': '',
'libcloud_secrets': '',
'inventory_ip_type': '',
'cache_path': '~/.ansible/tmp',
'cache_max_age': '300'
})
if 'gce' not in config.sections():
config.add_section('gce')
if 'inventory' not in config.sections():
config.add_section('inventory')
if 'cache' not in config.sections():
config.add_section('cache')
config.read(gce_ini_path)
#########
# Section added for processing ini settings
#########
# Set the instance_states filter based on config file options
self.instance_states = []
if config.has_option('gce', 'instance_states'):
states = config.get('gce', 'instance_states')
# Ignore if instance_states is an empty string.
if states:
self.instance_states = states.split(',')
# Caching
cache_path = config.get('cache', 'cache_path')
cache_max_age = config.getint('cache', 'cache_max_age')
# TODO(supertom): support project-specific caches
cache_name = 'ansible-gce.cache'
self.cache = CloudInventoryCache(cache_path=cache_path,
cache_max_age=cache_max_age,
cache_name=cache_name)
return config | [
"def",
"get_config",
"(",
"self",
")",
":",
"gce_ini_default_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"os",
".",
"path",
".",
"realpath",
"(",
"__file__",
")",
")",
",",
"\"gce.ini\"",
")",
"gce_ini_path",
"=",
"os",
".",
"environ",
".",
"get",
"(",
"'GCE_INI_PATH'",
",",
"gce_ini_default_path",
")",
"# Create a ConfigParser.",
"# This provides empty defaults to each key, so that environment",
"# variable configuration (as opposed to INI configuration) is able",
"# to work.",
"config",
"=",
"ConfigParser",
".",
"SafeConfigParser",
"(",
"defaults",
"=",
"{",
"'gce_service_account_email_address'",
":",
"''",
",",
"'gce_service_account_pem_file_path'",
":",
"''",
",",
"'gce_project_id'",
":",
"''",
",",
"'libcloud_secrets'",
":",
"''",
",",
"'inventory_ip_type'",
":",
"''",
",",
"'cache_path'",
":",
"'~/.ansible/tmp'",
",",
"'cache_max_age'",
":",
"'300'",
"}",
")",
"if",
"'gce'",
"not",
"in",
"config",
".",
"sections",
"(",
")",
":",
"config",
".",
"add_section",
"(",
"'gce'",
")",
"if",
"'inventory'",
"not",
"in",
"config",
".",
"sections",
"(",
")",
":",
"config",
".",
"add_section",
"(",
"'inventory'",
")",
"if",
"'cache'",
"not",
"in",
"config",
".",
"sections",
"(",
")",
":",
"config",
".",
"add_section",
"(",
"'cache'",
")",
"config",
".",
"read",
"(",
"gce_ini_path",
")",
"#########",
"# Section added for processing ini settings",
"#########",
"# Set the instance_states filter based on config file options",
"self",
".",
"instance_states",
"=",
"[",
"]",
"if",
"config",
".",
"has_option",
"(",
"'gce'",
",",
"'instance_states'",
")",
":",
"states",
"=",
"config",
".",
"get",
"(",
"'gce'",
",",
"'instance_states'",
")",
"# Ignore if instance_states is an empty string.",
"if",
"states",
":",
"self",
".",
"instance_states",
"=",
"states",
".",
"split",
"(",
"','",
")",
"# Caching",
"cache_path",
"=",
"config",
".",
"get",
"(",
"'cache'",
",",
"'cache_path'",
")",
"cache_max_age",
"=",
"config",
".",
"getint",
"(",
"'cache'",
",",
"'cache_max_age'",
")",
"# TOOD(supertom): support project-specific caches",
"cache_name",
"=",
"'ansible-gce.cache'",
"self",
".",
"cache",
"=",
"CloudInventoryCache",
"(",
"cache_path",
"=",
"cache_path",
",",
"cache_max_age",
"=",
"cache_max_age",
",",
"cache_name",
"=",
"cache_name",
")",
"return",
"config"
]
| Reads the settings from the gce.ini file.
Populates a SafeConfigParser object with defaults and
attempts to read an .ini-style configuration from the filename
specified in GCE_INI_PATH. If the environment variable is
not present, the filename defaults to gce.ini in the current
working directory. | [
"Reads",
"the",
"settings",
"from",
"the",
"gce",
".",
"ini",
"file",
"."
]
| python | train |
pypa/pipenv | pipenv/patched/notpip/_internal/req/req_install.py | https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/patched/notpip/_internal/req/req_install.py#L255-L275 | def hashes(self, trust_internet=True):
# type: (bool) -> Hashes
"""Return a hash-comparer that considers my option- and URL-based
hashes to be known-good.
Hashes in URLs--ones embedded in the requirements file, not ones
downloaded from an index server--are almost peers with ones from
flags. They satisfy --require-hashes (whether it was implicitly or
explicitly activated) but do not activate it. md5 and sha224 are not
allowed in flags, which should nudge people toward good algos. We
always OR all hashes together, even ones from URLs.
:param trust_internet: Whether to trust URL-based (#md5=...) hashes
downloaded from the internet, as by populate_link()
"""
good_hashes = self.options.get('hashes', {}).copy()
link = self.link if trust_internet else self.original_link
if link and link.hash:
good_hashes.setdefault(link.hash_name, []).append(link.hash)
return Hashes(good_hashes) | [
"def",
"hashes",
"(",
"self",
",",
"trust_internet",
"=",
"True",
")",
":",
"# type: (bool) -> Hashes",
"good_hashes",
"=",
"self",
".",
"options",
".",
"get",
"(",
"'hashes'",
",",
"{",
"}",
")",
".",
"copy",
"(",
")",
"link",
"=",
"self",
".",
"link",
"if",
"trust_internet",
"else",
"self",
".",
"original_link",
"if",
"link",
"and",
"link",
".",
"hash",
":",
"good_hashes",
".",
"setdefault",
"(",
"link",
".",
"hash_name",
",",
"[",
"]",
")",
".",
"append",
"(",
"link",
".",
"hash",
")",
"return",
"Hashes",
"(",
"good_hashes",
")"
]
| Return a hash-comparer that considers my option- and URL-based
hashes to be known-good.
Hashes in URLs--ones embedded in the requirements file, not ones
downloaded from an index server--are almost peers with ones from
flags. They satisfy --require-hashes (whether it was implicitly or
explicitly activated) but do not activate it. md5 and sha224 are not
allowed in flags, which should nudge people toward good algos. We
always OR all hashes together, even ones from URLs.
:param trust_internet: Whether to trust URL-based (#md5=...) hashes
downloaded from the internet, as by populate_link() | [
"Return",
"a",
"hash",
"-",
"comparer",
"that",
"considers",
"my",
"option",
"-",
"and",
"URL",
"-",
"based",
"hashes",
"to",
"be",
"known",
"-",
"good",
"."
]
| python | train |
yyuu/botornado | boto/s3/bucket.py | https://github.com/yyuu/botornado/blob/fffb056f5ff2324d1d5c1304014cfb1d899f602e/boto/s3/bucket.py#L1155-L1186 | def configure_website(self, suffix, error_key='', headers=None):
"""
Configure this bucket to act as a website
:type suffix: str
:param suffix: Suffix that is appended to a request that is for a
"directory" on the website endpoint (e.g. if the suffix
is index.html and you make a request to
samplebucket/images/ the data that is returned will
be for the object with the key name images/index.html).
The suffix must not be empty and must not include a
slash character.
:type error_key: str
:param error_key: The object key name to use when a 4XX class
error occurs. This is optional.
"""
if error_key:
error_frag = self.WebsiteErrorFragment % error_key
else:
error_frag = ''
body = self.WebsiteBody % (suffix, error_frag)
response = self.connection.make_request('PUT', self.name, data=body,
query_args='website',
headers=headers)
body = response.read()
if response.status == 200:
return True
else:
raise self.connection.provider.storage_response_error(
response.status, response.reason, body) | [
"def",
"configure_website",
"(",
"self",
",",
"suffix",
",",
"error_key",
"=",
"''",
",",
"headers",
"=",
"None",
")",
":",
"if",
"error_key",
":",
"error_frag",
"=",
"self",
".",
"WebsiteErrorFragment",
"%",
"error_key",
"else",
":",
"error_frag",
"=",
"''",
"body",
"=",
"self",
".",
"WebsiteBody",
"%",
"(",
"suffix",
",",
"error_frag",
")",
"response",
"=",
"self",
".",
"connection",
".",
"make_request",
"(",
"'PUT'",
",",
"self",
".",
"name",
",",
"data",
"=",
"body",
",",
"query_args",
"=",
"'website'",
",",
"headers",
"=",
"headers",
")",
"body",
"=",
"response",
".",
"read",
"(",
")",
"if",
"response",
".",
"status",
"==",
"200",
":",
"return",
"True",
"else",
":",
"raise",
"self",
".",
"connection",
".",
"provider",
".",
"storage_response_error",
"(",
"response",
".",
"status",
",",
"response",
".",
"reason",
",",
"body",
")"
]
| Configure this bucket to act as a website
:type suffix: str
:param suffix: Suffix that is appended to a request that is for a
"directory" on the website endpoint (e.g. if the suffix
is index.html and you make a request to
samplebucket/images/ the data that is returned will
be for the object with the key name images/index.html).
The suffix must not be empty and must not include a
slash character.
:type error_key: str
:param error_key: The object key name to use when a 4XX class
error occurs. This is optional. | [
"Configure",
"this",
"bucket",
"to",
"act",
"as",
"a",
"website"
]
| python | train |
fredRos/pypmc | pypmc/sampler/markov_chain.py | https://github.com/fredRos/pypmc/blob/9138b67c976f0d58edd080353d16769a47794d09/pypmc/sampler/markov_chain.py#L98-L163 | def run(self, N=1):
'''Run the chain and store the history of visited points into
the member variable ``self.samples``. Returns the number of
accepted points during the run.
.. seealso::
:py:class:`pypmc.tools.History`
:param N:
An int which defines the number of steps to run the chain.
'''
if N == 0:
return 0
# set the accept function
if self.proposal.symmetric:
get_log_rho = self._get_log_rho_metropolis
else:
get_log_rho = self._get_log_rho_metropolis_hastings
# allocate an empty numpy array to store the run
if self.target_values is not None:
this_target_values = self.target_values.append(N)
this_run = self.samples.append(N)
accept_count = 0
for i_N in range(N):
# propose new point
proposed_point = self.proposal.propose(self.current_point, self.rng)
proposed_eval = self.target(proposed_point)
# log_rho := log(probability to accept point), where log_rho > 0 is meant to imply rho = 1
log_rho = get_log_rho(proposed_point, proposed_eval)
# check for NaN
if _np.isnan(log_rho): raise ValueError('encountered NaN')
# accept if rho = 1
if log_rho >=0:
accept_count += 1
this_run[i_N] = proposed_point
self.current_point = proposed_point
self.current_target_eval = proposed_eval
# accept with probability rho
elif log_rho >= _np.log(self.rng.rand()):
accept_count += 1
this_run[i_N] = proposed_point
self.current_point = proposed_point
self.current_target_eval = proposed_eval
# reject if not accepted
else:
this_run[i_N] = self.current_point
#do not need to update self.current
#self.current = self.current
# save target value if desired
if self.target_values is not None:
this_target_values[i_N] = self.current_target_eval
# ---------------------- end for --------------------------------
return accept_count | [
"def",
"run",
"(",
"self",
",",
"N",
"=",
"1",
")",
":",
"if",
"N",
"==",
"0",
":",
"return",
"0",
"# set the accept function",
"if",
"self",
".",
"proposal",
".",
"symmetric",
":",
"get_log_rho",
"=",
"self",
".",
"_get_log_rho_metropolis",
"else",
":",
"get_log_rho",
"=",
"self",
".",
"_get_log_rho_metropolis_hastings",
"# allocate an empty numpy array to store the run",
"if",
"self",
".",
"target_values",
"is",
"not",
"None",
":",
"this_target_values",
"=",
"self",
".",
"target_values",
".",
"append",
"(",
"N",
")",
"this_run",
"=",
"self",
".",
"samples",
".",
"append",
"(",
"N",
")",
"accept_count",
"=",
"0",
"for",
"i_N",
"in",
"range",
"(",
"N",
")",
":",
"# propose new point",
"proposed_point",
"=",
"self",
".",
"proposal",
".",
"propose",
"(",
"self",
".",
"current_point",
",",
"self",
".",
"rng",
")",
"proposed_eval",
"=",
"self",
".",
"target",
"(",
"proposed_point",
")",
"# log_rho := log(probability to accept point), where log_rho > 0 is meant to imply rho = 1",
"log_rho",
"=",
"get_log_rho",
"(",
"proposed_point",
",",
"proposed_eval",
")",
"# check for NaN",
"if",
"_np",
".",
"isnan",
"(",
"log_rho",
")",
":",
"raise",
"ValueError",
"(",
"'encountered NaN'",
")",
"# accept if rho = 1",
"if",
"log_rho",
">=",
"0",
":",
"accept_count",
"+=",
"1",
"this_run",
"[",
"i_N",
"]",
"=",
"proposed_point",
"self",
".",
"current_point",
"=",
"proposed_point",
"self",
".",
"current_target_eval",
"=",
"proposed_eval",
"# accept with probability rho",
"elif",
"log_rho",
">=",
"_np",
".",
"log",
"(",
"self",
".",
"rng",
".",
"rand",
"(",
")",
")",
":",
"accept_count",
"+=",
"1",
"this_run",
"[",
"i_N",
"]",
"=",
"proposed_point",
"self",
".",
"current_point",
"=",
"proposed_point",
"self",
".",
"current_target_eval",
"=",
"proposed_eval",
"# reject if not accepted",
"else",
":",
"this_run",
"[",
"i_N",
"]",
"=",
"self",
".",
"current_point",
"#do not need to update self.current",
"#self.current = self.current",
"# save target value if desired",
"if",
"self",
".",
"target_values",
"is",
"not",
"None",
":",
"this_target_values",
"[",
"i_N",
"]",
"=",
"self",
".",
"current_target_eval",
"# ---------------------- end for --------------------------------",
"return",
"accept_count"
]
| Run the chain and store the history of visited points into
the member variable ``self.samples``. Returns the number of
accepted points during the run.
.. seealso::
:py:class:`pypmc.tools.History`
:param N:
An int which defines the number of steps to run the chain. | [
"Run",
"the",
"chain",
"and",
"store",
"the",
"history",
"of",
"visited",
"points",
"into",
"the",
"member",
"variable",
"self",
".",
"samples",
".",
"Returns",
"the",
"number",
"of",
"accepted",
"points",
"during",
"the",
"run",
"."
]
| python | train |
jxtech/wechatpy | wechatpy/client/api/shakearound.py | https://github.com/jxtech/wechatpy/blob/4df0da795618c0895a10f1c2cde9e9d5c0a93aaa/wechatpy/client/api/shakearound.py#L234-L254 | def add_material(self, media_file, media_type='icon'):
"""
上传图片素材
详情请参考
http://mp.weixin.qq.com/wiki/5/e997428269ff189d8f9a4b9e177be2d9.html
:param media_file: 要上传的文件,一个 File-object
:param media_type: 摇一摇素材类型, 取值为 icon或者 license, 默认 icon.
:return: 上传的素材信息
"""
res = self._post(
'shakearound/material/add',
files={
'media': media_file
},
params={
'type': media_type
},
result_processor=lambda x: x['data']
)
return res | [
"def",
"add_material",
"(",
"self",
",",
"media_file",
",",
"media_type",
"=",
"'icon'",
")",
":",
"res",
"=",
"self",
".",
"_post",
"(",
"'shakearound/material/add'",
",",
"files",
"=",
"{",
"'media'",
":",
"media_file",
"}",
",",
"params",
"=",
"{",
"'type'",
":",
"media_type",
"}",
",",
"result_processor",
"=",
"lambda",
"x",
":",
"x",
"[",
"'data'",
"]",
")",
"return",
"res"
]
| 上传图片素材
详情请参考
http://mp.weixin.qq.com/wiki/5/e997428269ff189d8f9a4b9e177be2d9.html
:param media_file: 要上传的文件,一个 File-object
:param media_type: 摇一摇素材类型, 取值为 icon或者 license, 默认 icon.
:return: 上传的素材信息 | [
"上传图片素材",
"详情请参考",
"http",
":",
"//",
"mp",
".",
"weixin",
".",
"qq",
".",
"com",
"/",
"wiki",
"/",
"5",
"/",
"e997428269ff189d8f9a4b9e177be2d9",
".",
"html"
]
| python | train |
neherlab/treetime | treetime/treeanc.py | https://github.com/neherlab/treetime/blob/f6cdb58d19243a18ffdaa2b2ec71872fa00e65c0/treetime/treeanc.py#L1324-L1461 | def _ml_anc_marginal(self, store_compressed=False, final=True, sample_from_profile=False,
debug=False, **kwargs):
"""
Perform marginal ML reconstruction of the ancestral states. In contrast to
joint reconstructions, this needs to access the probabilities rather than only
log probabilities and is hence handled by a separate function.
Parameters
----------
store_compressed : bool, default True
attach a reduced representation of sequence changed to each branch
final : bool, default True
stop full length by expanding sites with identical alignment patterns
sample_from_profile : bool or str
assign sequences probabilistically according to the inferred probabilities
of ancestral states instead of to their ML value. This parameter can also
take the value 'root' in which case probabilistic sampling will happen
at the root but at no other node.
"""
tree = self.tree
# number of nucleotides changed from prev reconstruction
N_diff = 0
L = self.multiplicity.shape[0]
n_states = self.gtr.alphabet.shape[0]
self.logger("TreeAnc._ml_anc_marginal: type of reconstruction: Marginal", 2)
self.logger("Attaching sequence profiles to leafs... ", 3)
# set the leaves profiles
for leaf in tree.get_terminals():
# in any case, set the profile
leaf.marginal_subtree_LH = seq2prof(leaf.original_cseq, self.gtr.profile_map)
leaf.marginal_subtree_LH_prefactor = np.zeros(L)
self.logger("Walking up the tree, computing likelihoods... ", 3)
# propagate leaves --> root, set the marginal-likelihood messages
for node in tree.get_nonterminals(order='postorder'): #leaves -> root
# regardless of what was before, set the profile to ones
tmp_log_subtree_LH = np.zeros((L,n_states), dtype=float)
node.marginal_subtree_LH_prefactor = np.zeros(L, dtype=float)
for ch in node.clades:
ch.marginal_log_Lx = self.gtr.propagate_profile(ch.marginal_subtree_LH,
self._branch_length_to_gtr(ch), return_log=True) # raw prob to transfer prob up
tmp_log_subtree_LH += ch.marginal_log_Lx
node.marginal_subtree_LH_prefactor += ch.marginal_subtree_LH_prefactor
node.marginal_subtree_LH, offset = normalize_profile(tmp_log_subtree_LH, log=True)
node.marginal_subtree_LH_prefactor += offset # and store log-prefactor
self.logger("Computing root node sequence and total tree likelihood...",3)
# Msg to the root from the distant part (equ frequencies)
if len(self.gtr.Pi.shape)==1:
tree.root.marginal_outgroup_LH = np.repeat([self.gtr.Pi], tree.root.marginal_subtree_LH.shape[0], axis=0)
else:
tree.root.marginal_outgroup_LH = np.copy(self.gtr.Pi.T)
tree.root.marginal_profile, pre = normalize_profile(tree.root.marginal_outgroup_LH*tree.root.marginal_subtree_LH)
marginal_LH_prefactor = tree.root.marginal_subtree_LH_prefactor + pre
# choose sequence characters from this profile.
# treat root node differently to avoid piling up mutations on the longer branch
if sample_from_profile=='root':
root_sample_from_profile = True
other_sample_from_profile = False
elif isinstance(sample_from_profile, bool):
root_sample_from_profile = sample_from_profile
other_sample_from_profile = sample_from_profile
seq, prof_vals, idxs = prof2seq(tree.root.marginal_profile,
self.gtr, sample_from_prof=root_sample_from_profile, normalize=False)
self.tree.sequence_LH = marginal_LH_prefactor
self.tree.total_sequence_LH = (self.tree.sequence_LH*self.multiplicity).sum()
self.tree.root.cseq = seq
gc.collect()
if final:
if self.is_vcf:
self.tree.root.sequence = self.dict_sequence(self.tree.root)
else:
self.tree.root.sequence = self.expanded_sequence(self.tree.root)
self.logger("Walking down the tree, computing maximum likelihood sequences...",3)
# propagate root -->> leaves, reconstruct the internal node sequences
# provided the upstream message + the message from the complementary subtree
for node in tree.find_clades(order='preorder'):
if node.up is None: # skip if node is root
continue
# integrate the information coming from parents with the information
# of all children my multiplying it to the prev computed profile
node.marginal_outgroup_LH, pre = normalize_profile(np.log(node.up.marginal_profile) - node.marginal_log_Lx,
log=True, return_offset=False)
tmp_msg_from_parent = self.gtr.evolve(node.marginal_outgroup_LH,
self._branch_length_to_gtr(node), return_log=False)
node.marginal_profile, pre = normalize_profile(node.marginal_subtree_LH * tmp_msg_from_parent, return_offset=False)
# choose sequence based maximal marginal LH.
seq, prof_vals, idxs = prof2seq(node.marginal_profile, self.gtr,
sample_from_prof=other_sample_from_profile, normalize=False)
if hasattr(node, 'cseq') and node.cseq is not None:
N_diff += (seq!=node.cseq).sum()
else:
N_diff += L
#assign new sequence
node.cseq = seq
if final:
if self.is_vcf:
node.sequence = self.dict_sequence(node)
else:
node.sequence = self.expanded_sequence(node)
node.mutations = self.get_mutations(node)
# note that the root doesn't contribute to N_diff (intended, since root sequence is often ambiguous)
self.logger("TreeAnc._ml_anc_marginal: ...done", 3)
if store_compressed:
self._store_compressed_sequence_pairs()
# do clean-up:
if not debug:
for node in self.tree.find_clades():
try:
del node.marginal_log_Lx
del node.marginal_subtree_LH_prefactor
except:
pass
gc.collect()
return N_diff | [
"def",
"_ml_anc_marginal",
"(",
"self",
",",
"store_compressed",
"=",
"False",
",",
"final",
"=",
"True",
",",
"sample_from_profile",
"=",
"False",
",",
"debug",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
":",
"tree",
"=",
"self",
".",
"tree",
"# number of nucleotides changed from prev reconstruction",
"N_diff",
"=",
"0",
"L",
"=",
"self",
".",
"multiplicity",
".",
"shape",
"[",
"0",
"]",
"n_states",
"=",
"self",
".",
"gtr",
".",
"alphabet",
".",
"shape",
"[",
"0",
"]",
"self",
".",
"logger",
"(",
"\"TreeAnc._ml_anc_marginal: type of reconstruction: Marginal\"",
",",
"2",
")",
"self",
".",
"logger",
"(",
"\"Attaching sequence profiles to leafs... \"",
",",
"3",
")",
"# set the leaves profiles",
"for",
"leaf",
"in",
"tree",
".",
"get_terminals",
"(",
")",
":",
"# in any case, set the profile",
"leaf",
".",
"marginal_subtree_LH",
"=",
"seq2prof",
"(",
"leaf",
".",
"original_cseq",
",",
"self",
".",
"gtr",
".",
"profile_map",
")",
"leaf",
".",
"marginal_subtree_LH_prefactor",
"=",
"np",
".",
"zeros",
"(",
"L",
")",
"self",
".",
"logger",
"(",
"\"Walking up the tree, computing likelihoods... \"",
",",
"3",
")",
"# propagate leaves --> root, set the marginal-likelihood messages",
"for",
"node",
"in",
"tree",
".",
"get_nonterminals",
"(",
"order",
"=",
"'postorder'",
")",
":",
"#leaves -> root",
"# regardless of what was before, set the profile to ones",
"tmp_log_subtree_LH",
"=",
"np",
".",
"zeros",
"(",
"(",
"L",
",",
"n_states",
")",
",",
"dtype",
"=",
"float",
")",
"node",
".",
"marginal_subtree_LH_prefactor",
"=",
"np",
".",
"zeros",
"(",
"L",
",",
"dtype",
"=",
"float",
")",
"for",
"ch",
"in",
"node",
".",
"clades",
":",
"ch",
".",
"marginal_log_Lx",
"=",
"self",
".",
"gtr",
".",
"propagate_profile",
"(",
"ch",
".",
"marginal_subtree_LH",
",",
"self",
".",
"_branch_length_to_gtr",
"(",
"ch",
")",
",",
"return_log",
"=",
"True",
")",
"# raw prob to transfer prob up",
"tmp_log_subtree_LH",
"+=",
"ch",
".",
"marginal_log_Lx",
"node",
".",
"marginal_subtree_LH_prefactor",
"+=",
"ch",
".",
"marginal_subtree_LH_prefactor",
"node",
".",
"marginal_subtree_LH",
",",
"offset",
"=",
"normalize_profile",
"(",
"tmp_log_subtree_LH",
",",
"log",
"=",
"True",
")",
"node",
".",
"marginal_subtree_LH_prefactor",
"+=",
"offset",
"# and store log-prefactor",
"self",
".",
"logger",
"(",
"\"Computing root node sequence and total tree likelihood...\"",
",",
"3",
")",
"# Msg to the root from the distant part (equ frequencies)",
"if",
"len",
"(",
"self",
".",
"gtr",
".",
"Pi",
".",
"shape",
")",
"==",
"1",
":",
"tree",
".",
"root",
".",
"marginal_outgroup_LH",
"=",
"np",
".",
"repeat",
"(",
"[",
"self",
".",
"gtr",
".",
"Pi",
"]",
",",
"tree",
".",
"root",
".",
"marginal_subtree_LH",
".",
"shape",
"[",
"0",
"]",
",",
"axis",
"=",
"0",
")",
"else",
":",
"tree",
".",
"root",
".",
"marginal_outgroup_LH",
"=",
"np",
".",
"copy",
"(",
"self",
".",
"gtr",
".",
"Pi",
".",
"T",
")",
"tree",
".",
"root",
".",
"marginal_profile",
",",
"pre",
"=",
"normalize_profile",
"(",
"tree",
".",
"root",
".",
"marginal_outgroup_LH",
"*",
"tree",
".",
"root",
".",
"marginal_subtree_LH",
")",
"marginal_LH_prefactor",
"=",
"tree",
".",
"root",
".",
"marginal_subtree_LH_prefactor",
"+",
"pre",
"# choose sequence characters from this profile.",
"# treat root node differently to avoid piling up mutations on the longer branch",
"if",
"sample_from_profile",
"==",
"'root'",
":",
"root_sample_from_profile",
"=",
"True",
"other_sample_from_profile",
"=",
"False",
"elif",
"isinstance",
"(",
"sample_from_profile",
",",
"bool",
")",
":",
"root_sample_from_profile",
"=",
"sample_from_profile",
"other_sample_from_profile",
"=",
"sample_from_profile",
"seq",
",",
"prof_vals",
",",
"idxs",
"=",
"prof2seq",
"(",
"tree",
".",
"root",
".",
"marginal_profile",
",",
"self",
".",
"gtr",
",",
"sample_from_prof",
"=",
"root_sample_from_profile",
",",
"normalize",
"=",
"False",
")",
"self",
".",
"tree",
".",
"sequence_LH",
"=",
"marginal_LH_prefactor",
"self",
".",
"tree",
".",
"total_sequence_LH",
"=",
"(",
"self",
".",
"tree",
".",
"sequence_LH",
"*",
"self",
".",
"multiplicity",
")",
".",
"sum",
"(",
")",
"self",
".",
"tree",
".",
"root",
".",
"cseq",
"=",
"seq",
"gc",
".",
"collect",
"(",
")",
"if",
"final",
":",
"if",
"self",
".",
"is_vcf",
":",
"self",
".",
"tree",
".",
"root",
".",
"sequence",
"=",
"self",
".",
"dict_sequence",
"(",
"self",
".",
"tree",
".",
"root",
")",
"else",
":",
"self",
".",
"tree",
".",
"root",
".",
"sequence",
"=",
"self",
".",
"expanded_sequence",
"(",
"self",
".",
"tree",
".",
"root",
")",
"self",
".",
"logger",
"(",
"\"Walking down the tree, computing maximum likelihood sequences...\"",
",",
"3",
")",
"# propagate root -->> leaves, reconstruct the internal node sequences",
"# provided the upstream message + the message from the complementary subtree",
"for",
"node",
"in",
"tree",
".",
"find_clades",
"(",
"order",
"=",
"'preorder'",
")",
":",
"if",
"node",
".",
"up",
"is",
"None",
":",
"# skip if node is root",
"continue",
"# integrate the information coming from parents with the information",
"# of all children my multiplying it to the prev computed profile",
"node",
".",
"marginal_outgroup_LH",
",",
"pre",
"=",
"normalize_profile",
"(",
"np",
".",
"log",
"(",
"node",
".",
"up",
".",
"marginal_profile",
")",
"-",
"node",
".",
"marginal_log_Lx",
",",
"log",
"=",
"True",
",",
"return_offset",
"=",
"False",
")",
"tmp_msg_from_parent",
"=",
"self",
".",
"gtr",
".",
"evolve",
"(",
"node",
".",
"marginal_outgroup_LH",
",",
"self",
".",
"_branch_length_to_gtr",
"(",
"node",
")",
",",
"return_log",
"=",
"False",
")",
"node",
".",
"marginal_profile",
",",
"pre",
"=",
"normalize_profile",
"(",
"node",
".",
"marginal_subtree_LH",
"*",
"tmp_msg_from_parent",
",",
"return_offset",
"=",
"False",
")",
"# choose sequence based maximal marginal LH.",
"seq",
",",
"prof_vals",
",",
"idxs",
"=",
"prof2seq",
"(",
"node",
".",
"marginal_profile",
",",
"self",
".",
"gtr",
",",
"sample_from_prof",
"=",
"other_sample_from_profile",
",",
"normalize",
"=",
"False",
")",
"if",
"hasattr",
"(",
"node",
",",
"'cseq'",
")",
"and",
"node",
".",
"cseq",
"is",
"not",
"None",
":",
"N_diff",
"+=",
"(",
"seq",
"!=",
"node",
".",
"cseq",
")",
".",
"sum",
"(",
")",
"else",
":",
"N_diff",
"+=",
"L",
"#assign new sequence",
"node",
".",
"cseq",
"=",
"seq",
"if",
"final",
":",
"if",
"self",
".",
"is_vcf",
":",
"node",
".",
"sequence",
"=",
"self",
".",
"dict_sequence",
"(",
"node",
")",
"else",
":",
"node",
".",
"sequence",
"=",
"self",
".",
"expanded_sequence",
"(",
"node",
")",
"node",
".",
"mutations",
"=",
"self",
".",
"get_mutations",
"(",
"node",
")",
"# note that the root doesn't contribute to N_diff (intended, since root sequence is often ambiguous)",
"self",
".",
"logger",
"(",
"\"TreeAnc._ml_anc_marginal: ...done\"",
",",
"3",
")",
"if",
"store_compressed",
":",
"self",
".",
"_store_compressed_sequence_pairs",
"(",
")",
"# do clean-up:",
"if",
"not",
"debug",
":",
"for",
"node",
"in",
"self",
".",
"tree",
".",
"find_clades",
"(",
")",
":",
"try",
":",
"del",
"node",
".",
"marginal_log_Lx",
"del",
"node",
".",
"marginal_subtree_LH_prefactor",
"except",
":",
"pass",
"gc",
".",
"collect",
"(",
")",
"return",
"N_diff"
]
| Perform marginal ML reconstruction of the ancestral states. In contrast to
joint reconstructions, this needs to access the probabilities rather than only
log probabilities and is hence handled by a separate function.
Parameters
----------
store_compressed : bool, default True
attach a reduced representation of sequence changed to each branch
final : bool, default True
stop full length by expanding sites with identical alignment patterns
sample_from_profile : bool or str
assign sequences probabilistically according to the inferred probabilities
of ancestral states instead of to their ML value. This parameter can also
take the value 'root' in which case probabilistic sampling will happen
at the root but at no other node. | [
"Perform",
"marginal",
"ML",
"reconstruction",
"of",
"the",
"ancestral",
"states",
".",
"In",
"contrast",
"to",
"joint",
"reconstructions",
"this",
"needs",
"to",
"access",
"the",
"probabilities",
"rather",
"than",
"only",
"log",
"probabilities",
"and",
"is",
"hence",
"handled",
"by",
"a",
"separate",
"function",
"."
]
| python | test |
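The marginal reconstruction above repeatedly calls normalize_profile(..., log=True) to turn per-site log-likelihood messages into normalized probability profiles plus a log prefactor. A minimal sketch of that log-space normalization trick (a plain per-site log-sum-exp; the helper name and return convention here are assumptions, not treetime's actual implementation):

import numpy as np

def normalize_log_profile(log_prof):
    # log_prof: (L sites, n_states) array of per-site log-likelihoods.
    # Subtract the per-site max for numerical stability, exponentiate,
    # renormalize each row to sum to 1, and return the log normalizer
    # ("offset") that was divided out.
    offset = log_prof.max(axis=1, keepdims=True)
    prof = np.exp(log_prof - offset)
    norm = prof.sum(axis=1, keepdims=True)
    return prof / norm, (offset + np.log(norm)).ravel()

log_msgs = np.log(np.array([[0.2, 0.7, 0.1],
                            [0.05, 0.05, 0.9]]))
profile, log_prefactor = normalize_log_profile(log_msgs)
print(profile.sum(axis=1))   # each site sums to 1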
rbit/pydtls | dtls/sslconnection.py | https://github.com/rbit/pydtls/blob/41a71fccd990347d0de5f42418fea1e4e733359c/dtls/sslconnection.py#L206-L217 | def set_curves(self, curves):
u''' Set supported curves by name, nid or nist.
:param str | tuple(int) curves: Example "secp384r1:secp256k1", (715, 714), "P-384", "K-409:B-409:K-571", ...
:return: 1 for success and 0 for failure
'''
retVal = None
if isinstance(curves, str):
retVal = SSL_CTX_set1_curves_list(self._ctx, curves)
elif isinstance(curves, tuple):
retVal = SSL_CTX_set1_curves(self._ctx, curves, len(curves))
return retVal | [
"def",
"set_curves",
"(",
"self",
",",
"curves",
")",
":",
"retVal",
"=",
"None",
"if",
"isinstance",
"(",
"curves",
",",
"str",
")",
":",
"retVal",
"=",
"SSL_CTX_set1_curves_list",
"(",
"self",
".",
"_ctx",
",",
"curves",
")",
"elif",
"isinstance",
"(",
"curves",
",",
"tuple",
")",
":",
"retVal",
"=",
"SSL_CTX_set1_curves",
"(",
"self",
".",
"_ctx",
",",
"curves",
",",
"len",
"(",
"curves",
")",
")",
"return",
"retVal"
]
| u''' Set supported curves by name, nid or nist.
:param str | tuple(int) curves: Example "secp384r1:secp256k1", (715, 714), "P-384", "K-409:B-409:K-571", ...
:return: 1 for success and 0 for failure | [
"u",
"Set",
"supported",
"curves",
"by",
"name",
"nid",
"or",
"nist",
"."
]
| python | train |
SoCo/SoCo | dev_tools/analyse_ws.py | https://github.com/SoCo/SoCo/blob/671937e07d7973b78c0cbee153d4f3ad68ec48c6/dev_tools/analyse_ws.py#L86-L125 | def set_file(self, filename):
""" Analyse the file with the captured content """
# Use the file name as prefix if none is given
if self.output_prefix is None:
_, self.output_prefix = os.path.split(filename)
# Check if the file is present, since rdpcap will not do that
if not (os.path.isfile(filename) and os.access(filename, os.R_OK)):
print 'The file \'{0}\' is either not present or not readable. '\
'Exiting!'.format(filename)
sys.exit(1)
try:
packets = rdpcap(filename)
except NameError:
# Due probably to a bug in rdpcap, this kind of error raises a
# NameError, because the exception that is tried to raise, is not
# defined
print 'The file \'{}\' is not a pcap capture file. Exiting!'\
.format(filename)
sys.exit(2)
for number, packet in enumerate(packets):
# See if there is a field called load
self._debug('\nNUMBER {0}'.format(number), no_prefix=True)
try:
# Will cause AttributeError if there is no load
packet.getfieldval('load')
# Get the full load
load = packet.sprintf('%TCP.payload%')
self._debug('PAYLOAD LENGTH {0}'.format(len(load)),
no_prefix=True)
self._debug(load, load=True)
self._parse_load(load)
except AttributeError:
self._debug('LOAD EXCEPTION', no_prefix=True)
if len(self.messages) > 0 and not self.messages[-1].write_closed:
self._debug('DELETE LAST OPEN FILE')
del self.messages[-1]
if self.args.debug_analysis:
sys.exit(0) | [
"def",
"set_file",
"(",
"self",
",",
"filename",
")",
":",
"# Use the file name as prefix if none is given",
"if",
"self",
".",
"output_prefix",
"is",
"None",
":",
"_",
",",
"self",
".",
"output_prefix",
"=",
"os",
".",
"path",
".",
"split",
"(",
"filename",
")",
"# Check if the file is present, since rdpcap will not do that",
"if",
"not",
"(",
"os",
".",
"path",
".",
"isfile",
"(",
"filename",
")",
"and",
"os",
".",
"access",
"(",
"filename",
",",
"os",
".",
"R_OK",
")",
")",
":",
"print",
"'The file \\'{0}\\' is either not present or not readable. '",
"'Exiting!'",
".",
"format",
"(",
"filename",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"try",
":",
"packets",
"=",
"rdpcap",
"(",
"filename",
")",
"except",
"NameError",
":",
"# Due probably to a bug in rdpcap, this kind of error raises a",
"# NameError, because the exception that is tried to raise, is not",
"# defined",
"print",
"'The file \\'{}\\' is not a pcap capture file. Exiting!'",
".",
"format",
"(",
"filename",
")",
"sys",
".",
"exit",
"(",
"2",
")",
"for",
"number",
",",
"packet",
"in",
"enumerate",
"(",
"packets",
")",
":",
"# See if there is a field called load",
"self",
".",
"_debug",
"(",
"'\\nNUMBER {0}'",
".",
"format",
"(",
"number",
")",
",",
"no_prefix",
"=",
"True",
")",
"try",
":",
"# Will cause AttributeError if there is no load",
"packet",
".",
"getfieldval",
"(",
"'load'",
")",
"# Get the full load",
"load",
"=",
"packet",
".",
"sprintf",
"(",
"'%TCP.payload%'",
")",
"self",
".",
"_debug",
"(",
"'PAYLOAD LENGTH {0}'",
".",
"format",
"(",
"len",
"(",
"load",
")",
")",
",",
"no_prefix",
"=",
"True",
")",
"self",
".",
"_debug",
"(",
"load",
",",
"load",
"=",
"True",
")",
"self",
".",
"_parse_load",
"(",
"load",
")",
"except",
"AttributeError",
":",
"self",
".",
"_debug",
"(",
"'LOAD EXCEPTION'",
",",
"no_prefix",
"=",
"True",
")",
"if",
"len",
"(",
"self",
".",
"messages",
")",
">",
"0",
"and",
"not",
"self",
".",
"messages",
"[",
"-",
"1",
"]",
".",
"write_closed",
":",
"self",
".",
"_debug",
"(",
"'DELETE LAST OPEN FILE'",
")",
"del",
"self",
".",
"messages",
"[",
"-",
"1",
"]",
"if",
"self",
".",
"args",
".",
"debug_analysis",
":",
"sys",
".",
"exit",
"(",
"0",
")"
]
| Analyse the file with the captured content | [
"Analyse",
"the",
"file",
"with",
"the",
"captured",
"content"
]
| python | train |
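The capture-analysis routine above is Python 2 (print statements) and pulls the TCP payload via sprintf('%TCP.payload%'). A rough Python 3 equivalent of the payload-extraction loop using scapy's layer accessors (the capture file name is a placeholder; error handling omitted):

from scapy.all import rdpcap, Raw, TCP

packets = rdpcap('capture.pcap')          # placeholder capture file
for number, packet in enumerate(packets):
    # only packets that carry a raw TCP payload are interesting here
    if packet.haslayer(TCP) and packet.haslayer(Raw):
        load = bytes(packet[Raw].load)
        print(number, len(load))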
Microsoft/LightGBM | python-package/lightgbm/basic.py | https://github.com/Microsoft/LightGBM/blob/8d2ec69f4f685b0ab1c4624d59ee2d3287bb3147/python-package/lightgbm/basic.py#L1054-L1077 | def subset(self, used_indices, params=None):
"""Get subset of current Dataset.
Parameters
----------
used_indices : list of int
Indices used to create the subset.
params : dict or None, optional (default=None)
These parameters will be passed to Dataset constructor.
Returns
-------
subset : Dataset
Subset of the current Dataset.
"""
if params is None:
params = self.params
ret = Dataset(None, reference=self, feature_name=self.feature_name,
categorical_feature=self.categorical_feature, params=params,
free_raw_data=self.free_raw_data)
ret._predictor = self._predictor
ret.pandas_categorical = self.pandas_categorical
ret.used_indices = used_indices
return ret | [
"def",
"subset",
"(",
"self",
",",
"used_indices",
",",
"params",
"=",
"None",
")",
":",
"if",
"params",
"is",
"None",
":",
"params",
"=",
"self",
".",
"params",
"ret",
"=",
"Dataset",
"(",
"None",
",",
"reference",
"=",
"self",
",",
"feature_name",
"=",
"self",
".",
"feature_name",
",",
"categorical_feature",
"=",
"self",
".",
"categorical_feature",
",",
"params",
"=",
"params",
",",
"free_raw_data",
"=",
"self",
".",
"free_raw_data",
")",
"ret",
".",
"_predictor",
"=",
"self",
".",
"_predictor",
"ret",
".",
"pandas_categorical",
"=",
"self",
".",
"pandas_categorical",
"ret",
".",
"used_indices",
"=",
"used_indices",
"return",
"ret"
]
| Get subset of current Dataset.
Parameters
----------
used_indices : list of int
Indices used to create the subset.
params : dict or None, optional (default=None)
These parameters will be passed to Dataset constructor.
Returns
-------
subset : Dataset
Subset of the current Dataset. | [
"Get",
"subset",
"of",
"current",
"Dataset",
"."
]
| python | train |
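A short usage sketch for Dataset.subset as documented above, assuming only the public LightGBM Python API (the synthetic data and index split are illustrative):

import numpy as np
import lightgbm as lgb

X = np.random.rand(100, 5)
y = np.random.randint(0, 2, size=100)

full = lgb.Dataset(X, label=y, free_raw_data=False)
val_idx = list(range(80, 100))
val_set = full.subset(val_idx)     # shares params/feature names with `full`
val_set.construct()                # materializes the subset rows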
StackStorm/pybind | pybind/slxos/v17r_1_01a/ipv6_acl/ipv6/access_list/extended/seq/__init__.py | https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/slxos/v17r_1_01a/ipv6_acl/ipv6/access_list/extended/seq/__init__.py#L206-L227 | def _set_protocol_type(self, v, load=False):
"""
Setter method for protocol_type, mapped from YANG variable /ipv6_acl/ipv6/access_list/extended/seq/protocol_type (union)
If this variable is read-only (config: false) in the
source YANG file, then _set_protocol_type is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_protocol_type() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=[RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'udp': {'value': 17}, u'ipv6-icmp': {'value': 58}, u'tcp': {'value': 6}, u'ipv6': {'value': 41}},),RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'0..255']}),], is_leaf=True, yang_name="protocol-type", rest_name="protocol-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'cli-suppress-no': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-ipv6-access-list', defining_module='brocade-ipv6-access-list', yang_type='union', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """protocol_type must be of a type compatible with union""",
'defined-type': "brocade-ipv6-access-list:union",
'generated-type': """YANGDynClass(base=[RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'udp': {'value': 17}, u'ipv6-icmp': {'value': 58}, u'tcp': {'value': 6}, u'ipv6': {'value': 41}},),RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'0..255']}),], is_leaf=True, yang_name="protocol-type", rest_name="protocol-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'cli-suppress-no': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-ipv6-access-list', defining_module='brocade-ipv6-access-list', yang_type='union', is_config=True)""",
})
self.__protocol_type = t
if hasattr(self, '_set'):
self._set() | [
"def",
"_set_protocol_type",
"(",
"self",
",",
"v",
",",
"load",
"=",
"False",
")",
":",
"if",
"hasattr",
"(",
"v",
",",
"\"_utype\"",
")",
":",
"v",
"=",
"v",
".",
"_utype",
"(",
"v",
")",
"try",
":",
"t",
"=",
"YANGDynClass",
"(",
"v",
",",
"base",
"=",
"[",
"RestrictedClassType",
"(",
"base_type",
"=",
"unicode",
",",
"restriction_type",
"=",
"\"dict_key\"",
",",
"restriction_arg",
"=",
"{",
"u'udp'",
":",
"{",
"'value'",
":",
"17",
"}",
",",
"u'ipv6-icmp'",
":",
"{",
"'value'",
":",
"58",
"}",
",",
"u'tcp'",
":",
"{",
"'value'",
":",
"6",
"}",
",",
"u'ipv6'",
":",
"{",
"'value'",
":",
"41",
"}",
"}",
",",
")",
",",
"RestrictedClassType",
"(",
"base_type",
"=",
"RestrictedClassType",
"(",
"base_type",
"=",
"long",
",",
"restriction_dict",
"=",
"{",
"'range'",
":",
"[",
"'0..4294967295'",
"]",
"}",
",",
"int_size",
"=",
"32",
")",
",",
"restriction_dict",
"=",
"{",
"'range'",
":",
"[",
"u'0..255'",
"]",
"}",
")",
",",
"]",
",",
"is_leaf",
"=",
"True",
",",
"yang_name",
"=",
"\"protocol-type\"",
",",
"rest_name",
"=",
"\"protocol-type\"",
",",
"parent",
"=",
"self",
",",
"path_helper",
"=",
"self",
".",
"_path_helper",
",",
"extmethods",
"=",
"self",
".",
"_extmethods",
",",
"register_paths",
"=",
"True",
",",
"extensions",
"=",
"{",
"u'tailf-common'",
":",
"{",
"u'cli-drop-node-name'",
":",
"None",
",",
"u'cli-suppress-no'",
":",
"None",
",",
"u'cli-incomplete-command'",
":",
"None",
"}",
"}",
",",
"namespace",
"=",
"'urn:brocade.com:mgmt:brocade-ipv6-access-list'",
",",
"defining_module",
"=",
"'brocade-ipv6-access-list'",
",",
"yang_type",
"=",
"'union'",
",",
"is_config",
"=",
"True",
")",
"except",
"(",
"TypeError",
",",
"ValueError",
")",
":",
"raise",
"ValueError",
"(",
"{",
"'error-string'",
":",
"\"\"\"protocol_type must be of a type compatible with union\"\"\"",
",",
"'defined-type'",
":",
"\"brocade-ipv6-access-list:union\"",
",",
"'generated-type'",
":",
"\"\"\"YANGDynClass(base=[RestrictedClassType(base_type=unicode, restriction_type=\"dict_key\", restriction_arg={u'udp': {'value': 17}, u'ipv6-icmp': {'value': 58}, u'tcp': {'value': 6}, u'ipv6': {'value': 41}},),RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'0..255']}),], is_leaf=True, yang_name=\"protocol-type\", rest_name=\"protocol-type\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'cli-suppress-no': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-ipv6-access-list', defining_module='brocade-ipv6-access-list', yang_type='union', is_config=True)\"\"\"",
",",
"}",
")",
"self",
".",
"__protocol_type",
"=",
"t",
"if",
"hasattr",
"(",
"self",
",",
"'_set'",
")",
":",
"self",
".",
"_set",
"(",
")"
]
| Setter method for protocol_type, mapped from YANG variable /ipv6_acl/ipv6/access_list/extended/seq/protocol_type (union)
If this variable is read-only (config: false) in the
source YANG file, then _set_protocol_type is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_protocol_type() directly. | [
"Setter",
"method",
"for",
"protocol_type",
"mapped",
"from",
"YANG",
"variable",
"/",
"ipv6_acl",
"/",
"ipv6",
"/",
"access_list",
"/",
"extended",
"/",
"seq",
"/",
"protocol_type",
"(",
"union",
")",
"If",
"this",
"variable",
"is",
"read",
"-",
"only",
"(",
"config",
":",
"false",
")",
"in",
"the",
"source",
"YANG",
"file",
"then",
"_set_protocol_type",
"is",
"considered",
"as",
"a",
"private",
"method",
".",
"Backends",
"looking",
"to",
"populate",
"this",
"variable",
"should",
"do",
"so",
"via",
"calling",
"thisObj",
".",
"_set_protocol_type",
"()",
"directly",
"."
]
| python | train |
dereneaton/ipyrad | ipyrad/analysis/tetrad.py | https://github.com/dereneaton/ipyrad/blob/5eeb8a178160f45faf71bf47cec4abe998a575d1/ipyrad/analysis/tetrad.py#L707-L740 | def _dump_qmc(self):
"""
Makes a reduced array that excludes quartets with no information and
prints the quartets and weights to a file formatted for wQMC
"""
## open the h5 database
io5 = h5py.File(self.database.output, 'r')
## create an output file for writing
self.files.qdump = os.path.join(self.dirs, self.name+".quartets.txt")
LOGGER.info("qdump file %s", self.files.qdump)
outfile = open(self.files.qdump, 'w')
## todo: should pull quarts order in randomly? or doesn't matter?
for idx in xrange(0, self.params.nquartets, self._chunksize):
## get mask of zero weight quartets
#mask = io5["weights"][idx:idx+self.chunksize] != 0
#weight = io5["weights"][idx:idx+self.chunksize][mask]
#LOGGER.info("exluded = %s, mask shape %s",
# self._chunksize - mask.shape[0], mask.shape)
#LOGGER.info('q shape %s', io5["quartets"][idx:idx+self._chunksize].shape)
masked_quartets = io5["quartets"][idx:idx+self._chunksize, :]#[mask, :]
quarts = [list(j) for j in masked_quartets]
## format and print
#chunk = ["{},{}|{},{}:{}".format(*i+[j]) for i, j \
# in zip(quarts, weight)]
chunk = ["{},{}|{},{}".format(*i) for i in quarts]
outfile.write("\n".join(chunk)+"\n")
## close output file and h5 database
outfile.close()
io5.close() | [
"def",
"_dump_qmc",
"(",
"self",
")",
":",
"## open the h5 database",
"io5",
"=",
"h5py",
".",
"File",
"(",
"self",
".",
"database",
".",
"output",
",",
"'r'",
")",
"## create an output file for writing",
"self",
".",
"files",
".",
"qdump",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"dirs",
",",
"self",
".",
"name",
"+",
"\".quartets.txt\"",
")",
"LOGGER",
".",
"info",
"(",
"\"qdump file %s\"",
",",
"self",
".",
"files",
".",
"qdump",
")",
"outfile",
"=",
"open",
"(",
"self",
".",
"files",
".",
"qdump",
",",
"'w'",
")",
"## todo: should pull quarts order in randomly? or doesn't matter?",
"for",
"idx",
"in",
"xrange",
"(",
"0",
",",
"self",
".",
"params",
".",
"nquartets",
",",
"self",
".",
"_chunksize",
")",
":",
"## get mask of zero weight quartets",
"#mask = io5[\"weights\"][idx:idx+self.chunksize] != 0",
"#weight = io5[\"weights\"][idx:idx+self.chunksize][mask]",
"#LOGGER.info(\"exluded = %s, mask shape %s\", ",
"# self._chunksize - mask.shape[0], mask.shape)",
"#LOGGER.info('q shape %s', io5[\"quartets\"][idx:idx+self._chunksize].shape)",
"masked_quartets",
"=",
"io5",
"[",
"\"quartets\"",
"]",
"[",
"idx",
":",
"idx",
"+",
"self",
".",
"_chunksize",
",",
":",
"]",
"#[mask, :]",
"quarts",
"=",
"[",
"list",
"(",
"j",
")",
"for",
"j",
"in",
"masked_quartets",
"]",
"## format and print",
"#chunk = [\"{},{}|{},{}:{}\".format(*i+[j]) for i, j \\",
"# in zip(quarts, weight)]",
"chunk",
"=",
"[",
"\"{},{}|{},{}\"",
".",
"format",
"(",
"*",
"i",
")",
"for",
"i",
"in",
"quarts",
"]",
"outfile",
".",
"write",
"(",
"\"\\n\"",
".",
"join",
"(",
"chunk",
")",
"+",
"\"\\n\"",
")",
"## close output file and h5 database",
"outfile",
".",
"close",
"(",
")",
"io5",
".",
"close",
"(",
")"
]
| Makes a reduced array that excludes quartets with no information and
prints the quartets and weights to a file formatted for wQMC | [
"Makes",
"a",
"reduced",
"array",
"that",
"excludes",
"quartets",
"with",
"no",
"information",
"and",
"prints",
"the",
"quartets",
"and",
"weights",
"to",
"a",
"file",
"formatted",
"for",
"wQMC"
]
| python | valid |
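The dump loop above writes one "a,b|c,d" line per quartet for wQMC. The formatting step in isolation (the sample quartets are made up):

quartets = [(0, 1, 2, 3), (0, 2, 4, 5), (1, 3, 4, 6)]
lines = ["{},{}|{},{}".format(*q) for q in quartets]
print("\n".join(lines))
# 0,1|2,3
# 0,2|4,5
# 1,3|4,6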
abilian/abilian-core | abilian/services/security/service.py | https://github.com/abilian/abilian-core/blob/0a71275bf108c3d51e13ca9e093c0249235351e3/abilian/services/security/service.py#L210-L247 | def get_roles(self, principal, object=None, no_group_roles=False):
"""Get all the roles attached to given `principal`, on a given
`object`.
:param principal: a :class:`User` or :class:`Group`
:param object: an :class:`Entity`
:param no_group_roles: If `True`, return only direct roles, not roles
acquired through group membership.
"""
assert principal
if hasattr(principal, "is_anonymous") and principal.is_anonymous:
return [AnonymousRole]
query = db.session.query(RoleAssignment.role)
if isinstance(principal, Group):
filter_principal = RoleAssignment.group == principal
else:
filter_principal = RoleAssignment.user == principal
if not no_group_roles:
groups = [g.id for g in principal.groups]
if groups:
filter_principal |= RoleAssignment.group_id.in_(groups)
query = query.filter(filter_principal)
if object is not None:
assert isinstance(object, Entity)
query = query.filter(RoleAssignment.object == object)
roles = {i[0] for i in query.all()}
if object is not None:
for attr, role in (("creator", Creator), ("owner", Owner)):
if getattr(object, attr) == principal:
roles.add(role)
return list(roles) | [
"def",
"get_roles",
"(",
"self",
",",
"principal",
",",
"object",
"=",
"None",
",",
"no_group_roles",
"=",
"False",
")",
":",
"assert",
"principal",
"if",
"hasattr",
"(",
"principal",
",",
"\"is_anonymous\"",
")",
"and",
"principal",
".",
"is_anonymous",
":",
"return",
"[",
"AnonymousRole",
"]",
"query",
"=",
"db",
".",
"session",
".",
"query",
"(",
"RoleAssignment",
".",
"role",
")",
"if",
"isinstance",
"(",
"principal",
",",
"Group",
")",
":",
"filter_principal",
"=",
"RoleAssignment",
".",
"group",
"==",
"principal",
"else",
":",
"filter_principal",
"=",
"RoleAssignment",
".",
"user",
"==",
"principal",
"if",
"not",
"no_group_roles",
":",
"groups",
"=",
"[",
"g",
".",
"id",
"for",
"g",
"in",
"principal",
".",
"groups",
"]",
"if",
"groups",
":",
"filter_principal",
"|=",
"RoleAssignment",
".",
"group_id",
".",
"in_",
"(",
"groups",
")",
"query",
"=",
"query",
".",
"filter",
"(",
"filter_principal",
")",
"if",
"object",
"is",
"not",
"None",
":",
"assert",
"isinstance",
"(",
"object",
",",
"Entity",
")",
"query",
"=",
"query",
".",
"filter",
"(",
"RoleAssignment",
".",
"object",
"==",
"object",
")",
"roles",
"=",
"{",
"i",
"[",
"0",
"]",
"for",
"i",
"in",
"query",
".",
"all",
"(",
")",
"}",
"if",
"object",
"is",
"not",
"None",
":",
"for",
"attr",
",",
"role",
"in",
"(",
"(",
"\"creator\"",
",",
"Creator",
")",
",",
"(",
"\"owner\"",
",",
"Owner",
")",
")",
":",
"if",
"getattr",
"(",
"object",
",",
"attr",
")",
"==",
"principal",
":",
"roles",
".",
"add",
"(",
"role",
")",
"return",
"list",
"(",
"roles",
")"
]
| Get all the roles attached to given `principal`, on a given
`object`.
:param principal: a :class:`User` or :class:`Group`
:param object: an :class:`Entity`
:param no_group_roles: If `True`, return only direct roles, not roles
acquired through group membership. | [
"Get",
"all",
"the",
"roles",
"attached",
"to",
"given",
"principal",
"on",
"a",
"given",
"object",
"."
]
| python | train |
ellmetha/django-machina | machina/apps/forum_conversation/forum_attachments/cache.py | https://github.com/ellmetha/django-machina/blob/89ac083c1eaf1cfdeae6686ee094cc86362e8c69/machina/apps/forum_conversation/forum_attachments/cache.py#L82-L120 | def get(self, key):
""" Regenerates a MultiValueDict instance containing the files related to all file states
stored for the given key.
"""
upload = None
files_states = self.backend.get(key)
files = MultiValueDict()
if files_states:
for name, state in files_states.items():
f = BytesIO()
f.write(state['content'])
# If the post is too large, we cannot use a
# InMemoryUploadedFile instance.
if state['size'] > settings.FILE_UPLOAD_MAX_MEMORY_SIZE:
upload = TemporaryUploadedFile(
state['name'],
state['content_type'],
state['size'],
state['charset'],
)
upload.file = f
else:
f = BytesIO()
f.write(state['content'])
upload = InMemoryUploadedFile(
file=f,
field_name=name,
name=state['name'],
content_type=state['content_type'],
size=state['size'],
charset=state['charset'],
)
files[name] = upload
# Go to the first byte in the file for future use
upload.file.seek(0)
return files | [
"def",
"get",
"(",
"self",
",",
"key",
")",
":",
"upload",
"=",
"None",
"files_states",
"=",
"self",
".",
"backend",
".",
"get",
"(",
"key",
")",
"files",
"=",
"MultiValueDict",
"(",
")",
"if",
"files_states",
":",
"for",
"name",
",",
"state",
"in",
"files_states",
".",
"items",
"(",
")",
":",
"f",
"=",
"BytesIO",
"(",
")",
"f",
".",
"write",
"(",
"state",
"[",
"'content'",
"]",
")",
"# If the post is too large, we cannot use a",
"# InMemoryUploadedFile instance.",
"if",
"state",
"[",
"'size'",
"]",
">",
"settings",
".",
"FILE_UPLOAD_MAX_MEMORY_SIZE",
":",
"upload",
"=",
"TemporaryUploadedFile",
"(",
"state",
"[",
"'name'",
"]",
",",
"state",
"[",
"'content_type'",
"]",
",",
"state",
"[",
"'size'",
"]",
",",
"state",
"[",
"'charset'",
"]",
",",
")",
"upload",
".",
"file",
"=",
"f",
"else",
":",
"f",
"=",
"BytesIO",
"(",
")",
"f",
".",
"write",
"(",
"state",
"[",
"'content'",
"]",
")",
"upload",
"=",
"InMemoryUploadedFile",
"(",
"file",
"=",
"f",
",",
"field_name",
"=",
"name",
",",
"name",
"=",
"state",
"[",
"'name'",
"]",
",",
"content_type",
"=",
"state",
"[",
"'content_type'",
"]",
",",
"size",
"=",
"state",
"[",
"'size'",
"]",
",",
"charset",
"=",
"state",
"[",
"'charset'",
"]",
",",
")",
"files",
"[",
"name",
"]",
"=",
"upload",
"# Go to the first byte in the file for future use",
"upload",
".",
"file",
".",
"seek",
"(",
"0",
")",
"return",
"files"
]
| Regenerates a MultiValueDict instance containing the files related to all file states
stored for the given key. | [
"Regenerates",
"a",
"MultiValueDict",
"instance",
"containing",
"the",
"files",
"related",
"to",
"all",
"file",
"states",
"stored",
"for",
"the",
"given",
"key",
"."
]
| python | train |
markovmodel/msmtools | msmtools/flux/api.py | https://github.com/markovmodel/msmtools/blob/54dc76dd2113a0e8f3d15d5316abab41402941be/msmtools/flux/api.py#L233-L267 | def to_netflux(flux):
r"""Compute the netflux from the gross flux.
Parameters
----------
flux : (M, M) ndarray
Matrix of flux values between pairs of states.
Returns
-------
netflux : (M, M) ndarray
Matrix of netflux values between pairs of states.
Notes
-----
The netflux or effective current is defined as
.. math:: f_{ij}^{+}=\max \{ f_{ij}-f_{ji}, 0 \}
:math:`f_{ij}` is the flux for the transition from :math:`A` to
:math:`B`.
References
----------
.. [1] P. Metzner, C. Schuette and E. Vanden-Eijnden.
Transition Path Theory for Markov Jump Processes.
Multiscale Model Simul 7: 1192-1219 (2009)
"""
if issparse(flux):
return sparse.tpt.to_netflux(flux)
elif isdense(flux):
return dense.tpt.to_netflux(flux)
else:
raise _type_not_supported | [
"def",
"to_netflux",
"(",
"flux",
")",
":",
"if",
"issparse",
"(",
"flux",
")",
":",
"return",
"sparse",
".",
"tpt",
".",
"to_netflux",
"(",
"flux",
")",
"elif",
"isdense",
"(",
"flux",
")",
":",
"return",
"dense",
".",
"tpt",
".",
"to_netflux",
"(",
"flux",
")",
"else",
":",
"raise",
"_type_not_supported"
]
| r"""Compute the netflux from the gross flux.
Parameters
----------
flux : (M, M) ndarray
Matrix of flux values between pairs of states.
Returns
-------
netflux : (M, M) ndarray
Matrix of netflux values between pairs of states.
Notes
-----
The netflux or effective current is defined as
.. math:: f_{ij}^{+}=\max \{ f_{ij}-f_{ji}, 0 \}
:math:`f_{ij}` is the flux for the transition from :math:`A` to
:math:`B`.
References
----------
.. [1] P. Metzner, C. Schuette and E. Vanden-Eijnden.
Transition Path Theory for Markov Jump Processes.
Multiscale Model Simul 7: 1192-1219 (2009) | [
"r",
"Compute",
"the",
"netflux",
"from",
"the",
"gross",
"flux",
"."
]
| python | train |
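For the dense case, the netflux definition in the docstring above, f_ij^+ = max{f_ij - f_ji, 0}, is a one-line NumPy operation (sketch only; the real dense.tpt.to_netflux may differ in details):

import numpy as np

def netflux_dense(flux):
    # elementwise f_ij^+ = max(f_ij - f_ji, 0)
    return np.maximum(flux - flux.T, 0.0)

F = np.array([[0.0, 0.3, 0.1],
              [0.1, 0.0, 0.4],
              [0.2, 0.2, 0.0]])
print(netflux_dense(F))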
tensorflow/hub | examples/image_retraining/retrain.py | https://github.com/tensorflow/hub/blob/09f45963f6787322967b6fec61459f3ac56fbb27/examples/image_retraining/retrain.py#L270-L291 | def get_bottleneck_path(image_lists, label_name, index, bottleneck_dir,
category, module_name):
"""Returns a path to a bottleneck file for a label at the given index.
Args:
image_lists: OrderedDict of training images for each label.
label_name: Label string we want to get an image for.
index: Integer offset of the image we want. This will be moduloed by the
available number of images for the label, so it can be arbitrarily large.
bottleneck_dir: Folder string holding cached files of bottleneck values.
category: Name string of set to pull images from - training, testing, or
validation.
module_name: The name of the image module being used.
Returns:
File system path string to an image that meets the requested parameters.
"""
module_name = (module_name.replace('://', '~') # URL scheme.
.replace('/', '~') # URL and Unix paths.
.replace(':', '~').replace('\\', '~')) # Windows paths.
return get_image_path(image_lists, label_name, index, bottleneck_dir,
category) + '_' + module_name + '.txt' | [
"def",
"get_bottleneck_path",
"(",
"image_lists",
",",
"label_name",
",",
"index",
",",
"bottleneck_dir",
",",
"category",
",",
"module_name",
")",
":",
"module_name",
"=",
"(",
"module_name",
".",
"replace",
"(",
"'://'",
",",
"'~'",
")",
"# URL scheme.",
".",
"replace",
"(",
"'/'",
",",
"'~'",
")",
"# URL and Unix paths.",
".",
"replace",
"(",
"':'",
",",
"'~'",
")",
".",
"replace",
"(",
"'\\\\'",
",",
"'~'",
")",
")",
"# Windows paths.",
"return",
"get_image_path",
"(",
"image_lists",
",",
"label_name",
",",
"index",
",",
"bottleneck_dir",
",",
"category",
")",
"+",
"'_'",
"+",
"module_name",
"+",
"'.txt'"
]
| Returns a path to a bottleneck file for a label at the given index.
Args:
image_lists: OrderedDict of training images for each label.
label_name: Label string we want to get an image for.
index: Integer offset of the image we want. This will be moduloed by the
available number of images for the label, so it can be arbitrarily large.
bottleneck_dir: Folder string holding cached files of bottleneck values.
category: Name string of set to pull images from - training, testing, or
validation.
module_name: The name of the image module being used.
Returns:
File system path string to an image that meets the requested parameters. | [
"Returns",
"a",
"path",
"to",
"a",
"bottleneck",
"file",
"for",
"a",
"label",
"at",
"the",
"given",
"index",
"."
]
| python | train |
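The only transformation in get_bottleneck_path beyond path joining is flattening the module URL into a filesystem-safe suffix. That replacement chain on its own (the example URL is illustrative):

def sanitize_module_name(module_name):
    # URL scheme, path separators and Windows drive colons all become '~'
    return (module_name.replace('://', '~')
                       .replace('/', '~')
                       .replace(':', '~')
                       .replace('\\', '~'))

url = 'https://tfhub.dev/google/imagenet/inception_v3/feature_vector/1'
print(sanitize_module_name(url))
# https~tfhub.dev~google~imagenet~inception_v3~feature_vector~1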
bmcfee/muda | muda/deformers/util.py | https://github.com/bmcfee/muda/blob/ff82efdfaeb98da0a9f9124845826eb20536a9ba/muda/deformers/util.py#L40-L59 | def transform(self, jam):
'''Bypass transformations.
Parameters
----------
jam : pyjams.JAMS
A muda-enabled JAMS object
Yields
------
jam_out : pyjams.JAMS iterator
The first result is `jam` (unmodified), by reference
All subsequent results are generated by `transformer`
'''
# Step 1: yield the unmodified jam
yield jam
# Step 2: yield from the transformer
for jam_out in self.transformer.transform(jam):
yield jam_out | [
"def",
"transform",
"(",
"self",
",",
"jam",
")",
":",
"# Step 1: yield the unmodified jam",
"yield",
"jam",
"# Step 2: yield from the transformer",
"for",
"jam_out",
"in",
"self",
".",
"transformer",
".",
"transform",
"(",
"jam",
")",
":",
"yield",
"jam_out"
]
| Bypass transformations.
Parameters
----------
jam : pyjams.JAMS
A muda-enabled JAMS object
Yields
------
jam_out : pyjams.JAMS iterator
The first result is `jam` (unmodified), by reference
All subsequent results are generated by `transformer` | [
"Bypass",
"transformations",
"."
]
| python | valid |
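The transform method above implements a simple bypass: yield the input unchanged first, then everything the wrapped transformer produces. The same pattern stripped of the muda/JAMS specifics (the names here are illustrative, not part of the muda API):

def bypass(item, transformer):
    # first result is the untouched input, by reference
    yield item
    # all subsequent results come from the wrapped transformer
    for out in transformer(item):
        yield out

def double_and_triple(x):
    yield 2 * x
    yield 3 * x

print(list(bypass(5, double_and_triple)))   # [5, 10, 15]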
bokeh/bokeh | bokeh/models/sources.py | https://github.com/bokeh/bokeh/blob/dc8cf49e4e4302fd38537ad089ece81fbcca4737/bokeh/models/sources.py#L519-L674 | def patch(self, patches, setter=None):
''' Efficiently update data source columns at specific locations
If it is only necessary to update a small subset of data in a
``ColumnDataSource``, this method can be used to efficiently update only
the subset, instead of requiring the entire data set to be sent.
This method should be passed a dictionary that maps column names to
lists of tuples that describe a patch change to apply. To replace
individual items in columns entirely, the tuples should be of the
form:
.. code-block:: python
(index, new_value) # replace a single column value
# or
(slice, new_values) # replace several column values
Values at an index or slice will be replaced with the corresponding
new values.
In the case of columns whose values are other arrays or lists, (e.g.
image or patches glyphs), it is also possible to patch "subregions".
In this case the first item of the tuple should be a whose first
element is the index of the array item in the CDS patch, and whose
subsequent elements are integer indices or slices into the array item:
.. code-block:: python
# replace the entire 10th column of the 2nd array:
+----------------- index of item in column data source
|
| +--------- row subindex into array item
| |
| | +- column subindex into array item
V V V
([2, slice(None), 10], new_values)
Imagining a list of 2d NumPy arrays, the patch above is roughly
equivalent to:
.. code-block:: python
data = [arr1, arr2, ...] # list of 2d arrays
data[2][:, 10] = new_data
There are some limitations to the kinds of slices and data that can
be accepted.
* Negative ``start``, ``stop``, or ``step`` values for slices will
result in a ``ValueError``.
* In a slice, ``start > stop`` will result in a ``ValueError``
* When patching 1d or 2d subitems, the subitems must be NumPy arrays.
* New values must be supplied as a **flattened one-dimensional array**
of the appropriate size.
Args:
patches (dict[str, list[tuple]]) : lists of patches for each column
Returns:
None
Raises:
ValueError
Example:
The following example shows how to patch entire column elements. In this case,
.. code-block:: python
source = ColumnDataSource(data=dict(foo=[10, 20, 30], bar=[100, 200, 300]))
patches = {
'foo' : [ (slice(2), [11, 12]) ],
'bar' : [ (0, 101), (2, 301) ],
}
source.patch(patches)
After this operation, the value of the ``source.data`` will be:
.. code-block:: python
dict(foo=[11, 12, 30], bar=[101, 200, 301])
For a more comprehensive complete example, see :bokeh-tree:`examples/howto/patch_app.py`.
'''
import numpy as np
extra = set(patches.keys()) - set(self.data.keys())
if extra:
raise ValueError("Can only patch existing columns (extra: %s)" % ", ".join(sorted(extra)))
for name, patch in patches.items():
col_len = len(self.data[name])
for ind, value in patch:
# integer index, patch single value of 1d column
if isinstance(ind, int):
if ind > col_len or ind < 0:
raise ValueError("Out-of bounds index (%d) in patch for column: %s" % (ind, name))
# slice index, patch multiple values of 1d column
elif isinstance(ind, slice):
_check_slice(ind)
if ind.stop is not None and ind.stop > col_len:
raise ValueError("Out-of bounds slice index stop (%d) in patch for column: %s" % (ind.stop, name))
# multi-index, patch sub-regions of "n-d" column
elif isinstance(ind, (list, tuple)):
if len(ind) == 0:
raise ValueError("Empty (length zero) patch multi-index")
if len(ind) == 1:
raise ValueError("Patch multi-index must contain more than one subindex")
if not isinstance(ind[0], int):
raise ValueError("Initial patch sub-index may only be integer, got: %s" % ind[0])
if ind[0] > col_len or ind[0] < 0:
raise ValueError("Out-of bounds initial sub-index (%d) in patch for column: %s" % (ind, name))
if not isinstance(self.data[name][ind[0]], np.ndarray):
raise ValueError("Can only sub-patch into columns with NumPy array items")
if len(self.data[name][ind[0]].shape) != (len(ind)-1):
raise ValueError("Shape mismatch between patch slice and sliced data")
elif isinstance(ind[0], slice):
_check_slice(ind[0])
if ind[0].stop is not None and ind[0].stop > col_len:
raise ValueError("Out-of bounds initial slice sub-index stop (%d) in patch for column: %s" % (ind.stop, name))
# Note: bounds of sub-indices after the first are not checked!
for subind in ind[1:]:
if not isinstance(subind, (int, slice)):
raise ValueError("Invalid patch sub-index: %s" % subind)
if isinstance(subind, slice):
_check_slice(subind)
else:
raise ValueError("Invalid patch index: %s" % ind)
self.data._patch(self.document, self, patches, setter) | [
"def",
"patch",
"(",
"self",
",",
"patches",
",",
"setter",
"=",
"None",
")",
":",
"import",
"numpy",
"as",
"np",
"extra",
"=",
"set",
"(",
"patches",
".",
"keys",
"(",
")",
")",
"-",
"set",
"(",
"self",
".",
"data",
".",
"keys",
"(",
")",
")",
"if",
"extra",
":",
"raise",
"ValueError",
"(",
"\"Can only patch existing columns (extra: %s)\"",
"%",
"\", \"",
".",
"join",
"(",
"sorted",
"(",
"extra",
")",
")",
")",
"for",
"name",
",",
"patch",
"in",
"patches",
".",
"items",
"(",
")",
":",
"col_len",
"=",
"len",
"(",
"self",
".",
"data",
"[",
"name",
"]",
")",
"for",
"ind",
",",
"value",
"in",
"patch",
":",
"# integer index, patch single value of 1d column",
"if",
"isinstance",
"(",
"ind",
",",
"int",
")",
":",
"if",
"ind",
">",
"col_len",
"or",
"ind",
"<",
"0",
":",
"raise",
"ValueError",
"(",
"\"Out-of bounds index (%d) in patch for column: %s\"",
"%",
"(",
"ind",
",",
"name",
")",
")",
"# slice index, patch multiple values of 1d column",
"elif",
"isinstance",
"(",
"ind",
",",
"slice",
")",
":",
"_check_slice",
"(",
"ind",
")",
"if",
"ind",
".",
"stop",
"is",
"not",
"None",
"and",
"ind",
".",
"stop",
">",
"col_len",
":",
"raise",
"ValueError",
"(",
"\"Out-of bounds slice index stop (%d) in patch for column: %s\"",
"%",
"(",
"ind",
".",
"stop",
",",
"name",
")",
")",
"# multi-index, patch sub-regions of \"n-d\" column",
"elif",
"isinstance",
"(",
"ind",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"if",
"len",
"(",
"ind",
")",
"==",
"0",
":",
"raise",
"ValueError",
"(",
"\"Empty (length zero) patch multi-index\"",
")",
"if",
"len",
"(",
"ind",
")",
"==",
"1",
":",
"raise",
"ValueError",
"(",
"\"Patch multi-index must contain more than one subindex\"",
")",
"if",
"not",
"isinstance",
"(",
"ind",
"[",
"0",
"]",
",",
"int",
")",
":",
"raise",
"ValueError",
"(",
"\"Initial patch sub-index may only be integer, got: %s\"",
"%",
"ind",
"[",
"0",
"]",
")",
"if",
"ind",
"[",
"0",
"]",
">",
"col_len",
"or",
"ind",
"[",
"0",
"]",
"<",
"0",
":",
"raise",
"ValueError",
"(",
"\"Out-of bounds initial sub-index (%d) in patch for column: %s\"",
"%",
"(",
"ind",
",",
"name",
")",
")",
"if",
"not",
"isinstance",
"(",
"self",
".",
"data",
"[",
"name",
"]",
"[",
"ind",
"[",
"0",
"]",
"]",
",",
"np",
".",
"ndarray",
")",
":",
"raise",
"ValueError",
"(",
"\"Can only sub-patch into columns with NumPy array items\"",
")",
"if",
"len",
"(",
"self",
".",
"data",
"[",
"name",
"]",
"[",
"ind",
"[",
"0",
"]",
"]",
".",
"shape",
")",
"!=",
"(",
"len",
"(",
"ind",
")",
"-",
"1",
")",
":",
"raise",
"ValueError",
"(",
"\"Shape mismatch between patch slice and sliced data\"",
")",
"elif",
"isinstance",
"(",
"ind",
"[",
"0",
"]",
",",
"slice",
")",
":",
"_check_slice",
"(",
"ind",
"[",
"0",
"]",
")",
"if",
"ind",
"[",
"0",
"]",
".",
"stop",
"is",
"not",
"None",
"and",
"ind",
"[",
"0",
"]",
".",
"stop",
">",
"col_len",
":",
"raise",
"ValueError",
"(",
"\"Out-of bounds initial slice sub-index stop (%d) in patch for column: %s\"",
"%",
"(",
"ind",
".",
"stop",
",",
"name",
")",
")",
"# Note: bounds of sub-indices after the first are not checked!",
"for",
"subind",
"in",
"ind",
"[",
"1",
":",
"]",
":",
"if",
"not",
"isinstance",
"(",
"subind",
",",
"(",
"int",
",",
"slice",
")",
")",
":",
"raise",
"ValueError",
"(",
"\"Invalid patch sub-index: %s\"",
"%",
"subind",
")",
"if",
"isinstance",
"(",
"subind",
",",
"slice",
")",
":",
"_check_slice",
"(",
"subind",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"Invalid patch index: %s\"",
"%",
"ind",
")",
"self",
".",
"data",
".",
"_patch",
"(",
"self",
".",
"document",
",",
"self",
",",
"patches",
",",
"setter",
")"
]
| Efficiently update data source columns at specific locations
If it is only necessary to update a small subset of data in a
``ColumnDataSource``, this method can be used to efficiently update only
the subset, instead of requiring the entire data set to be sent.
This method should be passed a dictionary that maps column names to
lists of tuples that describe a patch change to apply. To replace
individual items in columns entirely, the tuples should be of the
form:
.. code-block:: python
(index, new_value) # replace a single column value
# or
(slice, new_values) # replace several column values
Values at an index or slice will be replaced with the corresponding
new values.
In the case of columns whose values are other arrays or lists, (e.g.
image or patches glyphs), it is also possible to patch "subregions".
In this case the first item of the tuple should be a whose first
element is the index of the array item in the CDS patch, and whose
subsequent elements are integer indices or slices into the array item:
.. code-block:: python
# replace the entire 10th column of the 2nd array:
+----------------- index of item in column data source
|
| +--------- row subindex into array item
| |
| | +- column subindex into array item
V V V
([2, slice(None), 10], new_values)
Imagining a list of 2d NumPy arrays, the patch above is roughly
equivalent to:
.. code-block:: python
data = [arr1, arr2, ...] # list of 2d arrays
data[2][:, 10] = new_data
There are some limitations to the kinds of slices and data that can
be accepted.
* Negative ``start``, ``stop``, or ``step`` values for slices will
result in a ``ValueError``.
* In a slice, ``start > stop`` will result in a ``ValueError``
* When patching 1d or 2d subitems, the subitems must be NumPy arrays.
* New values must be supplied as a **flattened one-dimensional array**
of the appropriate size.
Args:
patches (dict[str, list[tuple]]) : lists of patches for each column
Returns:
None
Raises:
ValueError
Example:
The following example shows how to patch entire column elements. In this case,
.. code-block:: python
source = ColumnDataSource(data=dict(foo=[10, 20, 30], bar=[100, 200, 300]))
patches = {
'foo' : [ (slice(2), [11, 12]) ],
'bar' : [ (0, 101), (2, 301) ],
}
source.patch(patches)
After this operation, the value of the ``source.data`` will be:
.. code-block:: python
dict(foo=[11, 12, 30], bar=[101, 200, 301])
For a more comprehensive complete example, see :bokeh-tree:`examples/howto/patch_app.py`. | [
"Efficiently",
"update",
"data",
"source",
"columns",
"at",
"specific",
"locations"
]
| python | train |
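A runnable version of the patch example from the docstring above (standard bokeh API; the values match the documented expected output):

from bokeh.models import ColumnDataSource

source = ColumnDataSource(data=dict(foo=[10, 20, 30], bar=[100, 200, 300]))
source.patch({
    'foo': [(slice(2), [11, 12])],   # replace foo[0:2] in one go
    'bar': [(0, 101), (2, 301)],     # replace two single values
})
print(source.data)   # foo=[11, 12, 30], bar=[101, 200, 301]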
lablup/backend.ai-client-py | src/ai/backend/client/resource_policy.py | https://github.com/lablup/backend.ai-client-py/blob/a063d774fea6f4350b89498c40d3c837ec3029a7/src/ai/backend/client/resource_policy.py#L23-L66 | async def create(cls, name: str,
default_for_unspecified: int,
total_resource_slots: int,
max_concurrent_sessions: int,
max_containers_per_session: int,
max_vfolder_count: int,
max_vfolder_size: int,
idle_timeout: int,
allowed_vfolder_hosts: Sequence[str],
fields: Iterable[str] = None) -> dict:
"""
Creates a new keypair resource policy with the given options.
You need an admin privilege for this operation.
"""
if fields is None:
fields = ('name',)
q = 'mutation($name: String!, $input: CreateKeyPairResourcePolicyInput!) {' \
+ \
' create_keypair_resource_policy(name: $name, props: $input) {' \
' ok msg resource_policy { $fields }' \
' }' \
'}'
q = q.replace('$fields', ' '.join(fields))
variables = {
'name': name,
'input': {
'default_for_unspecified': default_for_unspecified,
'total_resource_slots': total_resource_slots,
'max_concurrent_sessions': max_concurrent_sessions,
'max_containers_per_session': max_containers_per_session,
'max_vfolder_count': max_vfolder_count,
'max_vfolder_size': max_vfolder_size,
'idle_timeout': idle_timeout,
'allowed_vfolder_hosts': allowed_vfolder_hosts,
},
}
rqst = Request(cls.session, 'POST', '/admin/graphql')
rqst.set_json({
'query': q,
'variables': variables,
})
async with rqst.fetch() as resp:
data = await resp.json()
return data['create_keypair_resource_policy'] | [
"async",
"def",
"create",
"(",
"cls",
",",
"name",
":",
"str",
",",
"default_for_unspecified",
":",
"int",
",",
"total_resource_slots",
":",
"int",
",",
"max_concurrent_sessions",
":",
"int",
",",
"max_containers_per_session",
":",
"int",
",",
"max_vfolder_count",
":",
"int",
",",
"max_vfolder_size",
":",
"int",
",",
"idle_timeout",
":",
"int",
",",
"allowed_vfolder_hosts",
":",
"Sequence",
"[",
"str",
"]",
",",
"fields",
":",
"Iterable",
"[",
"str",
"]",
"=",
"None",
")",
"->",
"dict",
":",
"if",
"fields",
"is",
"None",
":",
"fields",
"=",
"(",
"'name'",
",",
")",
"q",
"=",
"'mutation($name: String!, $input: CreateKeyPairResourcePolicyInput!) {'",
"+",
"' create_keypair_resource_policy(name: $name, props: $input) {'",
"' ok msg resource_policy { $fields }'",
"' }'",
"'}'",
"q",
"=",
"q",
".",
"replace",
"(",
"'$fields'",
",",
"' '",
".",
"join",
"(",
"fields",
")",
")",
"variables",
"=",
"{",
"'name'",
":",
"name",
",",
"'input'",
":",
"{",
"'default_for_unspecified'",
":",
"default_for_unspecified",
",",
"'total_resource_slots'",
":",
"total_resource_slots",
",",
"'max_concurrent_sessions'",
":",
"max_concurrent_sessions",
",",
"'max_containers_per_session'",
":",
"max_containers_per_session",
",",
"'max_vfolder_count'",
":",
"max_vfolder_count",
",",
"'max_vfolder_size'",
":",
"max_vfolder_size",
",",
"'idle_timeout'",
":",
"idle_timeout",
",",
"'allowed_vfolder_hosts'",
":",
"allowed_vfolder_hosts",
",",
"}",
",",
"}",
"rqst",
"=",
"Request",
"(",
"cls",
".",
"session",
",",
"'POST'",
",",
"'/admin/graphql'",
")",
"rqst",
".",
"set_json",
"(",
"{",
"'query'",
":",
"q",
",",
"'variables'",
":",
"variables",
",",
"}",
")",
"async",
"with",
"rqst",
".",
"fetch",
"(",
")",
"as",
"resp",
":",
"data",
"=",
"await",
"resp",
".",
"json",
"(",
")",
"return",
"data",
"[",
"'create_keypair_resource_policy'",
"]"
]
| Creates a new keypair resource policy with the given options.
You need an admin privilege for this operation. | [
"Creates",
"a",
"new",
"keypair",
"resource",
"policy",
"with",
"the",
"given",
"options",
".",
"You",
"need",
"an",
"admin",
"privilege",
"for",
"this",
"operation",
"."
]
| python | train |
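The mutation above builds its GraphQL document by splicing the requested field names into a $fields placeholder before posting. That string-assembly step in isolation (the field names are examples taken from the input dict, not a guaranteed schema):

fields = ('name', 'total_resource_slots', 'idle_timeout')
template = ('mutation($name: String!, $input: CreateKeyPairResourcePolicyInput!) {'
            ' create_keypair_resource_policy(name: $name, props: $input) {'
            ' ok msg resource_policy { $fields }'
            ' }'
            '}')
query = template.replace('$fields', ' '.join(fields))
print(query)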
Qiskit/qiskit-terra | qiskit/visualization/text.py | https://github.com/Qiskit/qiskit-terra/blob/d4f58d903bc96341b816f7c35df936d6421267d1/qiskit/visualization/text.py#L483-L549 | def lines(self, line_length=None):
"""
Generates a list with lines. These lines form the text drawing.
Args:
line_length (int): Optional. Breaks the circuit drawing to this length. This
useful when the drawing does not fit in the console. If
None (default), it will try to guess the console width using
shutil.get_terminal_size(). If you don't want pagination
at all, set line_length=-1.
Returns:
list: A list of lines with the text drawing.
"""
if line_length is None:
line_length = self.line_length
if line_length is None:
if ('ipykernel' in sys.modules) and ('spyder' not in sys.modules):
line_length = 80
else:
line_length, _ = get_terminal_size()
noqubits = len(self.qregs)
layers = self.build_layers()
if not line_length:
line_length = self.line_length
layer_groups = [[]]
rest_of_the_line = line_length
for layerno, layer in enumerate(layers):
# Replace the Nones with EmptyWire
layers[layerno] = EmptyWire.fillup_layer(layer, noqubits)
TextDrawing.normalize_width(layer)
if line_length == -1:
# Do not use pagination (aka line breaking. aka ignore line_length).
layer_groups[-1].append(layer)
continue
# chop the layer to the line_length (pager)
layer_length = layers[layerno][0].length
if layer_length < rest_of_the_line:
layer_groups[-1].append(layer)
rest_of_the_line -= layer_length
else:
layer_groups[-1].append(BreakWire.fillup_layer(len(layer), '»'))
# New group
layer_groups.append([BreakWire.fillup_layer(len(layer), '«')])
rest_of_the_line = line_length - layer_groups[-1][-1][0].length
layer_groups[-1].append(
InputWire.fillup_layer(self.wire_names(with_initial_value=False)))
rest_of_the_line -= layer_groups[-1][-1][0].length
layer_groups[-1].append(layer)
rest_of_the_line -= layer_groups[-1][-1][0].length
lines = []
for layer_group in layer_groups:
wires = [i for i in zip(*layer_group)]
lines += TextDrawing.draw_wires(wires, self.vertically_compressed)
return lines | [
"def",
"lines",
"(",
"self",
",",
"line_length",
"=",
"None",
")",
":",
"if",
"line_length",
"is",
"None",
":",
"line_length",
"=",
"self",
".",
"line_length",
"if",
"line_length",
"is",
"None",
":",
"if",
"(",
"'ipykernel'",
"in",
"sys",
".",
"modules",
")",
"and",
"(",
"'spyder'",
"not",
"in",
"sys",
".",
"modules",
")",
":",
"line_length",
"=",
"80",
"else",
":",
"line_length",
",",
"_",
"=",
"get_terminal_size",
"(",
")",
"noqubits",
"=",
"len",
"(",
"self",
".",
"qregs",
")",
"layers",
"=",
"self",
".",
"build_layers",
"(",
")",
"if",
"not",
"line_length",
":",
"line_length",
"=",
"self",
".",
"line_length",
"layer_groups",
"=",
"[",
"[",
"]",
"]",
"rest_of_the_line",
"=",
"line_length",
"for",
"layerno",
",",
"layer",
"in",
"enumerate",
"(",
"layers",
")",
":",
"# Replace the Nones with EmptyWire",
"layers",
"[",
"layerno",
"]",
"=",
"EmptyWire",
".",
"fillup_layer",
"(",
"layer",
",",
"noqubits",
")",
"TextDrawing",
".",
"normalize_width",
"(",
"layer",
")",
"if",
"line_length",
"==",
"-",
"1",
":",
"# Do not use pagination (aka line breaking. aka ignore line_length).",
"layer_groups",
"[",
"-",
"1",
"]",
".",
"append",
"(",
"layer",
")",
"continue",
"# chop the layer to the line_length (pager)",
"layer_length",
"=",
"layers",
"[",
"layerno",
"]",
"[",
"0",
"]",
".",
"length",
"if",
"layer_length",
"<",
"rest_of_the_line",
":",
"layer_groups",
"[",
"-",
"1",
"]",
".",
"append",
"(",
"layer",
")",
"rest_of_the_line",
"-=",
"layer_length",
"else",
":",
"layer_groups",
"[",
"-",
"1",
"]",
".",
"append",
"(",
"BreakWire",
".",
"fillup_layer",
"(",
"len",
"(",
"layer",
")",
",",
"'»')",
")",
"",
"# New group",
"layer_groups",
".",
"append",
"(",
"[",
"BreakWire",
".",
"fillup_layer",
"(",
"len",
"(",
"layer",
")",
",",
"'«')",
"]",
")",
"",
"rest_of_the_line",
"=",
"line_length",
"-",
"layer_groups",
"[",
"-",
"1",
"]",
"[",
"-",
"1",
"]",
"[",
"0",
"]",
".",
"length",
"layer_groups",
"[",
"-",
"1",
"]",
".",
"append",
"(",
"InputWire",
".",
"fillup_layer",
"(",
"self",
".",
"wire_names",
"(",
"with_initial_value",
"=",
"False",
")",
")",
")",
"rest_of_the_line",
"-=",
"layer_groups",
"[",
"-",
"1",
"]",
"[",
"-",
"1",
"]",
"[",
"0",
"]",
".",
"length",
"layer_groups",
"[",
"-",
"1",
"]",
".",
"append",
"(",
"layer",
")",
"rest_of_the_line",
"-=",
"layer_groups",
"[",
"-",
"1",
"]",
"[",
"-",
"1",
"]",
"[",
"0",
"]",
".",
"length",
"lines",
"=",
"[",
"]",
"for",
"layer_group",
"in",
"layer_groups",
":",
"wires",
"=",
"[",
"i",
"for",
"i",
"in",
"zip",
"(",
"*",
"layer_group",
")",
"]",
"lines",
"+=",
"TextDrawing",
".",
"draw_wires",
"(",
"wires",
",",
"self",
".",
"vertically_compressed",
")",
"return",
"lines"
]
| Generates a list with lines. These lines form the text drawing.
Args:
line_length (int): Optional. Breaks the circuit drawing to this length. This
useful when the drawing does not fit in the console. If
None (default), it will try to guess the console width using
shutil.get_terminal_size(). If you don't want pagination
at all, set line_length=-1.
Returns:
list: A list of lines with the text drawing. | [
"Generates",
"a",
"list",
"with",
"lines",
".",
"These",
"lines",
"form",
"the",
"text",
"drawing",
".",
"Args",
":",
"line_length",
"(",
"int",
")",
":",
"Optional",
".",
"Breaks",
"the",
"circuit",
"drawing",
"to",
"this",
"length",
".",
"This",
"useful",
"when",
"the",
"drawing",
"does",
"not",
"fit",
"in",
"the",
"console",
".",
"If",
"None",
"(",
"default",
")",
"it",
"will",
"try",
"to",
"guess",
"the",
"console",
"width",
"using",
"shutil",
".",
"get_terminal_size",
"()",
".",
"If",
"you",
"don",
"t",
"want",
"pagination",
"at",
"all",
"set",
"line_length",
"=",
"-",
"1",
"."
]
| python | test |
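The record above documents a pager: drawing layers of known width are grouped so each group fits within line_length, with -1 disabling pagination entirely. Below is a minimal, self-contained sketch of that grouping idea only (it omits the break-wire and input-wire columns); `paginate` and the sample widths are illustrative names and values, not qiskit API.

```python
# Hypothetical sketch of the width-based grouping behind TextDrawing.lines.
def paginate(widths, line_length):
    """Group column widths into pages whose total width stays within line_length."""
    if line_length == -1:            # -1 means "ignore line_length": one big page
        return [list(widths)]
    pages = [[]]
    remaining = line_length
    for width in widths:
        if width < remaining:        # column still fits on the current page
            pages[-1].append(width)
            remaining -= width
        else:                        # start a new page and reset the budget
            pages.append([width])
            remaining = line_length - width
    return pages

print(paginate([10, 25, 40, 30, 15], 80))   # [[10, 25, 40], [30, 15]]
```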
simpleai-team/simpleai | samples/search/missioners.py | https://github.com/simpleai-team/simpleai/blob/2836befa7e970013f62e0ee75562652aacac6f65/samples/search/missioners.py#L27-L34 | def _is_valid(self, s):
'''Check if a state is valid.'''
# valid states: no more cannibals than missioners on each side,
# and numbers between 0 and 3
return ((s[0] >= s[1] or s[0] == 0)) and \
((3 - s[0]) >= (3 - s[1]) or s[0] == 3) and \
(0 <= s[0] <= 3) and \
(0 <= s[1] <= 3) | [
"def",
"_is_valid",
"(",
"self",
",",
"s",
")",
":",
"# valid states: no more cannibals than missioners on each side,",
"# and numbers between 0 and 3",
"return",
"(",
"(",
"s",
"[",
"0",
"]",
">=",
"s",
"[",
"1",
"]",
"or",
"s",
"[",
"0",
"]",
"==",
"0",
")",
")",
"and",
"(",
"(",
"3",
"-",
"s",
"[",
"0",
"]",
")",
">=",
"(",
"3",
"-",
"s",
"[",
"1",
"]",
")",
"or",
"s",
"[",
"0",
"]",
"==",
"3",
")",
"and",
"(",
"0",
"<=",
"s",
"[",
"0",
"]",
"<=",
"3",
")",
"and",
"(",
"0",
"<=",
"s",
"[",
"1",
"]",
"<=",
"3",
")"
]
| Check if a state is valid. | [
"Check",
"if",
"a",
"state",
"is",
"valid",
"."
]
| python | train |
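For the missioners record above, here is a self-contained restatement of the same validity rule as a plain function; `is_valid_state` and the sample states are illustrative and not part of simpleai.

```python
# Hedged re-statement of the rule: cannibals may never outnumber missioners
# on either bank (unless that bank has no missioners), and counts stay in 0..3.
def is_valid_state(missioners, cannibals, total=3):
    left_ok = missioners >= cannibals or missioners == 0
    right_ok = (total - missioners) >= (total - cannibals) or missioners == total
    in_range = 0 <= missioners <= total and 0 <= cannibals <= total
    return left_ok and right_ok and in_range

print(is_valid_state(3, 1))   # True
print(is_valid_state(1, 2))   # False: missioners outnumbered on one bank
```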
rosenbrockc/fortpy | fortpy/isense/builtin.py | https://github.com/rosenbrockc/fortpy/blob/1ed0757c52d549e41d9d44bdea68cb89529293a5/fortpy/isense/builtin.py#L11-L33 | def load(parser, serializer):
"""Returns a dictionary of builtin functions for Fortran. Checks the
cache first to see if we have a serialized version. If we don't, it
loads it from the XML file.
:arg parser: the DocParser instance for parsing the XML tags.
:arg serializer: a Serializer instance from the CodeParser to cache
the loaded XML file.
"""
fortdir = os.path.dirname(fortpy.__file__)
xmlpath = os.path.join(fortdir, "isense", "builtin.xml")
if not os.path.isfile(xmlpath):
return {}
changed_time = os.path.getmtime(xmlpath)
cached = serializer.load_module("builtin.xml", changed_time)
if cached is None:
result = _load_builtin_xml(xmlpath, parser)
serializer.save_module("builtin.xml", result, changed_time)
else:
result = cached
return result | [
"def",
"load",
"(",
"parser",
",",
"serializer",
")",
":",
"fortdir",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"fortpy",
".",
"__file__",
")",
"xmlpath",
"=",
"os",
".",
"path",
".",
"join",
"(",
"fortdir",
",",
"\"isense\"",
",",
"\"builtin.xml\"",
")",
"if",
"not",
"os",
".",
"path",
".",
"isfile",
"(",
"xmlpath",
")",
":",
"return",
"{",
"}",
"changed_time",
"=",
"os",
".",
"path",
".",
"getmtime",
"(",
"xmlpath",
")",
"cached",
"=",
"serializer",
".",
"load_module",
"(",
"\"builtin.xml\"",
",",
"changed_time",
")",
"if",
"cached",
"is",
"None",
":",
"result",
"=",
"_load_builtin_xml",
"(",
"xmlpath",
",",
"parser",
")",
"serializer",
".",
"save_module",
"(",
"\"builtin.xml\"",
",",
"result",
",",
"changed_time",
")",
"else",
":",
"result",
"=",
"cached",
"return",
"result"
]
| Returns a dictionary of builtin functions for Fortran. Checks the
cache first to see if we have a serialized version. If we don't, it
loads it from the XML file.
:arg parser: the DocParser instance for parsing the XML tags.
:arg serializer: a Serializer instance from the CodeParser to cache
the loaded XML file. | [
"Returns",
"a",
"dictionary",
"of",
"builtin",
"functions",
"for",
"Fortran",
".",
"Checks",
"the",
"cache",
"first",
"to",
"see",
"if",
"we",
"have",
"a",
"serialized",
"version",
".",
"If",
"we",
"don",
"t",
"it",
"loads",
"it",
"from",
"the",
"XML",
"file",
"."
]
| python | train |
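The fortpy record above follows a common mtime-keyed caching pattern: reparse the XML only when the file's modification time has changed. Below is a generic, self-contained sketch of that pattern; `load_cached`, the `parse_xml` callable, and the module-level cache are hypothetical stand-ins, not fortpy's serializer API.

```python
import os

_cache = {}  # filename -> (mtime at parse time, parsed result)

def load_cached(path, parse_xml):
    """Return the parsed file, reusing the cached result while mtime is unchanged."""
    if not os.path.isfile(path):
        return {}
    mtime = os.path.getmtime(path)
    key = os.path.basename(path)
    cached = _cache.get(key)
    if cached is not None and cached[0] == mtime:   # cache hit: file unchanged
        return cached[1]
    result = parse_xml(path)                        # cache miss: parse and remember
    _cache[key] = (mtime, result)
    return result
```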
ktbyers/netmiko | netmiko/citrix/netscaler_ssh.py | https://github.com/ktbyers/netmiko/blob/54e6116c0b4664de2123081937e0a9a27bdfdfea/netmiko/citrix/netscaler_ssh.py#L53-L60 | def strip_prompt(self, a_string):
""" Strip 'Done' from command output """
output = super(NetscalerSSH, self).strip_prompt(a_string)
lines = output.split(self.RESPONSE_RETURN)
if "Done" in lines[-1]:
return self.RESPONSE_RETURN.join(lines[:-1])
else:
return output | [
"def",
"strip_prompt",
"(",
"self",
",",
"a_string",
")",
":",
"output",
"=",
"super",
"(",
"NetscalerSSH",
",",
"self",
")",
".",
"strip_prompt",
"(",
"a_string",
")",
"lines",
"=",
"output",
".",
"split",
"(",
"self",
".",
"RESPONSE_RETURN",
")",
"if",
"\"Done\"",
"in",
"lines",
"[",
"-",
"1",
"]",
":",
"return",
"self",
".",
"RESPONSE_RETURN",
".",
"join",
"(",
"lines",
"[",
":",
"-",
"1",
"]",
")",
"else",
":",
"return",
"output"
]
| Strip 'Done' from command output | [
"Strip",
"Done",
"from",
"command",
"output"
]
| python | train |
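A standalone sketch of the trailing-status stripping shown in the netmiko record above; `strip_done` is an illustrative name, and `"\n"` is assumed as the response return character.

```python
RESPONSE_RETURN = "\n"   # assumed line separator for the device output

def strip_done(output):
    """Drop a final ' Done' status line, mirroring the record's strip_prompt."""
    lines = output.split(RESPONSE_RETURN)
    if "Done" in lines[-1]:
        return RESPONSE_RETURN.join(lines[:-1])
    return output

print(repr(strip_done("show ns ip output\n Done")))   # trailing ' Done' line removed
```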
mfcloud/python-zvm-sdk | zvmsdk/database.py | https://github.com/mfcloud/python-zvm-sdk/blob/de9994ceca764f5460ce51bd74237986341d8e3c/zvmsdk/database.py#L670-L683 | def get_comments_by_userid(self, userid):
""" Get comments record.
output should be like: {'k1': 'v1', 'k2': 'v2'}
"""
userid = userid
with get_guest_conn() as conn:
res = conn.execute("SELECT comments FROM guests "
"WHERE userid=?", (userid,))
result = res.fetchall()
comments = {}
if result[0][0]:
comments = json.loads(result[0][0])
return comments | [
"def",
"get_comments_by_userid",
"(",
"self",
",",
"userid",
")",
":",
"userid",
"=",
"userid",
"with",
"get_guest_conn",
"(",
")",
"as",
"conn",
":",
"res",
"=",
"conn",
".",
"execute",
"(",
"\"SELECT comments FROM guests \"",
"\"WHERE userid=?\"",
",",
"(",
"userid",
",",
")",
")",
"result",
"=",
"res",
".",
"fetchall",
"(",
")",
"comments",
"=",
"{",
"}",
"if",
"result",
"[",
"0",
"]",
"[",
"0",
"]",
":",
"comments",
"=",
"json",
".",
"loads",
"(",
"result",
"[",
"0",
"]",
"[",
"0",
"]",
")",
"return",
"comments"
]
| Get comments record.
output should be like: {'k1': 'v1', 'k2': 'v2'} | [
"Get",
"comments",
"record",
".",
"output",
"should",
"be",
"like",
":",
"{",
"k1",
":",
"v1",
"k2",
":",
"v2",
"}"
]
| python | train |
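The record above selects a JSON-encoded comments column and decodes it. Here is a self-contained sketch of the same query-then-`json.loads` pattern against an in-memory sqlite3 table; the table layout follows the record, while the sample row is invented.

```python
import json
import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE guests (userid TEXT, comments TEXT)")
conn.execute("INSERT INTO guests VALUES (?, ?)",
             ("LINUX001", json.dumps({"owner": "ops", "note": "test guest"})))

res = conn.execute("SELECT comments FROM guests WHERE userid=?", ("LINUX001",))
result = res.fetchall()
comments = json.loads(result[0][0]) if result and result[0][0] else {}
print(comments)   # {'owner': 'ops', 'note': 'test guest'}
```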
cloudmesh/cloudmesh-common | cloudmesh/common/StopWatch.py | https://github.com/cloudmesh/cloudmesh-common/blob/ae4fae09cd78205d179ea692dc58f0b0c8fea2b8/cloudmesh/common/StopWatch.py#L57-L70 | def get(cls, name):
"""
returns the time of the timer.
:param name: the name of the timer
:type name: string
:rtype: the elapsed time
"""
if name in cls.timer_end:
cls.timer_elapsed[name] = cls.timer_end[name] - \
cls.timer_start[name]
return cls.timer_elapsed[name]
else:
return "undefined" | [
"def",
"get",
"(",
"cls",
",",
"name",
")",
":",
"if",
"name",
"in",
"cls",
".",
"timer_end",
":",
"cls",
".",
"timer_elapsed",
"[",
"name",
"]",
"=",
"cls",
".",
"timer_end",
"[",
"name",
"]",
"-",
"cls",
".",
"timer_start",
"[",
"name",
"]",
"return",
"cls",
".",
"timer_elapsed",
"[",
"name",
"]",
"else",
":",
"return",
"\"undefined\""
]
| returns the time of the timer.
:param name: the name of the timer
:type name: string
:rtype: the elapsed time | [
"returns",
"the",
"time",
"of",
"the",
"timer",
"."
]
| python | train |
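To show how the `get` classmethod above is meant to be driven, here is a minimal self-contained re-creation with the companion start/stop bookkeeping; `MiniStopWatch` illustrates the pattern and is not the cloudmesh implementation itself.

```python
import time

class MiniStopWatch:
    timer_start, timer_end, timer_elapsed = {}, {}, {}

    @classmethod
    def start(cls, name):
        cls.timer_start[name] = time.time()

    @classmethod
    def stop(cls, name):
        cls.timer_end[name] = time.time()

    @classmethod
    def get(cls, name):
        if name in cls.timer_end:          # only stopped timers have a value
            cls.timer_elapsed[name] = cls.timer_end[name] - cls.timer_start[name]
            return cls.timer_elapsed[name]
        return "undefined"

MiniStopWatch.start("demo")
time.sleep(0.01)
MiniStopWatch.stop("demo")
print(MiniStopWatch.get("demo"))      # small positive float
print(MiniStopWatch.get("missing"))   # 'undefined'
```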