repo (string, 7-55 chars) | path (string, 4-223 chars) | url (string, 87-315 chars) | code (string, 75-104k chars) | code_tokens (list) | docstring (string, 1-46.9k chars) | docstring_tokens (list) | language (string, 1 value) | partition (string, 3 values) | avg_line_len (float64, 7.91-980)
---|---|---|---|---|---|---|---|---|---|
ossobv/dutree | dutree/dutree.py | https://github.com/ossobv/dutree/blob/adceeeb17f9fd70a7ed9c674850d7015d820eb2a/dutree/dutree.py#L311-L318 | def _normpath(self, pathname):
"Return path normalized for duscan usage: no trailing slash."
if pathname == '/':
pathname = ''
elif pathname.endswith('/'):
pathname = pathname[:-1]
assert not pathname.endswith('/'), pathname
return pathname | [
"def",
"_normpath",
"(",
"self",
",",
"pathname",
")",
":",
"if",
"pathname",
"==",
"'/'",
":",
"pathname",
"=",
"''",
"elif",
"pathname",
".",
"endswith",
"(",
"'/'",
")",
":",
"pathname",
"=",
"pathname",
"[",
":",
"-",
"1",
"]",
"assert",
"not",
"pathname",
".",
"endswith",
"(",
"'/'",
")",
",",
"pathname",
"return",
"pathname"
]
| Return path normalized for duscan usage: no trailing slash. | [
"Return",
"path",
"normalized",
"for",
"duscan",
"usage",
":",
"no",
"trailing",
"slash",
"."
]
| python | train | 37.125 |
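A standalone sketch of the normalization rule in the row above, for quick verification (the helper name and the asserts are illustrative, not part of dutree's API):

```python
# Rule from _normpath: '/' maps to '', any other trailing slash is stripped.
def normpath(pathname):
    if pathname == '/':
        return ''
    return pathname[:-1] if pathname.endswith('/') else pathname

assert normpath('/') == ''
assert normpath('/var/log/') == '/var/log'
assert normpath('relative/dir') == 'relative/dir'
```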
gem/oq-engine | openquake/baselib/node.py | https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/baselib/node.py#L363-L377 | def _displayattrs(attrib, expandattrs):
"""
Helper function to display the attributes of a Node object in lexicographic
order.
:param attrib: dictionary with the attributes
:param expandattrs: if True also displays the value of the attributes
"""
if not attrib:
return ''
if expandattrs:
alist = ['%s=%r' % item for item in sorted(attrib.items())]
else:
alist = list(attrib)
return '{%s}' % ', '.join(alist) | [
"def",
"_displayattrs",
"(",
"attrib",
",",
"expandattrs",
")",
":",
"if",
"not",
"attrib",
":",
"return",
"''",
"if",
"expandattrs",
":",
"alist",
"=",
"[",
"'%s=%r'",
"%",
"item",
"for",
"item",
"in",
"sorted",
"(",
"attrib",
".",
"items",
"(",
")",
")",
"]",
"else",
":",
"alist",
"=",
"list",
"(",
"attrib",
")",
"return",
"'{%s}'",
"%",
"', '",
".",
"join",
"(",
"alist",
")"
]
| Helper function to display the attributes of a Node object in lexicographic
order.
:param attrib: dictionary with the attributes
:param expandattrs: if True also displays the value of the attributes | [
"Helper",
"function",
"to",
"display",
"the",
"attributes",
"of",
"a",
"Node",
"object",
"in",
"lexicographic",
"order",
"."
]
| python | train | 30.533333 |
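Since `_displayattrs` is self-contained, a minimal rerun shows what `expandattrs` toggles (a sketch; the real helper lives in `openquake.baselib.node`):

```python
def displayattrs(attrib, expandattrs):
    if not attrib:
        return ''
    if expandattrs:
        alist = ['%s=%r' % item for item in sorted(attrib.items())]
    else:
        alist = list(attrib)
    return '{%s}' % ', '.join(alist)

print(displayattrs({'b': 1, 'a': 'x'}, expandattrs=True))   # {a='x', b=1}
print(displayattrs({'b': 1, 'a': 'x'}, expandattrs=False))  # {b, a} (keys only)
```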
ArabellaTech/django-basic-cms | basic_cms/placeholders.py | https://github.com/ArabellaTech/django-basic-cms/blob/863f3c6098606f663994930cd8e7723ad0c07caf/basic_cms/placeholders.py#L35-L75 | def parse_placeholder(parser, token):
"""Parse the `PlaceholderNode` parameters.
Return a tuple with the name and parameters."""
bits = token.split_contents()
count = len(bits)
error_string = '%r tag requires at least one argument' % bits[0]
if count <= 1:
raise TemplateSyntaxError(error_string)
name = bits[1]
remaining = bits[2:]
params = {}
simple_options = ['parsed', 'inherited', 'untranslated']
param_options = ['as', 'on', 'with']
all_options = simple_options + param_options
while remaining:
bit = remaining[0]
if bit not in all_options:
raise TemplateSyntaxError(
"%r is not a correct option for a placeholder" % bit)
if bit in param_options:
if len(remaining) < 2:
raise TemplateSyntaxError(
"Placeholder option '%s' needs a parameter" % bit)
if bit == 'as':
params['as_varname'] = remaining[1]
if bit == 'with':
params['widget'] = remaining[1]
if bit == 'on':
params['page'] = remaining[1]
remaining = remaining[2:]
elif bit == 'parsed':
params['parsed'] = True
remaining = remaining[1:]
elif bit == 'inherited':
params['inherited'] = True
remaining = remaining[1:]
elif bit == 'untranslated':
params['untranslated'] = True
remaining = remaining[1:]
return name, params | [
"def",
"parse_placeholder",
"(",
"parser",
",",
"token",
")",
":",
"bits",
"=",
"token",
".",
"split_contents",
"(",
")",
"count",
"=",
"len",
"(",
"bits",
")",
"error_string",
"=",
"'%r tag requires at least one argument'",
"%",
"bits",
"[",
"0",
"]",
"if",
"count",
"<=",
"1",
":",
"raise",
"TemplateSyntaxError",
"(",
"error_string",
")",
"name",
"=",
"bits",
"[",
"1",
"]",
"remaining",
"=",
"bits",
"[",
"2",
":",
"]",
"params",
"=",
"{",
"}",
"simple_options",
"=",
"[",
"'parsed'",
",",
"'inherited'",
",",
"'untranslated'",
"]",
"param_options",
"=",
"[",
"'as'",
",",
"'on'",
",",
"'with'",
"]",
"all_options",
"=",
"simple_options",
"+",
"param_options",
"while",
"remaining",
":",
"bit",
"=",
"remaining",
"[",
"0",
"]",
"if",
"bit",
"not",
"in",
"all_options",
":",
"raise",
"TemplateSyntaxError",
"(",
"\"%r is not an correct option for a placeholder\"",
"%",
"bit",
")",
"if",
"bit",
"in",
"param_options",
":",
"if",
"len",
"(",
"remaining",
")",
"<",
"2",
":",
"raise",
"TemplateSyntaxError",
"(",
"\"Placeholder option '%s' need a parameter\"",
"%",
"bit",
")",
"if",
"bit",
"==",
"'as'",
":",
"params",
"[",
"'as_varname'",
"]",
"=",
"remaining",
"[",
"1",
"]",
"if",
"bit",
"==",
"'with'",
":",
"params",
"[",
"'widget'",
"]",
"=",
"remaining",
"[",
"1",
"]",
"if",
"bit",
"==",
"'on'",
":",
"params",
"[",
"'page'",
"]",
"=",
"remaining",
"[",
"1",
"]",
"remaining",
"=",
"remaining",
"[",
"2",
":",
"]",
"elif",
"bit",
"==",
"'parsed'",
":",
"params",
"[",
"'parsed'",
"]",
"=",
"True",
"remaining",
"=",
"remaining",
"[",
"1",
":",
"]",
"elif",
"bit",
"==",
"'inherited'",
":",
"params",
"[",
"'inherited'",
"]",
"=",
"True",
"remaining",
"=",
"remaining",
"[",
"1",
":",
"]",
"elif",
"bit",
"==",
"'untranslated'",
":",
"params",
"[",
"'untranslated'",
"]",
"=",
"True",
"remaining",
"=",
"remaining",
"[",
"1",
":",
"]",
"return",
"name",
",",
"params"
]
| Parse the `PlaceholderNode` parameters.
Return a tuple with the name and parameters. | [
"Parse",
"the",
"PlaceholderNode",
"parameters",
"."
]
| python | train | 36.439024 |
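For context on the grammar this parser accepts: `as`, `on` and `with` each consume one following argument, while `parsed`, `inherited` and `untranslated` are bare flags. A hedged check, assuming `parse_placeholder` (above) is in scope:

```python
# FakeToken is a simplified stand-in for django.template.base.Token: only
# split_contents() is needed here, and this naive split ignores quoted arguments.
class FakeToken:
    def __init__(self, contents):
        self.contents = contents
    def split_contents(self):
        return self.contents.split()

name, params = parse_placeholder(None, FakeToken(
    "placeholder body on news_page parsed as body_content"))
print(name)    # body
print(params)  # {'page': 'news_page', 'parsed': True, 'as_varname': 'body_content'}
```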
fermiPy/fermipy | fermipy/hpx_utils.py | https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/hpx_utils.py#L686-L704 | def write_fits(self, data, outfile, extname="SKYMAP", clobber=True):
""" Write input data to a FITS file
data : The data being stored
outfile : The name of the output file
extname : The HDU extension name
clobber : True -> overwrite existing files
"""
hdu_prim = fits.PrimaryHDU()
hdu_hpx = self.make_hdu(data, extname=extname)
hl = [hdu_prim, hdu_hpx]
if self.conv.energy_hdu == 'EBOUNDS':
hdu_energy = self.make_energy_bounds_hdu()
elif self.conv.energy_hdu == 'ENERGIES':
hdu_energy = self.make_energies_hdu()
if hdu_energy is not None:
hl.append(hdu_energy)
hdulist = fits.HDUList(hl)
hdulist.writeto(outfile, overwrite=clobber) | [
"def",
"write_fits",
"(",
"self",
",",
"data",
",",
"outfile",
",",
"extname",
"=",
"\"SKYMAP\"",
",",
"clobber",
"=",
"True",
")",
":",
"hdu_prim",
"=",
"fits",
".",
"PrimaryHDU",
"(",
")",
"hdu_hpx",
"=",
"self",
".",
"make_hdu",
"(",
"data",
",",
"extname",
"=",
"extname",
")",
"hl",
"=",
"[",
"hdu_prim",
",",
"hdu_hpx",
"]",
"if",
"self",
".",
"conv",
".",
"energy_hdu",
"==",
"'EBOUNDS'",
":",
"hdu_energy",
"=",
"self",
".",
"make_energy_bounds_hdu",
"(",
")",
"elif",
"self",
".",
"conv",
".",
"energy_hdu",
"==",
"'ENERGIES'",
":",
"hdu_energy",
"=",
"self",
".",
"make_energies_hdu",
"(",
")",
"if",
"hdu_energy",
"is",
"not",
"None",
":",
"hl",
".",
"append",
"(",
"hdu_energy",
")",
"hdulist",
"=",
"fits",
".",
"HDUList",
"(",
"hl",
")",
"hdulist",
".",
"writeto",
"(",
"outfile",
",",
"overwrite",
"=",
"clobber",
")"
]
| Write input data to a FITS file
data : The data being stored
outfile : The name of the output file
extname : The HDU extension name
clobber : True -> overwrite existing files | [
"Write",
"input",
"data",
"to",
"a",
"FITS",
"file"
]
| python | train | 41.157895 |
etgalloway/fullqualname | fullqualname.py | https://github.com/etgalloway/fullqualname/blob/c16fa82880219cf91cdcd5466db9bf2099592c59/fullqualname.py#L128-L146 | def _fullqualname_builtin_py2(obj):
"""Fully qualified name for 'builtin_function_or_method' objects
in Python 2.
"""
if obj.__self__ is None:
# built-in functions
module = obj.__module__
qualname = obj.__name__
else:
# built-in methods
if inspect.isclass(obj.__self__):
cls = obj.__self__
else:
cls = obj.__self__.__class__
module = cls.__module__
qualname = cls.__name__ + '.' + obj.__name__
return module + '.' + qualname | [
"def",
"_fullqualname_builtin_py2",
"(",
"obj",
")",
":",
"if",
"obj",
".",
"__self__",
"is",
"None",
":",
"# built-in functions",
"module",
"=",
"obj",
".",
"__module__",
"qualname",
"=",
"obj",
".",
"__name__",
"else",
":",
"# built-in methods",
"if",
"inspect",
".",
"isclass",
"(",
"obj",
".",
"__self__",
")",
":",
"cls",
"=",
"obj",
".",
"__self__",
"else",
":",
"cls",
"=",
"obj",
".",
"__self__",
".",
"__class__",
"module",
"=",
"cls",
".",
"__module__",
"qualname",
"=",
"cls",
".",
"__name__",
"+",
"'.'",
"+",
"obj",
".",
"__name__",
"return",
"module",
"+",
"'.'",
"+",
"qualname"
]
| Fully qualified name for 'builtin_function_or_method' objects
in Python 2. | [
"Fully",
"qualified",
"name",
"for",
"builtin_function_or_method",
"objects",
"in",
"Python",
"2",
"."
]
| python | train | 27.368421 |
pyrogram/pyrogram | pyrogram/client/types/messages_and_media/message.py | https://github.com/pyrogram/pyrogram/blob/e7258a341ba905cfa86264c22040654db732ec1c/pyrogram/client/types/messages_and_media/message.py#L652-L727 | def reply(
self,
text: str,
quote: bool = None,
parse_mode: str = "",
disable_web_page_preview: bool = None,
disable_notification: bool = None,
reply_to_message_id: int = None,
reply_markup=None
) -> "Message":
"""Bound method *reply* of :obj:`Message <pyrogram.Message>`.
Use as a shortcut for:
.. code-block:: python
client.send_message(
chat_id=message.chat.id,
text="hello",
reply_to_message_id=message.message_id
)
Example:
.. code-block:: python
message.reply("hello", quote=True)
Args:
text (``str``):
Text of the message to be sent.
quote (``bool``, *optional*):
If ``True``, the message will be sent as a reply to this message.
If *reply_to_message_id* is passed, this parameter will be ignored.
Defaults to ``True`` in group chats and ``False`` in private chats.
parse_mode (``str``, *optional*):
Use :obj:`MARKDOWN <pyrogram.ParseMode.MARKDOWN>` or :obj:`HTML <pyrogram.ParseMode.HTML>`
if you want Telegram apps to show bold, italic, fixed-width text or inline URLs in your message.
Defaults to Markdown.
disable_web_page_preview (``bool``, *optional*):
Disables link previews for links in this message.
disable_notification (``bool``, *optional*):
Sends the message silently.
Users will receive a notification with no sound.
reply_to_message_id (``int``, *optional*):
If the message is a reply, ID of the original message.
reply_markup (:obj:`InlineKeyboardMarkup` | :obj:`ReplyKeyboardMarkup` | :obj:`ReplyKeyboardRemove` | :obj:`ForceReply`, *optional*):
Additional interface options. An object for an inline keyboard, custom reply keyboard,
instructions to remove reply keyboard or to force a reply from the user.
Returns:
On success, the sent Message is returned.
Raises:
:class:`RPCError <pyrogram.RPCError>`
"""
if quote is None:
quote = self.chat.type != "private"
if reply_to_message_id is None and quote:
reply_to_message_id = self.message_id
return self._client.send_message(
chat_id=self.chat.id,
text=text,
parse_mode=parse_mode,
disable_web_page_preview=disable_web_page_preview,
disable_notification=disable_notification,
reply_to_message_id=reply_to_message_id,
reply_markup=reply_markup
) | [
"def",
"reply",
"(",
"self",
",",
"text",
":",
"str",
",",
"quote",
":",
"bool",
"=",
"None",
",",
"parse_mode",
":",
"str",
"=",
"\"\"",
",",
"disable_web_page_preview",
":",
"bool",
"=",
"None",
",",
"disable_notification",
":",
"bool",
"=",
"None",
",",
"reply_to_message_id",
":",
"int",
"=",
"None",
",",
"reply_markup",
"=",
"None",
")",
"->",
"\"Message\"",
":",
"if",
"quote",
"is",
"None",
":",
"quote",
"=",
"self",
".",
"chat",
".",
"type",
"!=",
"\"private\"",
"if",
"reply_to_message_id",
"is",
"None",
"and",
"quote",
":",
"reply_to_message_id",
"=",
"self",
".",
"message_id",
"return",
"self",
".",
"_client",
".",
"send_message",
"(",
"chat_id",
"=",
"self",
".",
"chat",
".",
"id",
",",
"text",
"=",
"text",
",",
"parse_mode",
"=",
"parse_mode",
",",
"disable_web_page_preview",
"=",
"disable_web_page_preview",
",",
"disable_notification",
"=",
"disable_notification",
",",
"reply_to_message_id",
"=",
"reply_to_message_id",
",",
"reply_markup",
"=",
"reply_markup",
")"
]
| Bound method *reply* of :obj:`Message <pyrogram.Message>`.
Use as a shortcut for:
.. code-block:: python
client.send_message(
chat_id=message.chat.id,
text="hello",
reply_to_message_id=message.message_id
)
Example:
.. code-block:: python
message.reply("hello", quote=True)
Args:
text (``str``):
Text of the message to be sent.
quote (``bool``, *optional*):
If ``True``, the message will be sent as a reply to this message.
If *reply_to_message_id* is passed, this parameter will be ignored.
Defaults to ``True`` in group chats and ``False`` in private chats.
parse_mode (``str``, *optional*):
Use :obj:`MARKDOWN <pyrogram.ParseMode.MARKDOWN>` or :obj:`HTML <pyrogram.ParseMode.HTML>`
if you want Telegram apps to show bold, italic, fixed-width text or inline URLs in your message.
Defaults to Markdown.
disable_web_page_preview (``bool``, *optional*):
Disables link previews for links in this message.
disable_notification (``bool``, *optional*):
Sends the message silently.
Users will receive a notification with no sound.
reply_to_message_id (``int``, *optional*):
If the message is a reply, ID of the original message.
reply_markup (:obj:`InlineKeyboardMarkup` | :obj:`ReplyKeyboardMarkup` | :obj:`ReplyKeyboardRemove` | :obj:`ForceReply`, *optional*):
Additional interface options. An object for an inline keyboard, custom reply keyboard,
instructions to remove reply keyboard or to force a reply from the user.
Returns:
On success, the sent Message is returned.
Raises:
:class:`RPCError <pyrogram.RPCError>` | [
"Bound",
"method",
"*",
"reply",
"*",
"of",
":",
"obj",
":",
"Message",
"<pyrogram",
".",
"Message",
">",
"."
]
| python | train | 36.131579 |
opendatateam/udata | udata/patch_flask_security.py | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/patch_flask_security.py#L18-L20 | def sendmail_proxy(subject, email, template, **context):
"""Cast the lazy_gettext'ed subject to string before passing to Celery"""
sendmail.delay(subject.value, email, template, **context) | [
"def",
"sendmail_proxy",
"(",
"subject",
",",
"email",
",",
"template",
",",
"*",
"*",
"context",
")",
":",
"sendmail",
".",
"delay",
"(",
"subject",
".",
"value",
",",
"email",
",",
"template",
",",
"*",
"*",
"context",
")"
]
| Cast the lazy_gettext'ed subject to string before passing to Celery | [
"Cast",
"the",
"lazy_gettext",
"ed",
"subject",
"to",
"string",
"before",
"passing",
"to",
"Celery"
]
| python | train | 64.666667 |
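The cast above exists because a lazy-translated subject cannot cross a Celery serialization boundary. The same idea in miniature, without Flask or Celery (`LazyString` is a stand-in for a `lazy_gettext` proxy):

```python
import json

class LazyString:
    def __init__(self, func):
        self._func = func
    @property
    def value(self):
        return self._func()

subject = LazyString(lambda: "Welcome!")
# The proxy object itself is not JSON-serializable; force it to a plain
# string first, exactly as the .value access in the proxy function does.
print(json.dumps({"subject": subject.value}))  # {"subject": "Welcome!"}
```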
inspirehep/refextract | refextract/references/text.py | https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/references/text.py#L43-L88 | def extract_references_from_fulltext(fulltext):
"""Locate and extract the reference section from a fulltext document.
Return the extracted reference section as a list of strings, whereby each
string in the list is considered to be a single reference line.
E.g. a string could be something like:
'[19] Wilson, A. Unpublished (1986).
@param fulltext: (list) of strings, whereby each string is a line of the
document.
@return: (list) of strings, where each string is an extracted reference
line.
"""
# Try to remove pagebreaks, headers, footers
fulltext = remove_page_boundary_lines(fulltext)
status = 0
# How ref section found flag
how_found_start = 0
# Find start of refs section
ref_sect_start = get_reference_section_beginning(fulltext)
if ref_sect_start is None:
# No References
refs = []
status = 4
LOGGER.debug(u"extract_references_from_fulltext: ref_sect_start is None")
else:
# If a reference section was found, however weak
ref_sect_end = \
find_end_of_reference_section(fulltext,
ref_sect_start["start_line"],
ref_sect_start["marker"],
ref_sect_start["marker_pattern"])
if ref_sect_end is None:
# No End to refs? Not safe to extract
refs = []
status = 5
LOGGER.debug(u"extract_references_from_fulltext: no end to refs!")
else:
# If the end of the reference section was found.. start extraction
refs = get_reference_lines(fulltext,
ref_sect_start["start_line"],
ref_sect_end,
ref_sect_start["title_string"],
ref_sect_start["marker_pattern"],
ref_sect_start["title_marker_same_line"])
return refs, status, how_found_start | [
"def",
"extract_references_from_fulltext",
"(",
"fulltext",
")",
":",
"# Try to remove pagebreaks, headers, footers",
"fulltext",
"=",
"remove_page_boundary_lines",
"(",
"fulltext",
")",
"status",
"=",
"0",
"# How ref section found flag",
"how_found_start",
"=",
"0",
"# Find start of refs section",
"ref_sect_start",
"=",
"get_reference_section_beginning",
"(",
"fulltext",
")",
"if",
"ref_sect_start",
"is",
"None",
":",
"# No References",
"refs",
"=",
"[",
"]",
"status",
"=",
"4",
"LOGGER",
".",
"debug",
"(",
"u\"extract_references_from_fulltext: ref_sect_start is None\"",
")",
"else",
":",
"# If a reference section was found, however weak",
"ref_sect_end",
"=",
"find_end_of_reference_section",
"(",
"fulltext",
",",
"ref_sect_start",
"[",
"\"start_line\"",
"]",
",",
"ref_sect_start",
"[",
"\"marker\"",
"]",
",",
"ref_sect_start",
"[",
"\"marker_pattern\"",
"]",
")",
"if",
"ref_sect_end",
"is",
"None",
":",
"# No End to refs? Not safe to extract",
"refs",
"=",
"[",
"]",
"status",
"=",
"5",
"LOGGER",
".",
"debug",
"(",
"u\"extract_references_from_fulltext: no end to refs!\"",
")",
"else",
":",
"# If the end of the reference section was found.. start extraction",
"refs",
"=",
"get_reference_lines",
"(",
"fulltext",
",",
"ref_sect_start",
"[",
"\"start_line\"",
"]",
",",
"ref_sect_end",
",",
"ref_sect_start",
"[",
"\"title_string\"",
"]",
",",
"ref_sect_start",
"[",
"\"marker_pattern\"",
"]",
",",
"ref_sect_start",
"[",
"\"title_marker_same_line\"",
"]",
")",
"return",
"refs",
",",
"status",
",",
"how_found_start"
]
| Locate and extract the reference section from a fulltext document.
Return the extracted reference section as a list of strings, whereby each
string in the list is considered to be a single reference line.
E.g. a string could be something like:
'[19] Wilson, A. Unpublished (1986).
@param fulltext: (list) of strings, whereby each string is a line of the
document.
@return: (list) of strings, where each string is an extracted reference
line. | [
"Locate",
"and",
"extract",
"the",
"reference",
"section",
"from",
"a",
"fulltext",
"document",
".",
"Return",
"the",
"extracted",
"reference",
"section",
"as",
"a",
"list",
"of",
"strings",
"whereby",
"each",
"string",
"in",
"the",
"list",
"is",
"considered",
"to",
"be",
"a",
"single",
"reference",
"line",
".",
"E",
".",
"g",
".",
"a",
"string",
"could",
"be",
"something",
"like",
":",
"[",
"19",
"]",
"Wilson",
"A",
".",
"Unpublished",
"(",
"1986",
")",
"."
]
| python | train | 44.804348 |
azogue/esiosdata | esiosdata/importpvpcdata.py | https://github.com/azogue/esiosdata/blob/680c7918955bc6ceee5bded92b3a4485f5ea8151/esiosdata/importpvpcdata.py#L48-L87 | def pvpc_calc_tcu_cp_feu_d(df, verbose=True, convert_kwh=True):
"""Process daily TCU, CP, FEU.
:param df:
:param verbose:
:param convert_kwh:
:return:
"""
if 'TCU' + TARIFAS[0] not in df.columns:
# Convert from €/MWh to €/kWh:
if convert_kwh:
cols_mwh = [c + t for c in COLS_PVPC for t in TARIFAS if c != 'COF']
df[cols_mwh] = df[cols_mwh].applymap(lambda x: x / 1000.)
# Derive the TCU, CP and daily-price columns
gb_t = df.groupby(lambda x: TARIFAS[np.argmax([t in x for t in TARIFAS])], axis=1)
for k, g in gb_t:
if verbose:
print('TARIFA {}'.format(k))
print(g.head())
# TCU calculation
df['TCU{}'.format(k)] = g[k] - g['TEU{}'.format(k)]
# CP calculation
# cols_cp = [c + k for c in ['FOS', 'FOM', 'INT', 'PCAP', 'PMH', 'SAH']]
cols_cp = [c + k for c in COLS_PVPC if c not in ['', 'COF', 'TEU']]
df['CP{}'.format(k)] = g[cols_cp].sum(axis=1)
# PERD calculation --> not possible this way, since the base values already include PERD
# dfs_pvpc[k]['PERD{}'.format(k)] = dfs_pvpc[k]['TCU{}'.format(k)] / dfs_pvpc[k]['CP{}'.format(k)]
# dfs_pvpc[k]['PERD{}'.format(k)] = dfs_pvpc[k]['INT{}'.format(k)] / 1.92
# Daily FEU calculation
cols_k = ['TEU' + k, 'TCU' + k, 'COF' + k]
g = df[cols_k].groupby('TEU' + k)
pr = g.apply(lambda x: x['TCU' + k].dot(x['COF' + k]) / x['COF' + k].sum())
pr.name = 'PD_' + k
df = df.join(pr, on='TEU' + k, rsuffix='_r')
df['PD_' + k] += df['TEU' + k]
return df | [
"def",
"pvpc_calc_tcu_cp_feu_d",
"(",
"df",
",",
"verbose",
"=",
"True",
",",
"convert_kwh",
"=",
"True",
")",
":",
"if",
"'TCU'",
"+",
"TARIFAS",
"[",
"0",
"]",
"not",
"in",
"df",
".",
"columns",
":",
"# Pasa de €/MWh a €/kWh:",
"if",
"convert_kwh",
":",
"cols_mwh",
"=",
"[",
"c",
"+",
"t",
"for",
"c",
"in",
"COLS_PVPC",
"for",
"t",
"in",
"TARIFAS",
"if",
"c",
"!=",
"'COF'",
"]",
"df",
"[",
"cols_mwh",
"]",
"=",
"df",
"[",
"cols_mwh",
"]",
".",
"applymap",
"(",
"lambda",
"x",
":",
"x",
"/",
"1000.",
")",
"# Obtiene columnas TCU, CP, precio día",
"gb_t",
"=",
"df",
".",
"groupby",
"(",
"lambda",
"x",
":",
"TARIFAS",
"[",
"np",
".",
"argmax",
"(",
"[",
"t",
"in",
"x",
"for",
"t",
"in",
"TARIFAS",
"]",
")",
"]",
",",
"axis",
"=",
"1",
")",
"for",
"k",
",",
"g",
"in",
"gb_t",
":",
"if",
"verbose",
":",
"print",
"(",
"'TARIFA {}'",
".",
"format",
"(",
"k",
")",
")",
"print",
"(",
"g",
".",
"head",
"(",
")",
")",
"# Cálculo de TCU",
"df",
"[",
"'TCU{}'",
".",
"format",
"(",
"k",
")",
"]",
"=",
"g",
"[",
"k",
"]",
"-",
"g",
"[",
"'TEU{}'",
".",
"format",
"(",
"k",
")",
"]",
"# Cálculo de CP",
"# cols_cp = [c + k for c in ['FOS', 'FOM', 'INT', 'PCAP', 'PMH', 'SAH']]",
"cols_cp",
"=",
"[",
"c",
"+",
"k",
"for",
"c",
"in",
"COLS_PVPC",
"if",
"c",
"not",
"in",
"[",
"''",
",",
"'COF'",
",",
"'TEU'",
"]",
"]",
"df",
"[",
"'CP{}'",
".",
"format",
"(",
"k",
")",
"]",
"=",
"g",
"[",
"cols_cp",
"]",
".",
"sum",
"(",
"axis",
"=",
"1",
")",
"# Cálculo de PERD --> No es posible así, ya que los valores base ya vienen con PERD",
"# dfs_pvpc[k]['PERD{}'.format(k)] = dfs_pvpc[k]['TCU{}'.format(k)] / dfs_pvpc[k]['CP{}'.format(k)]",
"# dfs_pvpc[k]['PERD{}'.format(k)] = dfs_pvpc[k]['INT{}'.format(k)] / 1.92",
"# Cálculo de FEU diario",
"cols_k",
"=",
"[",
"'TEU'",
"+",
"k",
",",
"'TCU'",
"+",
"k",
",",
"'COF'",
"+",
"k",
"]",
"g",
"=",
"df",
"[",
"cols_k",
"]",
".",
"groupby",
"(",
"'TEU'",
"+",
"k",
")",
"pr",
"=",
"g",
".",
"apply",
"(",
"lambda",
"x",
":",
"x",
"[",
"'TCU'",
"+",
"k",
"]",
".",
"dot",
"(",
"x",
"[",
"'COF'",
"+",
"k",
"]",
")",
"/",
"x",
"[",
"'COF'",
"+",
"k",
"]",
".",
"sum",
"(",
")",
")",
"pr",
".",
"name",
"=",
"'PD_'",
"+",
"k",
"df",
"=",
"df",
".",
"join",
"(",
"pr",
",",
"on",
"=",
"'TEU'",
"+",
"k",
",",
"rsuffix",
"=",
"'_r'",
")",
"df",
"[",
"'PD_'",
"+",
"k",
"]",
"+=",
"df",
"[",
"'TEU'",
"+",
"k",
"]",
"return",
"df"
]
| Process daily TCU, CP, FEU.
:param df:
:param verbose:
:param convert_kwh:
:return: | [
"Process",
"daily",
"TCU",
"CP",
"FEU",
"."
]
| python | valid | 41.775 |
Kane610/deconz | pydeconz/sensor.py | https://github.com/Kane610/deconz/blob/8a9498dbbc8c168d4a081173ad6c3b1e17fffdf6/pydeconz/sensor.py#L842-L851 | async def async_set_config(self, data):
"""Set config of thermostat.
{
"mode": "auto",
"heatsetpoint": 180,
}
"""
field = self.deconz_id + '/config'
await self._async_set_state_callback(field, data) | [
"async",
"def",
"async_set_config",
"(",
"self",
",",
"data",
")",
":",
"field",
"=",
"self",
".",
"deconz_id",
"+",
"'/config'",
"await",
"self",
".",
"_async_set_state_callback",
"(",
"field",
",",
"data",
")"
]
| Set config of thermostat.
{
"mode": "auto",
"heatsetpoint": 180,
} | [
"Set",
"config",
"of",
"thermostat",
"."
]
| python | train | 26.2 |
acrisci/i3ipc-python | i3ipc/i3ipc.py | https://github.com/acrisci/i3ipc-python/blob/243d353434cdd2a93a9ca917c6bbf07b865c39af/i3ipc/i3ipc.py#L586-L597 | def get_workspaces(self):
"""
Get a list of workspaces. Returns JSON-like data, not a Con instance.
You might want to try the :meth:`Con.workspaces` instead if the info
contained here is too little.
:rtype: List of :class:`WorkspaceReply`.
"""
data = self.message(MessageType.GET_WORKSPACES, '')
return json.loads(data, object_hook=WorkspaceReply) | [
"def",
"get_workspaces",
"(",
"self",
")",
":",
"data",
"=",
"self",
".",
"message",
"(",
"MessageType",
".",
"GET_WORKSPACES",
",",
"''",
")",
"return",
"json",
".",
"loads",
"(",
"data",
",",
"object_hook",
"=",
"WorkspaceReply",
")"
]
| Get a list of workspaces. Returns JSON-like data, not a Con instance.
You might want to try the :meth:`Con.workspaces` instead if the info
contained here is too little.
:rtype: List of :class:`WorkspaceReply`. | [
"Get",
"a",
"list",
"of",
"workspaces",
".",
"Returns",
"JSON",
"-",
"like",
"data",
"not",
"a",
"Con",
"instance",
"."
]
| python | train | 33.583333 |
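A hedged usage sketch, assuming the `i3ipc` package and a running i3 (or sway) session:

```python
from i3ipc import Connection

i3 = Connection()
for ws in i3.get_workspaces():
    # Each WorkspaceReply exposes the reply's JSON fields as attributes.
    print(ws.name, ws.focused)
```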
ArduPilot/MAVProxy | MAVProxy/modules/lib/MacOS/backend_agg.py | https://github.com/ArduPilot/MAVProxy/blob/f50bdeff33064876f7dc8dc4683d278ff47f75d5/MAVProxy/modules/lib/MacOS/backend_agg.py#L318-L348 | def restore_region(self, region, bbox=None, xy=None):
"""
Restore the saved region. If bbox (instance of BboxBase, or
its extents) is given, only the region specified by the bbox
will be restored. *xy* (a tuple of two floats) optionally
specifies the new position (the LLC of the original region,
not the LLC of the bbox) where the region will be restored.
>>> region = renderer.copy_from_bbox()
>>> x1, y1, x2, y2 = region.get_extents()
>>> renderer.restore_region(region, bbox=(x1+dx, y1, x2, y2),
... xy=(x1-dx, y1))
"""
if bbox is not None or xy is not None:
if bbox is None:
x1, y1, x2, y2 = region.get_extents()
elif isinstance(bbox, BboxBase):
x1, y1, x2, y2 = bbox.extents
else:
x1, y1, x2, y2 = bbox
if xy is None:
ox, oy = x1, y1
else:
ox, oy = xy
self._renderer.restore_region2(region, x1, y1, x2, y2, ox, oy)
else:
self._renderer.restore_region(region) | [
"def",
"restore_region",
"(",
"self",
",",
"region",
",",
"bbox",
"=",
"None",
",",
"xy",
"=",
"None",
")",
":",
"if",
"bbox",
"is",
"not",
"None",
"or",
"xy",
"is",
"not",
"None",
":",
"if",
"bbox",
"is",
"None",
":",
"x1",
",",
"y1",
",",
"x2",
",",
"y2",
"=",
"region",
".",
"get_extents",
"(",
")",
"elif",
"isinstance",
"(",
"bbox",
",",
"BboxBase",
")",
":",
"x1",
",",
"y1",
",",
"x2",
",",
"y2",
"=",
"bbox",
".",
"extents",
"else",
":",
"x1",
",",
"y1",
",",
"x2",
",",
"y2",
"=",
"bbox",
"if",
"xy",
"is",
"None",
":",
"ox",
",",
"oy",
"=",
"x1",
",",
"y1",
"else",
":",
"ox",
",",
"oy",
"=",
"xy",
"self",
".",
"_renderer",
".",
"restore_region2",
"(",
"region",
",",
"x1",
",",
"y1",
",",
"x2",
",",
"y2",
",",
"ox",
",",
"oy",
")",
"else",
":",
"self",
".",
"_renderer",
".",
"restore_region",
"(",
"region",
")"
]
| Restore the saved region. If bbox (instance of BboxBase, or
its extents) is given, only the region specified by the bbox
will be restored. *xy* (a tuple of two floats) optionally
specifies the new position (the LLC of the original region,
not the LLC of the bbox) where the region will be restored.
>>> region = renderer.copy_from_bbox()
>>> x1, y1, x2, y2 = region.get_extents()
>>> renderer.restore_region(region, bbox=(x1+dx, y1, x2, y2),
... xy=(x1-dx, y1)) | [
"Restore",
"the",
"saved",
"region",
".",
"If",
"bbox",
"(",
"instance",
"of",
"BboxBase",
"or",
"its",
"extents",
")",
"is",
"given",
"only",
"the",
"region",
"specified",
"by",
"the",
"bbox",
"will",
"be",
"restored",
".",
"*",
"xy",
"*",
"(",
"a",
"tuple",
"of",
"two",
"floasts",
")",
"optionally",
"specifies",
"the",
"new",
"position",
"(",
"the",
"LLC",
"of",
"the",
"original",
"region",
"not",
"the",
"LLC",
"of",
"the",
"bbox",
")",
"where",
"the",
"region",
"will",
"be",
"restored",
"."
]
| python | train | 36.516129 |
d0c-s4vage/pfp | pfp/fields.py | https://github.com/d0c-s4vage/pfp/blob/32f2d34fdec1c70019fa83c7006d5e3be0f92fcd/pfp/fields.py#L914-L941 | def _pfp__build(self, stream=None, save_offset=False):
"""Build the union and write the result into the stream.
:stream: None
:returns: None
"""
max_size = -1
if stream is None:
core_stream = six.BytesIO()
new_stream = bitwrap.BitwrappedStream(core_stream)
else:
new_stream = stream
for child in self._pfp__children:
curr_pos = new_stream.tell()
child._pfp__build(new_stream, save_offset)
size = new_stream.tell() - curr_pos
new_stream.seek(-size, 1)
if size > max_size:
max_size = size
new_stream.seek(max_size, 1)
if stream is None:
return core_stream.getvalue()
else:
return max_size | [
"def",
"_pfp__build",
"(",
"self",
",",
"stream",
"=",
"None",
",",
"save_offset",
"=",
"False",
")",
":",
"max_size",
"=",
"-",
"1",
"if",
"stream",
"is",
"None",
":",
"core_stream",
"=",
"six",
".",
"BytesIO",
"(",
")",
"new_stream",
"=",
"bitwrap",
".",
"BitwrappedStream",
"(",
"core_stream",
")",
"else",
":",
"new_stream",
"=",
"stream",
"for",
"child",
"in",
"self",
".",
"_pfp__children",
":",
"curr_pos",
"=",
"new_stream",
".",
"tell",
"(",
")",
"child",
".",
"_pfp__build",
"(",
"new_stream",
",",
"save_offset",
")",
"size",
"=",
"new_stream",
".",
"tell",
"(",
")",
"-",
"curr_pos",
"new_stream",
".",
"seek",
"(",
"-",
"size",
",",
"1",
")",
"if",
"size",
">",
"max_size",
":",
"max_size",
"=",
"size",
"new_stream",
".",
"seek",
"(",
"max_size",
",",
"1",
")",
"if",
"stream",
"is",
"None",
":",
"return",
"core_stream",
".",
"getvalue",
"(",
")",
"else",
":",
"return",
"max_size"
]
| Build the union and write the result into the stream.
:stream: None
:returns: None | [
"Build",
"the",
"union",
"and",
"write",
"the",
"result",
"into",
"the",
"stream",
"."
]
| python | train | 28.142857 |
cds-astro/mocpy | mocpy/abstract_moc.py | https://github.com/cds-astro/mocpy/blob/09472cabe537f6bfdb049eeea64d3ea57b391c21/mocpy/abstract_moc.py#L592-L623 | def degrade_to_order(self, new_order):
"""
Degrades the MOC instance to a new, less precise, MOC.
The maximum depth (i.e. the depth of the smallest HEALPix cells that can be found in the MOC) of the
degraded MOC is set to ``new_order``.
Parameters
----------
new_order : int
Maximum depth of the output degraded MOC.
Returns
-------
moc : `~mocpy.moc.MOC` or `~mocpy.tmoc.TimeMOC`
The degraded MOC.
"""
shift = 2 * (AbstractMOC.HPY_MAX_NORDER - new_order)
ofs = (int(1) << shift) - 1
mask = ~ofs
adda = int(0)
addb = ofs
iv_set = []
for iv in self._interval_set._intervals:
a = (iv[0] + adda) & mask
b = (iv[1] + addb) & mask
if b > a:
iv_set.append((a, b))
return self.__class__(IntervalSet(np.asarray(iv_set))) | [
"def",
"degrade_to_order",
"(",
"self",
",",
"new_order",
")",
":",
"shift",
"=",
"2",
"*",
"(",
"AbstractMOC",
".",
"HPY_MAX_NORDER",
"-",
"new_order",
")",
"ofs",
"=",
"(",
"int",
"(",
"1",
")",
"<<",
"shift",
")",
"-",
"1",
"mask",
"=",
"~",
"ofs",
"adda",
"=",
"int",
"(",
"0",
")",
"addb",
"=",
"ofs",
"iv_set",
"=",
"[",
"]",
"for",
"iv",
"in",
"self",
".",
"_interval_set",
".",
"_intervals",
":",
"a",
"=",
"(",
"iv",
"[",
"0",
"]",
"+",
"adda",
")",
"&",
"mask",
"b",
"=",
"(",
"iv",
"[",
"1",
"]",
"+",
"addb",
")",
"&",
"mask",
"if",
"b",
">",
"a",
":",
"iv_set",
".",
"append",
"(",
"(",
"a",
",",
"b",
")",
")",
"return",
"self",
".",
"__class__",
"(",
"IntervalSet",
"(",
"np",
".",
"asarray",
"(",
"iv_set",
")",
")",
")"
]
| Degrades the MOC instance to a new, less precise, MOC.
The maximum depth (i.e. the depth of the smallest HEALPix cells that can be found in the MOC) of the
degraded MOC is set to ``new_order``.
Parameters
----------
new_order : int
Maximum depth of the output degraded MOC.
Returns
-------
moc : `~mocpy.moc.MOC` or `~mocpy.tmoc.TimeMOC`
The degraded MOC. | [
"Degrades",
"the",
"MOC",
"instance",
"to",
"a",
"new",
"less",
"precise",
"MOC",
"."
]
| python | train | 28.625 |
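The masking arithmetic above, traced with concrete numbers (`HPY_MAX_NORDER` mirrors the class constant referenced in the code; the sample interval is made up):

```python
HPY_MAX_NORDER = 29
new_order = 10
shift = 2 * (HPY_MAX_NORDER - new_order)   # bits dropped per degraded order
ofs = (1 << shift) - 1
mask = ~ofs

a, b = 123_456_789_012, 987_654_321_098
a2 = (a + 0) & mask     # start rounded down to an order-10 cell boundary
b2 = (b + ofs) & mask   # end rounded up to the next boundary
print(a2, b2, b2 > a2)  # the interval is kept only when b2 > a2
```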
hotzenklotz/pybeerxml | pybeerxml/parser.py | https://github.com/hotzenklotz/pybeerxml/blob/e9cf8d6090b1e01e5bbb101e255792b134affbe0/pybeerxml/parser.py#L40-L104 | def parse(self, xml_file):
"Get a list of parsed recipes from BeerXML input"
recipes = []
with open(xml_file, "rt") as f:
tree = ElementTree.parse(f)
for recipeNode in tree.iter():
if self.to_lower(recipeNode.tag) != "recipe":
continue
recipe = Recipe()
recipes.append(recipe)
for recipeProperty in list(recipeNode):
tag_name = self.to_lower(recipeProperty.tag)
if tag_name == "fermentables":
for fermentable_node in list(recipeProperty):
fermentable = Fermentable()
self.nodes_to_object(fermentable_node, fermentable)
recipe.fermentables.append(fermentable)
elif tag_name == "yeasts":
for yeast_node in list(recipeProperty):
yeast = Yeast()
self.nodes_to_object(yeast_node, yeast)
recipe.yeasts.append(yeast)
elif tag_name == "hops":
for hop_node in list(recipeProperty):
hop = Hop()
self.nodes_to_object(hop_node, hop)
recipe.hops.append(hop)
elif tag_name == "miscs":
for misc_node in list(recipeProperty):
misc = Misc()
self.nodes_to_object(misc_node, misc)
recipe.miscs.append(misc)
elif tag_name == "style":
style = Style()
recipe.style = style
self.nodes_to_object(recipeProperty, style)
elif tag_name == "mash":
for mash_node in list(recipeProperty):
mash = Mash()
recipe.mash = mash
if self.to_lower(mash_node.tag) == "mash_steps":
for mash_step_node in list(mash_node):
mash_step = MashStep()
self.nodes_to_object(mash_step_node, mash_step)
mash.steps.append(mash_step)
else:
self.nodes_to_object(mash_node, mash)
else:
self.node_to_object(recipeProperty, recipe)
return recipes | [
"def",
"parse",
"(",
"self",
",",
"xml_file",
")",
":",
"recipes",
"=",
"[",
"]",
"with",
"open",
"(",
"xml_file",
",",
"\"rt\"",
")",
"as",
"f",
":",
"tree",
"=",
"ElementTree",
".",
"parse",
"(",
"f",
")",
"for",
"recipeNode",
"in",
"tree",
".",
"iter",
"(",
")",
":",
"if",
"self",
".",
"to_lower",
"(",
"recipeNode",
".",
"tag",
")",
"!=",
"\"recipe\"",
":",
"continue",
"recipe",
"=",
"Recipe",
"(",
")",
"recipes",
".",
"append",
"(",
"recipe",
")",
"for",
"recipeProperty",
"in",
"list",
"(",
"recipeNode",
")",
":",
"tag_name",
"=",
"self",
".",
"to_lower",
"(",
"recipeProperty",
".",
"tag",
")",
"if",
"tag_name",
"==",
"\"fermentables\"",
":",
"for",
"fermentable_node",
"in",
"list",
"(",
"recipeProperty",
")",
":",
"fermentable",
"=",
"Fermentable",
"(",
")",
"self",
".",
"nodes_to_object",
"(",
"fermentable_node",
",",
"fermentable",
")",
"recipe",
".",
"fermentables",
".",
"append",
"(",
"fermentable",
")",
"elif",
"tag_name",
"==",
"\"yeasts\"",
":",
"for",
"yeast_node",
"in",
"list",
"(",
"recipeProperty",
")",
":",
"yeast",
"=",
"Yeast",
"(",
")",
"self",
".",
"nodes_to_object",
"(",
"yeast_node",
",",
"yeast",
")",
"recipe",
".",
"yeasts",
".",
"append",
"(",
"yeast",
")",
"elif",
"tag_name",
"==",
"\"hops\"",
":",
"for",
"hop_node",
"in",
"list",
"(",
"recipeProperty",
")",
":",
"hop",
"=",
"Hop",
"(",
")",
"self",
".",
"nodes_to_object",
"(",
"hop_node",
",",
"hop",
")",
"recipe",
".",
"hops",
".",
"append",
"(",
"hop",
")",
"elif",
"tag_name",
"==",
"\"miscs\"",
":",
"for",
"misc_node",
"in",
"list",
"(",
"recipeProperty",
")",
":",
"misc",
"=",
"Misc",
"(",
")",
"self",
".",
"nodes_to_object",
"(",
"misc_node",
",",
"misc",
")",
"recipe",
".",
"miscs",
".",
"append",
"(",
"misc",
")",
"elif",
"tag_name",
"==",
"\"style\"",
":",
"style",
"=",
"Style",
"(",
")",
"recipe",
".",
"style",
"=",
"style",
"self",
".",
"nodes_to_object",
"(",
"recipeProperty",
",",
"style",
")",
"elif",
"tag_name",
"==",
"\"mash\"",
":",
"for",
"mash_node",
"in",
"list",
"(",
"recipeProperty",
")",
":",
"mash",
"=",
"Mash",
"(",
")",
"recipe",
".",
"mash",
"=",
"mash",
"if",
"self",
".",
"to_lower",
"(",
"mash_node",
".",
"tag",
")",
"==",
"\"mash_steps\"",
":",
"for",
"mash_step_node",
"in",
"list",
"(",
"mash_node",
")",
":",
"mash_step",
"=",
"MashStep",
"(",
")",
"self",
".",
"nodes_to_object",
"(",
"mash_step_node",
",",
"mash_step",
")",
"mash",
".",
"steps",
".",
"append",
"(",
"mash_step",
")",
"else",
":",
"self",
".",
"nodes_to_object",
"(",
"mash_node",
",",
"mash",
")",
"else",
":",
"self",
".",
"node_to_object",
"(",
"recipeProperty",
",",
"recipe",
")",
"return",
"recipes"
]
| Get a list of parsed recipes from BeerXML input | [
"Get",
"a",
"list",
"of",
"parsed",
"recipes",
"from",
"BeerXML",
"input"
]
| python | train | 36.907692 |
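A hedged usage sketch, assuming the `pybeerxml` package and a BeerXML file on disk (the file name is illustrative, and `recipe.name` assumes the generic node-to-attribute mapping above sets a `name` field from the recipe's NAME tag):

```python
from pybeerxml.parser import Parser

recipes = Parser().parse('recipe.xml')
for recipe in recipes:
    print(recipe.name, len(recipe.hops), 'hop additions')
```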
nicolargo/glances | glances/stats.py | https://github.com/nicolargo/glances/blob/5bd4d587a736e0d2b03170b56926841d2a3eb7ee/glances/stats.py#L262-L271 | def getAllExportsAsDict(self, plugin_list=None):
"""
Return all the stats to be exported (list).
Default behavior is to export all the stats
if plugin_list is provided, only export stats of given plugin (list)
"""
if plugin_list is None:
# All plugins should be exported
plugin_list = self._plugins
return {p: self._plugins[p].get_export() for p in plugin_list} | [
"def",
"getAllExportsAsDict",
"(",
"self",
",",
"plugin_list",
"=",
"None",
")",
":",
"if",
"plugin_list",
"is",
"None",
":",
"# All plugins should be exported",
"plugin_list",
"=",
"self",
".",
"_plugins",
"return",
"{",
"p",
":",
"self",
".",
"_plugins",
"[",
"p",
"]",
".",
"get_export",
"(",
")",
"for",
"p",
"in",
"plugin_list",
"}"
]
| Return all the stats to be exported (list).
Default behavior is to export all the stats
if plugin_list is provided, only export stats of given plugin (list) | [
"Return",
"all",
"the",
"stats",
"to",
"be",
"exported",
"(",
"list",
")",
".",
"Default",
"behavior",
"is",
"to",
"export",
"all",
"the",
"stats",
"if",
"plugin_list",
"is",
"provided",
"only",
"export",
"stats",
"of",
"given",
"plugin",
"(",
"list",
")"
]
| python | train | 43 |
burnash/gspread | gspread/client.py | https://github.com/burnash/gspread/blob/0e8debe208095aeed3e3e7136c2fa5cd74090946/gspread/client.py#L347-L421 | def insert_permission(
self,
file_id,
value,
perm_type,
role,
notify=True,
email_message=None,
with_link=False
):
"""Creates a new permission for a file.
:param file_id: a spreadsheet ID (aka file ID.)
:type file_id: str
:param value: user or group e-mail address, domain name
or None for 'default' type.
:type value: str, None
:param perm_type: (optional) The account type.
Allowed values are: ``user``, ``group``, ``domain``,
``anyone``
:type perm_type: str
:param role: (optional) The primary role for this user.
Allowed values are: ``owner``, ``writer``, ``reader``
:type role: str
:param notify: (optional) Whether to send an email to the target user/domain.
:type notify: str
:param email_message: (optional) An email message to be sent if notify=True.
:type email_message: str
:param with_link: (optional) Whether the link is required for this permission to be active.
:type with_link: bool
Examples::
# Give write permissions to [email protected]
gc.insert_permission(
'0BmgG6nO_6dprnRRUWl1UFE',
'[email protected]',
perm_type='user',
role='writer'
)
# Make the spreadsheet publicly readable
gc.insert_permission(
'0BmgG6nO_6dprnRRUWl1UFE',
None,
perm_type='anyone',
role='reader'
)
"""
url = '{0}/{1}/permissions'.format(DRIVE_FILES_API_V2_URL, file_id)
payload = {
'value': value,
'type': perm_type,
'role': role,
'withLink': with_link
}
params = {
'sendNotificationEmails': notify,
'emailMessage': email_message
}
self.request(
'post',
url,
json=payload,
params=params
) | [
"def",
"insert_permission",
"(",
"self",
",",
"file_id",
",",
"value",
",",
"perm_type",
",",
"role",
",",
"notify",
"=",
"True",
",",
"email_message",
"=",
"None",
",",
"with_link",
"=",
"False",
")",
":",
"url",
"=",
"'{0}/{1}/permissions'",
".",
"format",
"(",
"DRIVE_FILES_API_V2_URL",
",",
"file_id",
")",
"payload",
"=",
"{",
"'value'",
":",
"value",
",",
"'type'",
":",
"perm_type",
",",
"'role'",
":",
"role",
",",
"'withLink'",
":",
"with_link",
"}",
"params",
"=",
"{",
"'sendNotificationEmails'",
":",
"notify",
",",
"'emailMessage'",
":",
"email_message",
"}",
"self",
".",
"request",
"(",
"'post'",
",",
"url",
",",
"json",
"=",
"payload",
",",
"params",
"=",
"params",
")"
]
| Creates a new permission for a file.
:param file_id: a spreadsheet ID (aka file ID.)
:type file_id: str
:param value: user or group e-mail address, domain name
or None for 'default' type.
:type value: str, None
:param perm_type: (optional) The account type.
Allowed values are: ``user``, ``group``, ``domain``,
``anyone``
:type perm_type: str
:param role: (optional) The primary role for this user.
Allowed values are: ``owner``, ``writer``, ``reader``
:type role: str
:param notify: (optional) Whether to send an email to the target user/domain.
:type notify: str
:param email_message: (optional) An email message to be sent if notify=True.
:type email_message: str
:param with_link: (optional) Whether the link is required for this permission to be active.
:type with_link: bool
Examples::
# Give write permissions to [email protected]
gc.insert_permission(
'0BmgG6nO_6dprnRRUWl1UFE',
'[email protected]',
perm_type='user',
role='writer'
)
# Make the spreadsheet publicly readable
gc.insert_permission(
'0BmgG6nO_6dprnRRUWl1UFE',
None,
perm_type='anyone',
role='reader'
) | [
"Creates",
"a",
"new",
"permission",
"for",
"a",
"file",
"."
]
| python | train | 27.573333 |
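The docstring's own example, wrapped into a runnable sketch (service-account authorization is just one common path, and the file ID is the docstring's placeholder):

```python
import gspread

gc = gspread.service_account()  # assumes service-account credentials are configured
gc.insert_permission(
    '0BmgG6nO_6dprnRRUWl1UFE',
    '[email protected]',
    perm_type='user',
    role='writer',
)
```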
rwl/pylon | pylon/io/rst.py | https://github.com/rwl/pylon/blob/916514255db1ae1661406f0283df756baf960d14/pylon/io/rst.py#L87-L146 | def write_bus_data(self, file):
""" Writes bus data to a ReST table.
"""
report = CaseReport(self.case)
buses = self.case.buses
col_width = 8
col_width_2 = col_width * 2 + 1
col1_width = 6
sep = "=" * 6 + " " + ("=" * col_width + " ") * 6 + "\n"
file.write(sep)
# Line one of column headers
file.write("Name".center(col1_width) + " ")
file.write("Voltage (pu)".center(col_width_2) + " ")
file.write("Generation".center(col_width_2) + " ")
file.write("Load".center(col_width_2) + " ")
file.write("\n")
file.write("-" * col1_width +" "+ ("-" * col_width_2 + " ") * 3 + "\n")
# Line two of column header
file.write("..".ljust(col1_width) + " ")
file.write("Amp".center(col_width) + " ")
file.write("Phase".center(col_width) + " ")
file.write("P (MW)".center(col_width) + " ")
file.write("Q (MVAr)".center(col_width) + " ")
file.write("P (MW)".center(col_width) + " ")
file.write("Q (MVAr)".center(col_width) + " ")
file.write("\n")
file.write(sep)
# Bus rows
for bus in buses:
file.write(bus.name[:col1_width].ljust(col1_width))
file.write(" %8.3f" % bus.v_magnitude)
file.write(" %8.3f" % bus.v_angle)
file.write(" %8.2f" % self.case.s_supply(bus).real)
file.write(" %8.2f" % self.case.s_supply(bus).imag)
file.write(" %8.2f" % self.case.s_demand(bus).real)
file.write(" %8.2f" % self.case.s_demand(bus).imag)
file.write("\n")
# Totals
# file.write("..".ljust(col1_width) + " ")
# file.write(("..".ljust(col_width) + " ")*2)
# file.write(("_"*col_width + " ")*4 + "\n")
file.write("..".ljust(col1_width) + " " + "..".ljust(col_width) + " ")
file.write("*Total:*".rjust(col_width) + " ")
ptot = report.actual_pgen
qtot = report.actual_qgen
file.write("%8.2f " % ptot)
file.write("%8.2f " % qtot)
file.write("%8.2f " % report.p_demand)
file.write("%8.2f " % report.q_demand)
file.write("\n")
file.write(sep)
del report | [
"def",
"write_bus_data",
"(",
"self",
",",
"file",
")",
":",
"report",
"=",
"CaseReport",
"(",
"self",
".",
"case",
")",
"buses",
"=",
"self",
".",
"case",
".",
"buses",
"col_width",
"=",
"8",
"col_width_2",
"=",
"col_width",
"*",
"2",
"+",
"1",
"col1_width",
"=",
"6",
"sep",
"=",
"\"=\"",
"*",
"6",
"+",
"\" \"",
"+",
"(",
"\"=\"",
"*",
"col_width",
"+",
"\" \"",
")",
"*",
"6",
"+",
"\"\\n\"",
"file",
".",
"write",
"(",
"sep",
")",
"# Line one of column headers",
"file",
".",
"write",
"(",
"\"Name\"",
".",
"center",
"(",
"col1_width",
")",
"+",
"\" \"",
")",
"file",
".",
"write",
"(",
"\"Voltage (pu)\"",
".",
"center",
"(",
"col_width_2",
")",
"+",
"\" \"",
")",
"file",
".",
"write",
"(",
"\"Generation\"",
".",
"center",
"(",
"col_width_2",
")",
"+",
"\" \"",
")",
"file",
".",
"write",
"(",
"\"Load\"",
".",
"center",
"(",
"col_width_2",
")",
"+",
"\" \"",
")",
"file",
".",
"write",
"(",
"\"\\n\"",
")",
"file",
".",
"write",
"(",
"\"-\"",
"*",
"col1_width",
"+",
"\" \"",
"+",
"(",
"\"-\"",
"*",
"col_width_2",
"+",
"\" \"",
")",
"*",
"3",
"+",
"\"\\n\"",
")",
"# Line two of column header",
"file",
".",
"write",
"(",
"\"..\"",
".",
"ljust",
"(",
"col1_width",
")",
"+",
"\" \"",
")",
"file",
".",
"write",
"(",
"\"Amp\"",
".",
"center",
"(",
"col_width",
")",
"+",
"\" \"",
")",
"file",
".",
"write",
"(",
"\"Phase\"",
".",
"center",
"(",
"col_width",
")",
"+",
"\" \"",
")",
"file",
".",
"write",
"(",
"\"P (MW)\"",
".",
"center",
"(",
"col_width",
")",
"+",
"\" \"",
")",
"file",
".",
"write",
"(",
"\"Q (MVAr)\"",
".",
"center",
"(",
"col_width",
")",
"+",
"\" \"",
")",
"file",
".",
"write",
"(",
"\"P (MW)\"",
".",
"center",
"(",
"col_width",
")",
"+",
"\" \"",
")",
"file",
".",
"write",
"(",
"\"Q (MVAr)\"",
".",
"center",
"(",
"col_width",
")",
"+",
"\" \"",
")",
"file",
".",
"write",
"(",
"\"\\n\"",
")",
"file",
".",
"write",
"(",
"sep",
")",
"# Bus rows",
"for",
"bus",
"in",
"buses",
":",
"file",
".",
"write",
"(",
"bus",
".",
"name",
"[",
":",
"col1_width",
"]",
".",
"ljust",
"(",
"col1_width",
")",
")",
"file",
".",
"write",
"(",
"\" %8.3f\"",
"%",
"bus",
".",
"v_magnitude",
")",
"file",
".",
"write",
"(",
"\" %8.3f\"",
"%",
"bus",
".",
"v_angle",
")",
"file",
".",
"write",
"(",
"\" %8.2f\"",
"%",
"self",
".",
"case",
".",
"s_supply",
"(",
"bus",
")",
".",
"real",
")",
"file",
".",
"write",
"(",
"\" %8.2f\"",
"%",
"self",
".",
"case",
".",
"s_supply",
"(",
"bus",
")",
".",
"imag",
")",
"file",
".",
"write",
"(",
"\" %8.2f\"",
"%",
"self",
".",
"case",
".",
"s_demand",
"(",
"bus",
")",
".",
"real",
")",
"file",
".",
"write",
"(",
"\" %8.2f\"",
"%",
"self",
".",
"case",
".",
"s_demand",
"(",
"bus",
")",
".",
"imag",
")",
"file",
".",
"write",
"(",
"\"\\n\"",
")",
"# Totals",
"# file.write(\"..\".ljust(col1_width) + \" \")",
"# file.write((\"..\".ljust(col_width) + \" \")*2)",
"# file.write((\"_\"*col_width + \" \")*4 + \"\\n\")",
"file",
".",
"write",
"(",
"\"..\"",
".",
"ljust",
"(",
"col1_width",
")",
"+",
"\" \"",
"+",
"\"..\"",
".",
"ljust",
"(",
"col_width",
")",
"+",
"\" \"",
")",
"file",
".",
"write",
"(",
"\"*Total:*\"",
".",
"rjust",
"(",
"col_width",
")",
"+",
"\" \"",
")",
"ptot",
"=",
"report",
".",
"actual_pgen",
"qtot",
"=",
"report",
".",
"actual_qgen",
"file",
".",
"write",
"(",
"\"%8.2f \"",
"%",
"ptot",
")",
"file",
".",
"write",
"(",
"\"%8.2f \"",
"%",
"qtot",
")",
"file",
".",
"write",
"(",
"\"%8.2f \"",
"%",
"report",
".",
"p_demand",
")",
"file",
".",
"write",
"(",
"\"%8.2f \"",
"%",
"report",
".",
"q_demand",
")",
"file",
".",
"write",
"(",
"\"\\n\"",
")",
"file",
".",
"write",
"(",
"sep",
")",
"del",
"report"
]
| Writes bus data to a ReST table. | [
"Writes",
"bus",
"data",
"to",
"a",
"ReST",
"table",
"."
]
| python | train | 36.666667 |
python-gitlab/python-gitlab | gitlab/__init__.py | https://github.com/python-gitlab/python-gitlab/blob/16de1b03fde3dbbe8f851614dd1d8c09de102fe5/gitlab/__init__.py#L607-L636 | def http_post(self, path, query_data={}, post_data={}, files=None,
**kwargs):
"""Make a POST request to the Gitlab server.
Args:
path (str): Path or full URL to query ('/projects' or
'http://whatever/v4/api/projects')
query_data (dict): Data to send as query parameters
post_data (dict): Data to send in the body (will be converted to
json)
files (dict): The files to send to the server
**kwargs: Extra options to send to the server (e.g. sudo)
Returns:
The parsed json returned by the server if json is returned, else the
raw content
Raises:
GitlabHttpError: When the return code is not 2xx
GitlabParsingError: If the json data could not be parsed
"""
result = self.http_request('post', path, query_data=query_data,
post_data=post_data, files=files, **kwargs)
try:
if result.headers.get('Content-Type', None) == 'application/json':
return result.json()
except Exception:
raise GitlabParsingError(
error_message="Failed to parse the server message")
return result | [
"def",
"http_post",
"(",
"self",
",",
"path",
",",
"query_data",
"=",
"{",
"}",
",",
"post_data",
"=",
"{",
"}",
",",
"files",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"result",
"=",
"self",
".",
"http_request",
"(",
"'post'",
",",
"path",
",",
"query_data",
"=",
"query_data",
",",
"post_data",
"=",
"post_data",
",",
"files",
"=",
"files",
",",
"*",
"*",
"kwargs",
")",
"try",
":",
"if",
"result",
".",
"headers",
".",
"get",
"(",
"'Content-Type'",
",",
"None",
")",
"==",
"'application/json'",
":",
"return",
"result",
".",
"json",
"(",
")",
"except",
"Exception",
":",
"raise",
"GitlabParsingError",
"(",
"error_message",
"=",
"\"Failed to parse the server message\"",
")",
"return",
"result"
]
| Make a POST request to the Gitlab server.
Args:
path (str): Path or full URL to query ('/projects' or
'http://whatever/v4/api/projects')
query_data (dict): Data to send as query parameters
post_data (dict): Data to send in the body (will be converted to
json)
files (dict): The files to send to the server
**kwargs: Extra options to send to the server (e.g. sudo)
Returns:
The parsed json returned by the server if json is returned, else the
raw content
Raises:
GitlabHttpError: When the return code is not 2xx
GitlabParsingError: If the json data could not be parsed | [
"Make",
"a",
"POST",
"request",
"to",
"the",
"Gitlab",
"server",
"."
]
| python | train | 42.566667 |
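A hedged usage sketch of this low-level helper (endpoint and payload are illustrative; the higher-level managers normally call it for you):

```python
import gitlab

gl = gitlab.Gitlab('https://gitlab.example.com', private_token='TOKEN')
data = gl.http_post('/projects', post_data={'name': 'demo-project'})
print(data['id'])  # parsed JSON when the server returns application/json
```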
spotify/docker_interface | docker_interface/plugins/base.py | https://github.com/spotify/docker_interface/blob/4df80e1fe072d958020080d32c16551ff7703d51/docker_interface/plugins/base.py#L93-L119 | def apply(self, configuration, schema, args):
"""
Apply the plugin to the configuration.
Inheriting plugins should implement this method to add additional functionality.
Parameters
----------
configuration : dict
configuration
schema : dict
JSON schema
args : argparse.NameSpace
parsed command line arguments
Returns
-------
configuration : dict
updated configuration after applying the plugin
"""
# Set values from the command line
for name, path in self.arguments.items():
value = getattr(args, name.replace('-', '_'))
if value is not None:
util.set_value(configuration, path, value)
return configuration | [
"def",
"apply",
"(",
"self",
",",
"configuration",
",",
"schema",
",",
"args",
")",
":",
"# Set values from the command line",
"for",
"name",
",",
"path",
"in",
"self",
".",
"arguments",
".",
"items",
"(",
")",
":",
"value",
"=",
"getattr",
"(",
"args",
",",
"name",
".",
"replace",
"(",
"'-'",
",",
"'_'",
")",
")",
"if",
"value",
"is",
"not",
"None",
":",
"util",
".",
"set_value",
"(",
"configuration",
",",
"path",
",",
"value",
")",
"return",
"configuration"
]
| Apply the plugin to the configuration.
Inheriting plugins should implement this method to add additional functionality.
Parameters
----------
configuration : dict
configuration
schema : dict
JSON schema
args : argparse.NameSpace
parsed command line arguments
Returns
-------
configuration : dict
updated configuration after applying the plugin | [
"Apply",
"the",
"plugin",
"to",
"the",
"configuration",
"."
]
| python | train | 29.333333 |
ecederstrand/exchangelib | exchangelib/util.py | https://github.com/ecederstrand/exchangelib/blob/736347b337c239fcd6d592db5b29e819f753c1ba/exchangelib/util.py#L83-L101 | def chunkify(iterable, chunksize):
"""
Splits an iterable into chunks of size ``chunksize``. The last chunk may be smaller than ``chunksize``.
"""
from .queryset import QuerySet
if hasattr(iterable, '__getitem__') and not isinstance(iterable, QuerySet):
# tuple, list. QuerySet has __getitem__ but that evaluates the entire query greedily. We don't want that here.
for i in range(0, len(iterable), chunksize):
yield iterable[i:i + chunksize]
else:
# generator, set, map, QuerySet
chunk = []
for i in iterable:
chunk.append(i)
if len(chunk) == chunksize:
yield chunk
chunk = []
if chunk:
yield chunk | [
"def",
"chunkify",
"(",
"iterable",
",",
"chunksize",
")",
":",
"from",
".",
"queryset",
"import",
"QuerySet",
"if",
"hasattr",
"(",
"iterable",
",",
"'__getitem__'",
")",
"and",
"not",
"isinstance",
"(",
"iterable",
",",
"QuerySet",
")",
":",
"# tuple, list. QuerySet has __getitem__ but that evaluates the entire query greedily. We don't want that here.",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"iterable",
")",
",",
"chunksize",
")",
":",
"yield",
"iterable",
"[",
"i",
":",
"i",
"+",
"chunksize",
"]",
"else",
":",
"# generator, set, map, QuerySet",
"chunk",
"=",
"[",
"]",
"for",
"i",
"in",
"iterable",
":",
"chunk",
".",
"append",
"(",
"i",
")",
"if",
"len",
"(",
"chunk",
")",
"==",
"chunksize",
":",
"yield",
"chunk",
"chunk",
"=",
"[",
"]",
"if",
"chunk",
":",
"yield",
"chunk"
]
| Splits an iterable into chunks of size ``chunksize``. The last chunk may be smaller than ``chunksize``. | [
"Splits",
"an",
"iterable",
"into",
"chunks",
"of",
"size",
"chunksize",
".",
"The",
"last",
"chunk",
"may",
"be",
"smaller",
"than",
"chunksize",
"."
]
| python | train | 38.526316 |
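Both branches above, exercised standalone (a sketch that drops the `QuerySet` special case, which only matters inside exchangelib):

```python
def chunkify(iterable, chunksize):
    if hasattr(iterable, '__getitem__'):
        # Sliceable inputs (tuple, list) are sliced lazily.
        for i in range(0, len(iterable), chunksize):
            yield iterable[i:i + chunksize]
    else:
        # Plain iterables (generator, set, map) are buffered into chunks.
        chunk = []
        for item in iterable:
            chunk.append(item)
            if len(chunk) == chunksize:
                yield chunk
                chunk = []
        if chunk:
            yield chunk

print(list(chunkify([1, 2, 3, 4, 5], 2)))  # slicing path: [[1, 2], [3, 4], [5]]
print(list(chunkify(iter(range(5)), 2)))   # buffering path: [[0, 1], [2, 3], [4]]
```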
Alignak-monitoring/alignak | alignak/objects/host.py | https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/objects/host.py#L1489-L1497 | def find_hosts_that_use_template(self, tpl_name):
"""Find hosts that use the template defined in argument tpl_name
:param tpl_name: the template name we filter on
:type tpl_name: str
:return: list of the host_name of the hosts that got the template tpl_name in tags
:rtype: list[str]
"""
return [h.host_name for h in self if tpl_name in h.tags if hasattr(h, "host_name")] | [
"def",
"find_hosts_that_use_template",
"(",
"self",
",",
"tpl_name",
")",
":",
"return",
"[",
"h",
".",
"host_name",
"for",
"h",
"in",
"self",
"if",
"tpl_name",
"in",
"h",
".",
"tags",
"if",
"hasattr",
"(",
"h",
",",
"\"host_name\"",
")",
"]"
]
| Find hosts that use the template defined in argument tpl_name
:param tpl_name: the template name we filter or
:type tpl_name: str
:return: list of the host_name of the hosts that got the template tpl_name in tags
:rtype: list[str] | [
"Find",
"hosts",
"that",
"use",
"the",
"template",
"defined",
"in",
"argument",
"tpl_name"
]
| python | train | 46.666667 |
CivicSpleen/ambry | ambry/mprlib/backends/sqlite.py | https://github.com/CivicSpleen/ambry/blob/d7f2be4bf1f7ffd086f3fadd4fcae60c32473e42/ambry/mprlib/backends/sqlite.py#L307-L330 | def _execute(self, connection, query, fetch=True):
""" Executes given query using given connection.
Args:
connection (apsw.Connection): connection to the sqlite db who stores mpr data.
query (str): sql query
fetch (boolean, optional): if True, fetch query result and return it. If False, do not fetch.
Returns:
iterable with query result.
"""
cursor = connection.cursor()
try:
cursor.execute(query)
except Exception as e:
from ambry.mprlib.exceptions import BadSQLError
raise BadSQLError("Failed to execute query: {}; {}".format(query, e))
if fetch:
return cursor.fetchall()
else:
return cursor | [
"def",
"_execute",
"(",
"self",
",",
"connection",
",",
"query",
",",
"fetch",
"=",
"True",
")",
":",
"cursor",
"=",
"connection",
".",
"cursor",
"(",
")",
"try",
":",
"cursor",
".",
"execute",
"(",
"query",
")",
"except",
"Exception",
"as",
"e",
":",
"from",
"ambry",
".",
"mprlib",
".",
"exceptions",
"import",
"BadSQLError",
"raise",
"BadSQLError",
"(",
"\"Failed to execute query: {}; {}\"",
".",
"format",
"(",
"query",
",",
"e",
")",
")",
"if",
"fetch",
":",
"return",
"cursor",
".",
"fetchall",
"(",
")",
"else",
":",
"return",
"cursor"
]
| Executes given query using given connection.
Args:
connection (apsw.Connection): connection to the sqlite db that stores mpr data.
query (str): sql query
fetch (boolean, optional): if True, fetch query result and return it. If False, do not fetch.
Returns:
iterable with query result. | [
"Executes",
"given",
"query",
"using",
"given",
"connection",
"."
]
| python | train | 31.5 |
TrafficSenseMSD/SumoTools | traci/_gui.py | https://github.com/TrafficSenseMSD/SumoTools/blob/8607b4f885f1d1798e43240be643efe6dccccdaa/traci/_gui.py#L101-L110 | def screenshot(self, viewID, filename):
"""screenshot(string, string) -> None
Save a screenshot for the given view to the given filename.
The fileformat is guessed from the extension, the available
formats differ from platform to platform but should at least
include ps, svg and pdf, on linux probably gif, png and jpg as well.
"""
self._connection._sendStringCmd(
tc.CMD_SET_GUI_VARIABLE, tc.VAR_SCREENSHOT, viewID, filename) | [
"def",
"screenshot",
"(",
"self",
",",
"viewID",
",",
"filename",
")",
":",
"self",
".",
"_connection",
".",
"_sendStringCmd",
"(",
"tc",
".",
"CMD_SET_GUI_VARIABLE",
",",
"tc",
".",
"VAR_SCREENSHOT",
",",
"viewID",
",",
"filename",
")"
]
| screenshot(string, string) -> None
Save a screenshot for the given view to the given filename.
The fileformat is guessed from the extension, the available
formats differ from platform to platform but should at least
include ps, svg and pdf, on linux probably gif, png and jpg as well. | [
"screenshot",
"(",
"string",
"string",
")",
"-",
">",
"None"
]
| python | train | 48.6 |
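
A typical call site for the wrapper above, assuming a sumo-gui simulation started through TraCI ("View #0" is SUMO's default view ID; the config filename is hypothetical):

    import traci

    traci.start(["sumo-gui", "-c", "scenario.sumocfg"])  # hypothetical config
    for _ in range(100):
        traci.simulationStep()
    # Output format is inferred from the extension, as documented above.
    traci.gui.screenshot("View #0", "step100.png")
    traci.close()
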
jaraco/tempora | tempora/__init__.py | https://github.com/jaraco/tempora/blob/f0a9ab636103fe829aa9b495c93f5249aac5f2b8/tempora/__init__.py#L310-L338 | def get_period_seconds(period):
"""
return the number of seconds in the specified period
>>> get_period_seconds('day')
86400
>>> get_period_seconds(86400)
86400
>>> get_period_seconds(datetime.timedelta(hours=24))
86400
>>> get_period_seconds('day + os.system("rm -Rf *")')
Traceback (most recent call last):
...
ValueError: period not in (second, minute, hour, day, month, year)
"""
if isinstance(period, six.string_types):
try:
name = 'seconds_per_' + period.lower()
result = globals()[name]
except KeyError:
msg = "period not in (second, minute, hour, day, month, year)"
raise ValueError(msg)
elif isinstance(period, numbers.Number):
result = period
elif isinstance(period, datetime.timedelta):
result = period.days * get_period_seconds('day') + period.seconds
else:
raise TypeError('period must be a string or integer')
return result | [
"def",
"get_period_seconds",
"(",
"period",
")",
":",
"if",
"isinstance",
"(",
"period",
",",
"six",
".",
"string_types",
")",
":",
"try",
":",
"name",
"=",
"'seconds_per_'",
"+",
"period",
".",
"lower",
"(",
")",
"result",
"=",
"globals",
"(",
")",
"[",
"name",
"]",
"except",
"KeyError",
":",
"msg",
"=",
"\"period not in (second, minute, hour, day, month, year)\"",
"raise",
"ValueError",
"(",
"msg",
")",
"elif",
"isinstance",
"(",
"period",
",",
"numbers",
".",
"Number",
")",
":",
"result",
"=",
"period",
"elif",
"isinstance",
"(",
"period",
",",
"datetime",
".",
"timedelta",
")",
":",
"result",
"=",
"period",
".",
"days",
"*",
"get_period_seconds",
"(",
"'day'",
")",
"+",
"period",
".",
"seconds",
"else",
":",
"raise",
"TypeError",
"(",
"'period must be a string or integer'",
")",
"return",
"result"
]
| return the number of seconds in the specified period
>>> get_period_seconds('day')
86400
>>> get_period_seconds(86400)
86400
>>> get_period_seconds(datetime.timedelta(hours=24))
86400
>>> get_period_seconds('day + os.system("rm -Rf *")')
Traceback (most recent call last):
...
ValueError: period not in (second, minute, hour, day, month, year) | [
"return",
"the",
"number",
"of",
"seconds",
"in",
"the",
"specified",
"period"
]
| python | valid | 29.310345 |
NicolasLM/atoma | atoma/json_feed.py | https://github.com/NicolasLM/atoma/blob/16c6956112f975eb2ce774b2d5f8e9ddffde569f/atoma/json_feed.py#L216-L223 | def parse_json_feed_bytes(data: bytes) -> JSONFeed:
"""Parse a JSON feed from a byte-string containing JSON data."""
try:
root = json.loads(data)
except json.decoder.JSONDecodeError:
raise FeedJSONError('Not a valid JSON document')
return parse_json_feed(root) | [
"def",
"parse_json_feed_bytes",
"(",
"data",
":",
"bytes",
")",
"->",
"JSONFeed",
":",
"try",
":",
"root",
"=",
"json",
".",
"loads",
"(",
"data",
")",
"except",
"json",
".",
"decoder",
".",
"JSONDecodeError",
":",
"raise",
"FeedJSONError",
"(",
"'Not a valid JSON document'",
")",
"return",
"parse_json_feed",
"(",
"root",
")"
]
| Parse a JSON feed from a byte-string containing JSON data. | [
"Parse",
"a",
"JSON",
"feed",
"from",
"a",
"byte",
"-",
"string",
"containing",
"JSON",
"data",
"."
]
| python | train | 35.75 |
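
Exercising both the success and failure paths of the parser above (import path follows the module shown; the sample feed is invented):

    from atoma.json_feed import parse_json_feed_bytes, FeedJSONError

    raw = b'{"version": "https://jsonfeed.org/version/1", "title": "Demo", "items": []}'
    print(parse_json_feed_bytes(raw).title)  # Demo

    try:
        parse_json_feed_bytes(b"definitely not JSON")
    except FeedJSONError as exc:
        print(exc)  # Not a valid JSON document
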
Microsoft/nni | src/sdk/pynni/nni/metis_tuner/metis_tuner.py | https://github.com/Microsoft/nni/blob/c7cc8db32da8d2ec77a382a55089f4e17247ce41/src/sdk/pynni/nni/metis_tuner/metis_tuner.py#L215-L255 | def receive_trial_result(self, parameter_id, parameters, value):
"""Tuner receive result from trial.
Parameters
----------
parameter_id : int
parameters : dict
value : dict/float
if value is dict, it should have "default" key.
"""
value = extract_scalar_reward(value)
if self.optimize_mode == OptimizeMode.Maximize:
value = -value
logger.info("Received trial result.")
logger.info("value is :" + str(value))
logger.info("parameter is : " + str(parameters))
# parse parameter to sample_x
sample_x = [0 for i in range(len(self.key_order))]
for key in parameters:
idx = self.key_order.index(key)
sample_x[idx] = parameters[key]
# parse value to sample_y
temp_y = []
if sample_x in self.samples_x:
idx = self.samples_x.index(sample_x)
temp_y = self.samples_y[idx]
temp_y.append(value)
self.samples_y[idx] = temp_y
# calculate y aggregation
median = get_median(temp_y)
self.samples_y_aggregation[idx] = [median]
else:
self.samples_x.append(sample_x)
self.samples_y.append([value])
# calculate y aggregation
self.samples_y_aggregation.append([value]) | [
"def",
"receive_trial_result",
"(",
"self",
",",
"parameter_id",
",",
"parameters",
",",
"value",
")",
":",
"value",
"=",
"extract_scalar_reward",
"(",
"value",
")",
"if",
"self",
".",
"optimize_mode",
"==",
"OptimizeMode",
".",
"Maximize",
":",
"value",
"=",
"-",
"value",
"logger",
".",
"info",
"(",
"\"Received trial result.\"",
")",
"logger",
".",
"info",
"(",
"\"value is :\"",
"+",
"str",
"(",
"value",
")",
")",
"logger",
".",
"info",
"(",
"\"parameter is : \"",
"+",
"str",
"(",
"parameters",
")",
")",
"# parse parameter to sample_x",
"sample_x",
"=",
"[",
"0",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"self",
".",
"key_order",
")",
")",
"]",
"for",
"key",
"in",
"parameters",
":",
"idx",
"=",
"self",
".",
"key_order",
".",
"index",
"(",
"key",
")",
"sample_x",
"[",
"idx",
"]",
"=",
"parameters",
"[",
"key",
"]",
"# parse value to sample_y",
"temp_y",
"=",
"[",
"]",
"if",
"sample_x",
"in",
"self",
".",
"samples_x",
":",
"idx",
"=",
"self",
".",
"samples_x",
".",
"index",
"(",
"sample_x",
")",
"temp_y",
"=",
"self",
".",
"samples_y",
"[",
"idx",
"]",
"temp_y",
".",
"append",
"(",
"value",
")",
"self",
".",
"samples_y",
"[",
"idx",
"]",
"=",
"temp_y",
"# calculate y aggregation",
"median",
"=",
"get_median",
"(",
"temp_y",
")",
"self",
".",
"samples_y_aggregation",
"[",
"idx",
"]",
"=",
"[",
"median",
"]",
"else",
":",
"self",
".",
"samples_x",
".",
"append",
"(",
"sample_x",
")",
"self",
".",
"samples_y",
".",
"append",
"(",
"[",
"value",
"]",
")",
"# calculate y aggregation",
"self",
".",
"samples_y_aggregation",
".",
"append",
"(",
"[",
"value",
"]",
")"
]
| Tuner receives result from trial.
Parameters
----------
parameter_id : int
parameters : dict
value : dict/float
if value is dict, it should have "default" key. | [
"Tuner",
"receive",
"result",
"from",
"trial",
"."
]
| python | train | 32.731707 |
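
The repeated-sample bookkeeping above reduces duplicate configurations to their median; a self-contained sketch of just that logic, with invented sample points:

    from statistics import median

    samples_x, samples_y, samples_y_aggregation = [], [], []

    def record(sample_x, value):
        # Mirrors the branch above: new points are appended, repeats of a
        # known sample_x are re-aggregated to the median of all observations.
        if sample_x in samples_x:
            idx = samples_x.index(sample_x)
            samples_y[idx].append(value)
            samples_y_aggregation[idx] = [median(samples_y[idx])]
        else:
            samples_x.append(sample_x)
            samples_y.append([value])
            samples_y_aggregation.append([value])

    record([0.1, 32], 0.90)
    record([0.1, 32], 0.80)  # same configuration observed twice
    record([0.2, 64], 0.95)
    print(samples_y_aggregation)  # [[0.85], [0.95]] (up to float rounding)
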
limodou/uliweb | uliweb/core/SimpleFrame.py | https://github.com/limodou/uliweb/blob/34472f25e4bc0b954a35346672f94e84ef18b076/uliweb/core/SimpleFrame.py#L1391-L1404 | def get_config(self, config_filename):
"""
        Collect all config files in all available apps, and merge them into an ini object
:return: ini object
"""
x = pyini.Ini(lazy=True, basepath=os.path.join(self.project_dir, 'apps'))
for p in reversed(self.apps):
app_path = get_app_dir(p)
filename = os.path.join(app_path, config_filename)
if os.path.exists(filename):
x.read(filename)
x.freeze()
return x | [
"def",
"get_config",
"(",
"self",
",",
"config_filename",
")",
":",
"x",
"=",
"pyini",
".",
"Ini",
"(",
"lazy",
"=",
"True",
",",
"basepath",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"project_dir",
",",
"'apps'",
")",
")",
"for",
"p",
"in",
"reversed",
"(",
"self",
".",
"apps",
")",
":",
"app_path",
"=",
"get_app_dir",
"(",
"p",
")",
"filename",
"=",
"os",
".",
"path",
".",
"join",
"(",
"app_path",
",",
"config_filename",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"filename",
")",
":",
"x",
".",
"read",
"(",
"filename",
")",
"x",
".",
"freeze",
"(",
")",
"return",
"x"
]
| Collect all config files in all available apps, and merge them into an ini object
:return: ini object | [
"Collection",
"all",
"config",
"file",
"in",
"all",
"available",
"apps",
"and",
"merge",
"them",
"into",
"ini",
"object",
":",
"return",
":",
"ini",
"object"
]
| python | train | 35.571429 |
NicolasLM/atoma | atoma/opml.py | https://github.com/NicolasLM/atoma/blob/16c6956112f975eb2ce774b2d5f8e9ddffde569f/atoma/opml.py#L88-L91 | def parse_opml_bytes(data: bytes) -> OPML:
"""Parse an OPML document from a byte-string containing XML data."""
root = parse_xml(BytesIO(data)).getroot()
return _parse_opml(root) | [
"def",
"parse_opml_bytes",
"(",
"data",
":",
"bytes",
")",
"->",
"OPML",
":",
"root",
"=",
"parse_xml",
"(",
"BytesIO",
"(",
"data",
")",
")",
".",
"getroot",
"(",
")",
"return",
"_parse_opml",
"(",
"root",
")"
]
| Parse an OPML document from a byte-string containing XML data. | [
"Parse",
"an",
"OPML",
"document",
"from",
"a",
"byte",
"-",
"string",
"containing",
"XML",
"data",
"."
]
| python | train | 46.75 |
tensorflow/tensor2tensor | tensor2tensor/layers/transformer_layers.py | https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/transformer_layers.py#L242-L380 | def transformer_ffn_layer(x,
hparams,
pad_remover=None,
conv_padding="LEFT",
nonpadding_mask=None,
losses=None,
cache=None,
decode_loop_step=None,
readout_filter_size=0,
layer_collection=None):
"""Feed-forward layer in the transformer.
Args:
x: a Tensor of shape [batch_size, length, hparams.hidden_size]
hparams: hyperparameters for model
pad_remover: an expert_utils.PadRemover object tracking the padding
positions. If provided, when using convolutional settings, the padding
is removed before applying the convolution, and restored afterward. This
can give a significant speedup.
conv_padding: a string - either "LEFT" or "SAME".
nonpadding_mask: an optional Tensor with shape [batch_size, length].
needed for convolutional layers with "SAME" padding.
Contains 1.0 in positions corresponding to nonpadding.
losses: optional list onto which to append extra training losses
cache: dict, containing tensors which are the results of previous
attentions, used for fast decoding.
decode_loop_step: An integer, step number of the decoding loop.
Only used for inference on TPU.
readout_filter_size: if it's greater than 0, then it will be used instead of
filter_size
layer_collection: A tensorflow_kfac.LayerCollection. Only used by the
KFAC optimizer. Default is None.
Returns:
a Tensor of shape [batch_size, length, hparams.hidden_size]
Raises:
ValueError: If losses arg is None, but layer generates extra losses.
"""
ffn_layer = hparams.ffn_layer
relu_dropout_broadcast_dims = (
common_layers.comma_separated_string_to_integer_list(
getattr(hparams, "relu_dropout_broadcast_dims", "")))
if ffn_layer == "conv_hidden_relu":
# Backwards compatibility
ffn_layer = "dense_relu_dense"
if ffn_layer == "dense_relu_dense":
# In simple convolution mode, use `pad_remover` to speed up processing.
mlperf_log.transformer_print(
key=mlperf_log.MODEL_HP_FFN_FILTER_DENSE,
value={
"filter_size": hparams.filter_size,
"use_bias": "True",
"activation": mlperf_log.RELU
})
mlperf_log.transformer_print(
key=mlperf_log.MODEL_HP_FFN_OUTPUT_DENSE,
value={
"hidden_size": hparams.hidden_size,
"use_bias": "True",
})
mlperf_log.transformer_print(
key=mlperf_log.MODEL_HP_RELU_DROPOUT, value=hparams.relu_dropout)
if pad_remover:
original_shape = common_layers.shape_list(x)
# Collapse `x` across examples, and remove padding positions.
x = tf.reshape(x, tf.concat([[-1], original_shape[2:]], axis=0))
x = tf.expand_dims(pad_remover.remove(x), axis=0)
conv_output = common_layers.dense_relu_dense(
x,
hparams.filter_size,
hparams.hidden_size,
dropout=hparams.relu_dropout,
dropout_broadcast_dims=relu_dropout_broadcast_dims,
layer_collection=layer_collection)
if pad_remover:
# Restore `conv_output` to the original shape of `x`, including padding.
conv_output = tf.reshape(
pad_remover.restore(tf.squeeze(conv_output, axis=0)), original_shape)
return conv_output
elif ffn_layer == "conv_relu_conv":
return common_layers.conv_relu_conv(
x,
readout_filter_size or hparams.filter_size,
hparams.hidden_size,
first_kernel_size=hparams.conv_first_kernel,
second_kernel_size=1,
padding=conv_padding,
nonpadding_mask=nonpadding_mask,
dropout=hparams.relu_dropout,
cache=cache,
decode_loop_step=decode_loop_step)
elif ffn_layer == "parameter_attention":
return common_attention.parameter_attention(
x, hparams.parameter_attention_key_channels or hparams.hidden_size,
hparams.parameter_attention_value_channels or hparams.hidden_size,
hparams.hidden_size, readout_filter_size or hparams.filter_size,
hparams.num_heads,
hparams.attention_dropout)
elif ffn_layer == "conv_hidden_relu_with_sepconv":
return common_layers.conv_hidden_relu(
x,
readout_filter_size or hparams.filter_size,
hparams.hidden_size,
kernel_size=(3, 1),
second_kernel_size=(31, 1),
padding="LEFT",
dropout=hparams.relu_dropout)
elif ffn_layer == "sru":
return common_layers.sru(x)
elif ffn_layer == "local_moe_tpu":
overhead = hparams.moe_overhead_eval
if hparams.mode == tf.estimator.ModeKeys.TRAIN:
overhead = hparams.moe_overhead_train
ret, loss = expert_utils.local_moe_tpu(
x,
hparams.filter_size // 2,
hparams.hidden_size,
hparams.moe_num_experts,
overhead=overhead,
loss_coef=hparams.moe_loss_coef)
elif ffn_layer == "local_moe":
overhead = hparams.moe_overhead_eval
if hparams.mode == tf.estimator.ModeKeys.TRAIN:
overhead = hparams.moe_overhead_train
ret, loss = expert_utils.local_moe(
x,
True,
expert_utils.ffn_expert_fn(hparams.hidden_size, [hparams.filter_size],
hparams.hidden_size),
hparams.moe_num_experts,
k=hparams.moe_k,
hparams=hparams)
losses.append(loss)
return ret
else:
assert ffn_layer == "none"
return x | [
"def",
"transformer_ffn_layer",
"(",
"x",
",",
"hparams",
",",
"pad_remover",
"=",
"None",
",",
"conv_padding",
"=",
"\"LEFT\"",
",",
"nonpadding_mask",
"=",
"None",
",",
"losses",
"=",
"None",
",",
"cache",
"=",
"None",
",",
"decode_loop_step",
"=",
"None",
",",
"readout_filter_size",
"=",
"0",
",",
"layer_collection",
"=",
"None",
")",
":",
"ffn_layer",
"=",
"hparams",
".",
"ffn_layer",
"relu_dropout_broadcast_dims",
"=",
"(",
"common_layers",
".",
"comma_separated_string_to_integer_list",
"(",
"getattr",
"(",
"hparams",
",",
"\"relu_dropout_broadcast_dims\"",
",",
"\"\"",
")",
")",
")",
"if",
"ffn_layer",
"==",
"\"conv_hidden_relu\"",
":",
"# Backwards compatibility",
"ffn_layer",
"=",
"\"dense_relu_dense\"",
"if",
"ffn_layer",
"==",
"\"dense_relu_dense\"",
":",
"# In simple convolution mode, use `pad_remover` to speed up processing.",
"mlperf_log",
".",
"transformer_print",
"(",
"key",
"=",
"mlperf_log",
".",
"MODEL_HP_FFN_FILTER_DENSE",
",",
"value",
"=",
"{",
"\"filter_size\"",
":",
"hparams",
".",
"filter_size",
",",
"\"use_bias\"",
":",
"\"True\"",
",",
"\"activation\"",
":",
"mlperf_log",
".",
"RELU",
"}",
")",
"mlperf_log",
".",
"transformer_print",
"(",
"key",
"=",
"mlperf_log",
".",
"MODEL_HP_FFN_OUTPUT_DENSE",
",",
"value",
"=",
"{",
"\"hidden_size\"",
":",
"hparams",
".",
"hidden_size",
",",
"\"use_bias\"",
":",
"\"True\"",
",",
"}",
")",
"mlperf_log",
".",
"transformer_print",
"(",
"key",
"=",
"mlperf_log",
".",
"MODEL_HP_RELU_DROPOUT",
",",
"value",
"=",
"hparams",
".",
"relu_dropout",
")",
"if",
"pad_remover",
":",
"original_shape",
"=",
"common_layers",
".",
"shape_list",
"(",
"x",
")",
"# Collapse `x` across examples, and remove padding positions.",
"x",
"=",
"tf",
".",
"reshape",
"(",
"x",
",",
"tf",
".",
"concat",
"(",
"[",
"[",
"-",
"1",
"]",
",",
"original_shape",
"[",
"2",
":",
"]",
"]",
",",
"axis",
"=",
"0",
")",
")",
"x",
"=",
"tf",
".",
"expand_dims",
"(",
"pad_remover",
".",
"remove",
"(",
"x",
")",
",",
"axis",
"=",
"0",
")",
"conv_output",
"=",
"common_layers",
".",
"dense_relu_dense",
"(",
"x",
",",
"hparams",
".",
"filter_size",
",",
"hparams",
".",
"hidden_size",
",",
"dropout",
"=",
"hparams",
".",
"relu_dropout",
",",
"dropout_broadcast_dims",
"=",
"relu_dropout_broadcast_dims",
",",
"layer_collection",
"=",
"layer_collection",
")",
"if",
"pad_remover",
":",
"# Restore `conv_output` to the original shape of `x`, including padding.",
"conv_output",
"=",
"tf",
".",
"reshape",
"(",
"pad_remover",
".",
"restore",
"(",
"tf",
".",
"squeeze",
"(",
"conv_output",
",",
"axis",
"=",
"0",
")",
")",
",",
"original_shape",
")",
"return",
"conv_output",
"elif",
"ffn_layer",
"==",
"\"conv_relu_conv\"",
":",
"return",
"common_layers",
".",
"conv_relu_conv",
"(",
"x",
",",
"readout_filter_size",
"or",
"hparams",
".",
"filter_size",
",",
"hparams",
".",
"hidden_size",
",",
"first_kernel_size",
"=",
"hparams",
".",
"conv_first_kernel",
",",
"second_kernel_size",
"=",
"1",
",",
"padding",
"=",
"conv_padding",
",",
"nonpadding_mask",
"=",
"nonpadding_mask",
",",
"dropout",
"=",
"hparams",
".",
"relu_dropout",
",",
"cache",
"=",
"cache",
",",
"decode_loop_step",
"=",
"decode_loop_step",
")",
"elif",
"ffn_layer",
"==",
"\"parameter_attention\"",
":",
"return",
"common_attention",
".",
"parameter_attention",
"(",
"x",
",",
"hparams",
".",
"parameter_attention_key_channels",
"or",
"hparams",
".",
"hidden_size",
",",
"hparams",
".",
"parameter_attention_value_channels",
"or",
"hparams",
".",
"hidden_size",
",",
"hparams",
".",
"hidden_size",
",",
"readout_filter_size",
"or",
"hparams",
".",
"filter_size",
",",
"hparams",
".",
"num_heads",
",",
"hparams",
".",
"attention_dropout",
")",
"elif",
"ffn_layer",
"==",
"\"conv_hidden_relu_with_sepconv\"",
":",
"return",
"common_layers",
".",
"conv_hidden_relu",
"(",
"x",
",",
"readout_filter_size",
"or",
"hparams",
".",
"filter_size",
",",
"hparams",
".",
"hidden_size",
",",
"kernel_size",
"=",
"(",
"3",
",",
"1",
")",
",",
"second_kernel_size",
"=",
"(",
"31",
",",
"1",
")",
",",
"padding",
"=",
"\"LEFT\"",
",",
"dropout",
"=",
"hparams",
".",
"relu_dropout",
")",
"elif",
"ffn_layer",
"==",
"\"sru\"",
":",
"return",
"common_layers",
".",
"sru",
"(",
"x",
")",
"elif",
"ffn_layer",
"==",
"\"local_moe_tpu\"",
":",
"overhead",
"=",
"hparams",
".",
"moe_overhead_eval",
"if",
"hparams",
".",
"mode",
"==",
"tf",
".",
"estimator",
".",
"ModeKeys",
".",
"TRAIN",
":",
"overhead",
"=",
"hparams",
".",
"moe_overhead_train",
"ret",
",",
"loss",
"=",
"expert_utils",
".",
"local_moe_tpu",
"(",
"x",
",",
"hparams",
".",
"filter_size",
"//",
"2",
",",
"hparams",
".",
"hidden_size",
",",
"hparams",
".",
"moe_num_experts",
",",
"overhead",
"=",
"overhead",
",",
"loss_coef",
"=",
"hparams",
".",
"moe_loss_coef",
")",
"elif",
"ffn_layer",
"==",
"\"local_moe\"",
":",
"overhead",
"=",
"hparams",
".",
"moe_overhead_eval",
"if",
"hparams",
".",
"mode",
"==",
"tf",
".",
"estimator",
".",
"ModeKeys",
".",
"TRAIN",
":",
"overhead",
"=",
"hparams",
".",
"moe_overhead_train",
"ret",
",",
"loss",
"=",
"expert_utils",
".",
"local_moe",
"(",
"x",
",",
"True",
",",
"expert_utils",
".",
"ffn_expert_fn",
"(",
"hparams",
".",
"hidden_size",
",",
"[",
"hparams",
".",
"filter_size",
"]",
",",
"hparams",
".",
"hidden_size",
")",
",",
"hparams",
".",
"moe_num_experts",
",",
"k",
"=",
"hparams",
".",
"moe_k",
",",
"hparams",
"=",
"hparams",
")",
"losses",
".",
"append",
"(",
"loss",
")",
"return",
"ret",
"else",
":",
"assert",
"ffn_layer",
"==",
"\"none\"",
"return",
"x"
]
| Feed-forward layer in the transformer.
Args:
x: a Tensor of shape [batch_size, length, hparams.hidden_size]
hparams: hyperparameters for model
pad_remover: an expert_utils.PadRemover object tracking the padding
positions. If provided, when using convolutional settings, the padding
is removed before applying the convolution, and restored afterward. This
can give a significant speedup.
conv_padding: a string - either "LEFT" or "SAME".
nonpadding_mask: an optional Tensor with shape [batch_size, length].
needed for convolutional layers with "SAME" padding.
Contains 1.0 in positions corresponding to nonpadding.
losses: optional list onto which to append extra training losses
cache: dict, containing tensors which are the results of previous
attentions, used for fast decoding.
decode_loop_step: An integer, step number of the decoding loop.
Only used for inference on TPU.
readout_filter_size: if it's greater than 0, then it will be used instead of
filter_size
layer_collection: A tensorflow_kfac.LayerCollection. Only used by the
KFAC optimizer. Default is None.
Returns:
a Tensor of shape [batch_size, length, hparams.hidden_size]
Raises:
ValueError: If losses arg is None, but layer generates extra losses. | [
"Feed",
"-",
"forward",
"layer",
"in",
"the",
"transformer",
"."
]
| python | train | 39.179856 |
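
A rough smoke test of the default dense_relu_dense branch (module paths follow the repository layout above; assumes a TF1-compatible tensor2tensor install):

    import tensorflow as tf
    from tensor2tensor.layers import transformer_layers
    from tensor2tensor.models import transformer

    hparams = transformer.transformer_base()   # ffn_layer="dense_relu_dense"
    x = tf.zeros([2, 7, hparams.hidden_size])  # [batch, length, hidden]
    y = transformer_layers.transformer_ffn_layer(x, hparams)
    print(y.shape)  # (2, 7, 512): the FFN preserves the input shape
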
python-rope/rope | rope/base/pycore.py | https://github.com/python-rope/rope/blob/1c9f9cd5964b099a99a9111e998f0dc728860688/rope/base/pycore.py#L326-L334 | def is_changed(self, start, end):
"""Tell whether any of start till end lines have changed
The end points are inclusive and indices start from 1.
"""
left, right = self._get_changed(start, end)
if left < right:
return True
return False | [
"def",
"is_changed",
"(",
"self",
",",
"start",
",",
"end",
")",
":",
"left",
",",
"right",
"=",
"self",
".",
"_get_changed",
"(",
"start",
",",
"end",
")",
"if",
"left",
"<",
"right",
":",
"return",
"True",
"return",
"False"
]
| Tell whether any of the lines from start through end have changed
The end points are inclusive and indices start from 1. | [
"Tell",
"whether",
"any",
"of",
"start",
"till",
"end",
"lines",
"have",
"changed"
]
| python | train | 32 |
jepegit/cellpy | cellpy/utils/ocv_rlx.py | https://github.com/jepegit/cellpy/blob/9f4a84cdd11f72cfa02cda8c2d7b5174abbb7370/cellpy/utils/ocv_rlx.py#L325-L338 | def get_best_fit_parameters_translated_grouped(self):
"""Returns the parameters as a dictionary of the 'real units' for the best fit."""
result_dict = dict()
result_dict['ocv'] = [parameters['ocv'] for parameters in
self.best_fit_parameters_translated]
result_dict['ir'] = [parameters['ir'] for parameters in
self.best_fit_parameters_translated]
for i in range(self.circuits):
result_dict['r' + str(i)] = [parameters['r' + str(i)] for parameters
in self.best_fit_parameters_translated]
result_dict['c' + str(i)] = [parameters['c' + str(i)] for parameters
in self.best_fit_parameters_translated]
return result_dict | [
"def",
"get_best_fit_parameters_translated_grouped",
"(",
"self",
")",
":",
"result_dict",
"=",
"dict",
"(",
")",
"result_dict",
"[",
"'ocv'",
"]",
"=",
"[",
"parameters",
"[",
"'ocv'",
"]",
"for",
"parameters",
"in",
"self",
".",
"best_fit_parameters_translated",
"]",
"result_dict",
"[",
"'ir'",
"]",
"=",
"[",
"parameters",
"[",
"'ir'",
"]",
"for",
"parameters",
"in",
"self",
".",
"best_fit_parameters_translated",
"]",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"circuits",
")",
":",
"result_dict",
"[",
"'r'",
"+",
"str",
"(",
"i",
")",
"]",
"=",
"[",
"parameters",
"[",
"'r'",
"+",
"str",
"(",
"i",
")",
"]",
"for",
"parameters",
"in",
"self",
".",
"best_fit_parameters_translated",
"]",
"result_dict",
"[",
"'c'",
"+",
"str",
"(",
"i",
")",
"]",
"=",
"[",
"parameters",
"[",
"'c'",
"+",
"str",
"(",
"i",
")",
"]",
"for",
"parameters",
"in",
"self",
".",
"best_fit_parameters_translated",
"]",
"return",
"result_dict"
]
| Returns the parameters as a dictionary of the 'real units' for the best fit. | [
"Returns",
"the",
"parameters",
"as",
"a",
"dictionary",
"of",
"the",
"real",
"units",
"for",
"the",
"best",
"fit",
"."
]
| python | train | 58.142857 |
iotile/coretools | iotilebuild/iotile/build/tilebus/block.py | https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilebuild/iotile/build/tilebus/block.py#L46-L63 | def to_dict(self):
"""Convert this object into a dictionary.
Returns:
dict: A dict with the same information as this object.
"""
out_dict = {}
out_dict['commands'] = self.commands
out_dict['configs'] = self.configs
out_dict['short_name'] = self.name
out_dict['versions'] = {
'module': self.module_version,
'api': self.api_version
}
return out_dict | [
"def",
"to_dict",
"(",
"self",
")",
":",
"out_dict",
"=",
"{",
"}",
"out_dict",
"[",
"'commands'",
"]",
"=",
"self",
".",
"commands",
"out_dict",
"[",
"'configs'",
"]",
"=",
"self",
".",
"configs",
"out_dict",
"[",
"'short_name'",
"]",
"=",
"self",
".",
"name",
"out_dict",
"[",
"'versions'",
"]",
"=",
"{",
"'module'",
":",
"self",
".",
"module_version",
",",
"'api'",
":",
"self",
".",
"api_version",
"}",
"return",
"out_dict"
]
| Convert this object into a dictionary.
Returns:
dict: A dict with the same information as this object. | [
"Convert",
"this",
"object",
"into",
"a",
"dictionary",
"."
]
| python | train | 25 |
keon/algorithms | algorithms/set/set_covering.py | https://github.com/keon/algorithms/blob/4d6569464a62a75c1357acc97e2dd32ee2f9f4a3/algorithms/set/set_covering.py#L25-L34 | def powerset(iterable):
"""Calculate the powerset of any iterable.
For a range of integers up to the length of the given list,
make all possible combinations and chain them together as one object.
From https://docs.python.org/3/library/itertools.html#itertools-recipes
"""
"list(powerset([1,2,3])) --> [(), (1,), (2,), (3,), (1,2), (1,3), (2,3), (1,2,3)]"
s = list(iterable)
return chain.from_iterable(combinations(s, r) for r in range(len(s) + 1)) | [
"def",
"powerset",
"(",
"iterable",
")",
":",
"\"list(powerset([1,2,3])) --> [(), (1,), (2,), (3,), (1,2), (1,3), (2,3), (1,2,3)]\"",
"s",
"=",
"list",
"(",
"iterable",
")",
"return",
"chain",
".",
"from_iterable",
"(",
"combinations",
"(",
"s",
",",
"r",
")",
"for",
"r",
"in",
"range",
"(",
"len",
"(",
"s",
")",
"+",
"1",
")",
")"
]
| Calculate the powerset of any iterable.
For a range of integers up to the length of the given list,
make all possible combinations and chain them together as one object.
From https://docs.python.org/3/library/itertools.html#itertools-recipes | [
"Calculate",
"the",
"powerset",
"of",
"any",
"iterable",
"."
]
| python | train | 47.2 |
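
Since this generator drives the module's brute-force set cover, a quick end-to-end check (universe and subsets invented for illustration):

    from itertools import chain, combinations

    def powerset(iterable):
        s = list(iterable)
        return chain.from_iterable(combinations(s, r) for r in range(len(s) + 1))

    universe = {1, 2, 3, 4}
    subsets = [{1, 2}, {2, 3}, {3, 4}, {1, 4}]

    # Smallest family of subsets whose union covers the universe; powerset()
    # yields candidates in increasing size, and min() keeps the first tie.
    cover = min((c for c in powerset(subsets) if set().union(*c) == universe),
                key=len)
    print(cover)  # ({1, 2}, {3, 4})
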
gem/oq-engine | openquake/hazardlib/gsim/skarlatoudis_2013.py | https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hazardlib/gsim/skarlatoudis_2013.py#L76-L105 | def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types):
"""
See :meth:`superclass method
<.base.GroundShakingIntensityModel.get_mean_and_stddevs>`
for spec of input and result values.
"""
# extracting dictionary of coefficients specific to required
# intensity measure type.
C = self.COEFFS[imt]
imean = (self._compute_magnitude(rup, C) +
self._compute_distance(rup, dists, C) +
self._get_site_amplification(sites, C) +
self._compute_forearc_backarc_term(C, sites, dists, rup))
istddevs = self._get_stddevs(C,
stddev_types,
num_sites=len(sites.vs30))
# Convert units to g,
# but only for PGA and SA (not PGV):
if imt.name in "SA PGA":
mean = np.log((10.0 ** (imean - 2.0)) / g)
else:
# PGV:
mean = np.log(10.0 ** imean)
# Return stddevs in terms of natural log scaling
stddevs = np.log(10.0 ** np.array(istddevs))
# mean_LogNaturale = np.log((10 ** mean) * 1e-2 / g)
return mean, stddevs | [
"def",
"get_mean_and_stddevs",
"(",
"self",
",",
"sites",
",",
"rup",
",",
"dists",
",",
"imt",
",",
"stddev_types",
")",
":",
"# extracting dictionary of coefficients specific to required",
"# intensity measure type.",
"C",
"=",
"self",
".",
"COEFFS",
"[",
"imt",
"]",
"imean",
"=",
"(",
"self",
".",
"_compute_magnitude",
"(",
"rup",
",",
"C",
")",
"+",
"self",
".",
"_compute_distance",
"(",
"rup",
",",
"dists",
",",
"C",
")",
"+",
"self",
".",
"_get_site_amplification",
"(",
"sites",
",",
"C",
")",
"+",
"self",
".",
"_compute_forearc_backarc_term",
"(",
"C",
",",
"sites",
",",
"dists",
",",
"rup",
")",
")",
"istddevs",
"=",
"self",
".",
"_get_stddevs",
"(",
"C",
",",
"stddev_types",
",",
"num_sites",
"=",
"len",
"(",
"sites",
".",
"vs30",
")",
")",
"# Convert units to g,",
"# but only for PGA and SA (not PGV):",
"if",
"imt",
".",
"name",
"in",
"\"SA PGA\"",
":",
"mean",
"=",
"np",
".",
"log",
"(",
"(",
"10.0",
"**",
"(",
"imean",
"-",
"2.0",
")",
")",
"/",
"g",
")",
"else",
":",
"# PGV:",
"mean",
"=",
"np",
".",
"log",
"(",
"10.0",
"**",
"imean",
")",
"# Return stddevs in terms of natural log scaling",
"stddevs",
"=",
"np",
".",
"log",
"(",
"10.0",
"**",
"np",
".",
"array",
"(",
"istddevs",
")",
")",
"# mean_LogNaturale = np.log((10 ** mean) * 1e-2 / g)",
"return",
"mean",
",",
"stddevs"
]
| See :meth:`superclass method
<.base.GroundShakingIntensityModel.get_mean_and_stddevs>`
for spec of input and result values. | [
"See",
":",
"meth",
":",
"superclass",
"method",
"<",
".",
"base",
".",
"GroundShakingIntensityModel",
".",
"get_mean_and_stddevs",
">",
"for",
"spec",
"of",
"input",
"and",
"result",
"values",
"."
]
| python | train | 39.333333 |
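
The unit handling at the end is worth a worked check: for PGA/SA the model's mean is the log10 of an acceleration in cm/s², so subtracting 2 converts cm to m and dividing by g re-expresses it in natural-log g units (a sketch; g taken from scipy as in the library):

    import numpy as np
    from scipy.constants import g  # 9.80665 m/s^2

    imean = 2.5  # hypothetical log10(PGA in cm/s^2)
    mean = np.log((10.0 ** (imean - 2.0)) / g)
    print(mean, np.exp(mean))  # about -1.13, i.e. roughly 0.32 g
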
raiden-network/raiden | raiden/storage/sqlite.py | https://github.com/raiden-network/raiden/blob/407ba15c72074e9de88771d6b9661ff4dc36bef5/raiden/storage/sqlite.py#L415-L422 | def update_state_changes(self, state_changes_data: List[Tuple[str, int]]) -> None:
"""Given a list of identifier/data state tuples update them in the DB"""
cursor = self.conn.cursor()
cursor.executemany(
'UPDATE state_changes SET data=? WHERE identifier=?',
state_changes_data,
)
self.maybe_commit() | [
"def",
"update_state_changes",
"(",
"self",
",",
"state_changes_data",
":",
"List",
"[",
"Tuple",
"[",
"str",
",",
"int",
"]",
"]",
")",
"->",
"None",
":",
"cursor",
"=",
"self",
".",
"conn",
".",
"cursor",
"(",
")",
"cursor",
".",
"executemany",
"(",
"'UPDATE state_changes SET data=? WHERE identifier=?'",
",",
"state_changes_data",
",",
")",
"self",
".",
"maybe_commit",
"(",
")"
]
| Given a list of identifier/data state tuples update them in the DB | [
"Given",
"a",
"list",
"of",
"identifier",
"/",
"data",
"state",
"tuples",
"update",
"them",
"in",
"the",
"DB"
]
| python | train | 44.5 |
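
The same UPDATE pattern against a throwaway sqlite3 schema; note each tuple is (data, identifier), matching the placeholder order in the statement above:

    import sqlite3

    conn = sqlite3.connect(":memory:")
    conn.execute("CREATE TABLE state_changes (identifier INTEGER PRIMARY KEY, data TEXT)")
    conn.executemany("INSERT INTO state_changes VALUES (?, ?)",
                     [(1, "old"), (2, "old")])

    # Tuples ordered (new_data, identifier) to line up with SET data=? WHERE identifier=?
    conn.executemany("UPDATE state_changes SET data=? WHERE identifier=?",
                     [("new-1", 1), ("new-2", 2)])
    conn.commit()
    print(conn.execute("SELECT * FROM state_changes").fetchall())
    # [(1, 'new-1'), (2, 'new-2')]
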
ministryofjustice/money-to-prisoners-common | mtp_common/templatetags/mtp_common.py | https://github.com/ministryofjustice/money-to-prisoners-common/blob/33c43a2912cb990d9148da7c8718f480f07d90a1/mtp_common/templatetags/mtp_common.py#L115-L128 | def get_form_errors(form):
"""
    Django form errors do not obey natural field order;
this template tag returns non-field and field-specific errors
:param form: the form instance
"""
return {
'non_field': form.non_field_errors(),
'field_specific': OrderedDict(
(field, form.errors[field.name])
for field in form
if field.name in form.errors
)
} | [
"def",
"get_form_errors",
"(",
"form",
")",
":",
"return",
"{",
"'non_field'",
":",
"form",
".",
"non_field_errors",
"(",
")",
",",
"'field_specific'",
":",
"OrderedDict",
"(",
"(",
"field",
",",
"form",
".",
"errors",
"[",
"field",
".",
"name",
"]",
")",
"for",
"field",
"in",
"form",
"if",
"field",
".",
"name",
"in",
"form",
".",
"errors",
")",
"}"
]
| Django form errors do not obey natural field order;
this template tag returns non-field and field-specific errors
:param form: the form instance | [
"Django",
"form",
"errors",
"do",
"not",
"obey",
"natural",
"field",
"order",
"this",
"template",
"tag",
"returns",
"non",
"-",
"field",
"and",
"field",
"-",
"specific",
"errors",
":",
"param",
"form",
":",
"the",
"form",
"instance"
]
| python | train | 29.714286 |
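
Called directly for illustration (in templates it runs as a tag); a sketch assuming a configured Django project and a trivial form:

    from django import forms

    class LoginForm(forms.Form):
        username = forms.CharField()
        password = forms.CharField()

    form = LoginForm(data={})  # nothing submitted, so every field errors
    form.is_valid()
    errors = get_form_errors(form)  # the tag function above
    for field, errs in errors['field_specific'].items():
        print(field.name, list(errs))
    # username ['This field is required.']
    # password ['This field is required.']
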
ramrod-project/database-brain | schema/brain/telemetry/reads.py | https://github.com/ramrod-project/database-brain/blob/b024cb44f34cabb9d80af38271ddb65c25767083/schema/brain/telemetry/reads.py#L10-L16 | def target_query(plugin, port, location):
"""
prepared ReQL for target
"""
return ((r.row[PLUGIN_NAME_KEY] == plugin) &
(r.row[PORT_FIELD] == port) &
(r.row[LOCATION_FIELD] == location)) | [
"def",
"target_query",
"(",
"plugin",
",",
"port",
",",
"location",
")",
":",
"return",
"(",
"(",
"r",
".",
"row",
"[",
"PLUGIN_NAME_KEY",
"]",
"==",
"plugin",
")",
"&",
"(",
"r",
".",
"row",
"[",
"PORT_FIELD",
"]",
"==",
"port",
")",
"&",
"(",
"r",
".",
"row",
"[",
"LOCATION_FIELD",
"]",
"==",
"location",
")",
")"
]
| prepared ReQL for target | [
"prepared",
"ReQL",
"for",
"target"
]
| python | train | 31.428571 |
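
The predicate is meant to be handed to a rethinkdb filter; a hedged sketch with the classic rethinkdb driver API and invented database, table, and argument values:

    import rethinkdb as r

    conn = r.connect("localhost", 28015)
    matching = r.db("brain").table("targets").filter(
        target_query("Harness", "5000", "docker")  # hypothetical values
    ).run(conn)
    for row in matching:
        print(row)
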
log2timeline/dfvfs | dfvfs/file_io/fake_file_io.py | https://github.com/log2timeline/dfvfs/blob/2b3ccd115f9901d89f383397d4a1376a873c83c4/dfvfs/file_io/fake_file_io.py#L31-L56 | def _Open(self, path_spec=None, mode='rb'):
"""Opens the file-like object defined by path specification.
Args:
path_spec (PathSpec): path specification.
mode (Optional[str]): file access mode.
Raises:
AccessError: if the access to open the file was denied.
IOError: if the file-like object could not be opened.
OSError: if the file-like object could not be opened.
PathSpecError: if the path specification is incorrect.
ValueError: if the path specification is invalid.
"""
if not path_spec:
raise ValueError('Missing path specification.')
if path_spec.HasParent():
raise errors.PathSpecError('Unsupported path specification with parent.')
location = getattr(path_spec, 'location', None)
if location is None:
raise errors.PathSpecError('Path specification missing location.')
self._current_offset = 0
self._size = len(self._file_data) | [
"def",
"_Open",
"(",
"self",
",",
"path_spec",
"=",
"None",
",",
"mode",
"=",
"'rb'",
")",
":",
"if",
"not",
"path_spec",
":",
"raise",
"ValueError",
"(",
"'Missing path specification.'",
")",
"if",
"path_spec",
".",
"HasParent",
"(",
")",
":",
"raise",
"errors",
".",
"PathSpecError",
"(",
"'Unsupported path specification with parent.'",
")",
"location",
"=",
"getattr",
"(",
"path_spec",
",",
"'location'",
",",
"None",
")",
"if",
"location",
"is",
"None",
":",
"raise",
"errors",
".",
"PathSpecError",
"(",
"'Path specification missing location.'",
")",
"self",
".",
"_current_offset",
"=",
"0",
"self",
".",
"_size",
"=",
"len",
"(",
"self",
".",
"_file_data",
")"
]
| Opens the file-like object defined by path specification.
Args:
path_spec (PathSpec): path specification.
mode (Optional[str]): file access mode.
Raises:
AccessError: if the access to open the file was denied.
IOError: if the file-like object could not be opened.
OSError: if the file-like object could not be opened.
PathSpecError: if the path specification is incorrect.
ValueError: if the path specification is invalid. | [
"Opens",
"the",
"file",
"-",
"like",
"object",
"defined",
"by",
"path",
"specification",
"."
]
| python | train | 35.153846 |
yt-project/unyt | unyt/_parsing.py | https://github.com/yt-project/unyt/blob/7a4eafc229f83784f4c63d639aee554f9a6b1ca0/unyt/_parsing.py#L25-L68 | def _auto_positive_symbol(tokens, local_dict, global_dict):
"""
Inserts calls to ``Symbol`` for undefined variables.
Passes in positive=True as a keyword argument.
Adapted from sympy.sympy.parsing.sympy_parser.auto_symbol
"""
result = []
tokens.append((None, None)) # so zip traverses all tokens
for tok, nextTok in zip(tokens, tokens[1:]):
tokNum, tokVal = tok
nextTokNum, nextTokVal = nextTok
if tokNum == token.NAME:
name = tokVal
if name in global_dict:
obj = global_dict[name]
if isinstance(obj, (Basic, type)) or callable(obj):
result.append((token.NAME, name))
continue
# try to resolve known alternative unit name
try:
used_name = inv_name_alternatives[str(name)]
except KeyError:
# if we don't know this name it's a user-defined unit name
# so we should create a new symbol for it
used_name = str(name)
result.extend(
[
(token.NAME, "Symbol"),
(token.OP, "("),
(token.NAME, repr(used_name)),
(token.OP, ","),
(token.NAME, "positive"),
(token.OP, "="),
(token.NAME, "True"),
(token.OP, ")"),
]
)
else:
result.append((tokNum, tokVal))
return result | [
"def",
"_auto_positive_symbol",
"(",
"tokens",
",",
"local_dict",
",",
"global_dict",
")",
":",
"result",
"=",
"[",
"]",
"tokens",
".",
"append",
"(",
"(",
"None",
",",
"None",
")",
")",
"# so zip traverses all tokens",
"for",
"tok",
",",
"nextTok",
"in",
"zip",
"(",
"tokens",
",",
"tokens",
"[",
"1",
":",
"]",
")",
":",
"tokNum",
",",
"tokVal",
"=",
"tok",
"nextTokNum",
",",
"nextTokVal",
"=",
"nextTok",
"if",
"tokNum",
"==",
"token",
".",
"NAME",
":",
"name",
"=",
"tokVal",
"if",
"name",
"in",
"global_dict",
":",
"obj",
"=",
"global_dict",
"[",
"name",
"]",
"if",
"isinstance",
"(",
"obj",
",",
"(",
"Basic",
",",
"type",
")",
")",
"or",
"callable",
"(",
"obj",
")",
":",
"result",
".",
"append",
"(",
"(",
"token",
".",
"NAME",
",",
"name",
")",
")",
"continue",
"# try to resolve known alternative unit name",
"try",
":",
"used_name",
"=",
"inv_name_alternatives",
"[",
"str",
"(",
"name",
")",
"]",
"except",
"KeyError",
":",
"# if we don't know this name it's a user-defined unit name",
"# so we should create a new symbol for it",
"used_name",
"=",
"str",
"(",
"name",
")",
"result",
".",
"extend",
"(",
"[",
"(",
"token",
".",
"NAME",
",",
"\"Symbol\"",
")",
",",
"(",
"token",
".",
"OP",
",",
"\"(\"",
")",
",",
"(",
"token",
".",
"NAME",
",",
"repr",
"(",
"used_name",
")",
")",
",",
"(",
"token",
".",
"OP",
",",
"\",\"",
")",
",",
"(",
"token",
".",
"NAME",
",",
"\"positive\"",
")",
",",
"(",
"token",
".",
"OP",
",",
"\"=\"",
")",
",",
"(",
"token",
".",
"NAME",
",",
"\"True\"",
")",
",",
"(",
"token",
".",
"OP",
",",
"\")\"",
")",
",",
"]",
")",
"else",
":",
"result",
".",
"append",
"(",
"(",
"tokNum",
",",
"tokVal",
")",
")",
"return",
"result"
]
| Inserts calls to ``Symbol`` for undefined variables.
Passes in positive=True as a keyword argument.
Adapted from sympy.sympy.parsing.sympy_parser.auto_symbol | [
"Inserts",
"calls",
"to",
"Symbol",
"for",
"undefined",
"variables",
".",
"Passes",
"in",
"positive",
"=",
"True",
"as",
"a",
"keyword",
"argument",
".",
"Adapted",
"from",
"sympy",
".",
"sympy",
".",
"parsing",
".",
"sympy_parser",
".",
"auto_symbol"
]
| python | train | 34.295455 |
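
Roughly how this hook plugs into sympy's parser: it must run before the stock auto_symbol transformation so bare unit names become positive symbols (the transformation order and minimal global_dict here mirror how a unit parser would wire it up, and are assumptions):

    from sympy import Float, Integer, Rational, Symbol
    from sympy.parsing.sympy_parser import auto_number, parse_expr, rationalize
    from unyt._parsing import _auto_positive_symbol

    transformations = (_auto_positive_symbol, rationalize, auto_number)
    global_dict = {"Symbol": Symbol, "Integer": Integer,
                   "Float": Float, "Rational": Rational}

    expr = parse_expr("km**2/s", global_dict=global_dict,
                      transformations=transformations)
    syms = sorted(expr.free_symbols, key=str)
    print(expr, [s.is_positive for s in syms])  # km**2/s [True, True]
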
wdbm/datavision | datavision.py | https://github.com/wdbm/datavision/blob/b6f26287264632d6f8c9f8911aaf3a8e4fc4dcf5/datavision.py#L1206-L1225 | def normalize_to_range(
values,
minimum = 0.0,
maximum = 1.0
):
"""
This function normalizes values of a list to a specified range and returns
the original object if the values are not of the types integer or float.
"""
normalized_values = []
minimum_value = min(values)
maximum_value = max(values)
for value in values:
numerator = value - minimum_value
denominator = maximum_value - minimum_value
value_normalized = (maximum - minimum) * numerator/denominator + minimum
normalized_values.append(value_normalized)
return normalized_values | [
"def",
"normalize_to_range",
"(",
"values",
",",
"minimum",
"=",
"0.0",
",",
"maximum",
"=",
"1.0",
")",
":",
"normalized_values",
"=",
"[",
"]",
"minimum_value",
"=",
"min",
"(",
"values",
")",
"maximum_value",
"=",
"max",
"(",
"values",
")",
"for",
"value",
"in",
"values",
":",
"numerator",
"=",
"value",
"-",
"minimum_value",
"denominator",
"=",
"maximum_value",
"-",
"minimum_value",
"value_normalized",
"=",
"(",
"maximum",
"-",
"minimum",
")",
"*",
"numerator",
"/",
"denominator",
"+",
"minimum",
"normalized_values",
".",
"append",
"(",
"value_normalized",
")",
"return",
"normalized_values"
]
| This function normalizes values of a list to a specified range and returns
the original object if the values are not of the types integer or float. | [
"This",
"function",
"normalizes",
"values",
"of",
"a",
"list",
"to",
"a",
"specified",
"range",
"and",
"returns",
"the",
"original",
"object",
"if",
"the",
"values",
"are",
"not",
"of",
"the",
"types",
"integer",
"or",
"float",
"."
]
| python | train | 30.25 |
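
A quick worked check (note one caveat the code leaves unguarded: if every value is identical the denominator is zero, so callers should handle that case):

    print(normalize_to_range([2, 4, 6, 10]))
    # [0.0, 0.25, 0.5, 1.0]
    print(normalize_to_range([2, 4, 6, 10], minimum=-1.0, maximum=1.0))
    # [-1.0, -0.5, 0.0, 1.0]
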
singularityhub/singularity-cli | spython/main/parse/docker.py | https://github.com/singularityhub/singularity-cli/blob/cb36b4504812ca87e29c6a40b222a545d1865799/spython/main/parse/docker.py#L428-L454 | def _parse(self):
'''parse is the base function for parsing the Dockerfile, and extracting
elements into the correct data structures. Everything is parsed into
lists or dictionaries that can be assembled again on demand.
Environment: Since Docker also exports environment as we go,
we add environment to the environment section and
install
Labels: include anything that is a LABEL, ARG, or (deprecated)
maintainer.
Add/Copy: are treated the same
'''
parser = None
previous = None
for line in self.lines:
parser = self._get_mapping(line, parser, previous)
# Parse it, if appropriate
if parser:
parser(line)
previous = line | [
"def",
"_parse",
"(",
"self",
")",
":",
"parser",
"=",
"None",
"previous",
"=",
"None",
"for",
"line",
"in",
"self",
".",
"lines",
":",
"parser",
"=",
"self",
".",
"_get_mapping",
"(",
"line",
",",
"parser",
",",
"previous",
")",
"# Parse it, if appropriate",
"if",
"parser",
":",
"parser",
"(",
"line",
")",
"previous",
"=",
"line"
]
| parse is the base function for parsing the Dockerfile, and extracting
elements into the correct data structures. Everything is parsed into
lists or dictionaries that can be assembled again on demand.
Environment: Since Docker also exports environment as we go,
we add environment to the environment section and
install
Labels: include anything that is a LABEL, ARG, or (deprecated)
maintainer.
Add/Copy: are treated the same | [
"parse",
"is",
"the",
"base",
"function",
"for",
"parsing",
"the",
"Dockerfile",
"and",
"extracting",
"elements",
"into",
"the",
"correct",
"data",
"structures",
".",
"Everything",
"is",
"parsed",
"into",
"lists",
"or",
"dictionaries",
"that",
"can",
"be",
"assembled",
"again",
"on",
"demand",
"."
]
| python | train | 30.814815 |
openai/baselines | baselines/common/mpi_util.py | https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/common/mpi_util.py#L69-L85 | def share_file(comm, path):
"""
Copies the file from rank 0 to all other ranks
Puts it in the same place on all machines
"""
localrank, _ = get_local_rank_size(comm)
if comm.Get_rank() == 0:
with open(path, 'rb') as fh:
data = fh.read()
comm.bcast(data)
else:
data = comm.bcast(None)
if localrank == 0:
os.makedirs(os.path.dirname(path), exist_ok=True)
with open(path, 'wb') as fh:
fh.write(data)
comm.Barrier() | [
"def",
"share_file",
"(",
"comm",
",",
"path",
")",
":",
"localrank",
",",
"_",
"=",
"get_local_rank_size",
"(",
"comm",
")",
"if",
"comm",
".",
"Get_rank",
"(",
")",
"==",
"0",
":",
"with",
"open",
"(",
"path",
",",
"'rb'",
")",
"as",
"fh",
":",
"data",
"=",
"fh",
".",
"read",
"(",
")",
"comm",
".",
"bcast",
"(",
"data",
")",
"else",
":",
"data",
"=",
"comm",
".",
"bcast",
"(",
"None",
")",
"if",
"localrank",
"==",
"0",
":",
"os",
".",
"makedirs",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"path",
")",
",",
"exist_ok",
"=",
"True",
")",
"with",
"open",
"(",
"path",
",",
"'wb'",
")",
"as",
"fh",
":",
"fh",
".",
"write",
"(",
"data",
")",
"comm",
".",
"Barrier",
"(",
")"
]
| Copies the file from rank 0 to all other ranks
Puts it in the same place on all machines | [
"Copies",
"the",
"file",
"from",
"rank",
"0",
"to",
"all",
"other",
"ranks",
"Puts",
"it",
"in",
"the",
"same",
"place",
"on",
"all",
"machines"
]
| python | valid | 30.058824 |
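
Run under mpirun; rank 0's copy of the file wins and lands at the same path on every node. A launch sketch assuming mpi4py and a hypothetical path:

    # mpirun -n 4 python share_weights.py
    import os
    from mpi4py import MPI

    comm = MPI.COMM_WORLD
    if comm.Get_rank() == 0:
        os.makedirs("/tmp/shared", exist_ok=True)
        with open("/tmp/shared/model.bin", "wb") as fh:  # hypothetical path
            fh.write(b"weights")
    comm.Barrier()
    # share_file as defined above (baselines.common.mpi_util).
    share_file(comm, "/tmp/shared/model.bin")  # identical everywhere afterwards
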
vijaykatam/django-cache-manager | django_cache_manager/models.py | https://github.com/vijaykatam/django-cache-manager/blob/05142c44eb349d3f24f962592945888d9d367375/django_cache_manager/models.py#L21-L26 | def update_model_cache(table_name):
"""
Updates model cache by generating a new key for the model
"""
model_cache_info = ModelCacheInfo(table_name, uuid.uuid4().hex)
model_cache_backend.share_model_cache_info(model_cache_info) | [
"def",
"update_model_cache",
"(",
"table_name",
")",
":",
"model_cache_info",
"=",
"ModelCacheInfo",
"(",
"table_name",
",",
"uuid",
".",
"uuid4",
"(",
")",
".",
"hex",
")",
"model_cache_backend",
".",
"share_model_cache_info",
"(",
"model_cache_info",
")"
]
| Updates model cache by generating a new key for the model | [
"Updates",
"model",
"cache",
"by",
"generating",
"a",
"new",
"key",
"for",
"the",
"model"
]
| python | train | 40.166667 |
kejbaly2/metrique | metrique/plotting.py | https://github.com/kejbaly2/metrique/blob/a10b076097441b7dde687949139f702f5c1e1b35/metrique/plotting.py#L215-L244 | def plot(self, series, series_diff=None, label='', color=None, style=None):
'''
:param pandas.Series series:
The series to be plotted, all values must be positive if stacked
is True.
:param pandas.Series series_diff:
The series representing the diff that will be plotted in the
bottom part.
:param string label:
The label for the series.
:param integer/string color:
Color for the plot. Can be an index for the color from COLORS
or a key(string) from CNAMES.
:param string style:
Style forwarded to the plt.plot.
'''
color = self.get_color(color)
if series_diff is None and self.autodiffs:
series_diff = series.diff()
if self.stacked:
series += self.running_sum
self.ax1.fill_between(series.index, self.running_sum, series,
facecolor=ALPHAS[color])
self.running_sum = series
self.ax1.set_ylim(bottom=0, top=int(series.max() * 1.05))
series.plot(label=label, c=COLORS[color], linewidth=2, style=style,
ax=self.ax1)
if series_diff is not None:
series_diff.plot(label=label, c=COLORS[color], linewidth=2,
style=style, ax=self.ax2) | [
"def",
"plot",
"(",
"self",
",",
"series",
",",
"series_diff",
"=",
"None",
",",
"label",
"=",
"''",
",",
"color",
"=",
"None",
",",
"style",
"=",
"None",
")",
":",
"color",
"=",
"self",
".",
"get_color",
"(",
"color",
")",
"if",
"series_diff",
"is",
"None",
"and",
"self",
".",
"autodiffs",
":",
"series_diff",
"=",
"series",
".",
"diff",
"(",
")",
"if",
"self",
".",
"stacked",
":",
"series",
"+=",
"self",
".",
"running_sum",
"self",
".",
"ax1",
".",
"fill_between",
"(",
"series",
".",
"index",
",",
"self",
".",
"running_sum",
",",
"series",
",",
"facecolor",
"=",
"ALPHAS",
"[",
"color",
"]",
")",
"self",
".",
"running_sum",
"=",
"series",
"self",
".",
"ax1",
".",
"set_ylim",
"(",
"bottom",
"=",
"0",
",",
"top",
"=",
"int",
"(",
"series",
".",
"max",
"(",
")",
"*",
"1.05",
")",
")",
"series",
".",
"plot",
"(",
"label",
"=",
"label",
",",
"c",
"=",
"COLORS",
"[",
"color",
"]",
",",
"linewidth",
"=",
"2",
",",
"style",
"=",
"style",
",",
"ax",
"=",
"self",
".",
"ax1",
")",
"if",
"series_diff",
"is",
"not",
"None",
":",
"series_diff",
".",
"plot",
"(",
"label",
"=",
"label",
",",
"c",
"=",
"COLORS",
"[",
"color",
"]",
",",
"linewidth",
"=",
"2",
",",
"style",
"=",
"style",
",",
"ax",
"=",
"self",
".",
"ax2",
")"
]
| :param pandas.Series series:
The series to be plotted, all values must be positive if stacked
is True.
:param pandas.Series series_diff:
The series representing the diff that will be plotted in the
bottom part.
:param string label:
The label for the series.
:param integer/string color:
Color for the plot. Can be an index for the color from COLORS
or a key(string) from CNAMES.
:param string style:
Style forwarded to the plt.plot. | [
":",
"param",
"pandas",
".",
"Series",
"series",
":",
"The",
"series",
"to",
"be",
"plotted",
"all",
"values",
"must",
"be",
"positive",
"if",
"stacked",
"is",
"True",
".",
":",
"param",
"pandas",
".",
"Series",
"series_diff",
":",
"The",
"series",
"representing",
"the",
"diff",
"that",
"will",
"be",
"plotted",
"in",
"the",
"bottom",
"part",
".",
":",
"param",
"string",
"label",
":",
"The",
"label",
"for",
"the",
"series",
".",
":",
"param",
"integer",
"/",
"string",
"color",
":",
"Color",
"for",
"the",
"plot",
".",
"Can",
"be",
"an",
"index",
"for",
"the",
"color",
"from",
"COLORS",
"or",
"a",
"key",
"(",
"string",
")",
"from",
"CNAMES",
".",
":",
"param",
"string",
"style",
":",
"Style",
"forwarded",
"to",
"the",
"plt",
".",
"plot",
"."
]
| python | train | 44.833333 |
noahbenson/neuropythy | neuropythy/graphics/core.py | https://github.com/noahbenson/neuropythy/blob/b588889f6db36ddb9602ae4a72c1c0d3f41586b2/neuropythy/graphics/core.py#L630-L638 | def apply_cmap(zs, cmap, vmin=None, vmax=None):
'''
    apply_cmap(z, cmap) applies the given cmap to the values in z; if vmin and/or vmax are passed,
they are used to scale z.
'''
if vmin is None: vmin = np.min(zs)
if vmax is None: vmax = np.max(zs)
if pimms.is_str(cmap): cmap = matplotlib.cm.get_cmap(cmap)
return cmap((zs - vmin) / (vmax - vmin)) | [
"def",
"apply_cmap",
"(",
"zs",
",",
"cmap",
",",
"vmin",
"=",
"None",
",",
"vmax",
"=",
"None",
")",
":",
"if",
"vmin",
"is",
"None",
":",
"vmin",
"=",
"np",
".",
"min",
"(",
"zs",
")",
"if",
"vmax",
"is",
"None",
":",
"vmax",
"=",
"np",
".",
"max",
"(",
"zs",
")",
"if",
"pimms",
".",
"is_str",
"(",
"cmap",
")",
":",
"cmap",
"=",
"matplotlib",
".",
"cm",
".",
"get_cmap",
"(",
"cmap",
")",
"return",
"cmap",
"(",
"(",
"zs",
"-",
"vmin",
")",
"/",
"(",
"vmax",
"-",
"vmin",
")",
")"
]
| apply_cmap(z, cmap) applies the given cmap to the values in z; if vmin and/or vmax are passed,
they are used to scale z. | [
"apply_cmap",
"(",
"z",
"cmap",
")",
"applies",
"the",
"given",
"cmap",
"to",
"the",
"values",
"in",
"z",
";",
"if",
"vmin",
"and",
"/",
"or",
"vmad",
"are",
"passed",
"they",
"are",
"used",
"to",
"scale",
"z",
"."
]
| python | train | 41.333333 |
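
A quick check of the mapping with the function above in scope (string colormap names are resolved through matplotlib, as shown):

    import numpy as np

    zs = np.linspace(-3.0, 3.0, 5)
    rgba = apply_cmap(zs, 'viridis')
    print(rgba.shape)  # (5, 4): one RGBA row per input value

    # Explicit vmin/vmax rescale; out-of-range inputs clamp at the colormap ends.
    print(apply_cmap(zs, 'viridis', vmin=0.0, vmax=3.0)[0])
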
BerkeleyAutomation/autolab_core | autolab_core/logger.py | https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/logger.py#L128-L148 | def add_log_file(logger, log_file, global_log_file=False):
"""
Add a log file to this logger. If global_log_file is true, log_file will be handed the root logger, otherwise it will only be used by this particular logger.
Parameters
----------
logger :obj:`logging.Logger`
The logger.
log_file :obj:`str`
The path to the log file to log to.
global_log_file :obj:`bool`
Whether or not to use the given log_file for this particular logger or for the root logger.
"""
if global_log_file:
add_root_log_file(log_file)
else:
hdlr = logging.FileHandler(log_file)
formatter = logging.Formatter('%(asctime)s %(name)-10s %(levelname)-8s %(message)s', datefmt='%m-%d %H:%M:%S')
hdlr.setFormatter(formatter)
logger.addHandler(hdlr) | [
"def",
"add_log_file",
"(",
"logger",
",",
"log_file",
",",
"global_log_file",
"=",
"False",
")",
":",
"if",
"global_log_file",
":",
"add_root_log_file",
"(",
"log_file",
")",
"else",
":",
"hdlr",
"=",
"logging",
".",
"FileHandler",
"(",
"log_file",
")",
"formatter",
"=",
"logging",
".",
"Formatter",
"(",
"'%(asctime)s %(name)-10s %(levelname)-8s %(message)s'",
",",
"datefmt",
"=",
"'%m-%d %H:%M:%S'",
")",
"hdlr",
".",
"setFormatter",
"(",
"formatter",
")",
"logger",
".",
"addHandler",
"(",
"hdlr",
")"
]
| Add a log file to this logger. If global_log_file is true, log_file will be handed the root logger, otherwise it will only be used by this particular logger.
Parameters
----------
logger :obj:`logging.Logger`
The logger.
log_file :obj:`str`
The path to the log file to log to.
global_log_file :obj:`bool`
Whether or not to use the given log_file for this particular logger or for the root logger. | [
"Add",
"a",
"log",
"file",
"to",
"this",
"logger",
".",
"If",
"global_log_file",
"is",
"true",
"log_file",
"will",
"be",
"handed",
"the",
"root",
"logger",
"otherwise",
"it",
"will",
"only",
"be",
"used",
"by",
"this",
"particular",
"logger",
"."
]
| python | train | 41.714286 |
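
Typical use with the helper above in scope (the log path is hypothetical); with global_log_file=True the handler goes on the root logger instead, so every module shares the file:

    import logging

    logger = logging.getLogger("grasping")
    add_log_file(logger, "/tmp/grasping.log")      # attaches to this logger only
    logger.warning("calibration drift detected")   # record lands in the file
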
iopipe/iopipe-python | iopipe/agent.py | https://github.com/iopipe/iopipe-python/blob/4eb653977341bc67f8b1b87aedb3aaaefc25af61/iopipe/agent.py#L222-L244 | def run_hooks(self, name, event=None, context=None):
"""
Runs plugin hooks for each registered plugin.
"""
hooks = {
"pre:setup": lambda p: p.pre_setup(self),
"post:setup": lambda p: p.post_setup(self),
"pre:invoke": lambda p: p.pre_invoke(event, context),
"post:invoke": lambda p: p.post_invoke(event, context),
"pre:report": lambda p: p.pre_report(self.report),
"post:report": lambda p: p.post_report(self.report),
}
if name in hooks:
for p in self.plugins:
if p.enabled:
try:
hooks[name](p)
except Exception as e:
logger.error(
"IOpipe plugin %s hook raised error" % (name, str(e))
)
logger.exception(e) | [
"def",
"run_hooks",
"(",
"self",
",",
"name",
",",
"event",
"=",
"None",
",",
"context",
"=",
"None",
")",
":",
"hooks",
"=",
"{",
"\"pre:setup\"",
":",
"lambda",
"p",
":",
"p",
".",
"pre_setup",
"(",
"self",
")",
",",
"\"post:setup\"",
":",
"lambda",
"p",
":",
"p",
".",
"post_setup",
"(",
"self",
")",
",",
"\"pre:invoke\"",
":",
"lambda",
"p",
":",
"p",
".",
"pre_invoke",
"(",
"event",
",",
"context",
")",
",",
"\"post:invoke\"",
":",
"lambda",
"p",
":",
"p",
".",
"post_invoke",
"(",
"event",
",",
"context",
")",
",",
"\"pre:report\"",
":",
"lambda",
"p",
":",
"p",
".",
"pre_report",
"(",
"self",
".",
"report",
")",
",",
"\"post:report\"",
":",
"lambda",
"p",
":",
"p",
".",
"post_report",
"(",
"self",
".",
"report",
")",
",",
"}",
"if",
"name",
"in",
"hooks",
":",
"for",
"p",
"in",
"self",
".",
"plugins",
":",
"if",
"p",
".",
"enabled",
":",
"try",
":",
"hooks",
"[",
"name",
"]",
"(",
"p",
")",
"except",
"Exception",
"as",
"e",
":",
"logger",
".",
"error",
"(",
"\"IOpipe plugin %s hook raised error\"",
"%",
"(",
"name",
",",
"str",
"(",
"e",
")",
")",
")",
"logger",
".",
"exception",
"(",
"e",
")"
]
| Runs plugin hooks for each registered plugin. | [
"Runs",
"plugin",
"hooks",
"for",
"each",
"registered",
"plugin",
"."
]
| python | train | 39 |
signetlabdei/sem | sem/database.py | https://github.com/signetlabdei/sem/blob/5077dd7a6d15644a18790bb6fde320e905f0fef0/sem/database.py#L180-L186 | def get_next_rngruns(self):
"""
Yield the next RngRun values that can be used in this campaign.
"""
available_runs = [result['params']['RngRun'] for result in
self.get_results()]
yield from DatabaseManager.get_next_values(available_runs) | [
"def",
"get_next_rngruns",
"(",
"self",
")",
":",
"available_runs",
"=",
"[",
"result",
"[",
"'params'",
"]",
"[",
"'RngRun'",
"]",
"for",
"result",
"in",
"self",
".",
"get_results",
"(",
")",
"]",
"yield",
"from",
"DatabaseManager",
".",
"get_next_values",
"(",
"available_runs",
")"
]
| Yield the next RngRun values that can be used in this campaign. | [
"Yield",
"the",
"next",
"RngRun",
"values",
"that",
"can",
"be",
"used",
"in",
"this",
"campaign",
"."
]
| python | train | 42.428571 |
StackStorm/pybind | pybind/slxos/v17s_1_02/overlay_gateway/map_/__init__.py | https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/slxos/v17s_1_02/overlay_gateway/map_/__init__.py#L162-L183 | def _set_vlan_and_bd(self, v, load=False):
"""
Setter method for vlan_and_bd, mapped from YANG variable /overlay_gateway/map/vlan_and_bd (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_vlan_and_bd is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_vlan_and_bd() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=vlan_and_bd.vlan_and_bd, is_container='container', presence=False, yang_name="vlan-and-bd", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Specify VLAN/BD to VNI mappings for the Overlay Gateway.', u'cli-drop-node-name': None, u'callpoint': u'autoVlanToVNIMappingCallPoint', u'cli-incomplete-no': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-tunnels', defining_module='brocade-tunnels', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """vlan_and_bd must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=vlan_and_bd.vlan_and_bd, is_container='container', presence=False, yang_name="vlan-and-bd", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Specify VLAN/BD to VNI mappings for the Overlay Gateway.', u'cli-drop-node-name': None, u'callpoint': u'autoVlanToVNIMappingCallPoint', u'cli-incomplete-no': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-tunnels', defining_module='brocade-tunnels', yang_type='container', is_config=True)""",
})
self.__vlan_and_bd = t
if hasattr(self, '_set'):
self._set() | [
"def",
"_set_vlan_and_bd",
"(",
"self",
",",
"v",
",",
"load",
"=",
"False",
")",
":",
"if",
"hasattr",
"(",
"v",
",",
"\"_utype\"",
")",
":",
"v",
"=",
"v",
".",
"_utype",
"(",
"v",
")",
"try",
":",
"t",
"=",
"YANGDynClass",
"(",
"v",
",",
"base",
"=",
"vlan_and_bd",
".",
"vlan_and_bd",
",",
"is_container",
"=",
"'container'",
",",
"presence",
"=",
"False",
",",
"yang_name",
"=",
"\"vlan-and-bd\"",
",",
"rest_name",
"=",
"\"\"",
",",
"parent",
"=",
"self",
",",
"path_helper",
"=",
"self",
".",
"_path_helper",
",",
"extmethods",
"=",
"self",
".",
"_extmethods",
",",
"register_paths",
"=",
"True",
",",
"extensions",
"=",
"{",
"u'tailf-common'",
":",
"{",
"u'info'",
":",
"u'Specify VLAN/BD to VNI mappings for the Overlay Gateway.'",
",",
"u'cli-drop-node-name'",
":",
"None",
",",
"u'callpoint'",
":",
"u'autoVlanToVNIMappingCallPoint'",
",",
"u'cli-incomplete-no'",
":",
"None",
",",
"u'cli-incomplete-command'",
":",
"None",
"}",
"}",
",",
"namespace",
"=",
"'urn:brocade.com:mgmt:brocade-tunnels'",
",",
"defining_module",
"=",
"'brocade-tunnels'",
",",
"yang_type",
"=",
"'container'",
",",
"is_config",
"=",
"True",
")",
"except",
"(",
"TypeError",
",",
"ValueError",
")",
":",
"raise",
"ValueError",
"(",
"{",
"'error-string'",
":",
"\"\"\"vlan_and_bd must be of a type compatible with container\"\"\"",
",",
"'defined-type'",
":",
"\"container\"",
",",
"'generated-type'",
":",
"\"\"\"YANGDynClass(base=vlan_and_bd.vlan_and_bd, is_container='container', presence=False, yang_name=\"vlan-and-bd\", rest_name=\"\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Specify VLAN/BD to VNI mappings for the Overlay Gateway.', u'cli-drop-node-name': None, u'callpoint': u'autoVlanToVNIMappingCallPoint', u'cli-incomplete-no': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-tunnels', defining_module='brocade-tunnels', yang_type='container', is_config=True)\"\"\"",
",",
"}",
")",
"self",
".",
"__vlan_and_bd",
"=",
"t",
"if",
"hasattr",
"(",
"self",
",",
"'_set'",
")",
":",
"self",
".",
"_set",
"(",
")"
]
| Setter method for vlan_and_bd, mapped from YANG variable /overlay_gateway/map/vlan_and_bd (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_vlan_and_bd is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_vlan_and_bd() directly. | [
"Setter",
"method",
"for",
"vlan_and_bd",
"mapped",
"from",
"YANG",
"variable",
"/",
"overlay_gateway",
"/",
"map",
"/",
"vlan_and_bd",
"(",
"container",
")",
"If",
"this",
"variable",
"is",
"read",
"-",
"only",
"(",
"config",
":",
"false",
")",
"in",
"the",
"source",
"YANG",
"file",
"then",
"_set_vlan_and_bd",
"is",
"considered",
"as",
"a",
"private",
"method",
".",
"Backends",
"looking",
"to",
"populate",
"this",
"variable",
"should",
"do",
"so",
"via",
"calling",
"thisObj",
".",
"_set_vlan_and_bd",
"()",
"directly",
"."
]
| python | train | 88.136364 |
radjkarl/fancyTools | fancytools/pystructure/GetCallablesInPackage.py | https://github.com/radjkarl/fancyTools/blob/4c4d961003dc4ed6e46429a0c24f7e2bb52caa8b/fancytools/pystructure/GetCallablesInPackage.py#L67-L76 | def _cleanRecursive(self, subSelf):
"""
Delete all NestedOrderedDict that haven't any entries.
"""
for key, item in list(subSelf.items()):
if self.isNestedDict(item):
if not item:
subSelf.pop(key)
else:
self._cleanRecursive(item) | [
"def",
"_cleanRecursive",
"(",
"self",
",",
"subSelf",
")",
":",
"for",
"key",
",",
"item",
"in",
"list",
"(",
"subSelf",
".",
"items",
"(",
")",
")",
":",
"if",
"self",
".",
"isNestedDict",
"(",
"item",
")",
":",
"if",
"not",
"item",
":",
"subSelf",
".",
"pop",
"(",
"key",
")",
"else",
":",
"self",
".",
"_cleanRecursive",
"(",
"item",
")"
]
| Delete all NestedOrderedDict that haven't any entries. | [
"Delete",
"all",
"NestedOrderedDict",
"that",
"haven",
"t",
"any",
"entries",
"."
]
| python | train | 33.6 |
bitprophet/ssh | ssh/channel.py | https://github.com/bitprophet/ssh/blob/e8bdad4c82a50158a749233dca58c29e47c60b76/ssh/channel.py#L242-L267 | def resize_pty(self, width=80, height=24):
"""
Resize the pseudo-terminal. This can be used to change the width and
height of the terminal emulation created in a previous L{get_pty} call.
@param width: new width (in characters) of the terminal screen
@type width: int
@param height: new height (in characters) of the terminal screen
@type height: int
@raise SSHException: if the request was rejected or the channel was
closed
"""
if self.closed or self.eof_received or self.eof_sent or not self.active:
raise SSHException('Channel is not open')
m = Message()
m.add_byte(chr(MSG_CHANNEL_REQUEST))
m.add_int(self.remote_chanid)
m.add_string('window-change')
m.add_boolean(True)
m.add_int(width)
m.add_int(height)
m.add_int(0).add_int(0)
self._event_pending()
self.transport._send_user_message(m)
self._wait_for_event() | [
"def",
"resize_pty",
"(",
"self",
",",
"width",
"=",
"80",
",",
"height",
"=",
"24",
")",
":",
"if",
"self",
".",
"closed",
"or",
"self",
".",
"eof_received",
"or",
"self",
".",
"eof_sent",
"or",
"not",
"self",
".",
"active",
":",
"raise",
"SSHException",
"(",
"'Channel is not open'",
")",
"m",
"=",
"Message",
"(",
")",
"m",
".",
"add_byte",
"(",
"chr",
"(",
"MSG_CHANNEL_REQUEST",
")",
")",
"m",
".",
"add_int",
"(",
"self",
".",
"remote_chanid",
")",
"m",
".",
"add_string",
"(",
"'window-change'",
")",
"m",
".",
"add_boolean",
"(",
"True",
")",
"m",
".",
"add_int",
"(",
"width",
")",
"m",
".",
"add_int",
"(",
"height",
")",
"m",
".",
"add_int",
"(",
"0",
")",
".",
"add_int",
"(",
"0",
")",
"self",
".",
"_event_pending",
"(",
")",
"self",
".",
"transport",
".",
"_send_user_message",
"(",
"m",
")",
"self",
".",
"_wait_for_event",
"(",
")"
]
| Resize the pseudo-terminal. This can be used to change the width and
height of the terminal emulation created in a previous L{get_pty} call.
@param width: new width (in characters) of the terminal screen
@type width: int
@param height: new height (in characters) of the terminal screen
@type height: int
@raise SSHException: if the request was rejected or the channel was
closed | [
"Resize",
"the",
"pseudo",
"-",
"terminal",
".",
"This",
"can",
"be",
"used",
"to",
"change",
"the",
"width",
"and",
"height",
"of",
"the",
"terminal",
"emulation",
"created",
"in",
"a",
"previous",
"L",
"{",
"get_pty",
"}",
"call",
"."
]
| python | train | 37.923077 |
QInfer/python-qinfer | src/qinfer/distributions.py | https://github.com/QInfer/python-qinfer/blob/8170c84a0be1723f8c6b09e0d3c7a40a886f1fe3/src/qinfer/distributions.py#L616-L641 | def region_est_hull(self, level=0.95, modelparam_slice=None):
"""
Estimates a credible region over models by taking the convex hull of
a credible subset of particles.
:param float level: The desired crediblity level (see
:meth:`SMCUpdater.est_credible_region`).
:param slice modelparam_slice: Slice over which model parameters
to consider.
:return: The tuple ``(faces, vertices)`` where ``faces`` describes all the
vertices of all of the faces on the exterior of the convex hull, and
``vertices`` is a list of all vertices on the exterior of the
convex hull.
:rtype: ``faces`` is a ``numpy.ndarray`` with shape
        ``(n_face, n_mps, n_mps)`` and indices ``(idx_face, idx_vertex, idx_mps)``
        where ``n_mps`` corresponds to the size of ``modelparam_slice``.
        ``vertices`` is a ``numpy.ndarray`` of shape ``(n_vertices, n_mps)``.
"""
points = self.est_credible_region(
level=level,
modelparam_slice=modelparam_slice
)
hull = ConvexHull(points)
return points[hull.simplices], points[u.uniquify(hull.vertices.flatten())] | [
"def",
"region_est_hull",
"(",
"self",
",",
"level",
"=",
"0.95",
",",
"modelparam_slice",
"=",
"None",
")",
":",
"points",
"=",
"self",
".",
"est_credible_region",
"(",
"level",
"=",
"level",
",",
"modelparam_slice",
"=",
"modelparam_slice",
")",
"hull",
"=",
"ConvexHull",
"(",
"points",
")",
"return",
"points",
"[",
"hull",
".",
"simplices",
"]",
",",
"points",
"[",
"u",
".",
"uniquify",
"(",
"hull",
".",
"vertices",
".",
"flatten",
"(",
")",
")",
"]"
]
| Estimates a credible region over models by taking the convex hull of
a credible subset of particles.
:param float level: The desired crediblity level (see
:meth:`SMCUpdater.est_credible_region`).
:param slice modelparam_slice: Slice over which model parameters
to consider.
:return: The tuple ``(faces, vertices)`` where ``faces`` describes all the
vertices of all of the faces on the exterior of the convex hull, and
``vertices`` is a list of all vertices on the exterior of the
convex hull.
:rtype: ``faces`` is a ``numpy.ndarray`` with shape
``(n_face, n_mps, n_mps)`` and indeces ``(idx_face, idx_vertex, idx_mps)``
where ``n_mps`` corresponds to the size of ``modelparam_slice``.
``vertices`` is an ``numpy.ndarray`` of shape ``(n_vertices, n_mps)``. | [
"Estimates",
"a",
"credible",
"region",
"over",
"models",
"by",
"taking",
"the",
"convex",
"hull",
"of",
"a",
"credible",
"subset",
"of",
"particles",
"."
]
| python | train | 46.346154 |
vburenin/xjpath | xjpath/xjpath.py | https://github.com/vburenin/xjpath/blob/98a19fd6e6d0bcdc5ecbd3651ffa8915f06d7d44/xjpath/xjpath.py#L247-L264 | def _split_path(xj_path):
"""Extract the last piece of XJPath.
    :param str xj_path: An XJPath expression.
    :rtype: tuple[str|None, str]
    :return: A tuple where the first element is the root XJPath and the second is
    the last piece of the key.
"""
res = xj_path.rsplit('.', 1)
root_key = res[0]
if len(res) > 1:
return root_key, res[1]
else:
if root_key and root_key != '.':
return None, root_key
else:
raise XJPathError('Path cannot be empty', (xj_path,)) | [
"def",
"_split_path",
"(",
"xj_path",
")",
":",
"res",
"=",
"xj_path",
".",
"rsplit",
"(",
"'.'",
",",
"1",
")",
"root_key",
"=",
"res",
"[",
"0",
"]",
"if",
"len",
"(",
"res",
")",
">",
"1",
":",
"return",
"root_key",
",",
"res",
"[",
"1",
"]",
"else",
":",
"if",
"root_key",
"and",
"root_key",
"!=",
"'.'",
":",
"return",
"None",
",",
"root_key",
"else",
":",
"raise",
"XJPathError",
"(",
"'Path cannot be empty'",
",",
"(",
"xj_path",
",",
")",
")"
]
| Extract the last piece of XJPath.
:param str xj_path: A XJPath expression.
:rtype: tuple[str|None, str]
:return: A tuple where first element is a root XJPath and the second is
a last piece of key. | [
"Extract",
"the",
"last",
"piece",
"of",
"XJPath",
"."
]
| python | train | 28.888889 |
SmokinCaterpillar/pypet | pypet/environment.py | https://github.com/SmokinCaterpillar/pypet/blob/97ad3e80d46dbdea02deeb98ea41f05a19565826/pypet/environment.py#L110-L114 | def _configure_pool(kwargs):
"""Configures the pool and keeps the storage service"""
_pool_single_run.storage_service = kwargs['storage_service']
_configure_niceness(kwargs)
_configure_logging(kwargs, extract=False) | [
"def",
"_configure_pool",
"(",
"kwargs",
")",
":",
"_pool_single_run",
".",
"storage_service",
"=",
"kwargs",
"[",
"'storage_service'",
"]",
"_configure_niceness",
"(",
"kwargs",
")",
"_configure_logging",
"(",
"kwargs",
",",
"extract",
"=",
"False",
")"
]
| Configures the pool and keeps the storage service | [
"Configures",
"the",
"pool",
"and",
"keeps",
"the",
"storage",
"service"
]
| python | test | 45.4 |
insightindustry/validator-collection | validator_collection/validators.py | https://github.com/insightindustry/validator-collection/blob/8c8047a0fa36cc88a021771279898278c4cc98e3/validator_collection/validators.py#L1543-L1594 | def _numeric_coercion(value,
coercion_function = None,
allow_empty = False,
minimum = None,
maximum = None):
"""Validate that ``value`` is numeric and coerce using ``coercion_function``.
:param value: The value to validate.
:param coercion_function: The function to use to coerce ``value`` to the desired
type.
:type coercion_function: callable
:param allow_empty: If ``True``, returns :obj:`None <python:None>` if
``value`` is :obj:`None <python:None>`. If ``False``, raises a
:class:`EmptyValueError <validator_collection.errors.EmptyValueError>` if
``value`` is :obj:`None <python:None>`. Defaults to ``False``.
:type allow_empty: :class:`bool <python:bool>`
:returns: ``value`` / :obj:`None <python:None>`
:rtype: the type returned by ``coercion_function``
:raises CoercionFunctionEmptyError: if ``coercion_function`` is empty
:raises EmptyValueError: if ``value`` is :obj:`None <python:None>` and
``allow_empty`` is ``False``
:raises CannotCoerceError: if ``coercion_function`` raises an
:class:`ValueError <python:ValueError>`, :class:`TypeError <python:TypeError>`,
:class:`AttributeError <python:AttributeError>`,
    :class:`IndexError <python:IndexError>`, or
:class:`SyntaxError <python:SyntaxError>`
"""
if coercion_function is None:
raise errors.CoercionFunctionEmptyError('coercion_function cannot be empty')
elif not hasattr(coercion_function, '__call__'):
raise errors.NotCallableError('coercion_function must be callable')
value = numeric(value, # pylint: disable=E1123
allow_empty = allow_empty,
minimum = minimum,
maximum = maximum,
force_run = True)
if value is not None:
try:
value = coercion_function(value)
except (ValueError, TypeError, AttributeError, IndexError, SyntaxError):
raise errors.CannotCoerceError(
'cannot coerce value (%s) to desired type' % value
)
return value | [
"def",
"_numeric_coercion",
"(",
"value",
",",
"coercion_function",
"=",
"None",
",",
"allow_empty",
"=",
"False",
",",
"minimum",
"=",
"None",
",",
"maximum",
"=",
"None",
")",
":",
"if",
"coercion_function",
"is",
"None",
":",
"raise",
"errors",
".",
"CoercionFunctionEmptyError",
"(",
"'coercion_function cannot be empty'",
")",
"elif",
"not",
"hasattr",
"(",
"coercion_function",
",",
"'__call__'",
")",
":",
"raise",
"errors",
".",
"NotCallableError",
"(",
"'coercion_function must be callable'",
")",
"value",
"=",
"numeric",
"(",
"value",
",",
"# pylint: disable=E1123",
"allow_empty",
"=",
"allow_empty",
",",
"minimum",
"=",
"minimum",
",",
"maximum",
"=",
"maximum",
",",
"force_run",
"=",
"True",
")",
"if",
"value",
"is",
"not",
"None",
":",
"try",
":",
"value",
"=",
"coercion_function",
"(",
"value",
")",
"except",
"(",
"ValueError",
",",
"TypeError",
",",
"AttributeError",
",",
"IndexError",
",",
"SyntaxError",
")",
":",
"raise",
"errors",
".",
"CannotCoerceError",
"(",
"'cannot coerce value (%s) to desired type'",
"%",
"value",
")",
"return",
"value"
]
| Validate that ``value`` is numeric and coerce using ``coercion_function``.
:param value: The value to validate.
:param coercion_function: The function to use to coerce ``value`` to the desired
type.
:type coercion_function: callable
:param allow_empty: If ``True``, returns :obj:`None <python:None>` if
``value`` is :obj:`None <python:None>`. If ``False``, raises a
:class:`EmptyValueError <validator_collection.errors.EmptyValueError>` if
``value`` is :obj:`None <python:None>`. Defaults to ``False``.
:type allow_empty: :class:`bool <python:bool>`
:returns: ``value`` / :obj:`None <python:None>`
:rtype: the type returned by ``coercion_function``
:raises CoercionFunctionEmptyError: if ``coercion_function`` is empty
:raises EmptyValueError: if ``value`` is :obj:`None <python:None>` and
``allow_empty`` is ``False``
:raises CannotCoerceError: if ``coercion_function`` raises an
:class:`ValueError <python:ValueError>`, :class:`TypeError <python:TypeError>`,
:class:`AttributeError <python:AttributeError>`,
    :class:`IndexError <python:IndexError>`, or
:class:`SyntaxError <python:SyntaxError>` | [
"Validate",
"that",
"value",
"is",
"numeric",
"and",
"coerce",
"using",
"coercion_function",
"."
]
| python | train | 41.942308 |
scour-project/scour | scour/scour.py | https://github.com/scour-project/scour/blob/049264eba6b1a54ae5ba1d6a5077d8e7b80e8835/scour/scour.py#L857-L872 | def removeUnreferencedIDs(referencedIDs, identifiedElements):
"""
Removes the unreferenced ID attributes.
Returns the number of ID attributes removed
"""
global _num_ids_removed
keepTags = ['font']
num = 0
for id in identifiedElements:
node = identifiedElements[id]
if id not in referencedIDs and node.nodeName not in keepTags:
node.removeAttribute('id')
_num_ids_removed += 1
num += 1
return num | [
"def",
"removeUnreferencedIDs",
"(",
"referencedIDs",
",",
"identifiedElements",
")",
":",
"global",
"_num_ids_removed",
"keepTags",
"=",
"[",
"'font'",
"]",
"num",
"=",
"0",
"for",
"id",
"in",
"identifiedElements",
":",
"node",
"=",
"identifiedElements",
"[",
"id",
"]",
"if",
"id",
"not",
"in",
"referencedIDs",
"and",
"node",
".",
"nodeName",
"not",
"in",
"keepTags",
":",
"node",
".",
"removeAttribute",
"(",
"'id'",
")",
"_num_ids_removed",
"+=",
"1",
"num",
"+=",
"1",
"return",
"num"
]
| Removes the unreferenced ID attributes.
Returns the number of ID attributes removed | [
"Removes",
"the",
"unreferenced",
"ID",
"attributes",
"."
]
| python | train | 29.375 |
ARMmbed/icetea | icetea_lib/build/build.py | https://github.com/ARMmbed/icetea/blob/b2b97ac607429830cf7d62dae2e3903692c7c778/icetea_lib/build/build.py#L152-L161 | def _load(self):
"""
Function load.
:return: file contents
:raises: NotFoundError if file not found
"""
if self.is_exists():
return open(self._ref, "rb").read()
raise NotFoundError("File %s not found" % self._ref) | [
"def",
"_load",
"(",
"self",
")",
":",
"if",
"self",
".",
"is_exists",
"(",
")",
":",
"return",
"open",
"(",
"self",
".",
"_ref",
",",
"\"rb\"",
")",
".",
"read",
"(",
")",
"raise",
"NotFoundError",
"(",
"\"File %s not found\"",
"%",
"self",
".",
"_ref",
")"
]
| Function load.
:return: file contents
:raises: NotFoundError if file not found | [
"Function",
"load",
"."
]
| python | train | 27.3 |
acutesoftware/AIKIF | aikif/agents/explore/agent_explore_grid.py | https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/agents/explore/agent_explore_grid.py#L43-L95 | def do_your_job(self):
"""
the goal of the explore agent is to move to the
target while avoiding blockages on the grid.
This function is messy and needs to be looked at.
It currently has a bug in that the backtrack oscillates
so need a new method of doing this - probably checking if
previously backtracked in that direction for those coords, ie
keep track of cells visited and number of times visited?
"""
y,x = self.get_intended_direction() # first find out where we should go
if self.target_x == self.current_x and self.target_y == self.current_y:
#print(self.name + " : TARGET ACQUIRED")
if len(self.results) == 0:
self.results.append("TARGET ACQUIRED")
self.lg_mv(2, self.name + ": TARGET ACQUIRED" )
return
self.num_steps += 1
# first try is to move on the x axis in a simple greedy search
accessible = ['\\', '-', '|', '/', '.']
# randomly move in Y direction instead of X if all paths clear
if y != 0 and x != 0 and self.backtrack == [0,0]:
if random.randint(1,10) > 6:
if self.grd.get_tile(self.current_y + y, self.current_x) in accessible:
self.current_y += y
self.lg_mv(3, self.name + ": randomly moving Y axis " + str(self.num_steps) )
return
if x == 1:
if self.grd.get_tile(self.current_y, self.current_x + 1) in accessible:
self.current_x += 1
self.lg_mv(3, self.name + ": move# " + str(self.num_steps) + " - moving West" )
return
elif x == -1:
if self.grd.get_tile(self.current_y, self.current_x - 1) in accessible:
self.current_x -= 1
self.lg_mv(3, self.name + ": move# " + str(self.num_steps) + " - moving East" )
return
elif y == 1:
if self.grd.get_tile(self.current_y + 1, self.current_x) in accessible:
self.current_y += 1
self.lg_mv(3, self.name + ": move# " + str(self.num_steps) + " - moving South" )
return
elif y == -1:
if self.grd.get_tile(self.current_y - 1, self.current_x) in accessible:
self.current_y -= 1
self.lg_mv(3, self.name + ": move# " + str(self.num_steps) + " - moving North")
return
self.grd.set_tile(self.start_y, self.start_x, 'A')
self.grd.save(os.path.join(os.getcwd(), 'agent.txt')) | [
"def",
"do_your_job",
"(",
"self",
")",
":",
"y",
",",
"x",
"=",
"self",
".",
"get_intended_direction",
"(",
")",
"# first find out where we should go",
"if",
"self",
".",
"target_x",
"==",
"self",
".",
"current_x",
"and",
"self",
".",
"target_y",
"==",
"self",
".",
"current_y",
":",
"#print(self.name + \" : TARGET ACQUIRED\")",
"if",
"len",
"(",
"self",
".",
"results",
")",
"==",
"0",
":",
"self",
".",
"results",
".",
"append",
"(",
"\"TARGET ACQUIRED\"",
")",
"self",
".",
"lg_mv",
"(",
"2",
",",
"self",
".",
"name",
"+",
"\": TARGET ACQUIRED\"",
")",
"return",
"self",
".",
"num_steps",
"+=",
"1",
"# first try is to move on the x axis in a simple greedy search",
"accessible",
"=",
"[",
"'\\\\'",
",",
"'-'",
",",
"'|'",
",",
"'/'",
",",
"'.'",
"]",
"# randomly move in Y direction instead of X if all paths clear",
"if",
"y",
"!=",
"0",
"and",
"x",
"!=",
"0",
"and",
"self",
".",
"backtrack",
"==",
"[",
"0",
",",
"0",
"]",
":",
"if",
"random",
".",
"randint",
"(",
"1",
",",
"10",
")",
">",
"6",
":",
"if",
"self",
".",
"grd",
".",
"get_tile",
"(",
"self",
".",
"current_y",
"+",
"y",
",",
"self",
".",
"current_x",
")",
"in",
"accessible",
":",
"self",
".",
"current_y",
"+=",
"y",
"self",
".",
"lg_mv",
"(",
"3",
",",
"self",
".",
"name",
"+",
"\": randomly moving Y axis \"",
"+",
"str",
"(",
"self",
".",
"num_steps",
")",
")",
"return",
"if",
"x",
"==",
"1",
":",
"if",
"self",
".",
"grd",
".",
"get_tile",
"(",
"self",
".",
"current_y",
",",
"self",
".",
"current_x",
"+",
"1",
")",
"in",
"accessible",
":",
"self",
".",
"current_x",
"+=",
"1",
"self",
".",
"lg_mv",
"(",
"3",
",",
"self",
".",
"name",
"+",
"\": move# \"",
"+",
"str",
"(",
"self",
".",
"num_steps",
")",
"+",
"\" - moving West\"",
")",
"return",
"elif",
"x",
"==",
"-",
"1",
":",
"if",
"self",
".",
"grd",
".",
"get_tile",
"(",
"self",
".",
"current_y",
",",
"self",
".",
"current_x",
"-",
"1",
")",
"in",
"accessible",
":",
"self",
".",
"current_x",
"-=",
"1",
"self",
".",
"lg_mv",
"(",
"3",
",",
"self",
".",
"name",
"+",
"\": move# \"",
"+",
"str",
"(",
"self",
".",
"num_steps",
")",
"+",
"\" - moving East\"",
")",
"return",
"elif",
"y",
"==",
"1",
":",
"if",
"self",
".",
"grd",
".",
"get_tile",
"(",
"self",
".",
"current_y",
"+",
"1",
",",
"self",
".",
"current_x",
")",
"in",
"accessible",
":",
"self",
".",
"current_y",
"+=",
"1",
"self",
".",
"lg_mv",
"(",
"3",
",",
"self",
".",
"name",
"+",
"\": move# \"",
"+",
"str",
"(",
"self",
".",
"num_steps",
")",
"+",
"\" - moving South\"",
")",
"return",
"elif",
"y",
"==",
"-",
"1",
":",
"if",
"self",
".",
"grd",
".",
"get_tile",
"(",
"self",
".",
"current_y",
"-",
"1",
",",
"self",
".",
"current_x",
")",
"in",
"accessible",
":",
"self",
".",
"current_y",
"-=",
"1",
"self",
".",
"lg_mv",
"(",
"3",
",",
"self",
".",
"name",
"+",
"\": move# \"",
"+",
"str",
"(",
"self",
".",
"num_steps",
")",
"+",
"\" - moving North\"",
")",
"return",
"self",
".",
"grd",
".",
"set_tile",
"(",
"self",
".",
"start_y",
",",
"self",
".",
"start_x",
",",
"'A'",
")",
"self",
".",
"grd",
".",
"save",
"(",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"getcwd",
"(",
")",
",",
"'agent.txt'",
")",
")"
]
| the goal of the explore agent is to move to the
target while avoiding blockages on the grid.
This function is messy and needs to be looked at.
It currently has a bug in that the backtrack oscillates
so need a new method of doing this - probably checking if
previously backtracked in that direction for those coords, ie
keep track of cells visited and number of times visited? | [
"the",
"goal",
"of",
"the",
"explore",
"agent",
"is",
"to",
"move",
"to",
"the",
"target",
"while",
"avoiding",
"blockages",
"on",
"the",
"grid",
".",
"This",
"function",
"is",
"messy",
"and",
"needs",
"to",
"be",
"looked",
"at",
".",
"It",
"currently",
"has",
"a",
"bug",
"in",
"that",
"the",
"backtrack",
"oscillates",
"so",
"need",
"a",
"new",
"method",
"of",
"doing",
"this",
"-",
"probably",
"checking",
"if",
"previously",
"backtracked",
"in",
"that",
"direction",
"for",
"those",
"coords",
"ie",
"keep",
"track",
"of",
"cells",
"visited",
"and",
"number",
"of",
"times",
"visited?"
]
| python | train | 48.943396 |
tradenity/python-sdk | tradenity/resources/payment_card.py | https://github.com/tradenity/python-sdk/blob/d13fbe23f4d6ff22554c6d8d2deaf209371adaf1/tradenity/resources/payment_card.py#L558-L578 | def delete_payment_card_by_id(cls, payment_card_id, **kwargs):
"""Delete PaymentCard
Delete an instance of PaymentCard by its ID.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.delete_payment_card_by_id(payment_card_id, async=True)
>>> result = thread.get()
:param async bool
:param str payment_card_id: ID of paymentCard to delete. (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._delete_payment_card_by_id_with_http_info(payment_card_id, **kwargs)
else:
(data) = cls._delete_payment_card_by_id_with_http_info(payment_card_id, **kwargs)
return data | [
"def",
"delete_payment_card_by_id",
"(",
"cls",
",",
"payment_card_id",
",",
"*",
"*",
"kwargs",
")",
":",
"kwargs",
"[",
"'_return_http_data_only'",
"]",
"=",
"True",
"if",
"kwargs",
".",
"get",
"(",
"'async'",
")",
":",
"return",
"cls",
".",
"_delete_payment_card_by_id_with_http_info",
"(",
"payment_card_id",
",",
"*",
"*",
"kwargs",
")",
"else",
":",
"(",
"data",
")",
"=",
"cls",
".",
"_delete_payment_card_by_id_with_http_info",
"(",
"payment_card_id",
",",
"*",
"*",
"kwargs",
")",
"return",
"data"
]
| Delete PaymentCard
Delete an instance of PaymentCard by its ID.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.delete_payment_card_by_id(payment_card_id, async=True)
>>> result = thread.get()
:param async bool
:param str payment_card_id: ID of paymentCard to delete. (required)
:return: None
If the method is called asynchronously,
returns the request thread. | [
"Delete",
"PaymentCard"
]
| python | train | 43.666667 |
Karaage-Cluster/karaage | karaage/datastores/mam.py | https://github.com/Karaage-Cluster/karaage/blob/2f4c8b4e2d728b3fcbb151160c49000f1c04f5c9/karaage/datastores/mam.py#L372-L375 | def set_account_username(self, account, old_username, new_username):
""" Account's username was changed. """
self._delete_account(account, old_username)
self._save_account(account, new_username) | [
"def",
"set_account_username",
"(",
"self",
",",
"account",
",",
"old_username",
",",
"new_username",
")",
":",
"self",
".",
"_delete_account",
"(",
"account",
",",
"old_username",
")",
"self",
".",
"_save_account",
"(",
"account",
",",
"new_username",
")"
]
| Account's username was changed. | [
"Account",
"s",
"username",
"was",
"changed",
"."
]
| python | train | 53.75 |
fracpete/python-weka-wrapper3 | python/weka/flow/sink.py | https://github.com/fracpete/python-weka-wrapper3/blob/d850ab1bdb25fbd5a8d86e99f34a397975425838/python/weka/flow/sink.py#L185-L202 | def fix_config(self, options):
"""
Fixes the options, if necessary. I.e., it adds all required elements to the dictionary.
:param options: the options to fix
:type options: dict
:return: the (potentially) fixed options
:rtype: dict
"""
options = super(FileOutputSink, self).fix_config(options)
opt = "output"
if opt not in options:
options[opt] = "."
if opt not in self.help:
self.help[opt] = "The file to write to (string)."
return options | [
"def",
"fix_config",
"(",
"self",
",",
"options",
")",
":",
"options",
"=",
"super",
"(",
"FileOutputSink",
",",
"self",
")",
".",
"fix_config",
"(",
"options",
")",
"opt",
"=",
"\"output\"",
"if",
"opt",
"not",
"in",
"options",
":",
"options",
"[",
"opt",
"]",
"=",
"\".\"",
"if",
"opt",
"not",
"in",
"self",
".",
"help",
":",
"self",
".",
"help",
"[",
"opt",
"]",
"=",
"\"The file to write to (string).\"",
"return",
"options"
]
| Fixes the options, if necessary. I.e., it adds all required elements to the dictionary.
:param options: the options to fix
:type options: dict
:return: the (potentially) fixed options
:rtype: dict | [
"Fixes",
"the",
"options",
"if",
"necessary",
".",
"I",
".",
"e",
".",
"it",
"adds",
"all",
"required",
"elements",
"to",
"the",
"dictionary",
"."
]
| python | train | 30.333333 |
dlintott/gns3-converter | gns3converter/utils.py | https://github.com/dlintott/gns3-converter/blob/acbc55da51de86388dc5b5f6da55809b3c86b7ca/gns3converter/utils.py#L18-L31 | def fix_path(path):
"""
    Fix windows paths. Linux paths will remain unaltered
:param str path: The path to be fixed
:return: The fixed path
:rtype: str
"""
if '\\' in path:
path = path.replace('\\', '/')
path = os.path.normpath(path)
return path | [
"def",
"fix_path",
"(",
"path",
")",
":",
"if",
"'\\\\'",
"in",
"path",
":",
"path",
"=",
"path",
".",
"replace",
"(",
"'\\\\'",
",",
"'/'",
")",
"path",
"=",
"os",
".",
"path",
".",
"normpath",
"(",
"path",
")",
"return",
"path"
]
 | Fix windows paths. Linux paths will remain unaltered
:param str path: The path to be fixed
:return: The fixed path
:rtype: str | [
"Fix",
"windows",
"path",
"s",
".",
"Linux",
"path",
"s",
"will",
"remain",
"unaltered"
]
| python | train | 20 |
pyblish/pyblish-qml | pyblish_qml/control.py | https://github.com/pyblish/pyblish-qml/blob/6095d18b2ec0afd0409a9b1a17e53b0658887283/pyblish_qml/control.py#L376-L435 | def getPluginActions(self, index):
"""Return actions from plug-in at `index`
Arguments:
index (int): Index at which item is located in model
"""
index = self.data["proxies"]["plugin"].mapToSource(
self.data["proxies"]["plugin"].index(
index, 0, QtCore.QModelIndex())).row()
item = self.data["models"]["item"].items[index]
# Inject reference to the original index
actions = [
dict(action, **{"index": index})
for action in item.actions
]
# Context specific actions
for action in list(actions):
if action["on"] == "failed" and not item.hasError:
actions.remove(action)
if action["on"] == "succeeded" and not item.succeeded:
actions.remove(action)
if action["on"] == "processed" and not item.processed:
actions.remove(action)
if action["on"] == "notProcessed" and item.processed:
actions.remove(action)
# Discard empty categories, separators
remaining_actions = list()
index = 0
try:
action = actions[index]
except IndexError:
pass
else:
while action:
try:
action = actions[index]
except IndexError:
break
isempty = False
if action["__type__"] in ("category", "separator"):
try:
next_ = actions[index + 1]
if next_["__type__"] != "action":
isempty = True
except IndexError:
isempty = True
if not isempty:
remaining_actions.append(action)
index += 1
return remaining_actions | [
"def",
"getPluginActions",
"(",
"self",
",",
"index",
")",
":",
"index",
"=",
"self",
".",
"data",
"[",
"\"proxies\"",
"]",
"[",
"\"plugin\"",
"]",
".",
"mapToSource",
"(",
"self",
".",
"data",
"[",
"\"proxies\"",
"]",
"[",
"\"plugin\"",
"]",
".",
"index",
"(",
"index",
",",
"0",
",",
"QtCore",
".",
"QModelIndex",
"(",
")",
")",
")",
".",
"row",
"(",
")",
"item",
"=",
"self",
".",
"data",
"[",
"\"models\"",
"]",
"[",
"\"item\"",
"]",
".",
"items",
"[",
"index",
"]",
"# Inject reference to the original index",
"actions",
"=",
"[",
"dict",
"(",
"action",
",",
"*",
"*",
"{",
"\"index\"",
":",
"index",
"}",
")",
"for",
"action",
"in",
"item",
".",
"actions",
"]",
"# Context specific actions",
"for",
"action",
"in",
"list",
"(",
"actions",
")",
":",
"if",
"action",
"[",
"\"on\"",
"]",
"==",
"\"failed\"",
"and",
"not",
"item",
".",
"hasError",
":",
"actions",
".",
"remove",
"(",
"action",
")",
"if",
"action",
"[",
"\"on\"",
"]",
"==",
"\"succeeded\"",
"and",
"not",
"item",
".",
"succeeded",
":",
"actions",
".",
"remove",
"(",
"action",
")",
"if",
"action",
"[",
"\"on\"",
"]",
"==",
"\"processed\"",
"and",
"not",
"item",
".",
"processed",
":",
"actions",
".",
"remove",
"(",
"action",
")",
"if",
"action",
"[",
"\"on\"",
"]",
"==",
"\"notProcessed\"",
"and",
"item",
".",
"processed",
":",
"actions",
".",
"remove",
"(",
"action",
")",
"# Discard empty categories, separators",
"remaining_actions",
"=",
"list",
"(",
")",
"index",
"=",
"0",
"try",
":",
"action",
"=",
"actions",
"[",
"index",
"]",
"except",
"IndexError",
":",
"pass",
"else",
":",
"while",
"action",
":",
"try",
":",
"action",
"=",
"actions",
"[",
"index",
"]",
"except",
"IndexError",
":",
"break",
"isempty",
"=",
"False",
"if",
"action",
"[",
"\"__type__\"",
"]",
"in",
"(",
"\"category\"",
",",
"\"separator\"",
")",
":",
"try",
":",
"next_",
"=",
"actions",
"[",
"index",
"+",
"1",
"]",
"if",
"next_",
"[",
"\"__type__\"",
"]",
"!=",
"\"action\"",
":",
"isempty",
"=",
"True",
"except",
"IndexError",
":",
"isempty",
"=",
"True",
"if",
"not",
"isempty",
":",
"remaining_actions",
".",
"append",
"(",
"action",
")",
"index",
"+=",
"1",
"return",
"remaining_actions"
]
| Return actions from plug-in at `index`
Arguments:
index (int): Index at which item is located in model | [
"Return",
"actions",
"from",
"plug",
"-",
"in",
"at",
"index"
]
| python | train | 31.1 |
aestrivex/bctpy | bct/algorithms/clustering.py | https://github.com/aestrivex/bctpy/blob/4cb0e759eb4a038750b07e23bd29958c400684b8/bct/algorithms/clustering.py#L653-L671 | def transitivity_wu(W):
'''
Transitivity is the ratio of 'triangles to triplets' in the network.
(A classical version of the clustering coefficient).
Parameters
----------
W : NxN np.ndarray
weighted undirected connection matrix
Returns
-------
T : int
transitivity scalar
'''
K = np.sum(np.logical_not(W == 0), axis=1)
ws = cuberoot(W)
cyc3 = np.diag(np.dot(ws, np.dot(ws, ws)))
return np.sum(cyc3, axis=0) / np.sum(K * (K - 1), axis=0) | [
"def",
"transitivity_wu",
"(",
"W",
")",
":",
"K",
"=",
"np",
".",
"sum",
"(",
"np",
".",
"logical_not",
"(",
"W",
"==",
"0",
")",
",",
"axis",
"=",
"1",
")",
"ws",
"=",
"cuberoot",
"(",
"W",
")",
"cyc3",
"=",
"np",
".",
"diag",
"(",
"np",
".",
"dot",
"(",
"ws",
",",
"np",
".",
"dot",
"(",
"ws",
",",
"ws",
")",
")",
")",
"return",
"np",
".",
"sum",
"(",
"cyc3",
",",
"axis",
"=",
"0",
")",
"/",
"np",
".",
"sum",
"(",
"K",
"*",
"(",
"K",
"-",
"1",
")",
",",
"axis",
"=",
"0",
")"
]
| Transitivity is the ratio of 'triangles to triplets' in the network.
(A classical version of the clustering coefficient).
Parameters
----------
W : NxN np.ndarray
weighted undirected connection matrix
Returns
-------
T : int
transitivity scalar | [
"Transitivity",
"is",
"the",
"ratio",
"of",
"triangles",
"to",
"triplets",
"in",
"the",
"network",
".",
"(",
"A",
"classical",
"version",
"of",
"the",
"clustering",
"coefficient",
")",
"."
]
| python | train | 25.947368 |
csparpa/pyowm | pyowm/weatherapi25/historian.py | https://github.com/csparpa/pyowm/blob/cdd59eb72f32f7238624ceef9b2e2329a5ebd472/pyowm/weatherapi25/historian.py#L69-L78 | def pressure_series(self):
"""Returns the atmospheric pressure time series relative to the
meteostation, in the form of a list of tuples, each one containing the
couple timestamp-value
:returns: a list of tuples
"""
return [(tstamp, \
self._station_history.get_measurements()[tstamp]['pressure']) \
for tstamp in self._station_history.get_measurements()] | [
"def",
"pressure_series",
"(",
"self",
")",
":",
"return",
"[",
"(",
"tstamp",
",",
"self",
".",
"_station_history",
".",
"get_measurements",
"(",
")",
"[",
"tstamp",
"]",
"[",
"'pressure'",
"]",
")",
"for",
"tstamp",
"in",
"self",
".",
"_station_history",
".",
"get_measurements",
"(",
")",
"]"
]
| Returns the atmospheric pressure time series relative to the
meteostation, in the form of a list of tuples, each one containing the
couple timestamp-value
:returns: a list of tuples | [
"Returns",
"the",
"atmospheric",
"pressure",
"time",
"series",
"relative",
"to",
"the",
"meteostation",
"in",
"the",
"form",
"of",
"a",
"list",
"of",
"tuples",
"each",
"one",
"containing",
"the",
"couple",
"timestamp",
"-",
"value"
]
| python | train | 42.6 |
aparo/pyes | pyes/es.py | https://github.com/aparo/pyes/blob/712eb6095961755067b2b5baa262008ade6584b3/pyes/es.py#L127-L146 | def string_to_datetime(self, obj):
"""
Decode a datetime string to a datetime object
"""
if isinstance(obj, six.string_types) and len(obj) == 19:
try:
return datetime.strptime(obj, "%Y-%m-%dT%H:%M:%S")
except ValueError:
pass
if isinstance(obj, six.string_types) and len(obj) > 19:
try:
return datetime.strptime(obj, "%Y-%m-%dT%H:%M:%S.%f")
except ValueError:
pass
if isinstance(obj, six.string_types) and len(obj) == 10:
try:
return datetime.strptime(obj, "%Y-%m-%d")
except ValueError:
pass
return obj | [
"def",
"string_to_datetime",
"(",
"self",
",",
"obj",
")",
":",
"if",
"isinstance",
"(",
"obj",
",",
"six",
".",
"string_types",
")",
"and",
"len",
"(",
"obj",
")",
"==",
"19",
":",
"try",
":",
"return",
"datetime",
".",
"strptime",
"(",
"obj",
",",
"\"%Y-%m-%dT%H:%M:%S\"",
")",
"except",
"ValueError",
":",
"pass",
"if",
"isinstance",
"(",
"obj",
",",
"six",
".",
"string_types",
")",
"and",
"len",
"(",
"obj",
")",
">",
"19",
":",
"try",
":",
"return",
"datetime",
".",
"strptime",
"(",
"obj",
",",
"\"%Y-%m-%dT%H:%M:%S.%f\"",
")",
"except",
"ValueError",
":",
"pass",
"if",
"isinstance",
"(",
"obj",
",",
"six",
".",
"string_types",
")",
"and",
"len",
"(",
"obj",
")",
"==",
"10",
":",
"try",
":",
"return",
"datetime",
".",
"strptime",
"(",
"obj",
",",
"\"%Y-%m-%d\"",
")",
"except",
"ValueError",
":",
"pass",
"return",
"obj"
]
| Decode a datetime string to a datetime object | [
"Decode",
"a",
"datetime",
"string",
"to",
"a",
"datetime",
"object"
]
| python | train | 35.4 |
Fantomas42/django-blog-zinnia | zinnia/search.py | https://github.com/Fantomas42/django-blog-zinnia/blob/b4949304b104a8e1a7a7a0773cbfd024313c3a15/zinnia/search.py#L90-L113 | def union_q(token):
"""
Appends all the Q() objects.
"""
query = Q()
operation = 'and'
negation = False
for t in token:
if type(t) is ParseResults: # See tokens recursively
query &= union_q(t)
else:
if t in ('or', 'and'): # Set the new op and go to next token
operation = t
elif t == '-': # Next tokens needs to be negated
negation = True
else: # Append to query the token
if negation:
t = ~t
if operation == 'or':
query |= t
else:
query &= t
return query | [
"def",
"union_q",
"(",
"token",
")",
":",
"query",
"=",
"Q",
"(",
")",
"operation",
"=",
"'and'",
"negation",
"=",
"False",
"for",
"t",
"in",
"token",
":",
"if",
"type",
"(",
"t",
")",
"is",
"ParseResults",
":",
"# See tokens recursively",
"query",
"&=",
"union_q",
"(",
"t",
")",
"else",
":",
"if",
"t",
"in",
"(",
"'or'",
",",
"'and'",
")",
":",
"# Set the new op and go to next token",
"operation",
"=",
"t",
"elif",
"t",
"==",
"'-'",
":",
"# Next tokens needs to be negated",
"negation",
"=",
"True",
"else",
":",
"# Append to query the token",
"if",
"negation",
":",
"t",
"=",
"~",
"t",
"if",
"operation",
"==",
"'or'",
":",
"query",
"|=",
"t",
"else",
":",
"query",
"&=",
"t",
"return",
"query"
]
| Appends all the Q() objects. | [
"Appends",
"all",
"the",
"Q",
"()",
"objects",
"."
]
| python | train | 28.041667 |
GeorgeArgyros/sfalearn | sfalearn/angluin_sfa.py | https://github.com/GeorgeArgyros/sfalearn/blob/68a93f507e2fb7d89ca04bd8a8f0da2d6c680443/sfalearn/angluin_sfa.py#L50-L75 | def is_closed(self):
"""
        Check if the observation table is closed.
Args:
None
Returns:
tuple (bool, str): True if the observation table is closed and false otherwise.
If the table is not closed the escaping string is returned.
"""
old_training_data = self.training_data
self.training_data = {x: [] for x in self.sm_vector}
for t in self.smi_vector:
src_state = t[:-1]
symbol = t[-1:]
found = False
for dst_state in self.sm_vector:
if self.observation_table[dst_state] == self.observation_table[t]:
self._add_training_data(src_state, dst_state, symbol)
found = True
break
if not found:
return False, t
assert self.training_data != old_training_data, \
"No update happened from previous round. The algo will loop infinetely"
return True, None | [
"def",
"is_closed",
"(",
"self",
")",
":",
"old_training_data",
"=",
"self",
".",
"training_data",
"self",
".",
"training_data",
"=",
"{",
"x",
":",
"[",
"]",
"for",
"x",
"in",
"self",
".",
"sm_vector",
"}",
"for",
"t",
"in",
"self",
".",
"smi_vector",
":",
"src_state",
"=",
"t",
"[",
":",
"-",
"1",
"]",
"symbol",
"=",
"t",
"[",
"-",
"1",
":",
"]",
"found",
"=",
"False",
"for",
"dst_state",
"in",
"self",
".",
"sm_vector",
":",
"if",
"self",
".",
"observation_table",
"[",
"dst_state",
"]",
"==",
"self",
".",
"observation_table",
"[",
"t",
"]",
":",
"self",
".",
"_add_training_data",
"(",
"src_state",
",",
"dst_state",
",",
"symbol",
")",
"found",
"=",
"True",
"break",
"if",
"not",
"found",
":",
"return",
"False",
",",
"t",
"assert",
"self",
".",
"training_data",
"!=",
"old_training_data",
",",
"\"No update happened from previous round. The algo will loop infinetely\"",
"return",
"True",
",",
"None"
]
 | Check if the observation table is closed.
Args:
None
Returns:
tuple (bool, str): True if the observation table is closed and false otherwise.
If the table is not closed the escaping string is returned. | [
"_check",
"if",
"the",
"observation",
"table",
"is",
"closed",
".",
"Args",
":",
"None",
"Returns",
":",
"tuple",
"(",
"bool",
"str",
")",
":",
"True",
"if",
"the",
"observation",
"table",
"is",
"closed",
"and",
"false",
"otherwise",
".",
"If",
"the",
"table",
"is",
"not",
"closed",
"the",
"escaping",
"string",
"is",
"returned",
"."
]
| python | train | 39.076923 |
ubc/ubcpi | ubcpi/persistence.py | https://github.com/ubc/ubcpi/blob/7b6de03f93f3a4a8af4b92dfde7c69eeaf21f46e/ubcpi/persistence.py#L45-L60 | def add_answer_for_student(student_item, vote, rationale):
"""
Add an answer for a student to the backend
Args:
student_item (dict): The location of the problem this submission is
associated with, as defined by a course, student, and item.
        vote (int): the option that the student voted for
        rationale (str): the reason why the student voted for the option
"""
answers = get_answers_for_student(student_item)
answers.add_answer(vote, rationale)
sub_api.create_submission(student_item, {
ANSWER_LIST_KEY: answers.get_answers_as_list()
}) | [
"def",
"add_answer_for_student",
"(",
"student_item",
",",
"vote",
",",
"rationale",
")",
":",
"answers",
"=",
"get_answers_for_student",
"(",
"student_item",
")",
"answers",
".",
"add_answer",
"(",
"vote",
",",
"rationale",
")",
"sub_api",
".",
"create_submission",
"(",
"student_item",
",",
"{",
"ANSWER_LIST_KEY",
":",
"answers",
".",
"get_answers_as_list",
"(",
")",
"}",
")"
]
| Add an answer for a student to the backend
Args:
student_item (dict): The location of the problem this submission is
associated with, as defined by a course, student, and item.
    vote (int): the option that the student voted for
    rationale (str): the reason why the student voted for the option | [
"Add",
"an",
"answer",
"for",
"a",
"student",
"to",
"the",
"backend"
]
| python | train | 37 |
chaoss/grimoirelab-sortinghat | sortinghat/db/database.py | https://github.com/chaoss/grimoirelab-sortinghat/blob/391cd37a75fea26311dc6908bc1c953c540a8e04/sortinghat/db/database.py#L152-L164 | def handle_flush_error(cls, exception):
"""Handle flush error exceptions."""
trace = exception.args[0]
m = re.match(cls.MYSQL_FLUSH_ERROR_REGEX, trace)
if not m:
raise exception
entity = m.group('entity')
eid = m.group('eid')
raise AlreadyExistsError(entity=entity, eid=eid) | [
"def",
"handle_flush_error",
"(",
"cls",
",",
"exception",
")",
":",
"trace",
"=",
"exception",
".",
"args",
"[",
"0",
"]",
"m",
"=",
"re",
".",
"match",
"(",
"cls",
".",
"MYSQL_FLUSH_ERROR_REGEX",
",",
"trace",
")",
"if",
"not",
"m",
":",
"raise",
"exception",
"entity",
"=",
"m",
".",
"group",
"(",
"'entity'",
")",
"eid",
"=",
"m",
".",
"group",
"(",
"'eid'",
")",
"raise",
"AlreadyExistsError",
"(",
"entity",
"=",
"entity",
",",
"eid",
"=",
"eid",
")"
]
| Handle flush error exceptions. | [
"Handle",
"flush",
"error",
"exceptions",
"."
]
| python | train | 25.692308 |
DataONEorg/d1_python | client_onedrive/src/d1_onedrive/impl/resolver/time_period.py | https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/client_onedrive/src/d1_onedrive/impl/resolver/time_period.py#L160-L167 | def _decade_ranges_in_date_range(self, begin_date, end_date):
"""Return a list of decades which is covered by date range."""
begin_dated = begin_date.year / 10
end_dated = end_date.year / 10
decades = []
for d in range(begin_dated, end_dated + 1):
decades.append('{}-{}'.format(d * 10, d * 10 + 9))
return decades | [
"def",
"_decade_ranges_in_date_range",
"(",
"self",
",",
"begin_date",
",",
"end_date",
")",
":",
"begin_dated",
"=",
"begin_date",
".",
"year",
"/",
"10",
"end_dated",
"=",
"end_date",
".",
"year",
"/",
"10",
"decades",
"=",
"[",
"]",
"for",
"d",
"in",
"range",
"(",
"begin_dated",
",",
"end_dated",
"+",
"1",
")",
":",
"decades",
".",
"append",
"(",
"'{}-{}'",
".",
"format",
"(",
"d",
"*",
"10",
",",
"d",
"*",
"10",
"+",
"9",
")",
")",
"return",
"decades"
]
 | Return a list of decades which are covered by the date range. | [
"Return",
"a",
"list",
"of",
"decades",
"which",
"is",
"covered",
"by",
"date",
"range",
"."
]
| python | train | 45.75 |
rodluger/everest | everest/detrender.py | https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/detrender.py#L327-L373 | def cv_precompute(self, mask, b):
'''
Pre-compute the matrices :py:obj:`A` and :py:obj:`B`
(cross-validation step only)
for chunk :py:obj:`b`.
'''
# Get current chunk and mask outliers
m1 = self.get_masked_chunk(b)
flux = self.fraw[m1]
K = GetCovariance(self.kernel, self.kernel_params,
self.time[m1], self.fraw_err[m1])
med = np.nanmedian(flux)
# Now mask the validation set
M = lambda x, axis = 0: np.delete(x, mask, axis=axis)
m2 = M(m1)
mK = M(M(K, axis=0), axis=1)
f = M(flux) - med
# Pre-compute the matrices
A = [None for i in range(self.pld_order)]
B = [None for i in range(self.pld_order)]
for n in range(self.pld_order):
# Only compute up to the current PLD order
if self.lam_idx >= n:
X2 = self.X(n, m2)
X1 = self.X(n, m1)
A[n] = np.dot(X2, X2.T)
B[n] = np.dot(X1, X2.T)
del X1, X2
if self.transit_model is None:
C = 0
else:
C = np.zeros((len(m2), len(m2)))
mean_transit_model = med * \
np.sum([tm.depth * tm(self.time[m2])
for tm in self.transit_model], axis=0)
f -= mean_transit_model
for tm in self.transit_model:
X2 = tm(self.time[m2]).reshape(-1, 1)
C += tm.var_depth * np.dot(X2, X2.T)
del X2
return A, B, C, mK, f, m1, m2 | [
"def",
"cv_precompute",
"(",
"self",
",",
"mask",
",",
"b",
")",
":",
"# Get current chunk and mask outliers",
"m1",
"=",
"self",
".",
"get_masked_chunk",
"(",
"b",
")",
"flux",
"=",
"self",
".",
"fraw",
"[",
"m1",
"]",
"K",
"=",
"GetCovariance",
"(",
"self",
".",
"kernel",
",",
"self",
".",
"kernel_params",
",",
"self",
".",
"time",
"[",
"m1",
"]",
",",
"self",
".",
"fraw_err",
"[",
"m1",
"]",
")",
"med",
"=",
"np",
".",
"nanmedian",
"(",
"flux",
")",
"# Now mask the validation set",
"M",
"=",
"lambda",
"x",
",",
"axis",
"=",
"0",
":",
"np",
".",
"delete",
"(",
"x",
",",
"mask",
",",
"axis",
"=",
"axis",
")",
"m2",
"=",
"M",
"(",
"m1",
")",
"mK",
"=",
"M",
"(",
"M",
"(",
"K",
",",
"axis",
"=",
"0",
")",
",",
"axis",
"=",
"1",
")",
"f",
"=",
"M",
"(",
"flux",
")",
"-",
"med",
"# Pre-compute the matrices",
"A",
"=",
"[",
"None",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"pld_order",
")",
"]",
"B",
"=",
"[",
"None",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"pld_order",
")",
"]",
"for",
"n",
"in",
"range",
"(",
"self",
".",
"pld_order",
")",
":",
"# Only compute up to the current PLD order",
"if",
"self",
".",
"lam_idx",
">=",
"n",
":",
"X2",
"=",
"self",
".",
"X",
"(",
"n",
",",
"m2",
")",
"X1",
"=",
"self",
".",
"X",
"(",
"n",
",",
"m1",
")",
"A",
"[",
"n",
"]",
"=",
"np",
".",
"dot",
"(",
"X2",
",",
"X2",
".",
"T",
")",
"B",
"[",
"n",
"]",
"=",
"np",
".",
"dot",
"(",
"X1",
",",
"X2",
".",
"T",
")",
"del",
"X1",
",",
"X2",
"if",
"self",
".",
"transit_model",
"is",
"None",
":",
"C",
"=",
"0",
"else",
":",
"C",
"=",
"np",
".",
"zeros",
"(",
"(",
"len",
"(",
"m2",
")",
",",
"len",
"(",
"m2",
")",
")",
")",
"mean_transit_model",
"=",
"med",
"*",
"np",
".",
"sum",
"(",
"[",
"tm",
".",
"depth",
"*",
"tm",
"(",
"self",
".",
"time",
"[",
"m2",
"]",
")",
"for",
"tm",
"in",
"self",
".",
"transit_model",
"]",
",",
"axis",
"=",
"0",
")",
"f",
"-=",
"mean_transit_model",
"for",
"tm",
"in",
"self",
".",
"transit_model",
":",
"X2",
"=",
"tm",
"(",
"self",
".",
"time",
"[",
"m2",
"]",
")",
".",
"reshape",
"(",
"-",
"1",
",",
"1",
")",
"C",
"+=",
"tm",
".",
"var_depth",
"*",
"np",
".",
"dot",
"(",
"X2",
",",
"X2",
".",
"T",
")",
"del",
"X2",
"return",
"A",
",",
"B",
",",
"C",
",",
"mK",
",",
"f",
",",
"m1",
",",
"m2"
]
| Pre-compute the matrices :py:obj:`A` and :py:obj:`B`
(cross-validation step only)
for chunk :py:obj:`b`. | [
"Pre",
"-",
"compute",
"the",
"matrices",
":",
"py",
":",
"obj",
":",
"A",
"and",
":",
"py",
":",
"obj",
":",
"B",
"(",
"cross",
"-",
"validation",
"step",
"only",
")",
"for",
"chunk",
":",
"py",
":",
"obj",
":",
"b",
"."
]
| python | train | 33.042553 |
aiidateam/aiida-nwchem | aiida_nwchem/parsers/nwcpymatgen.py | https://github.com/aiidateam/aiida-nwchem/blob/21034e7f8ea8249948065c28030f4b572a6ecf05/aiida_nwchem/parsers/nwcpymatgen.py#L32-L65 | def _get_output_nodes(self, output_path, error_path):
"""
Extracts output nodes from the standard output and standard error
files.
"""
from pymatgen.io.nwchem import NwOutput
from aiida.orm.data.structure import StructureData
from aiida.orm.data.array.trajectory import TrajectoryData
ret_dict = []
nwo = NwOutput(output_path)
for out in nwo.data:
molecules = out.pop('molecules', None)
structures = out.pop('structures', None)
if molecules:
structlist = [StructureData(pymatgen_molecule=m)
for m in molecules]
ret_dict.append(('trajectory',
TrajectoryData(structurelist=structlist)))
if structures:
structlist = [StructureData(pymatgen_structure=s)
for s in structures]
ret_dict.append(('trajectory',
TrajectoryData(structurelist=structlist)))
ret_dict.append(('output', ParameterData(dict=out)))
        # Since ParameterData rewrites its properties (using _set_attr())
# with keys from the supplied dictionary, ``source`` has to be
# moved to another key. See issue #9 for details:
# (https://bitbucket.org/epfl_theos/aiida_epfl/issues/9)
nwo.job_info['program_source'] = nwo.job_info.pop('source', None)
ret_dict.append(('job_info', ParameterData(dict=nwo.job_info)))
return ret_dict | [
"def",
"_get_output_nodes",
"(",
"self",
",",
"output_path",
",",
"error_path",
")",
":",
"from",
"pymatgen",
".",
"io",
".",
"nwchem",
"import",
"NwOutput",
"from",
"aiida",
".",
"orm",
".",
"data",
".",
"structure",
"import",
"StructureData",
"from",
"aiida",
".",
"orm",
".",
"data",
".",
"array",
".",
"trajectory",
"import",
"TrajectoryData",
"ret_dict",
"=",
"[",
"]",
"nwo",
"=",
"NwOutput",
"(",
"output_path",
")",
"for",
"out",
"in",
"nwo",
".",
"data",
":",
"molecules",
"=",
"out",
".",
"pop",
"(",
"'molecules'",
",",
"None",
")",
"structures",
"=",
"out",
".",
"pop",
"(",
"'structures'",
",",
"None",
")",
"if",
"molecules",
":",
"structlist",
"=",
"[",
"StructureData",
"(",
"pymatgen_molecule",
"=",
"m",
")",
"for",
"m",
"in",
"molecules",
"]",
"ret_dict",
".",
"append",
"(",
"(",
"'trajectory'",
",",
"TrajectoryData",
"(",
"structurelist",
"=",
"structlist",
")",
")",
")",
"if",
"structures",
":",
"structlist",
"=",
"[",
"StructureData",
"(",
"pymatgen_structure",
"=",
"s",
")",
"for",
"s",
"in",
"structures",
"]",
"ret_dict",
".",
"append",
"(",
"(",
"'trajectory'",
",",
"TrajectoryData",
"(",
"structurelist",
"=",
"structlist",
")",
")",
")",
"ret_dict",
".",
"append",
"(",
"(",
"'output'",
",",
"ParameterData",
"(",
"dict",
"=",
"out",
")",
")",
")",
"# Since ParameterData rewrites it's properties (using _set_attr())",
"# with keys from the supplied dictionary, ``source`` has to be",
"# moved to another key. See issue #9 for details:",
"# (https://bitbucket.org/epfl_theos/aiida_epfl/issues/9)",
"nwo",
".",
"job_info",
"[",
"'program_source'",
"]",
"=",
"nwo",
".",
"job_info",
".",
"pop",
"(",
"'source'",
",",
"None",
")",
"ret_dict",
".",
"append",
"(",
"(",
"'job_info'",
",",
"ParameterData",
"(",
"dict",
"=",
"nwo",
".",
"job_info",
")",
")",
")",
"return",
"ret_dict"
]
| Extracts output nodes from the standard output and standard error
files. | [
"Extracts",
"output",
"nodes",
"from",
"the",
"standard",
"output",
"and",
"standard",
"error",
"files",
"."
]
| python | train | 45.411765 |
aleju/imgaug | imgaug/parameters.py | https://github.com/aleju/imgaug/blob/786be74aa855513840113ea523c5df495dc6a8af/imgaug/parameters.py#L1957-L1992 | def Negative(other_param, mode="invert", reroll_count_max=2):
"""
Converts another parameter's results to negative values.
Parameters
----------
other_param : imgaug.parameters.StochasticParameter
        Other parameter whose sampled values are to be
modified.
mode : {'invert', 'reroll'}, optional
How to change the signs. Valid values are ``invert`` and ``reroll``.
``invert`` means that wrong signs are simply flipped.
``reroll`` means that all samples with wrong signs are sampled again,
optionally many times, until they randomly end up having the correct
sign.
reroll_count_max : int, optional
If `mode` is set to ``reroll``, this determines how often values may
be rerolled before giving up and simply flipping the sign (as in
``mode="invert"``). This shouldn't be set too high, as rerolling is
expensive.
Examples
--------
>>> param = Negative(Normal(0, 1), mode="reroll")
Generates a normal distribution that has only negative values.
"""
return ForceSign(
other_param=other_param,
positive=False,
mode=mode,
reroll_count_max=reroll_count_max
) | [
"def",
"Negative",
"(",
"other_param",
",",
"mode",
"=",
"\"invert\"",
",",
"reroll_count_max",
"=",
"2",
")",
":",
"return",
"ForceSign",
"(",
"other_param",
"=",
"other_param",
",",
"positive",
"=",
"False",
",",
"mode",
"=",
"mode",
",",
"reroll_count_max",
"=",
"reroll_count_max",
")"
]
| Converts another parameter's results to negative values.
Parameters
----------
other_param : imgaug.parameters.StochasticParameter
    Other parameter whose sampled values are to be
modified.
mode : {'invert', 'reroll'}, optional
How to change the signs. Valid values are ``invert`` and ``reroll``.
``invert`` means that wrong signs are simply flipped.
``reroll`` means that all samples with wrong signs are sampled again,
optionally many times, until they randomly end up having the correct
sign.
reroll_count_max : int, optional
If `mode` is set to ``reroll``, this determines how often values may
be rerolled before giving up and simply flipping the sign (as in
``mode="invert"``). This shouldn't be set too high, as rerolling is
expensive.
Examples
--------
>>> param = Negative(Normal(0, 1), mode="reroll")
Generates a normal distribution that has only negative values. | [
"Converts",
"another",
"parameter",
"s",
"results",
"to",
"negative",
"values",
"."
]
| python | valid | 33.194444 |
tcalmant/ipopo | pelix/utilities.py | https://github.com/tcalmant/ipopo/blob/2f9ae0c44cd9c34ef1a9d50837b3254e75678eb1/pelix/utilities.py#L692-L711 | def step(self):
# type: () -> bool
"""
Decreases the internal counter. Raises an error if the counter goes
below 0
:return: True if this step was the final one, else False
:raise ValueError: The counter has gone below 0
"""
with self.__lock:
self.__value -= 1
if self.__value == 0:
# All done
self.__event.set()
return True
elif self.__value < 0:
# Gone too far
raise ValueError("The counter has gone below 0")
return False | [
"def",
"step",
"(",
"self",
")",
":",
"# type: () -> bool",
"with",
"self",
".",
"__lock",
":",
"self",
".",
"__value",
"-=",
"1",
"if",
"self",
".",
"__value",
"==",
"0",
":",
"# All done",
"self",
".",
"__event",
".",
"set",
"(",
")",
"return",
"True",
"elif",
"self",
".",
"__value",
"<",
"0",
":",
"# Gone too far",
"raise",
"ValueError",
"(",
"\"The counter has gone below 0\"",
")",
"return",
"False"
]
| Decreases the internal counter. Raises an error if the counter goes
below 0
:return: True if this step was the final one, else False
:raise ValueError: The counter has gone below 0 | [
"Decreases",
"the",
"internal",
"counter",
".",
"Raises",
"an",
"error",
"if",
"the",
"counter",
"goes",
"below",
"0"
]
| python | train | 29.7 |
tamasgal/km3pipe | km3pipe/hardware.py | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/hardware.py#L391-L395 | def write(self, filename):
"""Save detx file."""
with open(filename, 'w') as f:
f.write(self.ascii)
self.print("Detector file saved as '{0}'".format(filename)) | [
"def",
"write",
"(",
"self",
",",
"filename",
")",
":",
"with",
"open",
"(",
"filename",
",",
"'w'",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"self",
".",
"ascii",
")",
"self",
".",
"print",
"(",
"\"Detector file saved as '{0}'\"",
".",
"format",
"(",
"filename",
")",
")"
]
| Save detx file. | [
"Save",
"detx",
"file",
"."
]
| python | train | 38.2 |
DocNow/twarc | twarc/client.py | https://github.com/DocNow/twarc/blob/47dd87d0c00592a4d583412c9d660ba574fc6f26/twarc/client.py#L254-L278 | def follower_ids(self, user):
"""
Returns Twitter user id lists for the specified user's followers.
        A user can be specified using their screen_name or user_id
"""
user = str(user)
user = user.lstrip('@')
url = 'https://api.twitter.com/1.1/followers/ids.json'
if re.match(r'^\d+$', user):
params = {'user_id': user, 'cursor': -1}
else:
params = {'screen_name': user, 'cursor': -1}
while params['cursor'] != 0:
try:
resp = self.get(url, params=params, allow_404=True)
except requests.exceptions.HTTPError as e:
if e.response.status_code == 404:
                    log.info("no users matching %s", user)
raise e
user_ids = resp.json()
for user_id in user_ids['ids']:
yield str_type(user_id)
params['cursor'] = user_ids['next_cursor'] | [
"def",
"follower_ids",
"(",
"self",
",",
"user",
")",
":",
"user",
"=",
"str",
"(",
"user",
")",
"user",
"=",
"user",
".",
"lstrip",
"(",
"'@'",
")",
"url",
"=",
"'https://api.twitter.com/1.1/followers/ids.json'",
"if",
"re",
".",
"match",
"(",
"r'^\\d+$'",
",",
"user",
")",
":",
"params",
"=",
"{",
"'user_id'",
":",
"user",
",",
"'cursor'",
":",
"-",
"1",
"}",
"else",
":",
"params",
"=",
"{",
"'screen_name'",
":",
"user",
",",
"'cursor'",
":",
"-",
"1",
"}",
"while",
"params",
"[",
"'cursor'",
"]",
"!=",
"0",
":",
"try",
":",
"resp",
"=",
"self",
".",
"get",
"(",
"url",
",",
"params",
"=",
"params",
",",
"allow_404",
"=",
"True",
")",
"except",
"requests",
".",
"exceptions",
".",
"HTTPError",
"as",
"e",
":",
"if",
"e",
".",
"response",
".",
"status_code",
"==",
"404",
":",
"log",
".",
"info",
"(",
"\"no users matching %s\"",
",",
"screen_name",
")",
"raise",
"e",
"user_ids",
"=",
"resp",
".",
"json",
"(",
")",
"for",
"user_id",
"in",
"user_ids",
"[",
"'ids'",
"]",
":",
"yield",
"str_type",
"(",
"user_id",
")",
"params",
"[",
"'cursor'",
"]",
"=",
"user_ids",
"[",
"'next_cursor'",
"]"
]
| Returns Twitter user id lists for the specified user's followers.
    A user can be specified using their screen_name or user_id | [
"Returns",
"Twitter",
"user",
"id",
"lists",
"for",
"the",
"specified",
"user",
"s",
"followers",
".",
"A",
"user",
"can",
"be",
"a",
"specific",
"using",
"their",
"screen_name",
"or",
"user_id"
]
| python | train | 37.8 |
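For context, a minimal consumption sketch for the generator above, assuming the twarc 1.x client constructor; the credentials and the screen name are placeholders, not values from this dataset:

    from twarc import Twarc

    t = Twarc("consumer_key", "consumer_secret",
              "access_token", "access_token_secret")
    # follower_ids() pages through the cursored API and yields ids as strings.
    for i, follower_id in enumerate(t.follower_ids("jack")):
        print(follower_id)
        if i >= 9:  # stop after the first ten ids
            break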
SatelliteQE/nailgun | docs/create_organization_nailgun_v2.py | https://github.com/SatelliteQE/nailgun/blob/c36d8c20862e87bf6975bd48ac1ca40a9e634eaa/docs/create_organization_nailgun_v2.py#L8-L12 | def main():
"""Create an organization, print out its attributes and delete it."""
org = Organization(name='junk org').create()
pprint(org.get_values()) # e.g. {'name': 'junk org', …}
org.delete() | [
"def",
"main",
"(",
")",
":",
"org",
"=",
"Organization",
"(",
"name",
"=",
"'junk org'",
")",
".",
"create",
"(",
")",
"pprint",
"(",
"org",
".",
"get_values",
"(",
")",
")",
"# e.g. {'name': 'junk org', …}",
"org",
".",
"delete",
"(",
")"
]
| Create an organization, print out its attributes and delete it. | [
"Create",
"an",
"organization",
"print",
"out",
"its",
"attributes",
"and",
"delete",
"it",
"."
]
| python | train | 41.6 |
insilichem/ommprotocol | ommprotocol/io.py | https://github.com/insilichem/ommprotocol/blob/7283fddba7203e5ac3542fdab41fc1279d3b444e/ommprotocol/io.py#L222-L252 | def from_pdb(cls, path, forcefield=None, loader=PDBFile, strict=True, **kwargs):
"""
        Loads topology, positions and, potentially, velocities and vectors
from a PDB or PDBx file
Parameters
----------
path : str
Path to PDB/PDBx file
forcefields : list of str
Paths to FFXML and/or FRCMOD forcefields. REQUIRED.
Returns
-------
pdb : SystemHandler
SystemHandler with topology, positions, and, potentially, velocities and
box vectors. Forcefields are embedded in the `master` attribute.
"""
pdb = loader(path)
box = kwargs.pop('box', pdb.topology.getPeriodicBoxVectors())
positions = kwargs.pop('positions', pdb.positions)
velocities = kwargs.pop('velocities', getattr(pdb, 'velocities', None))
if strict and not forcefield:
from .md import FORCEFIELDS as forcefield
logger.info('! Forcefields for PDB not specified. Using default: %s',
', '.join(forcefield))
pdb.forcefield = ForceField(*list(process_forcefield(*forcefield)))
return cls(master=pdb.forcefield, topology=pdb.topology, positions=positions,
velocities=velocities, box=box, path=path, **kwargs) | [
"def",
"from_pdb",
"(",
"cls",
",",
"path",
",",
"forcefield",
"=",
"None",
",",
"loader",
"=",
"PDBFile",
",",
"strict",
"=",
"True",
",",
"*",
"*",
"kwargs",
")",
":",
"pdb",
"=",
"loader",
"(",
"path",
")",
"box",
"=",
"kwargs",
".",
"pop",
"(",
"'box'",
",",
"pdb",
".",
"topology",
".",
"getPeriodicBoxVectors",
"(",
")",
")",
"positions",
"=",
"kwargs",
".",
"pop",
"(",
"'positions'",
",",
"pdb",
".",
"positions",
")",
"velocities",
"=",
"kwargs",
".",
"pop",
"(",
"'velocities'",
",",
"getattr",
"(",
"pdb",
",",
"'velocities'",
",",
"None",
")",
")",
"if",
"strict",
"and",
"not",
"forcefield",
":",
"from",
".",
"md",
"import",
"FORCEFIELDS",
"as",
"forcefield",
"logger",
".",
"info",
"(",
"'! Forcefields for PDB not specified. Using default: %s'",
",",
"', '",
".",
"join",
"(",
"forcefield",
")",
")",
"pdb",
".",
"forcefield",
"=",
"ForceField",
"(",
"*",
"list",
"(",
"process_forcefield",
"(",
"*",
"forcefield",
")",
")",
")",
"return",
"cls",
"(",
"master",
"=",
"pdb",
".",
"forcefield",
",",
"topology",
"=",
"pdb",
".",
"topology",
",",
"positions",
"=",
"positions",
",",
"velocities",
"=",
"velocities",
",",
"box",
"=",
"box",
",",
"path",
"=",
"path",
",",
"*",
"*",
"kwargs",
")"
]
| Loads topology, positions and, potentially, velocities and vectors
from a PDB or PDBx file
Parameters
----------
path : str
Path to PDB/PDBx file
forcefields : list of str
Paths to FFXML and/or FRCMOD forcefields. REQUIRED.
Returns
-------
pdb : SystemHandler
SystemHandler with topology, positions, and, potentially, velocities and
box vectors. Forcefields are embedded in the `master` attribute. | [
"Loads",
"topology",
"positions",
"and",
"potentially",
"velocities",
"and",
"vectors",
"from",
"a",
"PDB",
"or",
"PDBx",
"file"
]
| python | train | 41.548387 |
totalgood/pugnlp | src/pugnlp/util.py | https://github.com/totalgood/pugnlp/blob/c43445b14afddfdeadc5f3076675c9e8fc1ee67c/src/pugnlp/util.py#L1980-L2020 | def normalize_scientific_notation(s, ignore_commas=True, verbosity=1):
"""Produce a string convertable with float(s), if possible, fixing some common scientific notations
Deletes commas and allows addition.
>>> normalize_scientific_notation(' -123 x 10^-45 ')
'-123e-45'
>>> normalize_scientific_notation(' -1+1,234 x 10^-5,678 ')
'1233e-5678'
>>> normalize_scientific_notation('$42.42')
'42.42'
"""
s = s.lstrip(charlist.not_digits_nor_sign)
s = s.rstrip(charlist.not_digits)
# print s
# TODO: substitute ** for ^ and just eval the expression rather than insisting on a base-10 representation
num_strings = rex.scientific_notation_exponent.split(s, maxsplit=2)
# print num_strings
# get rid of commas
s = rex.re.sub(r"[^.0-9-+" + "," * int(not ignore_commas) + r"]+", '', num_strings[0])
# print s
# if this value gets so large that it requires an exponential notation, this will break the conversion
if not s:
return None
try:
s = str(eval(s.strip().lstrip('0')))
except (IndexError, ValueError, AttributeError, TypeError):
if verbosity > 1:
print('Unable to evaluate %s' % repr(s))
try:
s = str(float(s))
except (IndexError, ValueError, AttributeError, TypeError):
print('Unable to float %s' % repr(s))
s = ''
# print s
if len(num_strings) > 1:
if not s:
s = '1'
s += 'e' + rex.re.sub(r'[^.0-9-+]+', '', num_strings[1])
if s:
return s
return None | [
"def",
"normalize_scientific_notation",
"(",
"s",
",",
"ignore_commas",
"=",
"True",
",",
"verbosity",
"=",
"1",
")",
":",
"s",
"=",
"s",
".",
"lstrip",
"(",
"charlist",
".",
"not_digits_nor_sign",
")",
"s",
"=",
"s",
".",
"rstrip",
"(",
"charlist",
".",
"not_digits",
")",
"# print s",
"# TODO: substitute ** for ^ and just eval the expression rather than insisting on a base-10 representation",
"num_strings",
"=",
"rex",
".",
"scientific_notation_exponent",
".",
"split",
"(",
"s",
",",
"maxsplit",
"=",
"2",
")",
"# print num_strings",
"# get rid of commas",
"s",
"=",
"rex",
".",
"re",
".",
"sub",
"(",
"r\"[^.0-9-+\"",
"+",
"\",\"",
"*",
"int",
"(",
"not",
"ignore_commas",
")",
"+",
"r\"]+\"",
",",
"''",
",",
"num_strings",
"[",
"0",
"]",
")",
"# print s",
"# if this value gets so large that it requires an exponential notation, this will break the conversion",
"if",
"not",
"s",
":",
"return",
"None",
"try",
":",
"s",
"=",
"str",
"(",
"eval",
"(",
"s",
".",
"strip",
"(",
")",
".",
"lstrip",
"(",
"'0'",
")",
")",
")",
"except",
"(",
"IndexError",
",",
"ValueError",
",",
"AttributeError",
",",
"TypeError",
")",
":",
"if",
"verbosity",
">",
"1",
":",
"print",
"(",
"'Unable to evaluate %s'",
"%",
"repr",
"(",
"s",
")",
")",
"try",
":",
"s",
"=",
"str",
"(",
"float",
"(",
"s",
")",
")",
"except",
"(",
"IndexError",
",",
"ValueError",
",",
"AttributeError",
",",
"TypeError",
")",
":",
"print",
"(",
"'Unable to float %s'",
"%",
"repr",
"(",
"s",
")",
")",
"s",
"=",
"''",
"# print s",
"if",
"len",
"(",
"num_strings",
")",
">",
"1",
":",
"if",
"not",
"s",
":",
"s",
"=",
"'1'",
"s",
"+=",
"'e'",
"+",
"rex",
".",
"re",
".",
"sub",
"(",
"r'[^.0-9-+]+'",
",",
"''",
",",
"num_strings",
"[",
"1",
"]",
")",
"if",
"s",
":",
"return",
"s",
"return",
"None"
]
| Produce a string convertible with float(s), if possible, fixing some common scientific notations
Deletes commas and allows addition.
>>> normalize_scientific_notation(' -123 x 10^-45 ')
'-123e-45'
>>> normalize_scientific_notation(' -1+1,234 x 10^-5,678 ')
'1233e-5678'
>>> normalize_scientific_notation('$42.42')
'42.42' | [
"Produce",
"a",
"string",
"convertable",
"with",
"float",
"(",
"s",
")",
"if",
"possible",
"fixing",
"some",
"common",
"scientific",
"notations"
]
| python | train | 37.512195 |
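A short usage sketch that mirrors the doctests above; the import path is assumed from the file's location (src/pugnlp/util.py):

    from pugnlp.util import normalize_scientific_notation

    assert normalize_scientific_notation(' -123 x 10^-45 ') == '-123e-45'
    assert normalize_scientific_notation('$42.42') == '42.42'
    # Commas are dropped and the leading arithmetic is evaluated first:
    assert normalize_scientific_notation(' -1+1,234 x 10^-5,678 ') == '1233e-5678'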
f3at/feat | src/feat/common/guard.py | https://github.com/f3at/feat/blob/15da93fc9d6ec8154f52a9172824e25821195ef8/src/feat/common/guard.py#L37-L45 | def mutable(function):
'''Add the instance internal state as the second parameter
of the decorated function.'''
def wrapper(self, *args, **kwargs):
state = self._get_state()
return function(self, state, *args, **kwargs)
return wrapper | [
"def",
"mutable",
"(",
"function",
")",
":",
"def",
"wrapper",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"state",
"=",
"self",
".",
"_get_state",
"(",
")",
"return",
"function",
"(",
"self",
",",
"state",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"wrapper"
]
| Add the instance internal state as the second parameter
of the decorated function. | [
"Add",
"the",
"instance",
"internal",
"state",
"as",
"the",
"second",
"parameter",
"of",
"the",
"decorated",
"function",
"."
]
| python | train | 28.888889 |
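An illustrative sketch of the decorator's contract; the Counter class and its _get_state() method are hypothetical stand-ins for whatever the guarded object exposes:

    class Counter(object):
        def __init__(self):
            self._state = {"value": 0}

        def _get_state(self):
            return self._state

        @mutable
        def increment(self, state, by=1):
            # state arrives as the injected second parameter
            state["value"] += by
            return state["value"]

    c = Counter()
    print(c.increment(), c.increment(by=2))  # -> 1 3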
Alignak-monitoring/alignak | alignak/util.py | https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/util.py#L587-L604 | def dict_to_serialized_dict(ref, the_dict):
"""Serialize the list of elements to a dictionary
Used for the retention store
:param ref: Not used
:type ref:
:param the_dict: dictionary to convert
:type the_dict: dict
:return: dict of serialized
:rtype: dict
"""
result = {}
for elt in list(the_dict.values()):
if not getattr(elt, 'serialize', None):
continue
result[elt.uuid] = elt.serialize()
return result | [
"def",
"dict_to_serialized_dict",
"(",
"ref",
",",
"the_dict",
")",
":",
"result",
"=",
"{",
"}",
"for",
"elt",
"in",
"list",
"(",
"the_dict",
".",
"values",
"(",
")",
")",
":",
"if",
"not",
"getattr",
"(",
"elt",
",",
"'serialize'",
",",
"None",
")",
":",
"continue",
"result",
"[",
"elt",
".",
"uuid",
"]",
"=",
"elt",
".",
"serialize",
"(",
")",
"return",
"result"
]
| Serialize the list of elements to a dictionary
Used for the retention store
:param ref: Not used
:type ref:
:param the_dict: dictionary to convert
:type the_dict: dict
:return: dict of serialized
:rtype: dict | [
"Serialize",
"the",
"list",
"of",
"elements",
"to",
"a",
"dictionary"
]
| python | train | 25.888889 |
pyviz/holoviews | holoviews/core/options.py | https://github.com/pyviz/holoviews/blob/ae0dd2f3de448b0ca5e9065aabd6ef8d84c7e655/holoviews/core/options.py#L1163-L1171 | def loads(cls, pickle_string):
"""
        Equivalent to pickle.loads except that the HoloViews trees are
restored appropriately.
"""
cls.load_counter_offset = StoreOptions.id_offset()
val = pickle.loads(pickle_string)
cls.load_counter_offset = None
return val | [
"def",
"loads",
"(",
"cls",
",",
"pickle_string",
")",
":",
"cls",
".",
"load_counter_offset",
"=",
"StoreOptions",
".",
"id_offset",
"(",
")",
"val",
"=",
"pickle",
".",
"loads",
"(",
"pickle_string",
")",
"cls",
".",
"load_counter_offset",
"=",
"None",
"return",
"val"
]
| Equivalent to pickle.loads except that the HoloViews trees are
restored appropriately. | [
"Equivalent",
"to",
"pickle",
".",
"loads",
"except",
"that",
"the",
"HoloViews",
"trees",
"is",
"restored",
"appropriately",
"."
]
| python | train | 34.111111 |
ArchiveTeam/wpull | wpull/protocol/ftp/client.py | https://github.com/ArchiveTeam/wpull/blob/ddf051aa3322479325ba20aa778cb2cb97606bf5/wpull/protocol/ftp/client.py#L114-L151 | def start(self, request: Request) -> Response:
'''Start a file or directory listing download.
Args:
request: Request.
Returns:
A Response populated with the initial data connection reply.
Once the response is received, call :meth:`download`.
Coroutine.
'''
if self._session_state != SessionState.ready:
raise RuntimeError('Session not ready')
response = Response()
yield from self._prepare_fetch(request, response)
response.file_transfer_size = yield from self._fetch_size(request)
if request.restart_value:
try:
yield from self._commander.restart(request.restart_value)
response.restart_value = request.restart_value
except FTPServerError:
_logger.debug('Could not restart file.', exc_info=1)
yield from self._open_data_stream()
command = Command('RETR', request.file_path)
yield from self._begin_stream(command)
self._session_state = SessionState.file_request_sent
return response | [
"def",
"start",
"(",
"self",
",",
"request",
":",
"Request",
")",
"->",
"Response",
":",
"if",
"self",
".",
"_session_state",
"!=",
"SessionState",
".",
"ready",
":",
"raise",
"RuntimeError",
"(",
"'Session not ready'",
")",
"response",
"=",
"Response",
"(",
")",
"yield",
"from",
"self",
".",
"_prepare_fetch",
"(",
"request",
",",
"response",
")",
"response",
".",
"file_transfer_size",
"=",
"yield",
"from",
"self",
".",
"_fetch_size",
"(",
"request",
")",
"if",
"request",
".",
"restart_value",
":",
"try",
":",
"yield",
"from",
"self",
".",
"_commander",
".",
"restart",
"(",
"request",
".",
"restart_value",
")",
"response",
".",
"restart_value",
"=",
"request",
".",
"restart_value",
"except",
"FTPServerError",
":",
"_logger",
".",
"debug",
"(",
"'Could not restart file.'",
",",
"exc_info",
"=",
"1",
")",
"yield",
"from",
"self",
".",
"_open_data_stream",
"(",
")",
"command",
"=",
"Command",
"(",
"'RETR'",
",",
"request",
".",
"file_path",
")",
"yield",
"from",
"self",
".",
"_begin_stream",
"(",
"command",
")",
"self",
".",
"_session_state",
"=",
"SessionState",
".",
"file_request_sent",
"return",
"response"
]
| Start a file or directory listing download.
Args:
request: Request.
Returns:
A Response populated with the initial data connection reply.
Once the response is received, call :meth:`download`.
Coroutine. | [
"Start",
"a",
"file",
"or",
"directory",
"listing",
"download",
"."
]
| python | train | 28.789474 |
Galarzaa90/tibia.py | tibiapy/house.py | https://github.com/Galarzaa90/tibia.py/blob/02ba1a8f1e18177ef5c7dcd44affc8d761d59e12/tibiapy/house.py#L215-L247 | def _parse_status(self, status):
"""Parses the house's state description and applies the corresponding values
Parameters
----------
status: :class:`str`
Plain text string containing the current renting state of the house.
"""
m = rented_regex.search(status)
if m:
self.status = HouseStatus.RENTED
self.owner = m.group("owner")
self.owner_sex = Sex.MALE if m.group("pronoun") == "He" else Sex.FEMALE
self.paid_until = parse_tibia_datetime(m.group("paid_until"))
else:
self.status = HouseStatus.AUCTIONED
m = transfer_regex.search(status)
if m:
self.transfer_date = parse_tibia_datetime(m.group("transfer_date"))
self.transfer_accepted = m.group("verb") == "will"
self.transferee = m.group("transferee")
price = m.group("transfer_price")
self.transfer_price = int(price) if price is not None else 0
m = auction_regex.search(status)
if m:
self.auction_end = parse_tibia_datetime(m.group("auction_end"))
m = bid_regex.search(status)
if m:
self.highest_bid = int(m.group("highest_bid"))
self.highest_bidder = m.group("bidder") | [
"def",
"_parse_status",
"(",
"self",
",",
"status",
")",
":",
"m",
"=",
"rented_regex",
".",
"search",
"(",
"status",
")",
"if",
"m",
":",
"self",
".",
"status",
"=",
"HouseStatus",
".",
"RENTED",
"self",
".",
"owner",
"=",
"m",
".",
"group",
"(",
"\"owner\"",
")",
"self",
".",
"owner_sex",
"=",
"Sex",
".",
"MALE",
"if",
"m",
".",
"group",
"(",
"\"pronoun\"",
")",
"==",
"\"He\"",
"else",
"Sex",
".",
"FEMALE",
"self",
".",
"paid_until",
"=",
"parse_tibia_datetime",
"(",
"m",
".",
"group",
"(",
"\"paid_until\"",
")",
")",
"else",
":",
"self",
".",
"status",
"=",
"HouseStatus",
".",
"AUCTIONED",
"m",
"=",
"transfer_regex",
".",
"search",
"(",
"status",
")",
"if",
"m",
":",
"self",
".",
"transfer_date",
"=",
"parse_tibia_datetime",
"(",
"m",
".",
"group",
"(",
"\"transfer_date\"",
")",
")",
"self",
".",
"transfer_accepted",
"=",
"m",
".",
"group",
"(",
"\"verb\"",
")",
"==",
"\"will\"",
"self",
".",
"transferee",
"=",
"m",
".",
"group",
"(",
"\"transferee\"",
")",
"price",
"=",
"m",
".",
"group",
"(",
"\"transfer_price\"",
")",
"self",
".",
"transfer_price",
"=",
"int",
"(",
"price",
")",
"if",
"price",
"is",
"not",
"None",
"else",
"0",
"m",
"=",
"auction_regex",
".",
"search",
"(",
"status",
")",
"if",
"m",
":",
"self",
".",
"auction_end",
"=",
"parse_tibia_datetime",
"(",
"m",
".",
"group",
"(",
"\"auction_end\"",
")",
")",
"m",
"=",
"bid_regex",
".",
"search",
"(",
"status",
")",
"if",
"m",
":",
"self",
".",
"highest_bid",
"=",
"int",
"(",
"m",
".",
"group",
"(",
"\"highest_bid\"",
")",
")",
"self",
".",
"highest_bidder",
"=",
"m",
".",
"group",
"(",
"\"bidder\"",
")"
]
| Parses the house's state description and applies the corresponding values
Parameters
----------
status: :class:`str`
Plain text string containing the current renting state of the house. | [
"Parses",
"the",
"house",
"s",
"state",
"description",
"and",
"applies",
"the",
"corresponding",
"values"
]
| python | train | 38.575758 |
google/dotty | efilter/scope.py | https://github.com/google/dotty/blob/b145131499be0c4b755fc2e2ac19be11a50bce6a/efilter/scope.py#L104-L119 | def getmembers_runtime(self):
"""Gets members (vars) from all scopes using ONLY runtime information.
You most likely want to use ScopeStack.getmembers instead.
Returns:
Set of available vars.
Raises:
NotImplementedError if any scope fails to implement 'getmembers'.
"""
names = set()
for scope in self.scopes:
names.update(structured.getmembers_runtime(scope))
return names | [
"def",
"getmembers_runtime",
"(",
"self",
")",
":",
"names",
"=",
"set",
"(",
")",
"for",
"scope",
"in",
"self",
".",
"scopes",
":",
"names",
".",
"update",
"(",
"structured",
".",
"getmembers_runtime",
"(",
"scope",
")",
")",
"return",
"names"
]
| Gets members (vars) from all scopes using ONLY runtime information.
You most likely want to use ScopeStack.getmembers instead.
Returns:
Set of available vars.
Raises:
NotImplementedError if any scope fails to implement 'getmembers'. | [
"Gets",
"members",
"(",
"vars",
")",
"from",
"all",
"scopes",
"using",
"ONLY",
"runtime",
"information",
"."
]
| python | train | 28.875 |
deeplook/svglib | svglib/svglib.py | https://github.com/deeplook/svglib/blob/859f9f461f1041018af3e6f507bb4c0616b04fbb/svglib/svglib.py#L198-L223 | def findAttr(self, svgNode, name):
"""Search an attribute with some name in some node or above.
First the node is searched, then its style attribute, then
the search continues in the node's parent node. If no such
attribute is found, '' is returned.
"""
# This needs also to lookup values like "url(#SomeName)"...
if self.css_rules is not None and not svgNode.attrib.get('__rules_applied', False):
if isinstance(svgNode, NodeTracker):
svgNode.apply_rules(self.css_rules)
else:
ElementWrapper(svgNode).apply_rules(self.css_rules)
attr_value = svgNode.attrib.get(name, '').strip()
if attr_value and attr_value != "inherit":
return attr_value
elif svgNode.attrib.get("style"):
dict = self.parseMultiAttributes(svgNode.attrib.get("style"))
if name in dict:
return dict[name]
if svgNode.getparent() is not None:
return self.findAttr(svgNode.getparent(), name)
return '' | [
"def",
"findAttr",
"(",
"self",
",",
"svgNode",
",",
"name",
")",
":",
"# This needs also to lookup values like \"url(#SomeName)\"...",
"if",
"self",
".",
"css_rules",
"is",
"not",
"None",
"and",
"not",
"svgNode",
".",
"attrib",
".",
"get",
"(",
"'__rules_applied'",
",",
"False",
")",
":",
"if",
"isinstance",
"(",
"svgNode",
",",
"NodeTracker",
")",
":",
"svgNode",
".",
"apply_rules",
"(",
"self",
".",
"css_rules",
")",
"else",
":",
"ElementWrapper",
"(",
"svgNode",
")",
".",
"apply_rules",
"(",
"self",
".",
"css_rules",
")",
"attr_value",
"=",
"svgNode",
".",
"attrib",
".",
"get",
"(",
"name",
",",
"''",
")",
".",
"strip",
"(",
")",
"if",
"attr_value",
"and",
"attr_value",
"!=",
"\"inherit\"",
":",
"return",
"attr_value",
"elif",
"svgNode",
".",
"attrib",
".",
"get",
"(",
"\"style\"",
")",
":",
"dict",
"=",
"self",
".",
"parseMultiAttributes",
"(",
"svgNode",
".",
"attrib",
".",
"get",
"(",
"\"style\"",
")",
")",
"if",
"name",
"in",
"dict",
":",
"return",
"dict",
"[",
"name",
"]",
"if",
"svgNode",
".",
"getparent",
"(",
")",
"is",
"not",
"None",
":",
"return",
"self",
".",
"findAttr",
"(",
"svgNode",
".",
"getparent",
"(",
")",
",",
"name",
")",
"return",
"''"
]
| Search an attribute with some name in some node or above.
First the node is searched, then its style attribute, then
the search continues in the node's parent node. If no such
attribute is found, '' is returned. | [
"Search",
"an",
"attribute",
"with",
"some",
"name",
"in",
"some",
"node",
"or",
"above",
"."
]
| python | train | 40.730769 |
CitrineInformatics/pypif-sdk | pypif_sdk/func/calculate_funcs.py | https://github.com/CitrineInformatics/pypif-sdk/blob/8b01d10d9a1426d5eef12e4b2f31c4657aa0fe59/pypif_sdk/func/calculate_funcs.py#L197-L214 | def _consolidate_elemental_array_(elemental_array):
"""
Accounts for non-empirical chemical formulas by taking in the compositional array generated by _create_compositional_array_() and returning a consolidated array of dictionaries with no repeating elements
:param elemental_array: an elemental array generated from _create_compositional_array_()
:return: an array of element dictionaries
"""
condensed_array = []
for e in elemental_array:
exists = False
for k in condensed_array:
if k["symbol"] == e["symbol"]:
exists = True
k["occurances"] += e["occurances"]
break
if not exists:
condensed_array.append(e)
return condensed_array | [
"def",
"_consolidate_elemental_array_",
"(",
"elemental_array",
")",
":",
"condensed_array",
"=",
"[",
"]",
"for",
"e",
"in",
"elemental_array",
":",
"exists",
"=",
"False",
"for",
"k",
"in",
"condensed_array",
":",
"if",
"k",
"[",
"\"symbol\"",
"]",
"==",
"e",
"[",
"\"symbol\"",
"]",
":",
"exists",
"=",
"True",
"k",
"[",
"\"occurances\"",
"]",
"+=",
"e",
"[",
"\"occurances\"",
"]",
"break",
"if",
"not",
"exists",
":",
"condensed_array",
".",
"append",
"(",
"e",
")",
"return",
"condensed_array"
]
| Accounts for non-empirical chemical formulas by taking in the compositional array generated by _create_compositional_array_() and returning a consolidated array of dictionaries with no repeating elements
:param elemental_array: an elemental array generated from _create_compositional_array_()
:return: an array of element dictionaries | [
"Accounts",
"for",
"non",
"-",
"empirical",
"chemical",
"formulas",
"by",
"taking",
"in",
"the",
"compositional",
"array",
"generated",
"by",
"_create_compositional_array_",
"()",
"and",
"returning",
"a",
"consolidated",
"array",
"of",
"dictionaries",
"with",
"no",
"repeating",
"elements"
]
| python | train | 41.333333 |
yfpeng/bioc | bioc/biocjson/decoder.py | https://github.com/yfpeng/bioc/blob/47ddaa010960d9ba673aefe068e7bbaf39f0fff4/bioc/biocjson/decoder.py#L26-L34 | def parse_annotation(obj: dict) -> BioCAnnotation:
"""Deserialize a dict obj to a BioCAnnotation object"""
ann = BioCAnnotation()
ann.id = obj['id']
ann.infons = obj['infons']
ann.text = obj['text']
for loc in obj['locations']:
ann.add_location(BioCLocation(loc['offset'], loc['length']))
return ann | [
"def",
"parse_annotation",
"(",
"obj",
":",
"dict",
")",
"->",
"BioCAnnotation",
":",
"ann",
"=",
"BioCAnnotation",
"(",
")",
"ann",
".",
"id",
"=",
"obj",
"[",
"'id'",
"]",
"ann",
".",
"infons",
"=",
"obj",
"[",
"'infons'",
"]",
"ann",
".",
"text",
"=",
"obj",
"[",
"'text'",
"]",
"for",
"loc",
"in",
"obj",
"[",
"'locations'",
"]",
":",
"ann",
".",
"add_location",
"(",
"BioCLocation",
"(",
"loc",
"[",
"'offset'",
"]",
",",
"loc",
"[",
"'length'",
"]",
")",
")",
"return",
"ann"
]
| Deserialize a dict obj to a BioCAnnotation object | [
"Deserialize",
"a",
"dict",
"obj",
"to",
"a",
"BioCAnnotation",
"object"
]
| python | train | 37.222222 |
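An illustrative input for the decoder above, using only the keys the function reads; the import path is taken from the file location (bioc/biocjson/decoder.py):

    from bioc.biocjson.decoder import parse_annotation

    obj = {
        "id": "T1",
        "infons": {"type": "Gene"},
        "text": "BRCA1",
        "locations": [{"offset": 10, "length": 5}],
    }
    ann = parse_annotation(obj)  # BioCAnnotation with one BioCLocation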
inspirehep/harvesting-kit | harvestingkit/inspire_cds_package/from_inspire.py | https://github.com/inspirehep/harvesting-kit/blob/33a7f8aa9dade1d863110c6d8b27dfd955cb471f/harvestingkit/inspire_cds_package/from_inspire.py#L372-L408 | def update_experiments(self):
"""Experiment mapping."""
# 693 Remove if 'not applicable'
for field in record_get_field_instances(self.record, '693'):
subs = field_get_subfields(field)
acc_experiment = subs.get("e", [])
if not acc_experiment:
acc_experiment = subs.get("a", [])
if not acc_experiment:
continue
experiment = acc_experiment[-1]
# Handle special case of leading experiments numbers NA-050 -> NA 50
e_suffix = ""
if "-NA-" in experiment or \
"-RD-" in experiment or \
"-WA-" in experiment:
splitted_experiment = experiment.split("-")
e_suffix = "-".join(splitted_experiment[2:])
if e_suffix.startswith("0"):
e_suffix = e_suffix[1:]
experiment = "-".join(splitted_experiment[:2]) # only CERN-NA
translated_experiment = self.get_config_item(experiment,
"experiments")
if not translated_experiment:
continue
new_subs = []
if "---" in translated_experiment:
experiment_a, experiment_e = translated_experiment.split("---")
new_subs.append(("a", experiment_a.replace("-", " ")))
else:
experiment_e = translated_experiment
new_subs.append(("e", experiment_e.replace("-", " ") + e_suffix))
record_delete_field(self.record, tag="693",
field_position_global=field[4])
record_add_field(self.record, "693", subfields=new_subs) | [
"def",
"update_experiments",
"(",
"self",
")",
":",
"# 693 Remove if 'not applicable'",
"for",
"field",
"in",
"record_get_field_instances",
"(",
"self",
".",
"record",
",",
"'693'",
")",
":",
"subs",
"=",
"field_get_subfields",
"(",
"field",
")",
"acc_experiment",
"=",
"subs",
".",
"get",
"(",
"\"e\"",
",",
"[",
"]",
")",
"if",
"not",
"acc_experiment",
":",
"acc_experiment",
"=",
"subs",
".",
"get",
"(",
"\"a\"",
",",
"[",
"]",
")",
"if",
"not",
"acc_experiment",
":",
"continue",
"experiment",
"=",
"acc_experiment",
"[",
"-",
"1",
"]",
"# Handle special case of leading experiments numbers NA-050 -> NA 50",
"e_suffix",
"=",
"\"\"",
"if",
"\"-NA-\"",
"in",
"experiment",
"or",
"\"-RD-\"",
"in",
"experiment",
"or",
"\"-WA-\"",
"in",
"experiment",
":",
"splitted_experiment",
"=",
"experiment",
".",
"split",
"(",
"\"-\"",
")",
"e_suffix",
"=",
"\"-\"",
".",
"join",
"(",
"splitted_experiment",
"[",
"2",
":",
"]",
")",
"if",
"e_suffix",
".",
"startswith",
"(",
"\"0\"",
")",
":",
"e_suffix",
"=",
"e_suffix",
"[",
"1",
":",
"]",
"experiment",
"=",
"\"-\"",
".",
"join",
"(",
"splitted_experiment",
"[",
":",
"2",
"]",
")",
"# only CERN-NA",
"translated_experiment",
"=",
"self",
".",
"get_config_item",
"(",
"experiment",
",",
"\"experiments\"",
")",
"if",
"not",
"translated_experiment",
":",
"continue",
"new_subs",
"=",
"[",
"]",
"if",
"\"---\"",
"in",
"translated_experiment",
":",
"experiment_a",
",",
"experiment_e",
"=",
"translated_experiment",
".",
"split",
"(",
"\"---\"",
")",
"new_subs",
".",
"append",
"(",
"(",
"\"a\"",
",",
"experiment_a",
".",
"replace",
"(",
"\"-\"",
",",
"\" \"",
")",
")",
")",
"else",
":",
"experiment_e",
"=",
"translated_experiment",
"new_subs",
".",
"append",
"(",
"(",
"\"e\"",
",",
"experiment_e",
".",
"replace",
"(",
"\"-\"",
",",
"\" \"",
")",
"+",
"e_suffix",
")",
")",
"record_delete_field",
"(",
"self",
".",
"record",
",",
"tag",
"=",
"\"693\"",
",",
"field_position_global",
"=",
"field",
"[",
"4",
"]",
")",
"record_add_field",
"(",
"self",
".",
"record",
",",
"\"693\"",
",",
"subfields",
"=",
"new_subs",
")"
]
| Experiment mapping. | [
"Experiment",
"mapping",
"."
]
| python | valid | 46.351351 |
DBuildService/dockerfile-parse | dockerfile_parse/parser.py | https://github.com/DBuildService/dockerfile-parse/blob/3d7b514d8b8eded1b33529cf0f6a0770a573aee0/dockerfile_parse/parser.py#L135-L150 | def lines(self):
"""
:return: list containing lines (unicode) from Dockerfile
"""
if self.cache_content and self.cached_content:
return self.cached_content.splitlines(True)
try:
with self._open_dockerfile('rb') as dockerfile:
lines = [b2u(l) for l in dockerfile.readlines()]
if self.cache_content:
self.cached_content = ''.join(lines)
return lines
except (IOError, OSError) as ex:
logger.error("Couldn't retrieve lines from dockerfile: %r", ex)
raise | [
"def",
"lines",
"(",
"self",
")",
":",
"if",
"self",
".",
"cache_content",
"and",
"self",
".",
"cached_content",
":",
"return",
"self",
".",
"cached_content",
".",
"splitlines",
"(",
"True",
")",
"try",
":",
"with",
"self",
".",
"_open_dockerfile",
"(",
"'rb'",
")",
"as",
"dockerfile",
":",
"lines",
"=",
"[",
"b2u",
"(",
"l",
")",
"for",
"l",
"in",
"dockerfile",
".",
"readlines",
"(",
")",
"]",
"if",
"self",
".",
"cache_content",
":",
"self",
".",
"cached_content",
"=",
"''",
".",
"join",
"(",
"lines",
")",
"return",
"lines",
"except",
"(",
"IOError",
",",
"OSError",
")",
"as",
"ex",
":",
"logger",
".",
"error",
"(",
"\"Couldn't retrieve lines from dockerfile: %r\"",
",",
"ex",
")",
"raise"
]
| :return: list containing lines (unicode) from Dockerfile | [
":",
"return",
":",
"list",
"containing",
"lines",
"(",
"unicode",
")",
"from",
"Dockerfile"
]
| python | train | 37.5 |
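A usage sketch for the lines property, assuming the public DockerfileParser entry point of this package; the Dockerfile content is a placeholder:

    from dockerfile_parse import DockerfileParser

    dfp = DockerfileParser()                     # defaults to ./Dockerfile
    dfp.content = "FROM busybox\nCMD sleep 1\n"  # writes the file
    print(dfp.lines)                             # ['FROM busybox\n', 'CMD sleep 1\n']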
datamachine/twx.botapi | twx/botapi/botapi.py | https://github.com/datamachine/twx.botapi/blob/c85184da738169e8f9d6d8e62970540f427c486e/twx/botapi/botapi.py#L4278-L4280 | def set_chat_description(self, *args, **kwargs):
"""See :func:`set_chat_description`"""
return set_chat_description(*args, **self._merge_overrides(**kwargs)).run() | [
"def",
"set_chat_description",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"set_chat_description",
"(",
"*",
"args",
",",
"*",
"*",
"self",
".",
"_merge_overrides",
"(",
"*",
"*",
"kwargs",
")",
")",
".",
"run",
"(",
")"
]
| See :func:`set_chat_description` | [
"See",
":",
"func",
":",
"set_chat_description"
]
| python | train | 59 |
wndhydrnt/python-oauth2 | oauth2/__init__.py | https://github.com/wndhydrnt/python-oauth2/blob/abe3bf5f27bda2ff737cab387b040e2e6e85c2e2/oauth2/__init__.py#L99-L147 | def dispatch(self, request, environ):
"""
Checks which Grant supports the current request and dispatches to it.
:param request: The incoming request.
:type request: :class:`oauth2.web.Request`
:param environ: Dict containing variables of the environment.
:type environ: dict
:return: An instance of ``oauth2.web.Response``.
"""
try:
grant_type = self._determine_grant_type(request)
response = self.response_class()
grant_type.read_validate_params(request)
return grant_type.process(request, response, environ)
except OAuthInvalidNoRedirectError:
response = self.response_class()
response.add_header("Content-Type", "application/json")
response.status_code = 400
response.body = json.dumps({
"error": "invalid_redirect_uri",
"error_description": "Invalid redirect URI"
})
return response
except OAuthInvalidError as err:
response = self.response_class()
return grant_type.handle_error(error=err, response=response)
except UnsupportedGrantError:
response = self.response_class()
response.add_header("Content-Type", "application/json")
response.status_code = 400
response.body = json.dumps({
"error": "unsupported_response_type",
"error_description": "Grant not supported"
})
return response
except:
app_log.error("Uncaught Exception", exc_info=True)
response = self.response_class()
return grant_type.handle_error(
error=OAuthInvalidError(error="server_error",
explanation="Internal server error"),
response=response) | [
"def",
"dispatch",
"(",
"self",
",",
"request",
",",
"environ",
")",
":",
"try",
":",
"grant_type",
"=",
"self",
".",
"_determine_grant_type",
"(",
"request",
")",
"response",
"=",
"self",
".",
"response_class",
"(",
")",
"grant_type",
".",
"read_validate_params",
"(",
"request",
")",
"return",
"grant_type",
".",
"process",
"(",
"request",
",",
"response",
",",
"environ",
")",
"except",
"OAuthInvalidNoRedirectError",
":",
"response",
"=",
"self",
".",
"response_class",
"(",
")",
"response",
".",
"add_header",
"(",
"\"Content-Type\"",
",",
"\"application/json\"",
")",
"response",
".",
"status_code",
"=",
"400",
"response",
".",
"body",
"=",
"json",
".",
"dumps",
"(",
"{",
"\"error\"",
":",
"\"invalid_redirect_uri\"",
",",
"\"error_description\"",
":",
"\"Invalid redirect URI\"",
"}",
")",
"return",
"response",
"except",
"OAuthInvalidError",
"as",
"err",
":",
"response",
"=",
"self",
".",
"response_class",
"(",
")",
"return",
"grant_type",
".",
"handle_error",
"(",
"error",
"=",
"err",
",",
"response",
"=",
"response",
")",
"except",
"UnsupportedGrantError",
":",
"response",
"=",
"self",
".",
"response_class",
"(",
")",
"response",
".",
"add_header",
"(",
"\"Content-Type\"",
",",
"\"application/json\"",
")",
"response",
".",
"status_code",
"=",
"400",
"response",
".",
"body",
"=",
"json",
".",
"dumps",
"(",
"{",
"\"error\"",
":",
"\"unsupported_response_type\"",
",",
"\"error_description\"",
":",
"\"Grant not supported\"",
"}",
")",
"return",
"response",
"except",
":",
"app_log",
".",
"error",
"(",
"\"Uncaught Exception\"",
",",
"exc_info",
"=",
"True",
")",
"response",
"=",
"self",
".",
"response_class",
"(",
")",
"return",
"grant_type",
".",
"handle_error",
"(",
"error",
"=",
"OAuthInvalidError",
"(",
"error",
"=",
"\"server_error\"",
",",
"explanation",
"=",
"\"Internal server error\"",
")",
",",
"response",
"=",
"response",
")"
]
| Checks which Grant supports the current request and dispatches to it.
:param request: The incoming request.
:type request: :class:`oauth2.web.Request`
:param environ: Dict containing variables of the environment.
:type environ: dict
:return: An instance of ``oauth2.web.Response``. | [
"Checks",
"which",
"Grant",
"supports",
"the",
"current",
"request",
"and",
"dispatches",
"to",
"it",
"."
]
| python | train | 38.081633 |
SiLab-Bonn/basil | basil/HL/SussProber.py | https://github.com/SiLab-Bonn/basil/blob/99052482d9334dd1f5598eb2d2fb4d5399a32291/basil/HL/SussProber.py#L34-L37 | def get_position(self):
''' Read chuck position (x, y, z)'''
reply = self._intf.query('ReadChuckPosition Y H')[2:]
return [float(i) for i in reply.split()] | [
"def",
"get_position",
"(",
"self",
")",
":",
"reply",
"=",
"self",
".",
"_intf",
".",
"query",
"(",
"'ReadChuckPosition Y H'",
")",
"[",
"2",
":",
"]",
"return",
"[",
"float",
"(",
"i",
")",
"for",
"i",
"in",
"reply",
".",
"split",
"(",
")",
"]"
]
| Read chuck position (x, y, z) | [
"Read",
"chuck",
"position",
"(",
"x",
"y",
"z",
")"
]
| python | train | 44 |
Thermondo/django-heroku-connect | heroku_connect/utils.py | https://github.com/Thermondo/django-heroku-connect/blob/f390e0fbf256ee79b30bb88f9a8c9576c6c8d9b5/heroku_connect/utils.py#L151-L186 | def get_connections(app):
"""
Return all Heroku Connect connections setup with the given application.
For more details check the link -
https://devcenter.heroku.com/articles/heroku-connect-api#step-4-retrieve-the-new-connection-s-id
Sample response from the API call is below::
{
"count": 1,
"results":[{
"id": "<connection_id>",
"name": "<app_name>",
"resource_name": "<resource_name>",
…
}],
…
}
Args:
app (str): Heroku application name.
Returns:
List[dict]: List of all Heroku Connect connections associated with the Heroku application.
Raises:
requests.HTTPError: If an error occurred when accessing the connections API.
ValueError: If response is not a valid JSON.
"""
payload = {'app': app}
url = os.path.join(settings.HEROKU_CONNECT_API_ENDPOINT, 'connections')
response = requests.get(url, params=payload, headers=_get_authorization_headers())
response.raise_for_status()
return response.json()['results'] | [
"def",
"get_connections",
"(",
"app",
")",
":",
"payload",
"=",
"{",
"'app'",
":",
"app",
"}",
"url",
"=",
"os",
".",
"path",
".",
"join",
"(",
"settings",
".",
"HEROKU_CONNECT_API_ENDPOINT",
",",
"'connections'",
")",
"response",
"=",
"requests",
".",
"get",
"(",
"url",
",",
"params",
"=",
"payload",
",",
"headers",
"=",
"_get_authorization_headers",
"(",
")",
")",
"response",
".",
"raise_for_status",
"(",
")",
"return",
"response",
".",
"json",
"(",
")",
"[",
"'results'",
"]"
]
| Return all Heroku Connect connections setup with the given application.
For more details check the link -
https://devcenter.heroku.com/articles/heroku-connect-api#step-4-retrieve-the-new-connection-s-id
Sample response from the API call is below::
{
"count": 1,
"results":[{
"id": "<connection_id>",
"name": "<app_name>",
"resource_name": "<resource_name>",
…
}],
…
}
Args:
app (str): Heroku application name.
Returns:
List[dict]: List of all Heroku Connect connections associated with the Heroku application.
Raises:
requests.HTTPError: If an error occurred when accessing the connections API.
ValueError: If response is not a valid JSON. | [
"Return",
"all",
"Heroku",
"Connect",
"connections",
"setup",
"with",
"the",
"given",
"application",
"."
]
| python | train | 30.583333 |
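An illustrative call, not runnable as-is: it assumes a Django settings module with HEROKU_CONNECT_API_ENDPOINT configured and valid authorization credentials, and the app name is a placeholder:

    from heroku_connect.utils import get_connections

    for conn in get_connections("my-heroku-app"):
        print(conn["id"], conn["resource_name"])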
tomer8007/kik-bot-api-unofficial | kik_unofficial/client.py | https://github.com/tomer8007/kik-bot-api-unofficial/blob/2ae5216bc05e7099a41895382fc8e428a7a5c3ac/kik_unofficial/client.py#L455-L477 | def _on_new_data_received(self, data: bytes):
"""
Gets called whenever we get a whole new XML element from kik's servers.
:param data: The data received (bytes)
"""
if data == b' ':
# Happens every half hour. Disconnect after 10th time. Some kind of keep-alive? Let's send it back.
self.loop.call_soon_threadsafe(self.connection.send_raw_data, b' ')
return
xml_element = BeautifulSoup(data.decode(), features='xml')
xml_element = next(iter(xml_element)) if len(xml_element) > 0 else xml_element
# choose the handler based on the XML tag name
if xml_element.name == "k":
self._handle_received_k_element(xml_element)
if xml_element.name == "iq":
self._handle_received_iq_element(xml_element)
elif xml_element.name == "message":
self._handle_xmpp_message(xml_element)
elif xml_element.name == 'stc':
self.callback.on_captcha_received(login.CaptchaElement(xml_element)) | [
"def",
"_on_new_data_received",
"(",
"self",
",",
"data",
":",
"bytes",
")",
":",
"if",
"data",
"==",
"b' '",
":",
"# Happens every half hour. Disconnect after 10th time. Some kind of keep-alive? Let's send it back.",
"self",
".",
"loop",
".",
"call_soon_threadsafe",
"(",
"self",
".",
"connection",
".",
"send_raw_data",
",",
"b' '",
")",
"return",
"xml_element",
"=",
"BeautifulSoup",
"(",
"data",
".",
"decode",
"(",
")",
",",
"features",
"=",
"'xml'",
")",
"xml_element",
"=",
"next",
"(",
"iter",
"(",
"xml_element",
")",
")",
"if",
"len",
"(",
"xml_element",
")",
">",
"0",
"else",
"xml_element",
"# choose the handler based on the XML tag name",
"if",
"xml_element",
".",
"name",
"==",
"\"k\"",
":",
"self",
".",
"_handle_received_k_element",
"(",
"xml_element",
")",
"if",
"xml_element",
".",
"name",
"==",
"\"iq\"",
":",
"self",
".",
"_handle_received_iq_element",
"(",
"xml_element",
")",
"elif",
"xml_element",
".",
"name",
"==",
"\"message\"",
":",
"self",
".",
"_handle_xmpp_message",
"(",
"xml_element",
")",
"elif",
"xml_element",
".",
"name",
"==",
"'stc'",
":",
"self",
".",
"callback",
".",
"on_captcha_received",
"(",
"login",
".",
"CaptchaElement",
"(",
"xml_element",
")",
")"
]
| Gets called whenever we get a whole new XML element from kik's servers.
:param data: The data received (bytes) | [
"Gets",
"called",
"whenever",
"we",
"get",
"a",
"whole",
"new",
"XML",
"element",
"from",
"kik",
"s",
"servers",
".",
":",
"param",
"data",
":",
"The",
"data",
"received",
"(",
"bytes",
")"
]
| python | train | 44.608696 |
asweigart/pyautogui | pyautogui/__init__.py | https://github.com/asweigart/pyautogui/blob/77524bd47334a89024013fd48e05151c3ac9289a/pyautogui/__init__.py#L735-L775 | def dragTo(x=None, y=None, duration=0.0, tween=linear, button='left', pause=None, _pause=True, mouseDownUp=True):
"""Performs a mouse drag (mouse movement while a button is held down) to a
point on the screen.
The x and y parameters detail where the mouse event happens. If None, the
current mouse position is used. If a float value, it is rounded down. If
outside the boundaries of the screen, the event happens at edge of the
screen.
Args:
x (int, float, None, tuple, optional): How far left (for negative values) or
right (for positive values) to move the cursor. 0 by default. If tuple, this is used for x and y.
If x is a str, it's considered a filename of an image to find on
the screen with locateOnScreen() and click the center of.
y (int, float, None, optional): How far up (for negative values) or
down (for positive values) to move the cursor. 0 by default.
duration (float, optional): The amount of time it takes to move the mouse
cursor to the new xy coordinates. If 0, then the mouse cursor is moved
instantaneously. 0.0 by default.
tween (func, optional): The tweening function used if the duration is not
0. A linear tween is used by default. See the tweens.py file for
details.
button (str, int, optional): The mouse button clicked. Must be one of
'left', 'middle', 'right' (or 1, 2, or 3) respectively. 'left' by
default.
      mouseDownUp (True, False): When False, the mouseDown/mouseUp actions are skipped,
        which allows dragging over multiple (small) actions. 'True' by default.
Returns:
None
"""
_failSafeCheck()
x, y = _unpackXY(x, y)
if mouseDownUp:
mouseDown(button=button, _pause=False)
_mouseMoveDrag('drag', x, y, 0, 0, duration, tween, button)
if mouseDownUp:
mouseUp(button=button, _pause=False)
_autoPause(pause, _pause) | [
"def",
"dragTo",
"(",
"x",
"=",
"None",
",",
"y",
"=",
"None",
",",
"duration",
"=",
"0.0",
",",
"tween",
"=",
"linear",
",",
"button",
"=",
"'left'",
",",
"pause",
"=",
"None",
",",
"_pause",
"=",
"True",
",",
"mouseDownUp",
"=",
"True",
")",
":",
"_failSafeCheck",
"(",
")",
"x",
",",
"y",
"=",
"_unpackXY",
"(",
"x",
",",
"y",
")",
"if",
"mouseDownUp",
":",
"mouseDown",
"(",
"button",
"=",
"button",
",",
"_pause",
"=",
"False",
")",
"_mouseMoveDrag",
"(",
"'drag'",
",",
"x",
",",
"y",
",",
"0",
",",
"0",
",",
"duration",
",",
"tween",
",",
"button",
")",
"if",
"mouseDownUp",
":",
"mouseUp",
"(",
"button",
"=",
"button",
",",
"_pause",
"=",
"False",
")",
"_autoPause",
"(",
"pause",
",",
"_pause",
")"
]
| Performs a mouse drag (mouse movement while a button is held down) to a
point on the screen.
The x and y parameters detail where the mouse event happens. If None, the
current mouse position is used. If a float value, it is rounded down. If
outside the boundaries of the screen, the event happens at edge of the
screen.
Args:
x (int, float, None, tuple, optional): How far left (for negative values) or
right (for positive values) to move the cursor. 0 by default. If tuple, this is used for x and y.
If x is a str, it's considered a filename of an image to find on
the screen with locateOnScreen() and click the center of.
y (int, float, None, optional): How far up (for negative values) or
down (for positive values) to move the cursor. 0 by default.
duration (float, optional): The amount of time it takes to move the mouse
cursor to the new xy coordinates. If 0, then the mouse cursor is moved
instantaneously. 0.0 by default.
tween (func, optional): The tweening function used if the duration is not
0. A linear tween is used by default. See the tweens.py file for
details.
button (str, int, optional): The mouse button clicked. Must be one of
'left', 'middle', 'right' (or 1, 2, or 3) respectively. 'left' by
default.
    mouseDownUp (True, False): When False, the mouseDown/mouseUp actions are skipped,
        which allows dragging over multiple (small) actions. 'True' by default.
Returns:
None | [
"Performs",
"a",
"mouse",
"drag",
"(",
"mouse",
"movement",
"while",
"a",
"button",
"is",
"held",
"down",
")",
"to",
"a",
"point",
"on",
"the",
"screen",
"."
]
| python | train | 46.707317 |
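A minimal usage sketch for dragTo with the public pyautogui API; the coordinates are arbitrary placeholders:

    import pyautogui

    pyautogui.moveTo(100, 100)                # start of the drag
    pyautogui.dragTo(300, 400, duration=1.0)  # press, glide for 1s, release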
log2timeline/dfdatetime | dfdatetime/filetime.py | https://github.com/log2timeline/dfdatetime/blob/141ca4ef1eff3d354b5deaac3d81cb08506f98d6/dfdatetime/filetime.py#L109-L127 | def CopyToDateTimeString(self):
"""Copies the FILETIME timestamp to a date and time string.
Returns:
str: date and time value formatted as: "YYYY-MM-DD hh:mm:ss.#######" or
None if the timestamp is missing or invalid.
"""
if (self._timestamp is None or self._timestamp < 0 or
self._timestamp > self._UINT64_MAX):
return None
timestamp, remainder = divmod(self._timestamp, self._100NS_PER_SECOND)
number_of_days, hours, minutes, seconds = self._GetTimeValues(timestamp)
year, month, day_of_month = self._GetDateValuesWithEpoch(
number_of_days, self._EPOCH)
return '{0:04d}-{1:02d}-{2:02d} {3:02d}:{4:02d}:{5:02d}.{6:07d}'.format(
year, month, day_of_month, hours, minutes, seconds, remainder) | [
"def",
"CopyToDateTimeString",
"(",
"self",
")",
":",
"if",
"(",
"self",
".",
"_timestamp",
"is",
"None",
"or",
"self",
".",
"_timestamp",
"<",
"0",
"or",
"self",
".",
"_timestamp",
">",
"self",
".",
"_UINT64_MAX",
")",
":",
"return",
"None",
"timestamp",
",",
"remainder",
"=",
"divmod",
"(",
"self",
".",
"_timestamp",
",",
"self",
".",
"_100NS_PER_SECOND",
")",
"number_of_days",
",",
"hours",
",",
"minutes",
",",
"seconds",
"=",
"self",
".",
"_GetTimeValues",
"(",
"timestamp",
")",
"year",
",",
"month",
",",
"day_of_month",
"=",
"self",
".",
"_GetDateValuesWithEpoch",
"(",
"number_of_days",
",",
"self",
".",
"_EPOCH",
")",
"return",
"'{0:04d}-{1:02d}-{2:02d} {3:02d}:{4:02d}:{5:02d}.{6:07d}'",
".",
"format",
"(",
"year",
",",
"month",
",",
"day_of_month",
",",
"hours",
",",
"minutes",
",",
"seconds",
",",
"remainder",
")"
]
| Copies the FILETIME timestamp to a date and time string.
Returns:
str: date and time value formatted as: "YYYY-MM-DD hh:mm:ss.#######" or
None if the timestamp is missing or invalid. | [
"Copies",
"the",
"FILETIME",
"timestamp",
"to",
"a",
"date",
"and",
"time",
"string",
"."
]
| python | train | 39.736842 |
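A usage sketch, assuming the class in this file is dfdatetime.filetime.Filetime; the timestamp counts 100ns intervals since the 1601-01-01 FILETIME epoch:

    from dfdatetime import filetime

    # One day = 86400 s = 86400 * 10**7 intervals of 100 ns.
    ft = filetime.Filetime(timestamp=86400 * 10**7)
    print(ft.CopyToDateTimeString())  # '1601-01-02 00:00:00.0000000'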
zimeon/iiif | iiif/info.py | https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/info.py#L391-L402 | def add_service(self, service):
"""Add a service description.
Handles transition from self.service=None, self.service=dict for a
single service, and then self.service=[dict,dict,...] for multiple
"""
if (self.service is None):
self.service = service
elif (isinstance(self.service, dict)):
self.service = [self.service, service]
else:
self.service.append(service) | [
"def",
"add_service",
"(",
"self",
",",
"service",
")",
":",
"if",
"(",
"self",
".",
"service",
"is",
"None",
")",
":",
"self",
".",
"service",
"=",
"service",
"elif",
"(",
"isinstance",
"(",
"self",
".",
"service",
",",
"dict",
")",
")",
":",
"self",
".",
"service",
"=",
"[",
"self",
".",
"service",
",",
"service",
"]",
"else",
":",
"self",
".",
"service",
".",
"append",
"(",
"service",
")"
]
| Add a service description.
Handles transition from self.service=None, self.service=dict for a
single service, and then self.service=[dict,dict,...] for multiple | [
"Add",
"a",
"service",
"description",
"."
]
| python | train | 37 |