repo (string, 7-54) | path (string, 4-192) | url (string, 87-284) | code (string, 78-104k) | code_tokens (sequence) | docstring (string, 1-46.9k) | docstring_tokens (sequence) | language (1 class) | partition (3 classes) |
---|---|---|---|---|---|---|---|---|
alimanfoo/csvvalidator | csvvalidator.py | https://github.com/alimanfoo/csvvalidator/blob/50a86eefdc549c48f65a91a5c0a66099010ee65d/csvvalidator.py#L958-L971 | def search_pattern(regex):
"""
Return a value check function which raises a ValueError if the supplied
regular expression does not match anywhere in the value, see also
`re.search`.
"""
prog = re.compile(regex)
def checker(v):
result = prog.search(v)
if result is None:
raise ValueError(v)
return checker | [
"def",
"search_pattern",
"(",
"regex",
")",
":",
"prog",
"=",
"re",
".",
"compile",
"(",
"regex",
")",
"def",
"checker",
"(",
"v",
")",
":",
"result",
"=",
"prog",
".",
"search",
"(",
"v",
")",
"if",
"result",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"v",
")",
"return",
"checker"
] | Return a value check function which raises a ValueError if the supplied
regular expression does not match anywhere in the value, see also
`re.search`. | [
"Return",
"a",
"value",
"check",
"function",
"which",
"raises",
"a",
"ValueError",
"if",
"the",
"supplied",
"regular",
"expression",
"does",
"not",
"match",
"anywhere",
"in",
"the",
"value",
"see",
"also",
"re",
".",
"search",
"."
] | python | valid |
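For illustration, a short usage sketch of the `search_pattern` helper from the record above; the checker logic is condensed from the `code` column and the sample strings are invented.

```python
import re

def search_pattern(regex):
    # Condensed from the record above: build a checker that raises
    # ValueError when the pattern is not found anywhere in the value.
    prog = re.compile(regex)
    def checker(v):
        if prog.search(v) is None:
            raise ValueError(v)
    return checker

# Invented sample values: require a four-digit year somewhere in the cell.
check_year = search_pattern(r'\d{4}')
check_year("released in 1999")        # no exception, value accepted
try:
    check_year("no year here")
except ValueError as exc:
    print("rejected:", exc)           # rejected: no year here
```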
ChrisCummins/labm8 | labtypes.py | https://github.com/ChrisCummins/labm8/blob/dd10d67a757aefb180cb508f86696f99440c94f5/labtypes.py#L85-L103 | def dict_values(src):
"""
Recursively get values in dict.
Unlike the builtin dict.values() function, this method will descend into
nested dicts, returning all nested values.
Arguments:
src (dict): Source dict.
Returns:
list: List of values.
"""
for v in src.values():
if isinstance(v, dict):
for v in dict_values(v):
yield v
else:
yield v | [
"def",
"dict_values",
"(",
"src",
")",
":",
"for",
"v",
"in",
"src",
".",
"values",
"(",
")",
":",
"if",
"isinstance",
"(",
"v",
",",
"dict",
")",
":",
"for",
"v",
"in",
"dict_values",
"(",
"v",
")",
":",
"yield",
"v",
"else",
":",
"yield",
"v"
] | Recursively get values in dict.
Unlike the builtin dict.values() function, this method will descend into
nested dicts, returning all nested values.
Arguments:
src (dict): Source dict.
Returns:
list: List of values. | [
"Recursively",
"get",
"values",
"in",
"dict",
"."
] | python | train |
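A minimal, runnable sketch of the `dict_values` generator above; the nested mapping is made up purely for illustration.

```python
def dict_values(src):
    # Condensed from the record above: recursively yield values,
    # descending into nested dicts instead of yielding them whole.
    for v in src.values():
        if isinstance(v, dict):
            for nested in dict_values(v):
                yield nested
        else:
            yield v

# Invented nested mapping used only for illustration.
config = {"a": 1, "b": {"c": 2, "d": {"e": 3}}}
print(sorted(dict_values(config)))    # [1, 2, 3]
```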
coded-by-hand/mass | env/lib/python2.7/site-packages/pip-1.0.2-py2.7.egg/pip/index.py | https://github.com/coded-by-hand/mass/blob/59005479efed3cd8598a8f0c66791a4482071899/env/lib/python2.7/site-packages/pip-1.0.2-py2.7.egg/pip/index.py#L628-L635 | def get_requirement_from_url(url):
"""Get a requirement from the URL, if possible. This looks for #egg
in the URL"""
link = Link(url)
egg_info = link.egg_fragment
if not egg_info:
egg_info = splitext(link.filename)[0]
return package_to_requirement(egg_info) | [
"def",
"get_requirement_from_url",
"(",
"url",
")",
":",
"link",
"=",
"Link",
"(",
"url",
")",
"egg_info",
"=",
"link",
".",
"egg_fragment",
"if",
"not",
"egg_info",
":",
"egg_info",
"=",
"splitext",
"(",
"link",
".",
"filename",
")",
"[",
"0",
"]",
"return",
"package_to_requirement",
"(",
"egg_info",
")"
] | Get a requirement from the URL, if possible. This looks for #egg
in the URL | [
"Get",
"a",
"requirement",
"from",
"the",
"URL",
"if",
"possible",
".",
"This",
"looks",
"for",
"#egg",
"in",
"the",
"URL"
] | python | train |
PyCQA/astroid | astroid/scoped_nodes.py | https://github.com/PyCQA/astroid/blob/e0a298df55b15abcb77c2a93253f5ab7be52d0fb/astroid/scoped_nodes.py#L2113-L2162 | def scope_lookup(self, node, name, offset=0):
"""Lookup where the given name is assigned.
:param node: The node to look for assignments up to.
Any assignments after the given node are ignored.
:type node: NodeNG
:param name: The name to find assignments for.
:type name: str
:param offset: The line offset to filter statements up to.
:type offset: int
:returns: This scope node and the list of assignments associated to the
given name according to the scope where it has been found (locals,
globals or builtin).
:rtype: tuple(str, list(NodeNG))
"""
# If the name looks like a builtin name, just try to look
# into the upper scope of this class. We might have a
# decorator that it's poorly named after a builtin object
# inside this class.
lookup_upper_frame = (
isinstance(node.parent, node_classes.Decorators)
and name in MANAGER.builtins_module
)
if (
any(node == base or base.parent_of(node) for base in self.bases)
or lookup_upper_frame
):
# Handle the case where we have either a name
# in the bases of a class, which exists before
# the actual definition or the case where we have
# a Getattr node, with that name.
#
# name = ...
# class A(name):
# def name(self): ...
#
# import name
# class A(name.Name):
# def name(self): ...
frame = self.parent.frame()
# line offset to avoid that class A(A) resolve the ancestor to
# the defined class
offset = -1
else:
frame = self
return frame._scope_lookup(node, name, offset) | [
"def",
"scope_lookup",
"(",
"self",
",",
"node",
",",
"name",
",",
"offset",
"=",
"0",
")",
":",
"# If the name looks like a builtin name, just try to look",
"# into the upper scope of this class. We might have a",
"# decorator that it's poorly named after a builtin object",
"# inside this class.",
"lookup_upper_frame",
"=",
"(",
"isinstance",
"(",
"node",
".",
"parent",
",",
"node_classes",
".",
"Decorators",
")",
"and",
"name",
"in",
"MANAGER",
".",
"builtins_module",
")",
"if",
"(",
"any",
"(",
"node",
"==",
"base",
"or",
"base",
".",
"parent_of",
"(",
"node",
")",
"for",
"base",
"in",
"self",
".",
"bases",
")",
"or",
"lookup_upper_frame",
")",
":",
"# Handle the case where we have either a name",
"# in the bases of a class, which exists before",
"# the actual definition or the case where we have",
"# a Getattr node, with that name.",
"#",
"# name = ...",
"# class A(name):",
"# def name(self): ...",
"#",
"# import name",
"# class A(name.Name):",
"# def name(self): ...",
"frame",
"=",
"self",
".",
"parent",
".",
"frame",
"(",
")",
"# line offset to avoid that class A(A) resolve the ancestor to",
"# the defined class",
"offset",
"=",
"-",
"1",
"else",
":",
"frame",
"=",
"self",
"return",
"frame",
".",
"_scope_lookup",
"(",
"node",
",",
"name",
",",
"offset",
")"
] | Lookup where the given name is assigned.
:param node: The node to look for assignments up to.
Any assignments after the given node are ignored.
:type node: NodeNG
:param name: The name to find assignments for.
:type name: str
:param offset: The line offset to filter statements up to.
:type offset: int
:returns: This scope node and the list of assignments associated to the
given name according to the scope where it has been found (locals,
globals or builtin).
:rtype: tuple(str, list(NodeNG)) | [
"Lookup",
"where",
"the",
"given",
"name",
"is",
"assigned",
"."
] | python | train |
samuelcolvin/pydantic | pydantic/validators.py | https://github.com/samuelcolvin/pydantic/blob/bff8a1789dfde2c38928cced6640887b53615aa3/pydantic/validators.py#L252-L263 | def ip_v6_network_validator(v: Any) -> IPv6Network:
"""
Assume IPv6Network initialised with a default ``strict`` argument
See more:
https://docs.python.org/library/ipaddress.html#ipaddress.IPv6Network
"""
if isinstance(v, IPv6Network):
return v
with change_exception(errors.IPv6NetworkError, ValueError):
return IPv6Network(v) | [
"def",
"ip_v6_network_validator",
"(",
"v",
":",
"Any",
")",
"->",
"IPv6Network",
":",
"if",
"isinstance",
"(",
"v",
",",
"IPv6Network",
")",
":",
"return",
"v",
"with",
"change_exception",
"(",
"errors",
".",
"IPv6NetworkError",
",",
"ValueError",
")",
":",
"return",
"IPv6Network",
"(",
"v",
")"
] | Assume IPv6Network initialised with a default ``strict`` argument
See more:
https://docs.python.org/library/ipaddress.html#ipaddress.IPv6Network | [
"Assume",
"IPv6Network",
"initialised",
"with",
"a",
"default",
"strict",
"argument"
] | python | train |
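The pydantic validator above mostly delegates to the standard library; here is a stand-alone sketch of the same check using only `ipaddress`, with pydantic's `change_exception`/`errors` machinery omitted so a plain `ValueError` propagates instead.

```python
from ipaddress import IPv6Network
from typing import Any

def ip_v6_network_validator(v: Any) -> IPv6Network:
    # Values that are already IPv6Network instances pass through unchanged.
    if isinstance(v, IPv6Network):
        return v
    # IPv6Network keeps its default strict=True, so host bits must be zero.
    return IPv6Network(v)

print(ip_v6_network_validator("2001:db8::/32"))    # 2001:db8::/32
try:
    ip_v6_network_validator("2001:db8::1/32")      # host bits set -> rejected
except ValueError as exc:
    print("rejected:", exc)
```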
BerkeleyAutomation/autolab_core | autolab_core/data_stream_recorder.py | https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/data_stream_recorder.py#L193-L202 | def _flush(self):
""" Returns a list of all current data """
if self._recording:
raise Exception("Cannot flush data queue while recording!")
if self._saving_cache:
logging.warn("Flush when using cache means unsaved data will be lost and not returned!")
self._cmds_q.put(("reset_data_segment",))
else:
data = self._extract_q(0)
return data | [
"def",
"_flush",
"(",
"self",
")",
":",
"if",
"self",
".",
"_recording",
":",
"raise",
"Exception",
"(",
"\"Cannot flush data queue while recording!\"",
")",
"if",
"self",
".",
"_saving_cache",
":",
"logging",
".",
"warn",
"(",
"\"Flush when using cache means unsaved data will be lost and not returned!\"",
")",
"self",
".",
"_cmds_q",
".",
"put",
"(",
"(",
"\"reset_data_segment\"",
",",
")",
")",
"else",
":",
"data",
"=",
"self",
".",
"_extract_q",
"(",
"0",
")",
"return",
"data"
] | Returns a list of all current data | [
"Returns",
"a",
"list",
"of",
"all",
"current",
"data"
] | python | train |
Peter-Slump/python-keycloak-client | src/keycloak/client.py | https://github.com/Peter-Slump/python-keycloak-client/blob/379ae58f3c65892327b0c98c06d4982aa83f357e/src/keycloak/client.py#L45-L56 | def session(self):
"""
Get session object to benefit from connection pooling.
http://docs.python-requests.org/en/master/user/advanced/#session-objects
:rtype: requests.Session
"""
if self._session is None:
self._session = requests.Session()
self._session.headers.update(self._headers)
return self._session | [
"def",
"session",
"(",
"self",
")",
":",
"if",
"self",
".",
"_session",
"is",
"None",
":",
"self",
".",
"_session",
"=",
"requests",
".",
"Session",
"(",
")",
"self",
".",
"_session",
".",
"headers",
".",
"update",
"(",
"self",
".",
"_headers",
")",
"return",
"self",
".",
"_session"
] | Get session object to benefit from connection pooling.
http://docs.python-requests.org/en/master/user/advanced/#session-objects
:rtype: requests.Session | [
"Get",
"session",
"object",
"to",
"benefit",
"from",
"connection",
"pooling",
"."
] | python | train |
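The Keycloak client above lazily creates a single `requests.Session` so pooled connections and default headers are shared across calls; a minimal sketch of that pattern outside the library follows (the header values are placeholders).

```python
import requests

class LazySessionClient:
    def __init__(self, headers=None):
        self._headers = headers or {}
        self._session = None

    @property
    def session(self):
        # Create the pooled session on first access and reuse it afterwards.
        if self._session is None:
            self._session = requests.Session()
            self._session.headers.update(self._headers)
        return self._session

client = LazySessionClient(headers={"Accept": "application/json"})
assert client.session is client.session   # same pooled session every time
```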
senaite/senaite.core | bika/lims/browser/analyses/view.py | https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/browser/analyses/view.py#L830-L865 | def _folder_item_instrument(self, analysis_brain, item):
"""Fills the analysis' instrument to the item passed in.
:param analysis_brain: Brain that represents an analysis
:param item: analysis' dictionary counterpart that represents a row
"""
item['Instrument'] = ''
if not analysis_brain.getInstrumentEntryOfResults:
# Manual entry of results, instrument is not allowed
item['Instrument'] = _('Manual')
item['replace']['Instrument'] = \
'<a href="#">{}</a>'.format(t(_('Manual')))
return
# Instrument can be assigned to this analysis
is_editable = self.is_analysis_edition_allowed(analysis_brain)
self.show_methodinstr_columns = True
instrument = self.get_instrument(analysis_brain)
if is_editable:
# Edition allowed
voc = self.get_instruments_vocabulary(analysis_brain)
if voc:
# The service has at least one instrument available
item['Instrument'] = instrument.UID() if instrument else ''
item['choices']['Instrument'] = voc
item['allow_edit'].append('Instrument')
return
if instrument:
# Edition not allowed
instrument_title = instrument and instrument.Title() or ''
instrument_link = get_link(instrument.absolute_url(),
instrument_title)
item['Instrument'] = instrument_title
item['replace']['Instrument'] = instrument_link
return | [
"def",
"_folder_item_instrument",
"(",
"self",
",",
"analysis_brain",
",",
"item",
")",
":",
"item",
"[",
"'Instrument'",
"]",
"=",
"''",
"if",
"not",
"analysis_brain",
".",
"getInstrumentEntryOfResults",
":",
"# Manual entry of results, instrument is not allowed",
"item",
"[",
"'Instrument'",
"]",
"=",
"_",
"(",
"'Manual'",
")",
"item",
"[",
"'replace'",
"]",
"[",
"'Instrument'",
"]",
"=",
"'<a href=\"#\">{}</a>'",
".",
"format",
"(",
"t",
"(",
"_",
"(",
"'Manual'",
")",
")",
")",
"return",
"# Instrument can be assigned to this analysis",
"is_editable",
"=",
"self",
".",
"is_analysis_edition_allowed",
"(",
"analysis_brain",
")",
"self",
".",
"show_methodinstr_columns",
"=",
"True",
"instrument",
"=",
"self",
".",
"get_instrument",
"(",
"analysis_brain",
")",
"if",
"is_editable",
":",
"# Edition allowed",
"voc",
"=",
"self",
".",
"get_instruments_vocabulary",
"(",
"analysis_brain",
")",
"if",
"voc",
":",
"# The service has at least one instrument available",
"item",
"[",
"'Instrument'",
"]",
"=",
"instrument",
".",
"UID",
"(",
")",
"if",
"instrument",
"else",
"''",
"item",
"[",
"'choices'",
"]",
"[",
"'Instrument'",
"]",
"=",
"voc",
"item",
"[",
"'allow_edit'",
"]",
".",
"append",
"(",
"'Instrument'",
")",
"return",
"if",
"instrument",
":",
"# Edition not allowed",
"instrument_title",
"=",
"instrument",
"and",
"instrument",
".",
"Title",
"(",
")",
"or",
"''",
"instrument_link",
"=",
"get_link",
"(",
"instrument",
".",
"absolute_url",
"(",
")",
",",
"instrument_title",
")",
"item",
"[",
"'Instrument'",
"]",
"=",
"instrument_title",
"item",
"[",
"'replace'",
"]",
"[",
"'Instrument'",
"]",
"=",
"instrument_link",
"return"
] | Fills the analysis' instrument to the item passed in.
:param analysis_brain: Brain that represents an analysis
:param item: analysis' dictionary counterpart that represents a row | [
"Fills",
"the",
"analysis",
"instrument",
"to",
"the",
"item",
"passed",
"in",
"."
] | python | train |
NaPs/Kolekto | kolekto/commands/importer.py | https://github.com/NaPs/Kolekto/blob/29c5469da8782780a06bf9a76c59414bb6fd8fe3/kolekto/commands/importer.py#L88-L98 | def list_attachments(fullname):
""" List attachment for the specified fullname.
"""
parent, filename = os.path.split(fullname)
filename_without_ext, ext = os.path.splitext(filename)
attachments = []
for found_filename in os.listdir(parent):
found_filename_without_ext, _ = os.path.splitext(found_filename)
if filename_without_ext == found_filename_without_ext and found_filename != filename:
attachments.append(os.path.join(parent, found_filename))
return attachments | [
"def",
"list_attachments",
"(",
"fullname",
")",
":",
"parent",
",",
"filename",
"=",
"os",
".",
"path",
".",
"split",
"(",
"fullname",
")",
"filename_without_ext",
",",
"ext",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"filename",
")",
"attachments",
"=",
"[",
"]",
"for",
"found_filename",
"in",
"os",
".",
"listdir",
"(",
"parent",
")",
":",
"found_filename_without_ext",
",",
"_",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"found_filename",
")",
"if",
"filename_without_ext",
"==",
"found_filename_without_ext",
"and",
"found_filename",
"!=",
"filename",
":",
"attachments",
".",
"append",
"(",
"os",
".",
"path",
".",
"join",
"(",
"parent",
",",
"found_filename",
")",
")",
"return",
"attachments"
] | List attachment for the specified fullname. | [
"List",
"attachment",
"for",
"the",
"specified",
"fullname",
"."
] | python | train |
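A small runnable demo of the `list_attachments` helper above; the temporary files are created only to show that siblings sharing the same stem are reported as attachments.

```python
import os
import tempfile

def list_attachments(fullname):
    # Copied from the record above: siblings sharing the same stem count as attachments.
    parent, filename = os.path.split(fullname)
    filename_without_ext, ext = os.path.splitext(filename)
    attachments = []
    for found_filename in os.listdir(parent):
        found_filename_without_ext, _ = os.path.splitext(found_filename)
        if filename_without_ext == found_filename_without_ext and found_filename != filename:
            attachments.append(os.path.join(parent, found_filename))
    return attachments

with tempfile.TemporaryDirectory() as tmp:
    for name in ("movie.mkv", "movie.srt", "movie.nfo", "other.txt"):
        open(os.path.join(tmp, name), "w").close()
    found = list_attachments(os.path.join(tmp, "movie.mkv"))
    print(sorted(os.path.basename(p) for p in found))   # ['movie.nfo', 'movie.srt']
```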
google/grr | grr/server/grr_response_server/databases/mem_hunts.py | https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/databases/mem_hunts.py#L114-L119 | def ReadHuntObject(self, hunt_id):
"""Reads a hunt object from the database."""
try:
return self._DeepCopy(self.hunts[hunt_id])
except KeyError:
raise db.UnknownHuntError(hunt_id) | [
"def",
"ReadHuntObject",
"(",
"self",
",",
"hunt_id",
")",
":",
"try",
":",
"return",
"self",
".",
"_DeepCopy",
"(",
"self",
".",
"hunts",
"[",
"hunt_id",
"]",
")",
"except",
"KeyError",
":",
"raise",
"db",
".",
"UnknownHuntError",
"(",
"hunt_id",
")"
] | Reads a hunt object from the database. | [
"Reads",
"a",
"hunt",
"object",
"from",
"the",
"database",
"."
] | python | train |
carpedm20/ndrive | ndrive/models.py | https://github.com/carpedm20/ndrive/blob/ac58eaf8a8d46292ad752bb38047f65838b8ad2b/ndrive/models.py#L426-L519 | def getList(self, dummy = 56184, orgresource = '/', type = 1, dept = 0, sort = 'name', order = 'asc', startnum = 0, pagingrow = 1000):
"""GetList
Args:
dummy: ???
orgresource: Directory path to get the file list
ex) /Picture/
type: 1 => only directories with idxfolder property
2 => only files
3 => directories and files with thumbnail info
ex) viewHeight, viewWidth for Image file
4 => only directories except idxfolder
5 => directories and files without thumbnail info
dept: Depth for the file list
sort: name => file name
file => file type
length => size of file
date => edited date
credate => creation date
protect => protect or not
order: Order by (asc, desc)
startnum: ???
pagingrow: start index ?
Returns:
FileInfo list: List of files for a path
False: Failed to get list
"""
url = nurls['getList']
data = {'userid': self.user_id,
'useridx': self.useridx,
'dummy': dummy,
'orgresource': orgresource,
'type': type,
'dept': dept,
'sort': sort,
'order': order,
'startnum': startnum,
'pagingrow': pagingrow,
}
r = self.session.post(url = url, data = data)
try:
j = json.loads(r.text)
except:
print '[*] Success checkUpload: 0 result'
return []
if j['message'] != 'success':
print '[*] Error checkUpload: ' + j['message']
return False
else:
files = []
for i in j['resultvalue']:
f = FileInfo()
f.protect = i['protect']
f.resourceno = i['resourceno']
f.copyright = i['copyright']
f.subfoldercnt = i['subfoldercnt']
f.resourcetype = i['resourcetype']
f.fileuploadstatus = i['fileuploadstatus']
f.prority = i['priority']
f.filelink = i['filelink']
f.href = i['href']
f.thumbnailpath = i['thumbnailpath']
f.sharedinfo = i['sharedinfo']
f.getlastmodified = i['getlastmodified']
f.shareno = i['shareno']
f.lastmodifieduser = i['lastmodifieduser']
f.getcontentlength = i['getcontentlength']
f.lastaccessed = i['lastaccessed']
f.virusstatus = i['virusstatus']
f.idxfolder = i['idxfolder']
f.creationdate = i['creationdate']
f.nocache = i['nocache']
f.viewWidth = i['viewWidth']
f.viewHeight = i['viewHeight']
f.setJson(j['resultvalue'])
files.append(f)
return files | [
"def",
"getList",
"(",
"self",
",",
"dummy",
"=",
"56184",
",",
"orgresource",
"=",
"'/'",
",",
"type",
"=",
"1",
",",
"dept",
"=",
"0",
",",
"sort",
"=",
"'name'",
",",
"order",
"=",
"'asc'",
",",
"startnum",
"=",
"0",
",",
"pagingrow",
"=",
"1000",
")",
":",
"url",
"=",
"nurls",
"[",
"'getList'",
"]",
"data",
"=",
"{",
"'userid'",
":",
"self",
".",
"user_id",
",",
"'useridx'",
":",
"self",
".",
"useridx",
",",
"'dummy'",
":",
"dummy",
",",
"'orgresource'",
":",
"orgresource",
",",
"'type'",
":",
"type",
",",
"'dept'",
":",
"dept",
",",
"'sort'",
":",
"sort",
",",
"'order'",
":",
"order",
",",
"'startnum'",
":",
"startnum",
",",
"'pagingrow'",
":",
"pagingrow",
",",
"}",
"r",
"=",
"self",
".",
"session",
".",
"post",
"(",
"url",
"=",
"url",
",",
"data",
"=",
"data",
")",
"try",
":",
"j",
"=",
"json",
".",
"loads",
"(",
"r",
".",
"text",
")",
"except",
":",
"print",
"'[*] Success checkUpload: 0 result'",
"return",
"[",
"]",
"if",
"j",
"[",
"'message'",
"]",
"!=",
"'success'",
":",
"print",
"'[*] Error checkUpload: '",
"+",
"j",
"[",
"'message'",
"]",
"return",
"False",
"else",
":",
"files",
"=",
"[",
"]",
"for",
"i",
"in",
"j",
"[",
"'resultvalue'",
"]",
":",
"f",
"=",
"FileInfo",
"(",
")",
"f",
".",
"protect",
"=",
"i",
"[",
"'protect'",
"]",
"f",
".",
"resourceno",
"=",
"i",
"[",
"'resourceno'",
"]",
"f",
".",
"copyright",
"=",
"i",
"[",
"'copyright'",
"]",
"f",
".",
"subfoldercnt",
"=",
"i",
"[",
"'subfoldercnt'",
"]",
"f",
".",
"resourcetype",
"=",
"i",
"[",
"'resourcetype'",
"]",
"f",
".",
"fileuploadstatus",
"=",
"i",
"[",
"'fileuploadstatus'",
"]",
"f",
".",
"prority",
"=",
"i",
"[",
"'priority'",
"]",
"f",
".",
"filelink",
"=",
"i",
"[",
"'filelink'",
"]",
"f",
".",
"href",
"=",
"i",
"[",
"'href'",
"]",
"f",
".",
"thumbnailpath",
"=",
"i",
"[",
"'thumbnailpath'",
"]",
"f",
".",
"sharedinfo",
"=",
"i",
"[",
"'sharedinfo'",
"]",
"f",
".",
"getlastmodified",
"=",
"i",
"[",
"'getlastmodified'",
"]",
"f",
".",
"shareno",
"=",
"i",
"[",
"'shareno'",
"]",
"f",
".",
"lastmodifieduser",
"=",
"i",
"[",
"'lastmodifieduser'",
"]",
"f",
".",
"getcontentlength",
"=",
"i",
"[",
"'getcontentlength'",
"]",
"f",
".",
"lastaccessed",
"=",
"i",
"[",
"'lastaccessed'",
"]",
"f",
".",
"virusstatus",
"=",
"i",
"[",
"'virusstatus'",
"]",
"f",
".",
"idxfolder",
"=",
"i",
"[",
"'idxfolder'",
"]",
"f",
".",
"creationdate",
"=",
"i",
"[",
"'creationdate'",
"]",
"f",
".",
"nocache",
"=",
"i",
"[",
"'nocache'",
"]",
"f",
".",
"viewWidth",
"=",
"i",
"[",
"'viewWidth'",
"]",
"f",
".",
"viewHeight",
"=",
"i",
"[",
"'viewHeight'",
"]",
"f",
".",
"setJson",
"(",
"j",
"[",
"'resultvalue'",
"]",
")",
"files",
".",
"append",
"(",
"f",
")",
"return",
"files"
] | GetList
Args:
dummy: ???
orgresource: Directory path to get the file list
ex) /Picture/
type: 1 => only directories with idxfolder property
2 => only files
3 => directories and files with thumbnail info
ex) viewHeight, viewWidth for Image file
4 => only directories except idxfolder
5 => directories and files without thumbnail info
dept: Depth for the file list
sort: name => file name
file => file type
length => size of file
date => edited date
credate => creation date
protect => protect or not
order: Order by (asc, desc)
startnum: ???
pagingrow: start index ?
Returns:
FileInfo list: List of files for a path
False: Failed to get list | [
"GetList"
] | python | train |
locationlabs/mockredis | mockredis/client.py | https://github.com/locationlabs/mockredis/blob/fd4e3117066ff0c24e86ebca007853a8092e3254/mockredis/client.py#L1019-L1026 | def sismember(self, name, value):
"""Emulate sismember."""
redis_set = self._get_set(name, 'SISMEMBER')
if not redis_set:
return 0
result = self._encode(value) in redis_set
return 1 if result else 0 | [
"def",
"sismember",
"(",
"self",
",",
"name",
",",
"value",
")",
":",
"redis_set",
"=",
"self",
".",
"_get_set",
"(",
"name",
",",
"'SISMEMBER'",
")",
"if",
"not",
"redis_set",
":",
"return",
"0",
"result",
"=",
"self",
".",
"_encode",
"(",
"value",
")",
"in",
"redis_set",
"return",
"1",
"if",
"result",
"else",
"0"
] | Emulate sismember. | [
"Emulate",
"sismember",
"."
] | python | train |
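A quick usage sketch of the emulated `sismember`; it assumes `mockredis` exposes a `MockRedis` client at the package level, as its README suggests, and that `sadd` accepts multiple members.

```python
from mockredis import MockRedis  # assumed package-level export

r = MockRedis()
r.sadd("colors", "red", "blue")
print(r.sismember("colors", "red"))     # 1
print(r.sismember("colors", "green"))   # 0
print(r.sismember("missing", "red"))    # 0 (empty set short-circuits)
```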
crackinglandia/pype32 | pype32/utils.py | https://github.com/crackinglandia/pype32/blob/192fd14dfc0dd36d953739a81c17fbaf5e3d6076/pype32/utils.py#L266-L282 | def readAlignedString(self, align = 4):
"""
Reads an ASCII string aligned to the next align-bytes boundary.
@type align: int
@param align: (Optional) The value we want the ASCII string to be aligned.
@rtype: str
@return: A 4-bytes aligned (default) ASCII string.
"""
s = self.readString()
r = align - len(s) % align
while r:
s += self.data[self.offset]
self.offset += 1
r -= 1
return s.rstrip("\x00") | [
"def",
"readAlignedString",
"(",
"self",
",",
"align",
"=",
"4",
")",
":",
"s",
"=",
"self",
".",
"readString",
"(",
")",
"r",
"=",
"align",
"-",
"len",
"(",
"s",
")",
"%",
"align",
"while",
"r",
":",
"s",
"+=",
"self",
".",
"data",
"[",
"self",
".",
"offset",
"]",
"self",
".",
"offset",
"+=",
"1",
"r",
"-=",
"1",
"return",
"s",
".",
"rstrip",
"(",
"\"\\x00\"",
")"
] | Reads an ASCII string aligned to the next align-bytes boundary.
@type align: int
@param align: (Optional) The value we want the ASCII string to be aligned.
@rtype: str
@return: A 4-bytes aligned (default) ASCII string. | [
"Reads",
"an",
"ASCII",
"string",
"aligned",
"to",
"the",
"next",
"align",
"-",
"bytes",
"boundary",
"."
] | python | train |
google/grr | grr/server/grr_response_server/artifact_registry.py | https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/artifact_registry.py#L68-L80 | def AddDatastore(self, urn):
"""Adds a datastore URN as a source.
Args:
urn: an RDF URN value of the datastore.
Returns:
True if the datastore is not an already existing source.
"""
if urn not in self._datastores:
self._datastores.add(urn)
return True
return False | [
"def",
"AddDatastore",
"(",
"self",
",",
"urn",
")",
":",
"if",
"urn",
"not",
"in",
"self",
".",
"_datastores",
":",
"self",
".",
"_datastores",
".",
"add",
"(",
"urn",
")",
"return",
"True",
"return",
"False"
] | Adds a datastore URN as a source.
Args:
urn: an RDF URN value of the datastore.
Returns:
True if the datastore is not an already existing source. | [
"Adds",
"a",
"datastore",
"URN",
"as",
"a",
"source",
"."
] | python | train |
arne-cl/discoursegraphs | src/discoursegraphs/relabel.py | https://github.com/arne-cl/discoursegraphs/blob/842f0068a3190be2c75905754521b176b25a54fb/src/discoursegraphs/relabel.py#L25-L93 | def relabel_nodes(G, mapping, copy=True):
"""Relabel the nodes of the graph G.
Parameters
----------
G : graph
A NetworkX graph
mapping : dictionary
A dictionary with the old labels as keys and new labels as values.
A partial mapping is allowed.
copy : bool (optional, default=True)
If True return a copy, or if False relabel the nodes in place.
Examples
--------
>>> G=nx.path_graph(3) # nodes 0-1-2
>>> mapping={0:'a',1:'b',2:'c'}
>>> H=nx.relabel_nodes(G,mapping)
>>> print(sorted(H.nodes()))
['a', 'b', 'c']
>>> G=nx.path_graph(26) # nodes 0..25
>>> mapping=dict(zip(G.nodes(),"abcdefghijklmnopqrstuvwxyz"))
>>> H=nx.relabel_nodes(G,mapping) # nodes a..z
>>> mapping=dict(zip(G.nodes(),range(1,27)))
>>> G1=nx.relabel_nodes(G,mapping) # nodes 1..26
Partial in-place mapping:
>>> G=nx.path_graph(3) # nodes 0-1-2
>>> mapping={0:'a',1:'b'} # 0->'a' and 1->'b'
>>> G=nx.relabel_nodes(G,mapping, copy=False)
print(G.nodes())
[2, 'b', 'a']
Mapping as function:
>>> G=nx.path_graph(3)
>>> def mapping(x):
... return x**2
>>> H=nx.relabel_nodes(G,mapping)
>>> print(H.nodes())
[0, 1, 4]
Notes
-----
Only the nodes specified in the mapping will be relabeled.
The keyword setting copy=False modifies the graph in place.
This is not always possible if the mapping is circular.
In that case use copy=True.
See Also
--------
convert_node_labels_to_integers
"""
# you can pass a function f(old_label)->new_label
# but we'll just make a dictionary here regardless
if not hasattr(mapping, "__getitem__"):
m = dict((n, mapping(n)) for n in G)
else:
m = mapping
if copy:
return _relabel_copy(G, m)
else:
return _relabel_inplace(G, m) | [
"def",
"relabel_nodes",
"(",
"G",
",",
"mapping",
",",
"copy",
"=",
"True",
")",
":",
"# you can pass a function f(old_label)->new_label",
"# but we'll just make a dictionary here regardless",
"if",
"not",
"hasattr",
"(",
"mapping",
",",
"\"__getitem__\"",
")",
":",
"m",
"=",
"dict",
"(",
"(",
"n",
",",
"mapping",
"(",
"n",
")",
")",
"for",
"n",
"in",
"G",
")",
"else",
":",
"m",
"=",
"mapping",
"if",
"copy",
":",
"return",
"_relabel_copy",
"(",
"G",
",",
"m",
")",
"else",
":",
"return",
"_relabel_inplace",
"(",
"G",
",",
"m",
")"
] | Relabel the nodes of the graph G.
Parameters
----------
G : graph
A NetworkX graph
mapping : dictionary
A dictionary with the old labels as keys and new labels as values.
A partial mapping is allowed.
copy : bool (optional, default=True)
If True return a copy, or if False relabel the nodes in place.
Examples
--------
>>> G=nx.path_graph(3) # nodes 0-1-2
>>> mapping={0:'a',1:'b',2:'c'}
>>> H=nx.relabel_nodes(G,mapping)
>>> print(sorted(H.nodes()))
['a', 'b', 'c']
>>> G=nx.path_graph(26) # nodes 0..25
>>> mapping=dict(zip(G.nodes(),"abcdefghijklmnopqrstuvwxyz"))
>>> H=nx.relabel_nodes(G,mapping) # nodes a..z
>>> mapping=dict(zip(G.nodes(),range(1,27)))
>>> G1=nx.relabel_nodes(G,mapping) # nodes 1..26
Partial in-place mapping:
>>> G=nx.path_graph(3) # nodes 0-1-2
>>> mapping={0:'a',1:'b'} # 0->'a' and 1->'b'
>>> G=nx.relabel_nodes(G,mapping, copy=False)
print(G.nodes())
[2, 'b', 'a']
Mapping as function:
>>> G=nx.path_graph(3)
>>> def mapping(x):
... return x**2
>>> H=nx.relabel_nodes(G,mapping)
>>> print(H.nodes())
[0, 1, 4]
Notes
-----
Only the nodes specified in the mapping will be relabeled.
The keyword setting copy=False modifies the graph in place.
This is not always possible if the mapping is circular.
In that case use copy=True.
See Also
--------
convert_node_labels_to_integers | [
"Relabel",
"the",
"nodes",
"of",
"the",
"graph",
"G",
"."
] | python | train |
Azure/azure-uamqp-python | uamqp/connection.py | https://github.com/Azure/azure-uamqp-python/blob/b67e4fcaf2e8a337636947523570239c10a58ae2/uamqp/connection.py#L158-L188 | def _state_changed(self, previous_state, new_state):
"""Callback called whenever the underlying Connection undergoes
a change of state. This function wraps the states as Enums for logging
purposes.
:param previous_state: The previous Connection state.
:type previous_state: int
:param new_state: The new Connection state.
:type new_state: int
"""
try:
try:
_previous_state = c_uamqp.ConnectionState(previous_state)
except ValueError:
_previous_state = c_uamqp.ConnectionState.UNKNOWN
try:
_new_state = c_uamqp.ConnectionState(new_state)
except ValueError:
_new_state = c_uamqp.ConnectionState.UNKNOWN
self._state = _new_state
_logger.info("Connection %r state changed from %r to %r", self.container_id, _previous_state, _new_state)
if _new_state == c_uamqp.ConnectionState.END and _previous_state != c_uamqp.ConnectionState.CLOSE_RCVD:
if not self._closing and not self._error:
_logger.info("Connection with ID %r unexpectedly in an error state. Closing: %r, Error: %r",
self.container_id, self._closing, self._error)
condition = b"amqp:unknown-error"
description = b"Connection in an unexpected error state."
self._error = errors._process_connection_error(self.error_policy, condition, description, None) # pylint: disable=protected-access
except KeyboardInterrupt:
_logger.error("Received shutdown signal while updating connection state from {} to {}".format(
previous_state, new_state))
self._error = errors.AMQPClientShutdown() | [
"def",
"_state_changed",
"(",
"self",
",",
"previous_state",
",",
"new_state",
")",
":",
"try",
":",
"try",
":",
"_previous_state",
"=",
"c_uamqp",
".",
"ConnectionState",
"(",
"previous_state",
")",
"except",
"ValueError",
":",
"_previous_state",
"=",
"c_uamqp",
".",
"ConnectionState",
".",
"UNKNOWN",
"try",
":",
"_new_state",
"=",
"c_uamqp",
".",
"ConnectionState",
"(",
"new_state",
")",
"except",
"ValueError",
":",
"_new_state",
"=",
"c_uamqp",
".",
"ConnectionState",
".",
"UNKNOWN",
"self",
".",
"_state",
"=",
"_new_state",
"_logger",
".",
"info",
"(",
"\"Connection %r state changed from %r to %r\"",
",",
"self",
".",
"container_id",
",",
"_previous_state",
",",
"_new_state",
")",
"if",
"_new_state",
"==",
"c_uamqp",
".",
"ConnectionState",
".",
"END",
"and",
"_previous_state",
"!=",
"c_uamqp",
".",
"ConnectionState",
".",
"CLOSE_RCVD",
":",
"if",
"not",
"self",
".",
"_closing",
"and",
"not",
"self",
".",
"_error",
":",
"_logger",
".",
"info",
"(",
"\"Connection with ID %r unexpectedly in an error state. Closing: %r, Error: %r\"",
",",
"self",
".",
"container_id",
",",
"self",
".",
"_closing",
",",
"self",
".",
"_error",
")",
"condition",
"=",
"b\"amqp:unknown-error\"",
"description",
"=",
"b\"Connection in an unexpected error state.\"",
"self",
".",
"_error",
"=",
"errors",
".",
"_process_connection_error",
"(",
"self",
".",
"error_policy",
",",
"condition",
",",
"description",
",",
"None",
")",
"# pylint: disable=protected-access",
"except",
"KeyboardInterrupt",
":",
"_logger",
".",
"error",
"(",
"\"Received shutdown signal while updating connection state from {} to {}\"",
".",
"format",
"(",
"previous_state",
",",
"new_state",
")",
")",
"self",
".",
"_error",
"=",
"errors",
".",
"AMQPClientShutdown",
"(",
")"
] | Callback called whenever the underlying Connection undergoes
a change of state. This function wraps the states as Enums for logging
purposes.
:param previous_state: The previous Connection state.
:type previous_state: int
:param new_state: The new Connection state.
:type new_state: int | [
"Callback",
"called",
"whenever",
"the",
"underlying",
"Connection",
"undergoes",
"a",
"change",
"of",
"state",
".",
"This",
"function",
"wraps",
"the",
"states",
"as",
"Enums",
"for",
"logging",
"purposes",
".",
":",
"param",
"previous_state",
":",
"The",
"previous",
"Connection",
"state",
".",
":",
"type",
"previous_state",
":",
"int",
":",
"param",
"new_state",
":",
"The",
"new",
"Connection",
"state",
".",
":",
"type",
"new_state",
":",
"int"
] | python | train |
xtuml/pyxtuml | bridgepoint/oal.py | https://github.com/xtuml/pyxtuml/blob/7dd9343b9a0191d1db1887ab9288d0a026608d9a/bridgepoint/oal.py#L1254-L1258 | def p_port_invocation_assignment_statement(self, p):
'''statement : SEND variable_access EQUAL implicit_invocation'''
p[4].__class__ = PortInvocationNode
p[0] = AssignmentNode(variable_access=p[2],
expression=p[4]) | [
"def",
"p_port_invocation_assignment_statement",
"(",
"self",
",",
"p",
")",
":",
"p",
"[",
"4",
"]",
".",
"__class__",
"=",
"PortInvocationNode",
"p",
"[",
"0",
"]",
"=",
"AssignmentNode",
"(",
"variable_access",
"=",
"p",
"[",
"2",
"]",
",",
"expression",
"=",
"p",
"[",
"4",
"]",
")"
] | statement : SEND variable_access EQUAL implicit_invocation | [
"statement",
":",
"SEND",
"variable_access",
"EQUAL",
"implicit_invocation"
] | python | test |
chaoss/grimoirelab-perceval | perceval/backends/core/jira.py | https://github.com/chaoss/grimoirelab-perceval/blob/41c908605e88b7ebc3a536c643fa0f212eaf9e0e/perceval/backends/core/jira.py#L221-L234 | def parse_issues(raw_page):
"""Parse a JIRA API raw response.
The method parses the API response retrieving the
issues from the received items
:param raw_page: raw page from which to parse the issues
:returns: a generator of issues
"""
raw_issues = json.loads(raw_page)
issues = raw_issues['issues']
for issue in issues:
yield issue | [
"def",
"parse_issues",
"(",
"raw_page",
")",
":",
"raw_issues",
"=",
"json",
".",
"loads",
"(",
"raw_page",
")",
"issues",
"=",
"raw_issues",
"[",
"'issues'",
"]",
"for",
"issue",
"in",
"issues",
":",
"yield",
"issue"
] | Parse a JIRA API raw response.
The method parses the API response retrieving the
issues from the received items
:param raw_page: raw page from which to parse the issues
:returns: a generator of issues | [
"Parse",
"a",
"JIRA",
"API",
"raw",
"response",
"."
] | python | test |
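The parser above is a thin wrapper over `json.loads`; a stand-alone sketch with a fabricated two-issue payload (the issue keys and fields are made up) shows how the generator is consumed.

```python
import json

def parse_issues(raw_page):
    # Logic copied from the record above: yield each issue from a raw search page.
    raw_issues = json.loads(raw_page)
    for issue in raw_issues['issues']:
        yield issue

raw_page = json.dumps({
    "issues": [
        {"key": "PROJ-1", "fields": {"summary": "First issue"}},
        {"key": "PROJ-2", "fields": {"summary": "Second issue"}},
    ]
})
for issue in parse_issues(raw_page):
    print(issue["key"], "-", issue["fields"]["summary"])
```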
kkujawinski/git-pre-push-hook | src/git_pre_push_hook/engine.py | https://github.com/kkujawinski/git-pre-push-hook/blob/b62f4199150de2d6ec3f6f383ad69b0dddf9948d/src/git_pre_push_hook/engine.py#L103-L136 | def get_user_modified_lines(self):
"""
Output: {file_path: [(line_a_start, line_a_end), (line_b_start, line_b_end)]}
Lines ranges are sorted and not overlapping
"""
# I assume that git diff:
# - doesn't mix diffs from different files,
# - diffs are not overlapping
# - diffs are sorted based on line numbers
output = {}
FILE_NAME_RE = r'^\+\+\+ (.+)$'
CHANGED_LINES_RE = r'^@@ -[0-9,]+ \+([0-9]+)(?:,([0-9]+))? @@'
current_file_name = None
for line in self.git_wrapper.get_min_diff(self.remote_sha1, self.local_sha1).split('\n'):
file_name_match = re.match(FILE_NAME_RE, line)
if file_name_match:
current_file_name, = file_name_match.groups()
output[current_file_name] = []
continue
line_number_match = re.match(CHANGED_LINES_RE, line)
if line_number_match:
assert current_file_name
if current_file_name == '/dev/null':
continue
line_start, diff_len = line_number_match.groups()
line_start, diff_len = int(line_start), int(diff_len or 0)
output[current_file_name].append(LinesRange(line_start, line_start + diff_len))
continue
return output | [
"def",
"get_user_modified_lines",
"(",
"self",
")",
":",
"# I assume that git diff:",
"# - doesn't mix diffs from different files,",
"# - diffs are not overlapping",
"# - diffs are sorted based on line numbers",
"output",
"=",
"{",
"}",
"FILE_NAME_RE",
"=",
"r'^\\+\\+\\+ (.+)$'",
"CHANGED_LINES_RE",
"=",
"r'^@@ -[0-9,]+ \\+([0-9]+)(?:,([0-9]+))? @@'",
"current_file_name",
"=",
"None",
"for",
"line",
"in",
"self",
".",
"git_wrapper",
".",
"get_min_diff",
"(",
"self",
".",
"remote_sha1",
",",
"self",
".",
"local_sha1",
")",
".",
"split",
"(",
"'\\n'",
")",
":",
"file_name_match",
"=",
"re",
".",
"match",
"(",
"FILE_NAME_RE",
",",
"line",
")",
"if",
"file_name_match",
":",
"current_file_name",
",",
"=",
"file_name_match",
".",
"groups",
"(",
")",
"output",
"[",
"current_file_name",
"]",
"=",
"[",
"]",
"continue",
"line_number_match",
"=",
"re",
".",
"match",
"(",
"CHANGED_LINES_RE",
",",
"line",
")",
"if",
"line_number_match",
":",
"assert",
"current_file_name",
"if",
"current_file_name",
"==",
"'/dev/null'",
":",
"continue",
"line_start",
",",
"diff_len",
"=",
"line_number_match",
".",
"groups",
"(",
")",
"line_start",
",",
"diff_len",
"=",
"int",
"(",
"line_start",
")",
",",
"int",
"(",
"diff_len",
"or",
"0",
")",
"output",
"[",
"current_file_name",
"]",
".",
"append",
"(",
"LinesRange",
"(",
"line_start",
",",
"line_start",
"+",
"diff_len",
")",
")",
"continue",
"return",
"output"
] | Output: {file_path: [(line_a_start, line_a_end), (line_b_start, line_b_end)]}
Lines ranges are sorted and not overlapping | [
"Output",
":",
"{",
"file_path",
":",
"[",
"(",
"line_a_start",
"line_a_end",
")",
"(",
"line_b_start",
"line_b_end",
")",
"]",
"}"
] | python | train |
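The hook above relies on two regular expressions to pull file names and changed-line ranges out of `git diff` output; a small check of those patterns on made-up diff headers follows.

```python
import re

# Same patterns as in the record above.
FILE_NAME_RE = r'^\+\+\+ (.+)$'
CHANGED_LINES_RE = r'^@@ -[0-9,]+ \+([0-9]+)(?:,([0-9]+))? @@'

print(re.match(FILE_NAME_RE, '+++ b/src/app.py').group(1))        # b/src/app.py
start, length = re.match(CHANGED_LINES_RE, '@@ -10,3 +12,4 @@').groups()
print(int(start), int(length or 0))                                # 12 4
print(re.match(CHANGED_LINES_RE, '@@ -5 +7 @@').groups())          # ('7', None)
```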
Qiskit/qiskit-terra | qiskit/transpiler/passmanager.py | https://github.com/Qiskit/qiskit-terra/blob/d4f58d903bc96341b816f7c35df936d6421267d1/qiskit/transpiler/passmanager.py#L237-L247 | def remove_flow_controller(cls, name):
"""
Removes a flow controller.
Args:
name (string): Name of the controller to remove.
Raises:
KeyError: If the controller to remove was not registered.
"""
if name not in cls.registered_controllers:
raise KeyError("Flow controller not found: %s" % name)
del cls.registered_controllers[name] | [
"def",
"remove_flow_controller",
"(",
"cls",
",",
"name",
")",
":",
"if",
"name",
"not",
"in",
"cls",
".",
"registered_controllers",
":",
"raise",
"KeyError",
"(",
"\"Flow controller not found: %s\"",
"%",
"name",
")",
"del",
"cls",
".",
"registered_controllers",
"[",
"name",
"]"
] | Removes a flow controller.
Args:
name (string): Name of the controller to remove.
Raises:
KeyError: If the controller to remove was not registered. | [
"Removes",
"a",
"flow",
"controller",
".",
"Args",
":",
"name",
"(",
"string",
")",
":",
"Name",
"of",
"the",
"controller",
"to",
"remove",
".",
"Raises",
":",
"KeyError",
":",
"If",
"the",
"controller",
"to",
"remove",
"was",
"not",
"registered",
"."
] | python | test |
Microsoft/azure-devops-python-api | azure-devops/azure/devops/v5_0/work/work_client.py | https://github.com/Microsoft/azure-devops-python-api/blob/4777ffda2f5052fabbaddb2abe9cb434e0cf1aa8/azure-devops/azure/devops/v5_0/work/work_client.py#L165-L201 | def get_board_mapping_parent_items(self, team_context, child_backlog_context_category_ref_name, workitem_ids):
"""GetBoardMappingParentItems.
[Preview API] Returns the list of parent field filter model for the given list of workitem ids
:param :class:`<TeamContext> <azure.devops.v5_0.work.models.TeamContext>` team_context: The team context for the operation
:param str child_backlog_context_category_ref_name:
:param [int] workitem_ids:
:rtype: [ParentChildWIMap]
"""
project = None
team = None
if team_context is not None:
if team_context.project_id:
project = team_context.project_id
else:
project = team_context.project
if team_context.team_id:
team = team_context.team_id
else:
team = team_context.team
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'string')
if team is not None:
route_values['team'] = self._serialize.url('team', team, 'string')
query_parameters = {}
if child_backlog_context_category_ref_name is not None:
query_parameters['childBacklogContextCategoryRefName'] = self._serialize.query('child_backlog_context_category_ref_name', child_backlog_context_category_ref_name, 'str')
if workitem_ids is not None:
workitem_ids = ",".join(map(str, workitem_ids))
query_parameters['workitemIds'] = self._serialize.query('workitem_ids', workitem_ids, 'str')
response = self._send(http_method='GET',
location_id='186abea3-5c35-432f-9e28-7a15b4312a0e',
version='5.0-preview.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('[ParentChildWIMap]', self._unwrap_collection(response)) | [
"def",
"get_board_mapping_parent_items",
"(",
"self",
",",
"team_context",
",",
"child_backlog_context_category_ref_name",
",",
"workitem_ids",
")",
":",
"project",
"=",
"None",
"team",
"=",
"None",
"if",
"team_context",
"is",
"not",
"None",
":",
"if",
"team_context",
".",
"project_id",
":",
"project",
"=",
"team_context",
".",
"project_id",
"else",
":",
"project",
"=",
"team_context",
".",
"project",
"if",
"team_context",
".",
"team_id",
":",
"team",
"=",
"team_context",
".",
"team_id",
"else",
":",
"team",
"=",
"team_context",
".",
"team",
"route_values",
"=",
"{",
"}",
"if",
"project",
"is",
"not",
"None",
":",
"route_values",
"[",
"'project'",
"]",
"=",
"self",
".",
"_serialize",
".",
"url",
"(",
"'project'",
",",
"project",
",",
"'string'",
")",
"if",
"team",
"is",
"not",
"None",
":",
"route_values",
"[",
"'team'",
"]",
"=",
"self",
".",
"_serialize",
".",
"url",
"(",
"'team'",
",",
"team",
",",
"'string'",
")",
"query_parameters",
"=",
"{",
"}",
"if",
"child_backlog_context_category_ref_name",
"is",
"not",
"None",
":",
"query_parameters",
"[",
"'childBacklogContextCategoryRefName'",
"]",
"=",
"self",
".",
"_serialize",
".",
"query",
"(",
"'child_backlog_context_category_ref_name'",
",",
"child_backlog_context_category_ref_name",
",",
"'str'",
")",
"if",
"workitem_ids",
"is",
"not",
"None",
":",
"workitem_ids",
"=",
"\",\"",
".",
"join",
"(",
"map",
"(",
"str",
",",
"workitem_ids",
")",
")",
"query_parameters",
"[",
"'workitemIds'",
"]",
"=",
"self",
".",
"_serialize",
".",
"query",
"(",
"'workitem_ids'",
",",
"workitem_ids",
",",
"'str'",
")",
"response",
"=",
"self",
".",
"_send",
"(",
"http_method",
"=",
"'GET'",
",",
"location_id",
"=",
"'186abea3-5c35-432f-9e28-7a15b4312a0e'",
",",
"version",
"=",
"'5.0-preview.1'",
",",
"route_values",
"=",
"route_values",
",",
"query_parameters",
"=",
"query_parameters",
")",
"return",
"self",
".",
"_deserialize",
"(",
"'[ParentChildWIMap]'",
",",
"self",
".",
"_unwrap_collection",
"(",
"response",
")",
")"
] | GetBoardMappingParentItems.
[Preview API] Returns the list of parent field filter model for the given list of workitem ids
:param :class:`<TeamContext> <azure.devops.v5_0.work.models.TeamContext>` team_context: The team context for the operation
:param str child_backlog_context_category_ref_name:
:param [int] workitem_ids:
:rtype: [ParentChildWIMap] | [
"GetBoardMappingParentItems",
".",
"[",
"Preview",
"API",
"]",
"Returns",
"the",
"list",
"of",
"parent",
"field",
"filter",
"model",
"for",
"the",
"given",
"list",
"of",
"workitem",
"ids",
":",
"param",
":",
"class",
":",
"<TeamContext",
">",
"<azure",
".",
"devops",
".",
"v5_0",
".",
"work",
".",
"models",
".",
"TeamContext",
">",
"team_context",
":",
"The",
"team",
"context",
"for",
"the",
"operation",
":",
"param",
"str",
"child_backlog_context_category_ref_name",
":",
":",
"param",
"[",
"int",
"]",
"workitem_ids",
":",
":",
"rtype",
":",
"[",
"ParentChildWIMap",
"]"
] | python | train |
tjcsl/ion | intranet/apps/dashboard/views.py | https://github.com/tjcsl/ion/blob/5d722b0725d572039bb0929fd5715a4070c82c72/intranet/apps/dashboard/views.py#L25-L37 | def get_fcps_emerg(request):
"""Return FCPS emergency information."""
try:
emerg = get_emerg()
except Exception:
logger.info("Unable to fetch FCPS emergency info")
emerg = {"status": False}
if emerg["status"] or ("show_emerg" in request.GET):
msg = emerg["message"]
return "{} <span style='display: block;text-align: right'>— FCPS</span>".format(msg)
return False | [
"def",
"get_fcps_emerg",
"(",
"request",
")",
":",
"try",
":",
"emerg",
"=",
"get_emerg",
"(",
")",
"except",
"Exception",
":",
"logger",
".",
"info",
"(",
"\"Unable to fetch FCPS emergency info\"",
")",
"emerg",
"=",
"{",
"\"status\"",
":",
"False",
"}",
"if",
"emerg",
"[",
"\"status\"",
"]",
"or",
"(",
"\"show_emerg\"",
"in",
"request",
".",
"GET",
")",
":",
"msg",
"=",
"emerg",
"[",
"\"message\"",
"]",
"return",
"\"{} <span style='display: block;text-align: right'>— FCPS</span>\"",
".",
"format",
"(",
"msg",
")",
"return",
"False"
] | Return FCPS emergency information. | [
"Return",
"FCPS",
"emergency",
"information",
"."
] | python | train |
troeger/opensubmit | web/opensubmit/models/submission.py | https://github.com/troeger/opensubmit/blob/384a95b7c6fa41e3f949a129d25dafd9a1c54859/web/opensubmit/models/submission.py#L276-L280 | def author_list(self):
''' The list of authors as text, for admin submission list overview.'''
author_list = [self.submitter] + \
[author for author in self.authors.all().exclude(pk=self.submitter.pk)]
return ",\n".join([author.get_full_name() for author in author_list]) | [
"def",
"author_list",
"(",
"self",
")",
":",
"author_list",
"=",
"[",
"self",
".",
"submitter",
"]",
"+",
"[",
"author",
"for",
"author",
"in",
"self",
".",
"authors",
".",
"all",
"(",
")",
".",
"exclude",
"(",
"pk",
"=",
"self",
".",
"submitter",
".",
"pk",
")",
"]",
"return",
"\",\\n\"",
".",
"join",
"(",
"[",
"author",
".",
"get_full_name",
"(",
")",
"for",
"author",
"in",
"author_list",
"]",
")"
] | The list of authors as text, for admin submission list overview. | [
"The",
"list",
"of",
"authors",
"as",
"text",
"for",
"admin",
"submission",
"list",
"overview",
"."
] | python | train |
ehansis/ozelot | examples/eurominder/eurominder/pipeline.py | https://github.com/ehansis/ozelot/blob/948675e02eb6fca940450f5cb814f53e97159e5b/examples/eurominder/eurominder/pipeline.py#L659-L701 | def load(self):
"""Load the climate data as a map
Returns:
dict: {data: masked 3D numpy array containing climate data per month (first axis),
lat_idx: function converting a latitude to the (fractional) row index in the map,
lon_idx: function converting a longitude to the (fractional) column index in the map}
"""
from scipy.io import netcdf_file
from scipy import interpolate
import numpy as np
# load file
f = netcdf_file(self.input_file)
# extract data, make explicit copies of data
out = dict()
lats = f.variables['lat'][:].copy()
lons = f.variables['lon'][:].copy()
# lons start at 0, this is bad for working with data in Europe because the map border runs right through;
# roll array by half its width to get Europe into the map center
out['data'] = np.roll(f.variables[self.variable_name][:, :, :].copy(), shift=len(lons) // 2, axis=2)
lons = np.roll(lons, shift=len(lons) // 2)
# avoid wraparound problems around zero by setting lon range to -180...180, this is
# also the format used in the GeoJSON NUTS2 polygons
lons[lons > 180] -= 360
# data contains some very negative value (~ -9e36) as 'invalid data' flag, convert this to a masked array
out['data'] = np.ma.array(out['data'])
out['data'][out['data'] < -1.e6] = np.ma.masked
# -- start documentation include: climate-input-interp
# build interpolators to convert lats/lons to row/column indices
out['lat_idx'] = interpolate.interp1d(x=lats, y=np.arange(len(lats)))
out['lon_idx'] = interpolate.interp1d(x=lons, y=np.arange(len(lons)))
# -- end documentation include: climate-input-interp
# clean up
f.close()
return out | [
"def",
"load",
"(",
"self",
")",
":",
"from",
"scipy",
".",
"io",
"import",
"netcdf_file",
"from",
"scipy",
"import",
"interpolate",
"import",
"numpy",
"as",
"np",
"# load file",
"f",
"=",
"netcdf_file",
"(",
"self",
".",
"input_file",
")",
"# extract data, make explicity copies of data",
"out",
"=",
"dict",
"(",
")",
"lats",
"=",
"f",
".",
"variables",
"[",
"'lat'",
"]",
"[",
":",
"]",
".",
"copy",
"(",
")",
"lons",
"=",
"f",
".",
"variables",
"[",
"'lon'",
"]",
"[",
":",
"]",
".",
"copy",
"(",
")",
"# lons start at 0, this is bad for working with data in Europe because the map border runs right through;",
"# roll array by half its width to get Europe into the map center",
"out",
"[",
"'data'",
"]",
"=",
"np",
".",
"roll",
"(",
"f",
".",
"variables",
"[",
"self",
".",
"variable_name",
"]",
"[",
":",
",",
":",
",",
":",
"]",
".",
"copy",
"(",
")",
",",
"shift",
"=",
"len",
"(",
"lons",
")",
"//",
"2",
",",
"axis",
"=",
"2",
")",
"lons",
"=",
"np",
".",
"roll",
"(",
"lons",
",",
"shift",
"=",
"len",
"(",
"lons",
")",
"//",
"2",
")",
"# avoid wraparound problems around zero by setting lon range to -180...180, this is",
"# also the format used in the GeoJSON NUTS2 polygons",
"lons",
"[",
"lons",
">",
"180",
"]",
"-=",
"360",
"# data contains some very negative value (~ -9e36) as 'invalid data' flag, convert this to a masked array",
"out",
"[",
"'data'",
"]",
"=",
"np",
".",
"ma",
".",
"array",
"(",
"out",
"[",
"'data'",
"]",
")",
"out",
"[",
"'data'",
"]",
"[",
"out",
"[",
"'data'",
"]",
"<",
"-",
"1.e6",
"]",
"=",
"np",
".",
"ma",
".",
"masked",
"# -- start documentation include: climate-input-interp",
"# build interpolators to convert lats/lons to row/column indices",
"out",
"[",
"'lat_idx'",
"]",
"=",
"interpolate",
".",
"interp1d",
"(",
"x",
"=",
"lats",
",",
"y",
"=",
"np",
".",
"arange",
"(",
"len",
"(",
"lats",
")",
")",
")",
"out",
"[",
"'lon_idx'",
"]",
"=",
"interpolate",
".",
"interp1d",
"(",
"x",
"=",
"lons",
",",
"y",
"=",
"np",
".",
"arange",
"(",
"len",
"(",
"lons",
")",
")",
")",
"# -- end documentation include: climate-input-interp",
"# clean up",
"f",
".",
"close",
"(",
")",
"return",
"out"
] | Load the climate data as a map
Returns:
dict: {data: masked 3D numpy array containing climate data per month (first axis),
lat_idx: function converting a latitude to the (fractional) row index in the map,
lon_idx: function converting a longitude to the (fractional) column index in the map} | [
"Load",
"the",
"climate",
"data",
"as",
"a",
"map"
] | python | train |
google/grr | grr/client_builder/grr_response_client_builder/client_build.py | https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/client_builder/grr_response_client_builder/client_build.py#L387-L400 | def GetClientConfig(filename):
"""Write client config to filename."""
config_lib.SetPlatformArchContext()
config_lib.ParseConfigCommandLine()
context = list(grr_config.CONFIG.context)
context.append("Client Context")
deployer = build.ClientRepacker()
# Disable timestamping so we can get a reproducible and cachable config file.
config_data = deployer.GetClientConfig(
context, validate=True, deploy_timestamp=False)
builder = build.ClientBuilder()
with open(filename, "w") as fd:
fd.write(config_data)
builder.WriteBuildYaml(fd, build_timestamp=False) | [
"def",
"GetClientConfig",
"(",
"filename",
")",
":",
"config_lib",
".",
"SetPlatformArchContext",
"(",
")",
"config_lib",
".",
"ParseConfigCommandLine",
"(",
")",
"context",
"=",
"list",
"(",
"grr_config",
".",
"CONFIG",
".",
"context",
")",
"context",
".",
"append",
"(",
"\"Client Context\"",
")",
"deployer",
"=",
"build",
".",
"ClientRepacker",
"(",
")",
"# Disable timestamping so we can get a reproducible and cachable config file.",
"config_data",
"=",
"deployer",
".",
"GetClientConfig",
"(",
"context",
",",
"validate",
"=",
"True",
",",
"deploy_timestamp",
"=",
"False",
")",
"builder",
"=",
"build",
".",
"ClientBuilder",
"(",
")",
"with",
"open",
"(",
"filename",
",",
"\"w\"",
")",
"as",
"fd",
":",
"fd",
".",
"write",
"(",
"config_data",
")",
"builder",
".",
"WriteBuildYaml",
"(",
"fd",
",",
"build_timestamp",
"=",
"False",
")"
] | Write client config to filename. | [
"Write",
"client",
"config",
"to",
"filename",
"."
] | python | train |
tgbugs/pyontutils | ilxutils/ilxutils/simple_scicrunch_client.py | https://github.com/tgbugs/pyontutils/blob/3d913db29c177db39151592909a4f56170ef8b35/ilxutils/ilxutils/simple_scicrunch_client.py#L47-L51 | def superclasses_bug_fix(data):
''' PHP returns "id" in superclass but only accepts superclass_tid '''
for i, value in enumerate(data['superclasses']):
data['superclasses'][i]['superclass_tid'] = data['superclasses'][i].pop('id')
return data | [
"def",
"superclasses_bug_fix",
"(",
"data",
")",
":",
"for",
"i",
",",
"value",
"in",
"enumerate",
"(",
"data",
"[",
"'superclasses'",
"]",
")",
":",
"data",
"[",
"'superclasses'",
"]",
"[",
"i",
"]",
"[",
"'superclass_tid'",
"]",
"=",
"data",
"[",
"'superclasses'",
"]",
"[",
"i",
"]",
".",
"pop",
"(",
"'id'",
")",
"return",
"data"
] | PHP returns "id" in superclass but only accepts superclass_tid | [
"PHP",
"returns",
"id",
"in",
"superclass",
"but",
"only",
"accepts",
"superclass_tid"
] | python | train |
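A tiny runnable demo of the key-renaming workaround above; the ids are arbitrary sample values.

```python
def superclasses_bug_fix(data):
    # Copied from the record above: rename each superclass 'id' key to 'superclass_tid'.
    for i, value in enumerate(data['superclasses']):
        data['superclasses'][i]['superclass_tid'] = data['superclasses'][i].pop('id')
    return data

payload = {'superclasses': [{'id': 101}, {'id': 202}]}
print(superclasses_bug_fix(payload))
# {'superclasses': [{'superclass_tid': 101}, {'superclass_tid': 202}]}
```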
FNNDSC/pfmisc | pfmisc/C_snode.py | https://github.com/FNNDSC/pfmisc/blob/960b4d6135fcc50bed0a8e55db2ab1ddad9b99d8/pfmisc/C_snode.py#L68-L75 | def pre(self, *args):
"""
Get / set the str_pre
"""
if len(args):
self.str_pre = args[0]
else:
return self.str_pre | [
"def",
"pre",
"(",
"self",
",",
"*",
"args",
")",
":",
"if",
"len",
"(",
"args",
")",
":",
"self",
".",
"str_pre",
"=",
"args",
"[",
"0",
"]",
"else",
":",
"return",
"self",
".",
"str_pre"
] | Get / set the str_pre | [
"Get",
"/",
"set",
"the",
"str_pre"
] | python | train |
Yubico/python-pyhsm | examples/yhsm-password-auth.py | https://github.com/Yubico/python-pyhsm/blob/b6e2744d1ea15c352a0fc1d6ebc5950026b71311/examples/yhsm-password-auth.py#L47-L106 | def parse_args():
"""
Parse the command line arguments
"""
global default_device
parser = argparse.ArgumentParser(description = "Generate password AEAD using YubiHSM",
add_help=True
)
parser.add_argument('-D', '--device',
dest='device',
default=default_device,
required=False,
help='YubiHSM device (default : "%s").' % default_device
)
parser.add_argument('-v', '--verbose',
dest='verbose',
action='store_true', default=False,
help='Enable verbose operation.'
)
parser.add_argument('--debug',
dest='debug',
action='store_true', default=False,
help='Enable debug operation.'
)
parser.add_argument('--key-handle',
type=int, dest='key_handle',
required=True,
help='Key handle to use. Must have YHSM_ECB_BLOCK_ENCRYPT and/or YHSM_ECB_BLOCK_DECRYPT_CMP flag set.'
)
parser.add_argument('-N', '--nonce',
dest='nonce',
required=True,
help='Nonce to use. 6 bytes encoded as 12 chars hex.'
)
parser.add_argument('--set',
dest='set',
action='store_true', default=False,
help='Set password mode.'
)
parser.add_argument('--validate',
dest='validate',
help='AEAD to validate.'
)
parser.add_argument('--min_length',
type=int, dest='min_len',
required=False, default=20,
help='Minimum length to pad passwords to (default: 20).'
)
args = parser.parse_args()
if args.set and args.validate:
sys.stderr.write("Arguments --set and --validate are mutually exclusive.\n")
sys.exit(1)
if not args.set and not args.validate:
sys.stderr.write("Either --set or --validate must be specified.\n")
sys.exit(1)
return args | [
"def",
"parse_args",
"(",
")",
":",
"global",
"default_device",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
"description",
"=",
"\"Generate password AEAD using YubiHSM\"",
",",
"add_help",
"=",
"True",
")",
"parser",
".",
"add_argument",
"(",
"'-D'",
",",
"'--device'",
",",
"dest",
"=",
"'device'",
",",
"default",
"=",
"default_device",
",",
"required",
"=",
"False",
",",
"help",
"=",
"'YubiHSM device (default : \"%s\").'",
"%",
"default_device",
")",
"parser",
".",
"add_argument",
"(",
"'-v'",
",",
"'--verbose'",
",",
"dest",
"=",
"'verbose'",
",",
"action",
"=",
"'store_true'",
",",
"default",
"=",
"False",
",",
"help",
"=",
"'Enable verbose operation.'",
")",
"parser",
".",
"add_argument",
"(",
"'--debug'",
",",
"dest",
"=",
"'debug'",
",",
"action",
"=",
"'store_true'",
",",
"default",
"=",
"False",
",",
"help",
"=",
"'Enable debug operation.'",
")",
"parser",
".",
"add_argument",
"(",
"'--key-handle'",
",",
"type",
"=",
"int",
",",
"dest",
"=",
"'key_handle'",
",",
"required",
"=",
"True",
",",
"help",
"=",
"'Key handle to use. Must have YHSM_ECB_BLOCK_ENCRYPT and/or YHSM_ECB_BLOCK_DECRYPT_CMP flag set.'",
")",
"parser",
".",
"add_argument",
"(",
"'-N'",
",",
"'--nonce'",
",",
"dest",
"=",
"'nonce'",
",",
"required",
"=",
"True",
",",
"help",
"=",
"'Nonce to use. 6 bytes encoded as 12 chars hex.'",
")",
"parser",
".",
"add_argument",
"(",
"'--set'",
",",
"dest",
"=",
"'set'",
",",
"action",
"=",
"'store_true'",
",",
"default",
"=",
"False",
",",
"help",
"=",
"'Set password mode.'",
")",
"parser",
".",
"add_argument",
"(",
"'--validate'",
",",
"dest",
"=",
"'validate'",
",",
"help",
"=",
"'AEAD to validate.'",
")",
"parser",
".",
"add_argument",
"(",
"'--min_length'",
",",
"type",
"=",
"int",
",",
"dest",
"=",
"'min_len'",
",",
"required",
"=",
"False",
",",
"default",
"=",
"20",
",",
"help",
"=",
"'Minimum length to pad passwords to (default: 20).'",
")",
"args",
"=",
"parser",
".",
"parse_args",
"(",
")",
"if",
"args",
".",
"set",
"and",
"args",
".",
"validate",
":",
"sys",
".",
"stderr",
".",
"write",
"(",
"\"Arguments --set and --validate are mutually exclusive.\\n\"",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"if",
"not",
"args",
".",
"set",
"and",
"not",
"args",
".",
"validate",
":",
"sys",
".",
"stderr",
".",
"write",
"(",
"\"Either --set or --validate must be specified.\\n\"",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"return",
"args"
] | Parse the command line arguments | [
"Parse",
"the",
"command",
"line",
"arguments"
] | python | train |
softlayer/softlayer-python | SoftLayer/CLI/ticket/upload.py | https://github.com/softlayer/softlayer-python/blob/9f181be08cc3668353b05a6de0cb324f52cff6fa/SoftLayer/CLI/ticket/upload.py#L19-L37 | def cli(env, identifier, path, name):
"""Adds an attachment to an existing ticket."""
mgr = SoftLayer.TicketManager(env.client)
ticket_id = helpers.resolve_id(mgr.resolve_ids, identifier, 'ticket')
if path is None:
raise exceptions.ArgumentError("Missing argument --path")
if not os.path.exists(path):
raise exceptions.ArgumentError("%s not exist" % path)
if name is None:
name = os.path.basename(path)
attached_file = mgr.upload_attachment(ticket_id=ticket_id,
file_path=path,
file_name=name)
env.fout("File attached: \n%s" % attached_file) | [
"def",
"cli",
"(",
"env",
",",
"identifier",
",",
"path",
",",
"name",
")",
":",
"mgr",
"=",
"SoftLayer",
".",
"TicketManager",
"(",
"env",
".",
"client",
")",
"ticket_id",
"=",
"helpers",
".",
"resolve_id",
"(",
"mgr",
".",
"resolve_ids",
",",
"identifier",
",",
"'ticket'",
")",
"if",
"path",
"is",
"None",
":",
"raise",
"exceptions",
".",
"ArgumentError",
"(",
"\"Missing argument --path\"",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"path",
")",
":",
"raise",
"exceptions",
".",
"ArgumentError",
"(",
"\"%s not exist\"",
"%",
"path",
")",
"if",
"name",
"is",
"None",
":",
"name",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"path",
")",
"attached_file",
"=",
"mgr",
".",
"upload_attachment",
"(",
"ticket_id",
"=",
"ticket_id",
",",
"file_path",
"=",
"path",
",",
"file_name",
"=",
"name",
")",
"env",
".",
"fout",
"(",
"\"File attached: \\n%s\"",
"%",
"attached_file",
")"
] | Adds an attachment to an existing ticket. | [
"Adds",
"an",
"attachment",
"to",
"an",
"existing",
"ticket",
"."
] | python | train |
SheffieldML/GPy | GPy/util/mocap.py | https://github.com/SheffieldML/GPy/blob/54c32d79d289d622fb18b898aee65a2a431d90cf/GPy/util/mocap.py#L313-L325 | def load_skel(self, file_name):
"""
Loads an ASF file into a skeleton structure.
:param file_name: The file name to load in.
"""
fid = open(file_name, 'r')
self.read_skel(fid)
fid.close()
self.name = file_name | [
"def",
"load_skel",
"(",
"self",
",",
"file_name",
")",
":",
"fid",
"=",
"open",
"(",
"file_name",
",",
"'r'",
")",
"self",
".",
"read_skel",
"(",
"fid",
")",
"fid",
".",
"close",
"(",
")",
"self",
".",
"name",
"=",
"file_name"
] | Loads an ASF file into a skeleton structure.
:param file_name: The file name to load in. | [
"Loads",
"an",
"ASF",
"file",
"into",
"a",
"skeleton",
"structure",
"."
] | python | train |
ChristopherRabotin/bungiesearch | bungiesearch/__init__.py | https://github.com/ChristopherRabotin/bungiesearch/blob/13768342bc2698b214eb0003c2d113b6e273c30d/bungiesearch/__init__.py#L111-L122 | def get_model_index(cls, model, default=True):
'''
Returns the default model index for the given model, or the list of indices if default is False.
:param model: model name as a string.
:raise KeyError: If the provided model does not have any index associated.
'''
try:
if default:
return cls._model_name_to_default_index[model]
return cls._model_name_to_model_idx[model]
except KeyError:
raise KeyError('Could not find any model index defined for model {}.'.format(model)) | [
"def",
"get_model_index",
"(",
"cls",
",",
"model",
",",
"default",
"=",
"True",
")",
":",
"try",
":",
"if",
"default",
":",
"return",
"cls",
".",
"_model_name_to_default_index",
"[",
"model",
"]",
"return",
"cls",
".",
"_model_name_to_model_idx",
"[",
"model",
"]",
"except",
"KeyError",
":",
"raise",
"KeyError",
"(",
"'Could not find any model index defined for model {}.'",
".",
"format",
"(",
"model",
")",
")"
] | Returns the default model index for the given model, or the list of indices if default is False.
:param model: model name as a string.
:raise KeyError: If the provided model does not have any index associated. | [
"Returns",
"the",
"default",
"model",
"index",
"for",
"the",
"given",
"model",
"or",
"the",
"list",
"of",
"indices",
"if",
"default",
"is",
"False",
".",
":",
"param",
"model",
":",
"model",
"name",
"as",
"a",
"string",
".",
":",
"raise",
"KeyError",
":",
"If",
"the",
"provided",
"model",
"does",
"not",
"have",
"any",
"index",
"associated",
"."
] | python | train |
mrstephenneal/mysql-toolkit | mysql/toolkit/components/operations/clone.py | https://github.com/mrstephenneal/mysql-toolkit/blob/6964f718f4b72eb30f2259adfcfaf3090526c53d/mysql/toolkit/components/operations/clone.py#L90-L101 | def copy_database_structure(self, source, destination, tables=None):
"""Copy multiple tables from one database to another."""
# Change database to source
self.change_db(source)
if tables is None:
tables = self.tables
# Change database to destination
self.change_db(destination)
for t in tqdm(tables, total=len(tables), desc='Copying {0} table structure'.format(source)):
self.copy_table_structure(source, destination, t) | [
"def",
"copy_database_structure",
"(",
"self",
",",
"source",
",",
"destination",
",",
"tables",
"=",
"None",
")",
":",
"# Change database to source",
"self",
".",
"change_db",
"(",
"source",
")",
"if",
"tables",
"is",
"None",
":",
"tables",
"=",
"self",
".",
"tables",
"# Change database to destination",
"self",
".",
"change_db",
"(",
"destination",
")",
"for",
"t",
"in",
"tqdm",
"(",
"tables",
",",
"total",
"=",
"len",
"(",
"tables",
")",
",",
"desc",
"=",
"'Copying {0} table structure'",
".",
"format",
"(",
"source",
")",
")",
":",
"self",
".",
"copy_table_structure",
"(",
"source",
",",
"destination",
",",
"t",
")"
] | Copy multiple tables from one database to another. | [
"Copy",
"multiple",
"tables",
"from",
"one",
"database",
"to",
"another",
"."
] | python | train |
aws/aws-iot-device-sdk-python | AWSIoTPythonSDK/core/protocol/paho/client.py | https://github.com/aws/aws-iot-device-sdk-python/blob/f0aa2ce34b21dd2e44f4fb7e1d058656aaf2fc62/AWSIoTPythonSDK/core/protocol/paho/client.py#L526-L535 | def configIAMCredentials(self, srcAWSAccessKeyID, srcAWSSecretAccessKey, srcAWSSessionToken):
"""
Make custom settings for IAM credentials for websocket connection
srcAWSAccessKeyID - AWS IAM access key
srcAWSSecretAccessKey - AWS IAM secret key
srcAWSSessionToken - AWS Session Token
"""
self._AWSAccessKeyIDCustomConfig = srcAWSAccessKeyID
self._AWSSecretAccessKeyCustomConfig = srcAWSSecretAccessKey
self._AWSSessionTokenCustomConfig = srcAWSSessionToken | [
"def",
"configIAMCredentials",
"(",
"self",
",",
"srcAWSAccessKeyID",
",",
"srcAWSSecretAccessKey",
",",
"srcAWSSessionToken",
")",
":",
"self",
".",
"_AWSAccessKeyIDCustomConfig",
"=",
"srcAWSAccessKeyID",
"self",
".",
"_AWSSecretAccessKeyCustomConfig",
"=",
"srcAWSSecretAccessKey",
"self",
".",
"_AWSSessionTokenCustomConfig",
"=",
"srcAWSSessionToken"
] | Make custom settings for IAM credentials for websocket connection
srcAWSAccessKeyID - AWS IAM access key
srcAWSSecretAccessKey - AWS IAM secret key
srcAWSSessionToken - AWS Session Token | [
"Make",
"custom",
"settings",
"for",
"IAM",
"credentials",
"for",
"websocket",
"connection",
"srcAWSAccessKeyID",
"-",
"AWS",
"IAM",
"access",
"key",
"srcAWSSecretAccessKey",
"-",
"AWS",
"IAM",
"secret",
"key",
"srcAWSSessionToken",
"-",
"AWS",
"Session",
"Token"
] | python | train |
brainiak/brainiak | brainiak/funcalign/sssrm.py | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/funcalign/sssrm.py#L691-L731 | def _loss_lr_subject(self, data, labels, w, theta, bias):
"""Compute the Loss MLR for a single subject (without regularization)
Parameters
----------
data : array, shape=[voxels, samples]
The fMRI data of subject i for the classification task.
labels : array of int, shape=[samples]
The labels for the data samples in data.
w : array, shape=[voxels, features]
The orthogonal transform (mapping) :math:`W_i` for subject i.
theta : array, shape=[classes, features]
The MLR class plane parameters.
bias : array, shape=[classes]
The MLR class biases.
Returns
-------
loss : float
The loss MLR for the subject
"""
if data is None:
return 0.0
samples = data.shape[1]
thetaT_wi_zi_plus_bias = theta.T.dot(w.T.dot(data)) + bias
sum_exp, max_value, _ = utils.sumexp_stable(thetaT_wi_zi_plus_bias)
sum_exp_values = np.log(sum_exp) + max_value
aux = 0.0
for sample in range(samples):
label = labels[sample]
aux += thetaT_wi_zi_plus_bias[label, sample]
return self.alpha / samples / self.gamma * (sum_exp_values.sum() - aux) | [
"def",
"_loss_lr_subject",
"(",
"self",
",",
"data",
",",
"labels",
",",
"w",
",",
"theta",
",",
"bias",
")",
":",
"if",
"data",
"is",
"None",
":",
"return",
"0.0",
"samples",
"=",
"data",
".",
"shape",
"[",
"1",
"]",
"thetaT_wi_zi_plus_bias",
"=",
"theta",
".",
"T",
".",
"dot",
"(",
"w",
".",
"T",
".",
"dot",
"(",
"data",
")",
")",
"+",
"bias",
"sum_exp",
",",
"max_value",
",",
"_",
"=",
"utils",
".",
"sumexp_stable",
"(",
"thetaT_wi_zi_plus_bias",
")",
"sum_exp_values",
"=",
"np",
".",
"log",
"(",
"sum_exp",
")",
"+",
"max_value",
"aux",
"=",
"0.0",
"for",
"sample",
"in",
"range",
"(",
"samples",
")",
":",
"label",
"=",
"labels",
"[",
"sample",
"]",
"aux",
"+=",
"thetaT_wi_zi_plus_bias",
"[",
"label",
",",
"sample",
"]",
"return",
"self",
".",
"alpha",
"/",
"samples",
"/",
"self",
".",
"gamma",
"*",
"(",
"sum_exp_values",
".",
"sum",
"(",
")",
"-",
"aux",
")"
] | Compute the Loss MLR for a single subject (without regularization)
Parameters
----------
data : array, shape=[voxels, samples]
The fMRI data of subject i for the classification task.
labels : array of int, shape=[samples]
The labels for the data samples in data.
w : array, shape=[voxels, features]
The orthogonal transform (mapping) :math:`W_i` for subject i.
theta : array, shape=[classes, features]
The MLR class plane parameters.
bias : array, shape=[classes]
The MLR class biases.
Returns
-------
loss : float
The loss MLR for the subject | [
"Compute",
"the",
"Loss",
"MLR",
"for",
"a",
"single",
"subject",
"(",
"without",
"regularization",
")"
] | python | train |
rpcope1/PythonConfluenceAPI | PythonConfluenceAPI/cfapi.py | https://github.com/rpcope1/PythonConfluenceAPI/blob/b7f0ca2a390f964715fdf3a60b5b0c5ef7116d40/PythonConfluenceAPI/cfapi.py#L16-L33 | def request_patch(self, *args, **kwargs):
"""Maintains the existing api for Session.request.
Used by all of the higher level methods, e.g. Session.get.
The background_callback param allows you to do some processing on the
response in the background, e.g. call resp.json() so that json parsing
happens in the background thread.
"""
func = sup = super(FuturesSession, self).request
background_callback = kwargs.pop('background_callback', None)
if background_callback:
def wrap(*args_, **kwargs_):
resp = sup(*args_, **kwargs_)
# Patch the closure to return the callback.
return background_callback(self, resp)
func = wrap
return self.executor.submit(func, *args, **kwargs) | [
"def",
"request_patch",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"func",
"=",
"sup",
"=",
"super",
"(",
"FuturesSession",
",",
"self",
")",
".",
"request",
"background_callback",
"=",
"kwargs",
".",
"pop",
"(",
"'background_callback'",
",",
"None",
")",
"if",
"background_callback",
":",
"def",
"wrap",
"(",
"*",
"args_",
",",
"*",
"*",
"kwargs_",
")",
":",
"resp",
"=",
"sup",
"(",
"*",
"args_",
",",
"*",
"*",
"kwargs_",
")",
"# Patch the closure to return the callback.",
"return",
"background_callback",
"(",
"self",
",",
"resp",
")",
"func",
"=",
"wrap",
"return",
"self",
".",
"executor",
".",
"submit",
"(",
"func",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | Maintains the existing api for Session.request.
Used by all of the higher level methods, e.g. Session.get.
The background_callback param allows you to do some processing on the
response in the background, e.g. call resp.json() so that json parsing
happens in the background thread. | [
"Maintains",
"the",
"existing",
"api",
"for",
"Session",
".",
"request",
".",
"Used",
"by",
"all",
"of",
"the",
"higher",
"level",
"methods",
"e",
".",
"g",
".",
"Session",
".",
"get",
".",
"The",
"background_callback",
"param",
"allows",
"you",
"to",
"do",
"some",
"processing",
"on",
"the",
"response",
"in",
"the",
"background",
"e",
".",
"g",
".",
"call",
"resp",
".",
"json",
"()",
"so",
"that",
"json",
"parsing",
"happens",
"in",
"the",
"background",
"thread",
"."
] | python | train |
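An illustrative sketch of how the patched request above is typically driven; the session object, URL and response fields are placeholders rather than part of the record, and it assumes the FuturesSession subclass that carries this patch:

def parse_json(session, resp):
    # Runs in the executor's worker thread; whatever the callback returns becomes the future's result.
    resp.data = resp.json()
    return resp

future = session.get('https://httpbin.org/get', background_callback=parse_json)
response = future.result()  # blocks until both the request and the callback have finished
print(response.data['url'])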
GeorgeArgyros/symautomata | symautomata/flex2fst.py | https://github.com/GeorgeArgyros/symautomata/blob/f5d66533573b27e155bec3f36b8c00b8e3937cb3/symautomata/flex2fst.py#L50-L104 | def _read_transitions(self):
"""
Read DFA transitions from flex compiled file
Args:
None
Returns:
list: The list of states and the destination for a character
"""
states = []
i = 0
regex = re.compile('[ \t\n\r:,]+')
found = 0 # For maintaining the state of yy_nxt declaration
state = 0 # For maintaining the state of opening and closing tag of yy_nxt
substate = 0 # For maintaining the state of opening and closing tag of each set in yy_nxt
mapping = [] # For writing each set of yy_next
cur_line = None
with open(self.outfile) as flex_file:
for cur_line in flex_file:
if cur_line[0:35] == "static yyconst flex_int16_t yy_nxt[" or cur_line[0:33] == "static const flex_int16_t yy_nxt[":
found = 1
# print 'Found yy_next declaration'
continue
if found == 1:
if state == 0 and cur_line[0:5] == " {":
state = 1
continue
if state == 1 and cur_line[0:7] == " } ;":
state = 0
break
if substate == 0 and cur_line[0:5] == " {":
mapping = []
substate = 1
continue
if substate == 1:
if cur_line[0:6] != " },":
cur_line = "".join(cur_line.split())
if cur_line == '':
continue
if cur_line[cur_line.__len__() - 1] == ',':
splitted_line = regex.split(
cur_line[:cur_line.__len__() - 1])
else:
splitted_line = regex.split(cur_line)
mapping = mapping + splitted_line
continue
else:
cleared = []
for j in mapping:
cleared.append(int(j))
states.append(cleared)
mapping = []
substate = 0
return states | [
"def",
"_read_transitions",
"(",
"self",
")",
":",
"states",
"=",
"[",
"]",
"i",
"=",
"0",
"regex",
"=",
"re",
".",
"compile",
"(",
"'[ \\t\\n\\r:,]+'",
")",
"found",
"=",
"0",
"# For maintaining the state of yy_nxt declaration",
"state",
"=",
"0",
"# For maintaining the state of opening and closing tag of yy_nxt",
"substate",
"=",
"0",
"# For maintaining the state of opening and closing tag of each set in yy_nxt",
"mapping",
"=",
"[",
"]",
"# For writing each set of yy_next",
"cur_line",
"=",
"None",
"with",
"open",
"(",
"self",
".",
"outfile",
")",
"as",
"flex_file",
":",
"for",
"cur_line",
"in",
"flex_file",
":",
"if",
"cur_line",
"[",
"0",
":",
"35",
"]",
"==",
"\"static yyconst flex_int16_t yy_nxt[\"",
"or",
"cur_line",
"[",
"0",
":",
"33",
"]",
"==",
"\"static const flex_int16_t yy_nxt[\"",
":",
"found",
"=",
"1",
"# print 'Found yy_next declaration'",
"continue",
"if",
"found",
"==",
"1",
":",
"if",
"state",
"==",
"0",
"and",
"cur_line",
"[",
"0",
":",
"5",
"]",
"==",
"\" {\"",
":",
"state",
"=",
"1",
"continue",
"if",
"state",
"==",
"1",
"and",
"cur_line",
"[",
"0",
":",
"7",
"]",
"==",
"\" } ;\"",
":",
"state",
"=",
"0",
"break",
"if",
"substate",
"==",
"0",
"and",
"cur_line",
"[",
"0",
":",
"5",
"]",
"==",
"\" {\"",
":",
"mapping",
"=",
"[",
"]",
"substate",
"=",
"1",
"continue",
"if",
"substate",
"==",
"1",
":",
"if",
"cur_line",
"[",
"0",
":",
"6",
"]",
"!=",
"\" },\"",
":",
"cur_line",
"=",
"\"\"",
".",
"join",
"(",
"cur_line",
".",
"split",
"(",
")",
")",
"if",
"cur_line",
"==",
"''",
":",
"continue",
"if",
"cur_line",
"[",
"cur_line",
".",
"__len__",
"(",
")",
"-",
"1",
"]",
"==",
"','",
":",
"splitted_line",
"=",
"regex",
".",
"split",
"(",
"cur_line",
"[",
":",
"cur_line",
".",
"__len__",
"(",
")",
"-",
"1",
"]",
")",
"else",
":",
"splitted_line",
"=",
"regex",
".",
"split",
"(",
"cur_line",
")",
"mapping",
"=",
"mapping",
"+",
"splitted_line",
"continue",
"else",
":",
"cleared",
"=",
"[",
"]",
"for",
"j",
"in",
"mapping",
":",
"cleared",
".",
"append",
"(",
"int",
"(",
"j",
")",
")",
"states",
".",
"append",
"(",
"cleared",
")",
"mapping",
"=",
"[",
"]",
"substate",
"=",
"0",
"return",
"states"
] | Read DFA transitions from flex compiled file
Args:
None
Returns:
list: The list of states and the destination for a character | [
"Read",
"DFA",
"transitions",
"from",
"flex",
"compiled",
"file",
"Args",
":",
"None",
"Returns",
":",
"list",
":",
"The",
"list",
"of",
"states",
"and",
"the",
"destination",
"for",
"a",
"character"
] | python | train |
SteveMcGrath/pySecurityCenter | securitycenter/sc4.py | https://github.com/SteveMcGrath/pySecurityCenter/blob/f0b10b1bcd4fd23a8d4d09ca6774cdf5e1cfd880/securitycenter/sc4.py#L736-L769 | def scan_list(self, start_time=None, end_time=None, **kwargs):
"""List scans stored in Security Center in a given time range.
Time is given in UNIX timestamps, assumed to be UTC. If a `datetime` is
passed it is converted. If `end_time` is not specified it is NOW. If
        `start_time` is not specified it is 30 days before `end_time`.
:param start_time: start of range to filter
:type start_time: date, datetime, int
:param end_time: end of range to filter
        :type end_time: date, datetime, int
:return: list of dictionaries representing scans
"""
try:
end_time = datetime.utcfromtimestamp(int(end_time))
except TypeError:
if end_time is None:
end_time = datetime.utcnow()
try:
start_time = datetime.utcfromtimestamp(int(start_time))
except TypeError:
if start_time is None:
start_time = end_time - timedelta(days=30)
data = {"startTime": calendar.timegm(start_time.utctimetuple()),
"endTime": calendar.timegm(end_time.utctimetuple())}
data.update(kwargs)
result = self.raw_query("scanResult", "getRange", data=data)
return result["scanResults"] | [
"def",
"scan_list",
"(",
"self",
",",
"start_time",
"=",
"None",
",",
"end_time",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"try",
":",
"end_time",
"=",
"datetime",
".",
"utcfromtimestamp",
"(",
"int",
"(",
"end_time",
")",
")",
"except",
"TypeError",
":",
"if",
"end_time",
"is",
"None",
":",
"end_time",
"=",
"datetime",
".",
"utcnow",
"(",
")",
"try",
":",
"start_time",
"=",
"datetime",
".",
"utcfromtimestamp",
"(",
"int",
"(",
"start_time",
")",
")",
"except",
"TypeError",
":",
"if",
"start_time",
"is",
"None",
":",
"start_time",
"=",
"end_time",
"-",
"timedelta",
"(",
"days",
"=",
"30",
")",
"data",
"=",
"{",
"\"startTime\"",
":",
"calendar",
".",
"timegm",
"(",
"start_time",
".",
"utctimetuple",
"(",
")",
")",
",",
"\"endTime\"",
":",
"calendar",
".",
"timegm",
"(",
"end_time",
".",
"utctimetuple",
"(",
")",
")",
"}",
"data",
".",
"update",
"(",
"kwargs",
")",
"result",
"=",
"self",
".",
"raw_query",
"(",
"\"scanResult\"",
",",
"\"getRange\"",
",",
"data",
"=",
"data",
")",
"return",
"result",
"[",
"\"scanResults\"",
"]"
] | List scans stored in Security Center in a given time range.
Time is given in UNIX timestamps, assumed to be UTC. If a `datetime` is
passed it is converted. If `end_time` is not specified it is NOW. If
        `start_time` is not specified it is 30 days before `end_time`.
:param start_time: start of range to filter
:type start_time: date, datetime, int
:param end_time: end of range to filter
        :type end_time: date, datetime, int
:return: list of dictionaries representing scans | [
"List",
"scans",
"stored",
"in",
"Security",
"Center",
"in",
"a",
"given",
"time",
"range",
"."
] | python | train |
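A minimal usage sketch for scan_list() above; `sc` stands in for an already-authenticated SecurityCenter 4 API object and the result keys are illustrative assumptions:

from datetime import datetime, timedelta

end = datetime.utcnow()
start = end - timedelta(days=7)

# datetime objects and integer UNIX timestamps are both accepted; omitting end_time
# defaults to "now" and omitting start_time to 30 days before end_time.
for scan in sc.scan_list(start_time=start, end_time=end):
    print(scan.get('name'), scan.get('finishTime'))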
linkedin/naarad | src/naarad/metrics/innotop_metric.py | https://github.com/linkedin/naarad/blob/261e2c0760fd6a6b0ee59064180bd8e3674311fe/src/naarad/metrics/innotop_metric.py#L184-L263 | def parse_innotop_mode_m(self):
""" Special parsing method for Innotop "Replication Status" results (innotop --mode M)"""
with open(self.infile, 'r') as infh:
# Pre processing to figure out different headers
max_row_quot = 0
valrow = -1
thisrowcolumns = {}
data = {}
last_ts = None
while True:
# 2012-05-11T00:00:02 master_host slave_sql_running time_behind_master slave_catchup_rate slave_open_temp_tables relay_log_pos last_error
line1 = infh.readline()
words = line1.split()
# Skip next line
infh.readline()
is_header = True
for word in words:
if naarad.utils.is_number(word):
last_ts = words[0].strip().replace('T', ' ')
is_header = False
break # from this loop
if len(words) > 2 and is_header:
thisrowcolumns[max_row_quot] = words[2:]
for column in thisrowcolumns[max_row_quot]:
data[column] = []
max_row_quot += 1
else:
break
# from pre-processing. All headers accounted for
# Real Processing
if not last_ts:
logger.warn("last_ts not set, looks like there is no data in file %s", self.infile)
return True
infh.seek(0)
is_bad_line = False
outfilehandlers = {}
for line in infh:
l = line.strip().split(' ', 1)
# Blank line
if len(l) <= 1:
continue
ts = l[0].strip().replace('T', ' ')
if ts != last_ts:
last_ts = ts
valrow = -1
nameval = l[1].strip().split('\t', 1)
try:
words = nameval[1].split('\t')
except IndexError:
logger.warn("Bad line: %s", line)
continue
valrow += 1
command = nameval[0]
if command not in outfilehandlers:
outfilehandlers[command] = {}
quot = valrow % max_row_quot
columns = thisrowcolumns[quot]
for i in range(len(words)):
if len(words) > len(columns):
logger.warn("Mismatched number of columns: %s", line)
logger.warn("%d %d", len(words), len(columns))
break
if words[i] in columns:
logger.warn("Skipping line: %s", line)
valrow -= 1
break
if self.options and columns[i] not in self.options:
continue
if columns[i] not in outfilehandlers[command]:
outfilehandlers[command][columns[i]] = open(self.get_csv_C(command, columns[i]), 'w')
self.csv_files.append(self.get_csv_C(command, columns[i]))
ts = naarad.utils.reconcile_timezones(ts, self.timezone, self.graph_timezone)
outfilehandlers[command][columns[i]].write(ts + ',')
outfilehandlers[command][columns[i]].write(words[i])
outfilehandlers[command][columns[i]].write('\n')
for command in outfilehandlers:
for column in outfilehandlers[command]:
outfilehandlers[command][column].close()
return True | [
"def",
"parse_innotop_mode_m",
"(",
"self",
")",
":",
"with",
"open",
"(",
"self",
".",
"infile",
",",
"'r'",
")",
"as",
"infh",
":",
"# Pre processing to figure out different headers",
"max_row_quot",
"=",
"0",
"valrow",
"=",
"-",
"1",
"thisrowcolumns",
"=",
"{",
"}",
"data",
"=",
"{",
"}",
"last_ts",
"=",
"None",
"while",
"True",
":",
"# 2012-05-11T00:00:02 master_host slave_sql_running time_behind_master slave_catchup_rate slave_open_temp_tables relay_log_pos last_error",
"line1",
"=",
"infh",
".",
"readline",
"(",
")",
"words",
"=",
"line1",
".",
"split",
"(",
")",
"# Skip next line",
"infh",
".",
"readline",
"(",
")",
"is_header",
"=",
"True",
"for",
"word",
"in",
"words",
":",
"if",
"naarad",
".",
"utils",
".",
"is_number",
"(",
"word",
")",
":",
"last_ts",
"=",
"words",
"[",
"0",
"]",
".",
"strip",
"(",
")",
".",
"replace",
"(",
"'T'",
",",
"' '",
")",
"is_header",
"=",
"False",
"break",
"# from this loop",
"if",
"len",
"(",
"words",
")",
">",
"2",
"and",
"is_header",
":",
"thisrowcolumns",
"[",
"max_row_quot",
"]",
"=",
"words",
"[",
"2",
":",
"]",
"for",
"column",
"in",
"thisrowcolumns",
"[",
"max_row_quot",
"]",
":",
"data",
"[",
"column",
"]",
"=",
"[",
"]",
"max_row_quot",
"+=",
"1",
"else",
":",
"break",
"# from pre-processing. All headers accounted for",
"# Real Processing",
"if",
"not",
"last_ts",
":",
"logger",
".",
"warn",
"(",
"\"last_ts not set, looks like there is no data in file %s\"",
",",
"self",
".",
"infile",
")",
"return",
"True",
"infh",
".",
"seek",
"(",
"0",
")",
"is_bad_line",
"=",
"False",
"outfilehandlers",
"=",
"{",
"}",
"for",
"line",
"in",
"infh",
":",
"l",
"=",
"line",
".",
"strip",
"(",
")",
".",
"split",
"(",
"' '",
",",
"1",
")",
"# Blank line",
"if",
"len",
"(",
"l",
")",
"<=",
"1",
":",
"continue",
"ts",
"=",
"l",
"[",
"0",
"]",
".",
"strip",
"(",
")",
".",
"replace",
"(",
"'T'",
",",
"' '",
")",
"if",
"ts",
"!=",
"last_ts",
":",
"last_ts",
"=",
"ts",
"valrow",
"=",
"-",
"1",
"nameval",
"=",
"l",
"[",
"1",
"]",
".",
"strip",
"(",
")",
".",
"split",
"(",
"'\\t'",
",",
"1",
")",
"try",
":",
"words",
"=",
"nameval",
"[",
"1",
"]",
".",
"split",
"(",
"'\\t'",
")",
"except",
"IndexError",
":",
"logger",
".",
"warn",
"(",
"\"Bad line: %s\"",
",",
"line",
")",
"continue",
"valrow",
"+=",
"1",
"command",
"=",
"nameval",
"[",
"0",
"]",
"if",
"command",
"not",
"in",
"outfilehandlers",
":",
"outfilehandlers",
"[",
"command",
"]",
"=",
"{",
"}",
"quot",
"=",
"valrow",
"%",
"max_row_quot",
"columns",
"=",
"thisrowcolumns",
"[",
"quot",
"]",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"words",
")",
")",
":",
"if",
"len",
"(",
"words",
")",
">",
"len",
"(",
"columns",
")",
":",
"logger",
".",
"warn",
"(",
"\"Mismatched number of columns: %s\"",
",",
"line",
")",
"logger",
".",
"warn",
"(",
"\"%d %d\"",
",",
"len",
"(",
"words",
")",
",",
"len",
"(",
"columns",
")",
")",
"break",
"if",
"words",
"[",
"i",
"]",
"in",
"columns",
":",
"logger",
".",
"warn",
"(",
"\"Skipping line: %s\"",
",",
"line",
")",
"valrow",
"-=",
"1",
"break",
"if",
"self",
".",
"options",
"and",
"columns",
"[",
"i",
"]",
"not",
"in",
"self",
".",
"options",
":",
"continue",
"if",
"columns",
"[",
"i",
"]",
"not",
"in",
"outfilehandlers",
"[",
"command",
"]",
":",
"outfilehandlers",
"[",
"command",
"]",
"[",
"columns",
"[",
"i",
"]",
"]",
"=",
"open",
"(",
"self",
".",
"get_csv_C",
"(",
"command",
",",
"columns",
"[",
"i",
"]",
")",
",",
"'w'",
")",
"self",
".",
"csv_files",
".",
"append",
"(",
"self",
".",
"get_csv_C",
"(",
"command",
",",
"columns",
"[",
"i",
"]",
")",
")",
"ts",
"=",
"naarad",
".",
"utils",
".",
"reconcile_timezones",
"(",
"ts",
",",
"self",
".",
"timezone",
",",
"self",
".",
"graph_timezone",
")",
"outfilehandlers",
"[",
"command",
"]",
"[",
"columns",
"[",
"i",
"]",
"]",
".",
"write",
"(",
"ts",
"+",
"','",
")",
"outfilehandlers",
"[",
"command",
"]",
"[",
"columns",
"[",
"i",
"]",
"]",
".",
"write",
"(",
"words",
"[",
"i",
"]",
")",
"outfilehandlers",
"[",
"command",
"]",
"[",
"columns",
"[",
"i",
"]",
"]",
".",
"write",
"(",
"'\\n'",
")",
"for",
"command",
"in",
"outfilehandlers",
":",
"for",
"column",
"in",
"outfilehandlers",
"[",
"command",
"]",
":",
"outfilehandlers",
"[",
"command",
"]",
"[",
"column",
"]",
".",
"close",
"(",
")",
"return",
"True"
] | Special parsing method for Innotop "Replication Status" results (innotop --mode M) | [
"Special",
"parsing",
"method",
"for",
"Innotop",
"Replication",
"Status",
"results",
"(",
"innotop",
"--",
"mode",
"M",
")"
] | python | valid |
twilio/twilio-python | twilio/rest/autopilot/v1/assistant/task/field.py | https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/rest/autopilot/v1/assistant/task/field.py#L120-L143 | def create(self, field_type, unique_name):
"""
Create a new FieldInstance
:param unicode field_type: The Field Type of this field
:param unicode unique_name: An application-defined string that uniquely identifies the new resource
:returns: Newly created FieldInstance
:rtype: twilio.rest.autopilot.v1.assistant.task.field.FieldInstance
"""
data = values.of({'FieldType': field_type, 'UniqueName': unique_name, })
payload = self._version.create(
'POST',
self._uri,
data=data,
)
return FieldInstance(
self._version,
payload,
assistant_sid=self._solution['assistant_sid'],
task_sid=self._solution['task_sid'],
) | [
"def",
"create",
"(",
"self",
",",
"field_type",
",",
"unique_name",
")",
":",
"data",
"=",
"values",
".",
"of",
"(",
"{",
"'FieldType'",
":",
"field_type",
",",
"'UniqueName'",
":",
"unique_name",
",",
"}",
")",
"payload",
"=",
"self",
".",
"_version",
".",
"create",
"(",
"'POST'",
",",
"self",
".",
"_uri",
",",
"data",
"=",
"data",
",",
")",
"return",
"FieldInstance",
"(",
"self",
".",
"_version",
",",
"payload",
",",
"assistant_sid",
"=",
"self",
".",
"_solution",
"[",
"'assistant_sid'",
"]",
",",
"task_sid",
"=",
"self",
".",
"_solution",
"[",
"'task_sid'",
"]",
",",
")"
] | Create a new FieldInstance
:param unicode field_type: The Field Type of this field
:param unicode unique_name: An application-defined string that uniquely identifies the new resource
:returns: Newly created FieldInstance
:rtype: twilio.rest.autopilot.v1.assistant.task.field.FieldInstance | [
"Create",
"a",
"new",
"FieldInstance"
] | python | train |
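A hedged example of reaching the create() call above through the Twilio Python client; the account credentials and SIDs are placeholders, and the traversal path reflects the twilio-python Autopilot API as commonly documented:

from twilio.rest import Client

client = Client('ACCOUNT_SID', 'AUTH_TOKEN')
field = client.autopilot \
    .assistants('UAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX') \
    .tasks('UDXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX') \
    .fields \
    .create(field_type='Twilio.YES_NO', unique_name='answer')
print(field.sid)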
nikhilkumarsingh/content-downloader | ctdl/ctdl.py | https://github.com/nikhilkumarsingh/content-downloader/blob/8b14af3a6eadcc43581e0425dc1d218208de12df/ctdl/ctdl.py#L43-L52 | def get_duckduckgo_links(limit, params, headers):
"""
function to fetch links equal to limit
duckduckgo pagination is not static, so there is a limit on
maximum number of links that can be scraped
"""
resp = s.get('https://duckduckgo.com/html', params = params, headers = headers)
links = scrape_links(resp.content, engine = 'd')
return links[:limit] | [
"def",
"get_duckduckgo_links",
"(",
"limit",
",",
"params",
",",
"headers",
")",
":",
"resp",
"=",
"s",
".",
"get",
"(",
"'https://duckduckgo.com/html'",
",",
"params",
"=",
"params",
",",
"headers",
"=",
"headers",
")",
"links",
"=",
"scrape_links",
"(",
"resp",
".",
"content",
",",
"engine",
"=",
"'d'",
")",
"return",
"links",
"[",
":",
"limit",
"]"
] | function to fetch links equal to limit
duckduckgo pagination is not static, so there is a limit on
maximum number of links that can be scraped | [
"function",
"to",
"fetch",
"links",
"equal",
"to",
"limit"
] | python | train |
PaulHancock/Aegean | AegeanTools/msq2.py | https://github.com/PaulHancock/Aegean/blob/185d2b4a51b48441a1df747efc9a5271c79399fd/AegeanTools/msq2.py#L114-L157 | def walk_perimeter(self, startx, starty):
"""
Starting at a point on the perimeter of a region, 'walk' the perimeter to return
to the starting point. Record the path taken.
Parameters
----------
startx, starty : int
The starting location. Assumed to be on the perimeter of a region.
Returns
-------
perimeter : list
A list of pixel coordinates [ [x1,y1], ...] that constitute the perimeter of the region.
"""
# checks
startx = max(startx, 0)
startx = min(startx, self.xsize)
starty = max(starty, 0)
starty = min(starty, self.ysize)
points = []
x, y = startx, starty
while True:
self.step(x, y)
if 0 <= x <= self.xsize and 0 <= y <= self.ysize:
points.append((x, y))
if self.next == self.UP:
y -= 1
elif self.next == self.LEFT:
x -= 1
elif self.next == self.DOWN:
y += 1
elif self.next == self.RIGHT:
x += 1
# stop if we meet some kind of error
elif self.next == self.NOWHERE:
break
# stop when we return to the starting location
if x == startx and y == starty:
break
return points | [
"def",
"walk_perimeter",
"(",
"self",
",",
"startx",
",",
"starty",
")",
":",
"# checks",
"startx",
"=",
"max",
"(",
"startx",
",",
"0",
")",
"startx",
"=",
"min",
"(",
"startx",
",",
"self",
".",
"xsize",
")",
"starty",
"=",
"max",
"(",
"starty",
",",
"0",
")",
"starty",
"=",
"min",
"(",
"starty",
",",
"self",
".",
"ysize",
")",
"points",
"=",
"[",
"]",
"x",
",",
"y",
"=",
"startx",
",",
"starty",
"while",
"True",
":",
"self",
".",
"step",
"(",
"x",
",",
"y",
")",
"if",
"0",
"<=",
"x",
"<=",
"self",
".",
"xsize",
"and",
"0",
"<=",
"y",
"<=",
"self",
".",
"ysize",
":",
"points",
".",
"append",
"(",
"(",
"x",
",",
"y",
")",
")",
"if",
"self",
".",
"next",
"==",
"self",
".",
"UP",
":",
"y",
"-=",
"1",
"elif",
"self",
".",
"next",
"==",
"self",
".",
"LEFT",
":",
"x",
"-=",
"1",
"elif",
"self",
".",
"next",
"==",
"self",
".",
"DOWN",
":",
"y",
"+=",
"1",
"elif",
"self",
".",
"next",
"==",
"self",
".",
"RIGHT",
":",
"x",
"+=",
"1",
"# stop if we meet some kind of error",
"elif",
"self",
".",
"next",
"==",
"self",
".",
"NOWHERE",
":",
"break",
"# stop when we return to the starting location",
"if",
"x",
"==",
"startx",
"and",
"y",
"==",
"starty",
":",
"break",
"return",
"points"
] | Starting at a point on the perimeter of a region, 'walk' the perimeter to return
to the starting point. Record the path taken.
Parameters
----------
startx, starty : int
The starting location. Assumed to be on the perimeter of a region.
Returns
-------
perimeter : list
A list of pixel coordinates [ [x1,y1], ...] that constitute the perimeter of the region. | [
"Starting",
"at",
"a",
"point",
"on",
"the",
"perimeter",
"of",
"a",
"region",
"walk",
"the",
"perimeter",
"to",
"return",
"to",
"the",
"starting",
"point",
".",
"Record",
"the",
"path",
"taken",
"."
] | python | train |
iron-io/iron_mq_python | iron_mq.py | https://github.com/iron-io/iron_mq_python/blob/d6a293f0d54b4ca2dca1c335f9867cd2310f6fc7/iron_mq.py#L63-L82 | def delete(self, message_id, reservation_id=None, subscriber_name=None):
"""Execute an HTTP request to delete a message from queue.
Arguments:
message_id -- The ID of the message to be deleted.
        reservation_id -- Reservation Id of the message. A reserved message cannot be deleted without its reservation Id.
        subscriber_name -- This is required to acknowledge a push after long processing of the message is finished.
"""
url = "queues/%s/messages/%s" % (self.name, message_id)
qitems = {}
if reservation_id is not None:
qitems['reservation_id'] = reservation_id
if subscriber_name is not None:
qitems['subscriber_name'] = subscriber_name
body = json.dumps(qitems)
result = self.client.delete(url=url, body=body,
headers={'Content-Type': 'application/json'})
return result['body'] | [
"def",
"delete",
"(",
"self",
",",
"message_id",
",",
"reservation_id",
"=",
"None",
",",
"subscriber_name",
"=",
"None",
")",
":",
"url",
"=",
"\"queues/%s/messages/%s\"",
"%",
"(",
"self",
".",
"name",
",",
"message_id",
")",
"qitems",
"=",
"{",
"}",
"if",
"reservation_id",
"is",
"not",
"None",
":",
"qitems",
"[",
"'reservation_id'",
"]",
"=",
"reservation_id",
"if",
"subscriber_name",
"is",
"not",
"None",
":",
"qitems",
"[",
"'subscriber_name'",
"]",
"=",
"subscriber_name",
"body",
"=",
"json",
".",
"dumps",
"(",
"qitems",
")",
"result",
"=",
"self",
".",
"client",
".",
"delete",
"(",
"url",
"=",
"url",
",",
"body",
"=",
"body",
",",
"headers",
"=",
"{",
"'Content-Type'",
":",
"'application/json'",
"}",
")",
"return",
"result",
"[",
"'body'",
"]"
] | Execute an HTTP request to delete a message from queue.
Arguments:
message_id -- The ID of the message to be deleted.
        reservation_id -- Reservation Id of the message. A reserved message cannot be deleted without its reservation Id.
        subscriber_name -- This is required to acknowledge a push after long processing of the message is finished. | [
"Execute",
"an",
"HTTP",
"request",
"to",
"delete",
"a",
"message",
"from",
"queue",
"."
] | python | train |
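An illustrative sketch of the reservation workflow that the delete() arguments above refer to; the project credentials are placeholders and the reserve() helper and its signature are assumptions about the iron_mq client rather than something taken from the record:

from iron_mq import IronMQ

mq = IronMQ(project_id='PROJECT_ID', token='TOKEN')
queue = mq.queue('tasks')

queue.post('hello')
reserved = queue.reserve(max=1)  # assumed helper and signature
msg = reserved['messages'][0]

# A reserved message has to be deleted together with its reservation_id.
queue.delete(msg['id'], reservation_id=msg['reservation_id'])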
pingali/dgit | dgitcore/vendor/pluginbase/pluginbase.py | https://github.com/pingali/dgit/blob/ecde01f40b98f0719dbcfb54452270ed2f86686d/dgitcore/vendor/pluginbase/pluginbase.py#L274-L296 | def open_resource(self, plugin, filename):
"""This function locates a resource inside the plugin and returns
a byte stream to the contents of it. If the resource cannot be
loaded an :exc:`IOError` will be raised. Only plugins that are
real Python packages can contain resources. Plain old Python
modules do not allow this for obvious reasons.
.. versionadded:: 0.3
:param plugin: the name of the plugin to open the resource of.
:param filename: the name of the file within the plugin to open.
"""
mod = self.load_plugin(plugin)
fn = getattr(mod, '__file__', None)
if fn is not None:
if fn.endswith(('.pyc', '.pyo')):
fn = fn[:-1]
if os.path.isfile(fn):
return open(os.path.join(os.path.dirname(fn), filename), 'rb')
buf = pkgutil.get_data(self.mod.__name__ + '.' + plugin, filename)
if buf is None:
            raise IOError(errno.ENOENT, 'Could not find resource')
return NativeBytesIO(buf) | [
"def",
"open_resource",
"(",
"self",
",",
"plugin",
",",
"filename",
")",
":",
"mod",
"=",
"self",
".",
"load_plugin",
"(",
"plugin",
")",
"fn",
"=",
"getattr",
"(",
"mod",
",",
"'__file__'",
",",
"None",
")",
"if",
"fn",
"is",
"not",
"None",
":",
"if",
"fn",
".",
"endswith",
"(",
"(",
"'.pyc'",
",",
"'.pyo'",
")",
")",
":",
"fn",
"=",
"fn",
"[",
":",
"-",
"1",
"]",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"fn",
")",
":",
"return",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"fn",
")",
",",
"filename",
")",
",",
"'rb'",
")",
"buf",
"=",
"pkgutil",
".",
"get_data",
"(",
"self",
".",
"mod",
".",
"__name__",
"+",
"'.'",
"+",
"plugin",
",",
"filename",
")",
"if",
"buf",
"is",
"None",
":",
"raise",
"IOError",
"(",
"errno",
".",
"ENOEXITS",
",",
"'Could not find resource'",
")",
"return",
"NativeBytesIO",
"(",
"buf",
")"
] | This function locates a resource inside the plugin and returns
a byte stream to the contents of it. If the resource cannot be
loaded an :exc:`IOError` will be raised. Only plugins that are
real Python packages can contain resources. Plain old Python
modules do not allow this for obvious reasons.
.. versionadded:: 0.3
:param plugin: the name of the plugin to open the resource of.
:param filename: the name of the file within the plugin to open. | [
"This",
"function",
"locates",
"a",
"resource",
"inside",
"the",
"plugin",
"and",
"returns",
"a",
"byte",
"stream",
"to",
"the",
"contents",
"of",
"it",
".",
"If",
"the",
"resource",
"cannot",
"be",
"loaded",
"an",
":",
"exc",
":",
"IOError",
"will",
"be",
"raised",
".",
"Only",
"plugins",
"that",
"are",
"real",
"Python",
"packages",
"can",
"contain",
"resources",
".",
"Plain",
"old",
"Python",
"modules",
"do",
"not",
"allow",
"this",
"for",
"obvious",
"reasons",
"."
] | python | valid |
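A short usage sketch for open_resource(); the PluginBase setup follows the upstream pluginbase documentation and the plugin and file names are hypothetical:

from pluginbase import PluginBase

plugin_base = PluginBase(package='myapp.plugins')
source = plugin_base.make_plugin_source(searchpath=['./plugins'])

# Only plugins that are real packages can carry resources; plain modules raise IOError.
with source.open_resource('my_plugin', 'data.txt') as fh:
    payload = fh.read()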
codelv/enaml-native | src/enamlnative/android/app.py | https://github.com/codelv/enaml-native/blob/c33986e9eda468c508806e0a3e73c771401e5718/src/enamlnative/android/app.py#L210-L233 | def show_view(self):
""" Show the current `app.view`. This will fade out the previous
with the new view.
"""
if not self.build_info:
def on_build_info(info):
""" Make sure the build info is ready before we
display the view
"""
self.dp = info['DISPLAY_DENSITY']
self.width = info['DISPLAY_WIDTH']
self.height = info['DISPLAY_HEIGHT']
self.orientation = ('square', 'portrait', 'landscape')[
info['DISPLAY_ORIENTATION']]
self.api_level = info['SDK_INT']
self.build_info = info
self._show_view()
self.init_widget()
self.widget.getBuildInfo().then(on_build_info)
else:
self._show_view() | [
"def",
"show_view",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"build_info",
":",
"def",
"on_build_info",
"(",
"info",
")",
":",
"\"\"\" Make sure the build info is ready before we \n display the view \n \n \"\"\"",
"self",
".",
"dp",
"=",
"info",
"[",
"'DISPLAY_DENSITY'",
"]",
"self",
".",
"width",
"=",
"info",
"[",
"'DISPLAY_WIDTH'",
"]",
"self",
".",
"height",
"=",
"info",
"[",
"'DISPLAY_HEIGHT'",
"]",
"self",
".",
"orientation",
"=",
"(",
"'square'",
",",
"'portrait'",
",",
"'landscape'",
")",
"[",
"info",
"[",
"'DISPLAY_ORIENTATION'",
"]",
"]",
"self",
".",
"api_level",
"=",
"info",
"[",
"'SDK_INT'",
"]",
"self",
".",
"build_info",
"=",
"info",
"self",
".",
"_show_view",
"(",
")",
"self",
".",
"init_widget",
"(",
")",
"self",
".",
"widget",
".",
"getBuildInfo",
"(",
")",
".",
"then",
"(",
"on_build_info",
")",
"else",
":",
"self",
".",
"_show_view",
"(",
")"
] | Show the current `app.view`. This will fade out the previous
with the new view. | [
"Show",
"the",
"current",
"app",
".",
"view",
".",
"This",
"will",
"fade",
"out",
"the",
"previous",
"with",
"the",
"new",
"view",
"."
] | python | train |
totalgood/pugnlp | src/pugnlp/util.py | https://github.com/totalgood/pugnlp/blob/c43445b14afddfdeadc5f3076675c9e8fc1ee67c/src/pugnlp/util.py#L2295-L2364 | def get_words(s, splitter_regex=rex.word_sep_except_external_appostrophe,
preprocessor=strip_HTML, postprocessor=strip_edge_punc, min_len=None,
max_len=None, blacklist=None, whitelist=None, lower=False,
filter_fun=None, str_type=str):
r"""Segment words (tokens), returning a list of all tokens
Does not return any separating whitespace or punctuation marks.
Attempts to return external apostrophes at the end of words.
    Comparable to `nltk.word_tokenize`.
Arguments:
splitter_regex (str or re): compiled or uncompiled regular expression
Applied to the input string using `re.split()`
preprocessor (function): defaults to a function that strips out all HTML tags
      postprocessor (function): a function to apply to each token before returning it as an element in the word list
Applied using the `map()` builtin
min_len (int): delete all words shorter than this number of characters
max_len (int): delete all words longer than this number of characters
blacklist and whitelist (list of str): words to delete or preserve
lower (bool): whether to convert all words to lowercase
      str_type (type): typically `str` or `unicode`, any type constructor that can be applied
to all words before returning the list
Returns:
list of str: list of tokens
>>> get_words('He said, "She called me \'Hoss\'!". I didn\'t hear.')
['He', 'said', 'She', 'called', 'me', 'Hoss', 'I', "didn't", 'hear']
>>> get_words('The foxes\' oh-so-tiny den was 2empty!')
['The', 'foxes', 'oh-so-tiny', 'den', 'was', '2empty']
"""
# TODO: Get rid of `lower` kwarg (and make sure code that uses it doesn't break)
# That and other simple postprocessors can be done outside of get_words
postprocessor = postprocessor or str_type
preprocessor = preprocessor or str_type
if min_len is None:
min_len = get_words.min_len
if max_len is None:
max_len = get_words.max_len
blacklist = blacklist or get_words.blacklist
whitelist = whitelist or get_words.whitelist
filter_fun = filter_fun or get_words.filter_fun
lower = lower or get_words.lower
try:
s = open(s, 'r')
except (IOError, FileNotFoundError):
pass
try:
s = s.read()
except (IOError, AttributeError, TypeError):
pass
if not isinstance(s, basestring):
try:
# flatten the list of lists of words from each obj (file or string)
return [word for obj in s for word in get_words(obj)]
except (IOError, IndexError, ValueError, AttributeError, TypeError):
pass
try:
s = preprocessor(s)
except (IndexError, ValueError, AttributeError, TypeError):
pass
if isinstance(splitter_regex, basestring):
splitter_regex = re.compile(splitter_regex)
s = list(map(postprocessor, splitter_regex.split(s)))
s = list(map(str_type, s))
if not filter_fun:
return s
return [word for word in s if
filter_fun(word, min_len=min_len, max_len=max_len,
blacklist=blacklist, whitelist=whitelist, lower=lower)] | [
"def",
"get_words",
"(",
"s",
",",
"splitter_regex",
"=",
"rex",
".",
"word_sep_except_external_appostrophe",
",",
"preprocessor",
"=",
"strip_HTML",
",",
"postprocessor",
"=",
"strip_edge_punc",
",",
"min_len",
"=",
"None",
",",
"max_len",
"=",
"None",
",",
"blacklist",
"=",
"None",
",",
"whitelist",
"=",
"None",
",",
"lower",
"=",
"False",
",",
"filter_fun",
"=",
"None",
",",
"str_type",
"=",
"str",
")",
":",
"# TODO: Get rid of `lower` kwarg (and make sure code that uses it doesn't break)",
"# That and other simple postprocessors can be done outside of get_words",
"postprocessor",
"=",
"postprocessor",
"or",
"str_type",
"preprocessor",
"=",
"preprocessor",
"or",
"str_type",
"if",
"min_len",
"is",
"None",
":",
"min_len",
"=",
"get_words",
".",
"min_len",
"if",
"max_len",
"is",
"None",
":",
"max_len",
"=",
"get_words",
".",
"max_len",
"blacklist",
"=",
"blacklist",
"or",
"get_words",
".",
"blacklist",
"whitelist",
"=",
"whitelist",
"or",
"get_words",
".",
"whitelist",
"filter_fun",
"=",
"filter_fun",
"or",
"get_words",
".",
"filter_fun",
"lower",
"=",
"lower",
"or",
"get_words",
".",
"lower",
"try",
":",
"s",
"=",
"open",
"(",
"s",
",",
"'r'",
")",
"except",
"(",
"IOError",
",",
"FileNotFoundError",
")",
":",
"pass",
"try",
":",
"s",
"=",
"s",
".",
"read",
"(",
")",
"except",
"(",
"IOError",
",",
"AttributeError",
",",
"TypeError",
")",
":",
"pass",
"if",
"not",
"isinstance",
"(",
"s",
",",
"basestring",
")",
":",
"try",
":",
"# flatten the list of lists of words from each obj (file or string)",
"return",
"[",
"word",
"for",
"obj",
"in",
"s",
"for",
"word",
"in",
"get_words",
"(",
"obj",
")",
"]",
"except",
"(",
"IOError",
",",
"IndexError",
",",
"ValueError",
",",
"AttributeError",
",",
"TypeError",
")",
":",
"pass",
"try",
":",
"s",
"=",
"preprocessor",
"(",
"s",
")",
"except",
"(",
"IndexError",
",",
"ValueError",
",",
"AttributeError",
",",
"TypeError",
")",
":",
"pass",
"if",
"isinstance",
"(",
"splitter_regex",
",",
"basestring",
")",
":",
"splitter_regex",
"=",
"re",
".",
"compile",
"(",
"splitter_regex",
")",
"s",
"=",
"list",
"(",
"map",
"(",
"postprocessor",
",",
"splitter_regex",
".",
"split",
"(",
"s",
")",
")",
")",
"s",
"=",
"list",
"(",
"map",
"(",
"str_type",
",",
"s",
")",
")",
"if",
"not",
"filter_fun",
":",
"return",
"s",
"return",
"[",
"word",
"for",
"word",
"in",
"s",
"if",
"filter_fun",
"(",
"word",
",",
"min_len",
"=",
"min_len",
",",
"max_len",
"=",
"max_len",
",",
"blacklist",
"=",
"blacklist",
",",
"whitelist",
"=",
"whitelist",
",",
"lower",
"=",
"lower",
")",
"]"
] | r"""Segment words (tokens), returning a list of all tokens
Does not return any separating whitespace or punctuation marks.
Attempts to return external apostrophes at the end of words.
    Comparable to `nltk.word_tokenize`.
Arguments:
splitter_regex (str or re): compiled or uncompiled regular expression
Applied to the input string using `re.split()`
preprocessor (function): defaults to a function that strips out all HTML tags
      postprocessor (function): a function to apply to each token before returning it as an element in the word list
Applied using the `map()` builtin
min_len (int): delete all words shorter than this number of characters
max_len (int): delete all words longer than this number of characters
blacklist and whitelist (list of str): words to delete or preserve
lower (bool): whether to convert all words to lowercase
      str_type (type): typically `str` or `unicode`, any type constructor that can be applied
to all words before returning the list
Returns:
list of str: list of tokens
>>> get_words('He said, "She called me \'Hoss\'!". I didn\'t hear.')
['He', 'said', 'She', 'called', 'me', 'Hoss', 'I', "didn't", 'hear']
>>> get_words('The foxes\' oh-so-tiny den was 2empty!')
['The', 'foxes', 'oh-so-tiny', 'den', 'was', '2empty'] | [
"r",
"Segment",
"words",
"(",
"tokens",
")",
"returning",
"a",
"list",
"of",
"all",
"tokens"
] | python | train |
google/grr | grr/server/grr_response_server/aff4_objects/user_managers.py | https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/aff4_objects/user_managers.py#L318-L323 | def _HasAccessToClient(self, subject, token):
"""Checks if user has access to a client under given URN."""
client_id, _ = rdfvalue.RDFURN(subject).Split(2)
client_urn = rdf_client.ClientURN(client_id)
return self.CheckClientAccess(token, client_urn) | [
"def",
"_HasAccessToClient",
"(",
"self",
",",
"subject",
",",
"token",
")",
":",
"client_id",
",",
"_",
"=",
"rdfvalue",
".",
"RDFURN",
"(",
"subject",
")",
".",
"Split",
"(",
"2",
")",
"client_urn",
"=",
"rdf_client",
".",
"ClientURN",
"(",
"client_id",
")",
"return",
"self",
".",
"CheckClientAccess",
"(",
"token",
",",
"client_urn",
")"
] | Checks if user has access to a client under given URN. | [
"Checks",
"if",
"user",
"has",
"access",
"to",
"a",
"client",
"under",
"given",
"URN",
"."
] | python | train |
Utagai/spice | spice_api/spice.py | https://github.com/Utagai/spice/blob/00b2c9e80ef338f4daef7643d99e8c7a0750b57c/spice_api/spice.py#L223-L230 | def delete(data, id, medium, credentials):
"""Deletes the [medium] with the given id and data from the user's [medium]List.
:param data The data for the [medium] to delete.
:param id The id of the data to delete.
:param medium Anime or manga (tokens.Medium.ANIME or tokens.Medium.MANGA).
:raise ValueError For bad arguments.
"""
_op(data, id, medium, tokens.Operations.DElETE, credentials) | [
"def",
"delete",
"(",
"data",
",",
"id",
",",
"medium",
",",
"credentials",
")",
":",
"_op",
"(",
"data",
",",
"id",
",",
"medium",
",",
"tokens",
".",
"Operations",
".",
"DElETE",
",",
"credentials",
")"
] | Deletes the [medium] with the given id and data from the user's [medium]List.
:param data The data for the [medium] to delete.
:param id The id of the data to delete.
:param medium Anime or manga (tokens.Medium.ANIME or tokens.Medium.MANGA).
:raise ValueError For bad arguments. | [
"Deletes",
"the",
"[",
"medium",
"]",
"with",
"the",
"given",
"id",
"and",
"data",
"from",
"the",
"user",
"s",
"[",
"medium",
"]",
"List",
".",
":",
"param",
"data",
"The",
"data",
"for",
"the",
"[",
"medium",
"]",
"to",
"delete",
".",
":",
"param",
"id",
"The",
"id",
"of",
"the",
"data",
"to",
"delete",
".",
":",
"param",
"medium",
"Anime",
"or",
"manga",
"(",
"tokens",
".",
"Medium",
".",
"ANIME",
"or",
"tokens",
".",
"Medium",
".",
"MANGA",
")",
".",
":",
"raise",
"ValueError",
"For",
"bad",
"arguments",
"."
] | python | train |
trailofbits/manticore | manticore/platforms/evm.py | https://github.com/trailofbits/manticore/blob/54c5a15b1119c523ae54c09972413e8b97f11629/manticore/platforms/evm.py#L2338-L2346 | def new_address(self, sender=None, nonce=None):
"""Create a fresh 160bit address"""
if sender is not None and nonce is None:
nonce = self.get_nonce(sender)
new_address = self.calculate_new_address(sender, nonce)
if sender is None and new_address in self:
return self.new_address(sender, nonce)
return new_address | [
"def",
"new_address",
"(",
"self",
",",
"sender",
"=",
"None",
",",
"nonce",
"=",
"None",
")",
":",
"if",
"sender",
"is",
"not",
"None",
"and",
"nonce",
"is",
"None",
":",
"nonce",
"=",
"self",
".",
"get_nonce",
"(",
"sender",
")",
"new_address",
"=",
"self",
".",
"calculate_new_address",
"(",
"sender",
",",
"nonce",
")",
"if",
"sender",
"is",
"None",
"and",
"new_address",
"in",
"self",
":",
"return",
"self",
".",
"new_address",
"(",
"sender",
",",
"nonce",
")",
"return",
"new_address"
] | Create a fresh 160bit address | [
"Create",
"a",
"fresh",
"160bit",
"address"
] | python | valid |
databio/pypiper | pypiper/manager.py | https://github.com/databio/pypiper/blob/00e6c2b94033c4187d47ff14c5580bbfc2ff097f/pypiper/manager.py#L517-L541 | def _set_status_flag(self, status):
"""
Configure state and files on disk to match current processing status.
:param str status: Name of new status designation for pipeline.
"""
# Remove previous status flag file.
flag_file_path = self._flag_file_path()
try:
os.remove(flag_file_path)
except:
# Print message only if the failure to remove the status flag
# is unexpected; there's no flag for initialization, so we
# can't remove the file.
if self.status != "initializing":
print("Could not remove flag file: '{}'".format(flag_file_path))
pass
# Set new status.
prev_status = self.status
self.status = status
self._create_file(self._flag_file_path())
print("\nChanged status from {} to {}.".format(
prev_status, self.status)) | [
"def",
"_set_status_flag",
"(",
"self",
",",
"status",
")",
":",
"# Remove previous status flag file.",
"flag_file_path",
"=",
"self",
".",
"_flag_file_path",
"(",
")",
"try",
":",
"os",
".",
"remove",
"(",
"flag_file_path",
")",
"except",
":",
"# Print message only if the failure to remove the status flag",
"# is unexpected; there's no flag for initialization, so we",
"# can't remove the file.",
"if",
"self",
".",
"status",
"!=",
"\"initializing\"",
":",
"print",
"(",
"\"Could not remove flag file: '{}'\"",
".",
"format",
"(",
"flag_file_path",
")",
")",
"pass",
"# Set new status.",
"prev_status",
"=",
"self",
".",
"status",
"self",
".",
"status",
"=",
"status",
"self",
".",
"_create_file",
"(",
"self",
".",
"_flag_file_path",
"(",
")",
")",
"print",
"(",
"\"\\nChanged status from {} to {}.\"",
".",
"format",
"(",
"prev_status",
",",
"self",
".",
"status",
")",
")"
] | Configure state and files on disk to match current processing status.
:param str status: Name of new status designation for pipeline. | [
"Configure",
"state",
"and",
"files",
"on",
"disk",
"to",
"match",
"current",
"processing",
"status",
"."
] | python | train |
ndf-zz/asfv1 | asfv1.py | https://github.com/ndf-zz/asfv1/blob/c18f940d7ee86b14e6b201e6d8a4b71e3a57c34a/asfv1.py#L138-L169 | def bintoihex(buf, spos=0x0000):
"""Convert binary buffer to ihex and return as string."""
c = 0
olen = len(buf)
ret = ""
# 16 byte lines
while (c+0x10) <= olen:
adr = c + spos
l = ':10{0:04X}00'.format(adr)
sum = 0x10+((adr>>8)&M8)+(adr&M8)
for j in range(0,0x10):
nb = buf[c+j]
l += '{0:02X}'.format(nb)
sum = (sum + nb)&M8
l += '{0:02X}'.format((~sum+1)&M8)
ret += l + '\n'
c += 0x10
# remainder
if c < olen:
rem = olen-c
sum = rem
adr = c + spos
l = ':{0:02X}{1:04X}00'.format(rem,adr) # rem < 0x10
sum += ((adr>>8)&M8)+(adr&M8)
for j in range(0,rem):
nb = buf[c+j]
l += '{0:02X}'.format(nb)
sum = (sum + nb)&M8
l += '{0:02X}'.format((~sum+1)&M8)
ret += l + '\n'
ret += ':00000001FF\n' # EOF
return ret | [
"def",
"bintoihex",
"(",
"buf",
",",
"spos",
"=",
"0x0000",
")",
":",
"c",
"=",
"0",
"olen",
"=",
"len",
"(",
"buf",
")",
"ret",
"=",
"\"\"",
"# 16 byte lines",
"while",
"(",
"c",
"+",
"0x10",
")",
"<=",
"olen",
":",
"adr",
"=",
"c",
"+",
"spos",
"l",
"=",
"':10{0:04X}00'",
".",
"format",
"(",
"adr",
")",
"sum",
"=",
"0x10",
"+",
"(",
"(",
"adr",
">>",
"8",
")",
"&",
"M8",
")",
"+",
"(",
"adr",
"&",
"M8",
")",
"for",
"j",
"in",
"range",
"(",
"0",
",",
"0x10",
")",
":",
"nb",
"=",
"buf",
"[",
"c",
"+",
"j",
"]",
"l",
"+=",
"'{0:02X}'",
".",
"format",
"(",
"nb",
")",
"sum",
"=",
"(",
"sum",
"+",
"nb",
")",
"&",
"M8",
"l",
"+=",
"'{0:02X}'",
".",
"format",
"(",
"(",
"~",
"sum",
"+",
"1",
")",
"&",
"M8",
")",
"ret",
"+=",
"l",
"+",
"'\\n'",
"c",
"+=",
"0x10",
"# remainder",
"if",
"c",
"<",
"olen",
":",
"rem",
"=",
"olen",
"-",
"c",
"sum",
"=",
"rem",
"adr",
"=",
"c",
"+",
"spos",
"l",
"=",
"':{0:02X}{1:04X}00'",
".",
"format",
"(",
"rem",
",",
"adr",
")",
"# rem < 0x10",
"sum",
"+=",
"(",
"(",
"adr",
">>",
"8",
")",
"&",
"M8",
")",
"+",
"(",
"adr",
"&",
"M8",
")",
"for",
"j",
"in",
"range",
"(",
"0",
",",
"rem",
")",
":",
"nb",
"=",
"buf",
"[",
"c",
"+",
"j",
"]",
"l",
"+=",
"'{0:02X}'",
".",
"format",
"(",
"nb",
")",
"sum",
"=",
"(",
"sum",
"+",
"nb",
")",
"&",
"M8",
"l",
"+=",
"'{0:02X}'",
".",
"format",
"(",
"(",
"~",
"sum",
"+",
"1",
")",
"&",
"M8",
")",
"ret",
"+=",
"l",
"+",
"'\\n'",
"ret",
"+=",
"':00000001FF\\n'",
"# EOF",
"return",
"ret"
] | Convert binary buffer to ihex and return as string. | [
"Convert",
"binary",
"buffer",
"to",
"ihex",
"and",
"return",
"as",
"string",
"."
] | python | train |
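A small illustrative companion to bintoihex() above, checking the Intel HEX checksum property: every emitted record's bytes, including the trailing checksum, sum to zero modulo 256. The helper and sample buffer are assumptions, not part of the record:

def ihex_record_ok(line):
    # ':LLAAAATT<data>CC' -- drop the leading ':' and verify the byte sum is 0 mod 256.
    raw = bytes.fromhex(line.strip()[1:])
    return sum(raw) % 256 == 0

data = bytes(range(0x23))  # 35 bytes -> two full 16-byte records plus a 3-byte remainder
for rec in bintoihex(data, spos=0x0000).splitlines():
    assert ihex_record_ok(rec), rec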
elyase/masstable | masstable/masstable.py | https://github.com/elyase/masstable/blob/3eb72b22cd3337bc5c6bb95bb7bb73fdbe6ae9e2/masstable/masstable.py#L489-L496 | def ds2n(self):
"""Calculates the derivative of the neutron separation energies:
ds2n(Z,A) = s2n(Z,A) - s2n(Z,A+2)
"""
idx = [(x[0] + 0, x[1] + 2) for x in self.df.index]
values = self.s2n.values - self.s2n.loc[idx].values
return Table(df=pd.Series(values, index=self.df.index, name='ds2n' + '(' + self.name + ')')) | [
"def",
"ds2n",
"(",
"self",
")",
":",
"idx",
"=",
"[",
"(",
"x",
"[",
"0",
"]",
"+",
"0",
",",
"x",
"[",
"1",
"]",
"+",
"2",
")",
"for",
"x",
"in",
"self",
".",
"df",
".",
"index",
"]",
"values",
"=",
"self",
".",
"s2n",
".",
"values",
"-",
"self",
".",
"s2n",
".",
"loc",
"[",
"idx",
"]",
".",
"values",
"return",
"Table",
"(",
"df",
"=",
"pd",
".",
"Series",
"(",
"values",
",",
"index",
"=",
"self",
".",
"df",
".",
"index",
",",
"name",
"=",
"'ds2n'",
"+",
"'('",
"+",
"self",
".",
"name",
"+",
"')'",
")",
")"
] | Calculates the derivative of the neutron separation energies:
ds2n(Z,A) = s2n(Z,A) - s2n(Z,A+2) | [
"Calculates",
"the",
"derivative",
"of",
"the",
"neutron",
"separation",
"energies",
":"
] | python | test |
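A toy sketch of the index-shift trick used by ds2n() above; the s2n values are made up, and pandas' reindex stands in for the .loc lookup so that missing (Z, A+2) partners simply become NaN:

import pandas as pd

s2n = pd.Series({(50, 130): 12.1, (50, 132): 10.4, (50, 134): 9.0})
idx = [(Z, A + 2) for (Z, A) in s2n.index]
ds2n = pd.Series(s2n.values - s2n.reindex(idx).values, index=s2n.index)
# ds2n[(50, 130)] == 12.1 - 10.4; the last entry has no (Z, A+2) partner and is NaN.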
python-diamond/Diamond | src/collectors/onewire/onewire.py | https://github.com/python-diamond/Diamond/blob/0f3eb04327d6d3ed5e53a9967d6c9d2c09714a47/src/collectors/onewire/onewire.py#L68-L94 | def read_values(self, oid, files, metrics):
"""
        Reads values from owfs/oid/{files} and updates
metrics with format [oid.alias] = value
"""
oid_path = os.path.join(self.config['owfs'], oid)
oid = oid.replace('.', '_')
for fn, alias in files.iteritems():
fv = os.path.join(oid_path, fn)
if os.path.isfile(fv):
try:
f = open(fv)
v = f.read()
f.close()
except:
self.log.error("Unable to read %s", fv)
raise
try:
v = float(v)
except:
self.log.error("Unexpected value %s in %s", v, fv)
raise
metrics["%s.%s" % (oid, alias)] = v | [
"def",
"read_values",
"(",
"self",
",",
"oid",
",",
"files",
",",
"metrics",
")",
":",
"oid_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"config",
"[",
"'owfs'",
"]",
",",
"oid",
")",
"oid",
"=",
"oid",
".",
"replace",
"(",
"'.'",
",",
"'_'",
")",
"for",
"fn",
",",
"alias",
"in",
"files",
".",
"iteritems",
"(",
")",
":",
"fv",
"=",
"os",
".",
"path",
".",
"join",
"(",
"oid_path",
",",
"fn",
")",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"fv",
")",
":",
"try",
":",
"f",
"=",
"open",
"(",
"fv",
")",
"v",
"=",
"f",
".",
"read",
"(",
")",
"f",
".",
"close",
"(",
")",
"except",
":",
"self",
".",
"log",
".",
"error",
"(",
"\"Unable to read %s\"",
",",
"fv",
")",
"raise",
"try",
":",
"v",
"=",
"float",
"(",
"v",
")",
"except",
":",
"self",
".",
"log",
".",
"error",
"(",
"\"Unexpected value %s in %s\"",
",",
"v",
",",
"fv",
")",
"raise",
"metrics",
"[",
"\"%s.%s\"",
"%",
"(",
"oid",
",",
"alias",
")",
"]",
"=",
"v"
] | Reads values from owfs/oid/{files} and updates
metrics with format [oid.alias] = value | [
"Reads",
"values",
"from",
"owfs",
"/",
"oid",
"/",
"{",
"files",
"}",
"and",
"update",
"metrics",
"with",
"format",
"[",
"oid",
".",
"alias",
"]",
"=",
"value"
] | python | train |
obilaniu/Nauka | src/nauka/exp/experiment.py | https://github.com/obilaniu/Nauka/blob/1492a4f9d204a868c1a8a1d327bd108490b856b4/src/nauka/exp/experiment.py#L215-L217 | def strategyLastK(kls, n, k=10):
"""Return the directory names to preserve under the LastK purge strategy."""
return set(map(str, filter(lambda x:x>=0, range(n, n-k, -1)))) | [
"def",
"strategyLastK",
"(",
"kls",
",",
"n",
",",
"k",
"=",
"10",
")",
":",
"return",
"set",
"(",
"map",
"(",
"str",
",",
"filter",
"(",
"lambda",
"x",
":",
"x",
">=",
"0",
",",
"range",
"(",
"n",
",",
"n",
"-",
"k",
",",
"-",
"1",
")",
")",
")",
")"
] | Return the directory names to preserve under the LastK purge strategy. | [
"Return",
"the",
"directory",
"names",
"to",
"preserve",
"under",
"the",
"LastK",
"purge",
"strategy",
"."
] | python | train |
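A standalone sanity check of the LastK arithmetic above (kept separate from the experiment class, which this record does not show): for n=12 and k=5 the preserved directory names are the last five non-negative indices, as strings.

# Same expression as the classmethod body, evaluated for n=12, k=5.
kept = set(map(str, filter(lambda x: x >= 0, range(12, 12 - 5, -1))))
assert kept == {'8', '9', '10', '11', '12'}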
maximtrp/scikit-posthocs | scikit_posthocs/_posthocs.py | https://github.com/maximtrp/scikit-posthocs/blob/5476b09e2a325cd4e31c0b0bc6906ab5cd77fc5d/scikit_posthocs/_posthocs.py#L1900-L1991 | def posthoc_mannwhitney(a, val_col=None, group_col=None, use_continuity=True, alternative='two-sided', p_adjust=None, sort=True):
'''Pairwise comparisons with Mann-Whitney rank test.
Parameters
----------
a : array_like or pandas DataFrame object
An array, any object exposing the array interface or a pandas
DataFrame. Array must be two-dimensional.
val_col : str, optional
Name of a DataFrame column that contains dependent variable values (test
or response variable). Values should have a non-nominal scale. Must be
specified if `a` is a pandas DataFrame object.
group_col : str, optional
Name of a DataFrame column that contains independent variable values
(grouping or predictor variable). Values should have a nominal scale
(categorical). Must be specified if `a` is a pandas DataFrame object.
use_continuity : bool, optional
Whether a continuity correction (1/2.) should be taken into account.
Default is True.
alternative : ['two-sided', 'less', or 'greater'], optional
Whether to get the p-value for the one-sided hypothesis
('less' or 'greater') or for the two-sided hypothesis ('two-sided').
Defaults to 'two-sided'.
p_adjust : str, optional
Method for adjusting p values.
See statsmodels.sandbox.stats.multicomp for details.
Available methods are:
'bonferroni' : one-step correction
'sidak' : one-step correction
'holm-sidak' : step-down method using Sidak adjustments
'holm' : step-down method using Bonferroni adjustments
'simes-hochberg' : step-up method (independent)
'hommel' : closed method based on Simes tests (non-negative)
'fdr_bh' : Benjamini/Hochberg (non-negative)
'fdr_by' : Benjamini/Yekutieli (negative)
'fdr_tsbh' : two stage fdr correction (non-negative)
'fdr_tsbky' : two stage fdr correction (non-negative)
sort : bool, optional
Specifies whether to sort DataFrame by group_col or not. Recommended
unless you sort your data manually.
Returns
-------
result : pandas DataFrame
P values.
Notes
-----
Refer to `scipy.stats.mannwhitneyu` reference page for further details.
Examples
--------
>>> x = [[1,2,3,4,5], [35,31,75,40,21], [10,6,9,6,1]]
>>> sp.posthoc_mannwhitney(x, p_adjust = 'holm')
'''
x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)
if not sort:
x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
x.sort_values(by=[_group_col, _val_col], ascending=True, inplace=True)
groups = np.unique(x[_group_col])
x_len = groups.size
vs = np.zeros((x_len, x_len))
tri_upper = np.triu_indices(vs.shape[0], 1)
tri_lower = np.tril_indices(vs.shape[0], -1)
vs[:,:] = 0
combs = it.combinations(range(x_len), 2)
for i,j in combs:
vs[i, j] = ss.mannwhitneyu(x.loc[x[_group_col] == groups[i], _val_col],
x.loc[x[_group_col] == groups[j], _val_col],
use_continuity=use_continuity,
alternative=alternative)[1]
if p_adjust:
vs[tri_upper] = multipletests(vs[tri_upper], method = p_adjust)[1]
vs[tri_lower] = vs.T[tri_lower]
np.fill_diagonal(vs, -1)
return DataFrame(vs, index=groups, columns=groups) | [
"def",
"posthoc_mannwhitney",
"(",
"a",
",",
"val_col",
"=",
"None",
",",
"group_col",
"=",
"None",
",",
"use_continuity",
"=",
"True",
",",
"alternative",
"=",
"'two-sided'",
",",
"p_adjust",
"=",
"None",
",",
"sort",
"=",
"True",
")",
":",
"x",
",",
"_val_col",
",",
"_group_col",
"=",
"__convert_to_df",
"(",
"a",
",",
"val_col",
",",
"group_col",
")",
"if",
"not",
"sort",
":",
"x",
"[",
"_group_col",
"]",
"=",
"Categorical",
"(",
"x",
"[",
"_group_col",
"]",
",",
"categories",
"=",
"x",
"[",
"_group_col",
"]",
".",
"unique",
"(",
")",
",",
"ordered",
"=",
"True",
")",
"x",
".",
"sort_values",
"(",
"by",
"=",
"[",
"_group_col",
",",
"_val_col",
"]",
",",
"ascending",
"=",
"True",
",",
"inplace",
"=",
"True",
")",
"groups",
"=",
"np",
".",
"unique",
"(",
"x",
"[",
"_group_col",
"]",
")",
"x_len",
"=",
"groups",
".",
"size",
"vs",
"=",
"np",
".",
"zeros",
"(",
"(",
"x_len",
",",
"x_len",
")",
")",
"tri_upper",
"=",
"np",
".",
"triu_indices",
"(",
"vs",
".",
"shape",
"[",
"0",
"]",
",",
"1",
")",
"tri_lower",
"=",
"np",
".",
"tril_indices",
"(",
"vs",
".",
"shape",
"[",
"0",
"]",
",",
"-",
"1",
")",
"vs",
"[",
":",
",",
":",
"]",
"=",
"0",
"combs",
"=",
"it",
".",
"combinations",
"(",
"range",
"(",
"x_len",
")",
",",
"2",
")",
"for",
"i",
",",
"j",
"in",
"combs",
":",
"vs",
"[",
"i",
",",
"j",
"]",
"=",
"ss",
".",
"mannwhitneyu",
"(",
"x",
".",
"loc",
"[",
"x",
"[",
"_group_col",
"]",
"==",
"groups",
"[",
"i",
"]",
",",
"_val_col",
"]",
",",
"x",
".",
"loc",
"[",
"x",
"[",
"_group_col",
"]",
"==",
"groups",
"[",
"j",
"]",
",",
"_val_col",
"]",
",",
"use_continuity",
"=",
"use_continuity",
",",
"alternative",
"=",
"alternative",
")",
"[",
"1",
"]",
"if",
"p_adjust",
":",
"vs",
"[",
"tri_upper",
"]",
"=",
"multipletests",
"(",
"vs",
"[",
"tri_upper",
"]",
",",
"method",
"=",
"p_adjust",
")",
"[",
"1",
"]",
"vs",
"[",
"tri_lower",
"]",
"=",
"vs",
".",
"T",
"[",
"tri_lower",
"]",
"np",
".",
"fill_diagonal",
"(",
"vs",
",",
"-",
"1",
")",
"return",
"DataFrame",
"(",
"vs",
",",
"index",
"=",
"groups",
",",
"columns",
"=",
"groups",
")"
] | Pairwise comparisons with Mann-Whitney rank test.
Parameters
----------
a : array_like or pandas DataFrame object
An array, any object exposing the array interface or a pandas
DataFrame. Array must be two-dimensional.
val_col : str, optional
Name of a DataFrame column that contains dependent variable values (test
or response variable). Values should have a non-nominal scale. Must be
specified if `a` is a pandas DataFrame object.
group_col : str, optional
Name of a DataFrame column that contains independent variable values
(grouping or predictor variable). Values should have a nominal scale
(categorical). Must be specified if `a` is a pandas DataFrame object.
use_continuity : bool, optional
Whether a continuity correction (1/2.) should be taken into account.
Default is True.
alternative : ['two-sided', 'less', or 'greater'], optional
Whether to get the p-value for the one-sided hypothesis
('less' or 'greater') or for the two-sided hypothesis ('two-sided').
Defaults to 'two-sided'.
p_adjust : str, optional
Method for adjusting p values.
See statsmodels.sandbox.stats.multicomp for details.
Available methods are:
'bonferroni' : one-step correction
'sidak' : one-step correction
'holm-sidak' : step-down method using Sidak adjustments
'holm' : step-down method using Bonferroni adjustments
'simes-hochberg' : step-up method (independent)
'hommel' : closed method based on Simes tests (non-negative)
'fdr_bh' : Benjamini/Hochberg (non-negative)
'fdr_by' : Benjamini/Yekutieli (negative)
'fdr_tsbh' : two stage fdr correction (non-negative)
'fdr_tsbky' : two stage fdr correction (non-negative)
sort : bool, optional
Specifies whether to sort DataFrame by group_col or not. Recommended
unless you sort your data manually.
Returns
-------
result : pandas DataFrame
P values.
Notes
-----
Refer to `scipy.stats.mannwhitneyu` reference page for further details.
Examples
--------
>>> x = [[1,2,3,4,5], [35,31,75,40,21], [10,6,9,6,1]]
>>> sp.posthoc_mannwhitney(x, p_adjust = 'holm') | [
"Pairwise",
"comparisons",
"with",
"Mann",
"-",
"Whitney",
"rank",
"test",
"."
] | python | train |
PolicyStat/docx2html | docx2html/core.py | https://github.com/PolicyStat/docx2html/blob/2dc4afd1e3a3f2f0b357d0bff903eb58bcc94429/docx2html/core.py#L393-L406 | def get_v_merge(tc):
"""
vMerge is what docx uses to denote that a table cell is part of a rowspan.
The first cell to have a vMerge is the start of the rowspan, and the vMerge
will be denoted with 'restart'. If it is anything other than restart then
it is a continuation of another rowspan.
"""
if tc is None:
return None
v_merges = tc.xpath('.//w:vMerge', namespaces=tc.nsmap)
if len(v_merges) != 1:
return None
v_merge = v_merges[0]
return v_merge | [
"def",
"get_v_merge",
"(",
"tc",
")",
":",
"if",
"tc",
"is",
"None",
":",
"return",
"None",
"v_merges",
"=",
"tc",
".",
"xpath",
"(",
"'.//w:vMerge'",
",",
"namespaces",
"=",
"tc",
".",
"nsmap",
")",
"if",
"len",
"(",
"v_merges",
")",
"!=",
"1",
":",
"return",
"None",
"v_merge",
"=",
"v_merges",
"[",
"0",
"]",
"return",
"v_merge"
] | vMerge is what docx uses to denote that a table cell is part of a rowspan.
The first cell to have a vMerge is the start of the rowspan, and the vMerge
will be denoted with 'restart'. If it is anything other than restart then
it is a continuation of another rowspan. | [
"vMerge",
"is",
"what",
"docx",
"uses",
"to",
"denote",
"that",
"a",
"table",
"cell",
"is",
"part",
"of",
"a",
"rowspan",
".",
"The",
"first",
"cell",
"to",
"have",
"a",
"vMerge",
"is",
"the",
"start",
"of",
"the",
"rowspan",
"and",
"the",
"vMerge",
"will",
"be",
"denoted",
"with",
"restart",
".",
"If",
"it",
"is",
"anything",
"other",
"than",
"restart",
"then",
"it",
"is",
"a",
"continuation",
"of",
"another",
"rowspan",
"."
] | python | test |
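A small lxml sketch of get_v_merge on a hand-written table cell; the snippet and its prefix are illustrative, only the wordprocessingml namespace URI is the real one used by docx.

from lxml import etree

W_NS = 'http://schemas.openxmlformats.org/wordprocessingml/2006/main'
xml = ('<w:tc xmlns:w="%s"><w:tcPr>'
       '<w:vMerge w:val="restart"/></w:tcPr></w:tc>' % W_NS)
tc = etree.fromstring(xml)
v_merge = get_v_merge(tc)
assert v_merge is not None and v_merge.get('{%s}val' % W_NS) == 'restart'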
kislyuk/aegea | aegea/packages/github3/pulls.py | https://github.com/kislyuk/aegea/blob/94957e9dba036eae3052e2662c208b259c08399a/aegea/packages/github3/pulls.py#L246-L252 | def is_merged(self):
"""Checks to see if the pull request was merged.
:returns: bool
"""
url = self._build_url('merge', base_url=self._api)
return self._boolean(self._get(url), 204, 404) | [
"def",
"is_merged",
"(",
"self",
")",
":",
"url",
"=",
"self",
".",
"_build_url",
"(",
"'merge'",
",",
"base_url",
"=",
"self",
".",
"_api",
")",
"return",
"self",
".",
"_boolean",
"(",
"self",
".",
"_get",
"(",
"url",
")",
",",
"204",
",",
"404",
")"
] | Checks to see if the pull request was merged.
:returns: bool | [
"Checks",
"to",
"see",
"if",
"the",
"pull",
"request",
"was",
"merged",
"."
] | python | train |
dw/mitogen | mitogen/core.py | https://github.com/dw/mitogen/blob/a7fdb55e1300a7e0a5e404b09eb730cf9a525da7/mitogen/core.py#L743-L748 | def dead(cls, reason=None, **kwargs):
"""
Syntax helper to construct a dead message.
"""
kwargs['data'], _ = UTF8_CODEC.encode(reason or u'')
return cls(reply_to=IS_DEAD, **kwargs) | [
"def",
"dead",
"(",
"cls",
",",
"reason",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"kwargs",
"[",
"'data'",
"]",
",",
"_",
"=",
"UTF8_CODEC",
".",
"encode",
"(",
"reason",
"or",
"u''",
")",
"return",
"cls",
"(",
"reply_to",
"=",
"IS_DEAD",
",",
"*",
"*",
"kwargs",
")"
] | Syntax helper to construct a dead message. | [
"Syntax",
"helper",
"to",
"construct",
"a",
"dead",
"message",
"."
] | python | train |
matousc89/padasip | padasip/detection/elbnd.py | https://github.com/matousc89/padasip/blob/c969eadd7fa181a84da0554d737fc13c6450d16f/padasip/detection/elbnd.py#L93-L138 | def ELBND(w, e, function="max"):
"""
This function estimates Error and Learning Based Novelty Detection measure
from given data.
**Args:**
* `w` : history of adaptive parameters of an adaptive model (2d array),
every row represents parameters in given time index.
* `e` : error of adaptive model (1d array)
**Kwargs:**
    * `function` : output function (str). The way to produce a single
value for every sample (from all parameters)
* `max` - maximal value
* `sum` - sum of values
**Returns:**
    * ELBND values (1d array). This vector has the same length as `w`.
"""
# check if the function is known
if not function in ["max", "sum"]:
raise ValueError('Unknown output function')
# get length of data and number of parameters
N = w.shape[0]
n = w.shape[1]
# get abs dw from w
dw = np.zeros(w.shape)
dw[:-1] = np.abs(np.diff(w, axis=0))
    # absolute values of product of increments and error
    elbnd = np.abs((dw.T*e).T)
# apply output function
if function == "max":
elbnd = np.max(elbnd, axis=1)
elif function == "sum":
elbnd = np.sum(elbnd, axis=1)
# return output
return elbnd | [
"def",
"ELBND",
"(",
"w",
",",
"e",
",",
"function",
"=",
"\"max\"",
")",
":",
"# check if the function is known",
"if",
"not",
"function",
"in",
"[",
"\"max\"",
",",
"\"sum\"",
"]",
":",
"raise",
"ValueError",
"(",
"'Unknown output function'",
")",
"# get length of data and number of parameters",
"N",
"=",
"w",
".",
"shape",
"[",
"0",
"]",
"n",
"=",
"w",
".",
"shape",
"[",
"1",
"]",
"# get abs dw from w",
"dw",
"=",
"np",
".",
"zeros",
"(",
"w",
".",
"shape",
")",
"dw",
"[",
":",
"-",
"1",
"]",
"=",
"np",
".",
"abs",
"(",
"np",
".",
"diff",
"(",
"w",
",",
"axis",
"=",
"0",
")",
")",
"# absolute values of product of increments and error",
"a",
"=",
"np",
".",
"random",
".",
"random",
"(",
"(",
"5",
",",
"2",
")",
")",
"b",
"=",
"a",
".",
"T",
"*",
"np",
".",
"array",
"(",
"[",
"1",
",",
"2",
",",
"3",
",",
"4",
",",
"5",
"]",
")",
"elbnd",
"=",
"np",
".",
"abs",
"(",
"(",
"dw",
".",
"T",
"*",
"e",
")",
".",
"T",
")",
"# apply output function",
"if",
"function",
"==",
"\"max\"",
":",
"elbnd",
"=",
"np",
".",
"max",
"(",
"elbnd",
",",
"axis",
"=",
"1",
")",
"elif",
"function",
"==",
"\"sum\"",
":",
"elbnd",
"=",
"np",
".",
"sum",
"(",
"elbnd",
",",
"axis",
"=",
"1",
")",
"# return output",
"return",
"elbnd"
] | This function estimates Error and Learning Based Novelty Detection measure
from given data.
**Args:**
* `w` : history of adaptive parameters of an adaptive model (2d array),
every row represents parameters in given time index.
* `e` : error of adaptive model (1d array)
**Kwargs:**
    * `function` : output function (str). The way to produce a single
value for every sample (from all parameters)
* `max` - maximal value
* `sum` - sum of values
**Returns:**
    * ELBND values (1d array). This vector has the same length as `w`. | [
"This",
"function",
"estimates",
"Error",
"and",
"Learning",
"Based",
"Novelty",
"Detection",
"measure",
"from",
"given",
"data",
"."
] | python | train |
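A minimal usage sketch of ELBND with synthetic inputs, only meant to show the expected shapes; the numbers are random and carry no meaning.

import numpy as np

w_hist = np.random.normal(size=(100, 4))   # parameter history of a 4-weight filter, shape (N, n)
err = np.random.normal(size=100)           # model error, one value per sample, shape (N,)
novelty = ELBND(w_hist, err, function="max")
assert novelty.shape == (100,)             # one ELBND value per sample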
mitsei/dlkit | dlkit/json_/assessment/sessions.py | https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/assessment/sessions.py#L5704-L5746 | def get_assessments_offered_by_query(self, assessment_offered_query):
"""Gets a list of ``AssessmentOffered`` elements matching the given assessment offered query.
arg: assessment_offered_query
(osid.assessment.AssessmentOfferedQuery): the assessment
offered query
return: (osid.assessment.AssessmentOfferedList) - the returned
``AssessmentOfferedList``
raise: NullArgument - ``assessment_offered_query`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure occurred
raise: Unsupported - ``assessment_offered_query`` is not of
this service
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.ResourceQuerySession.get_resources_by_query
and_list = list()
or_list = list()
for term in assessment_offered_query._query_terms:
if '$in' in assessment_offered_query._query_terms[term] and '$nin' in assessment_offered_query._query_terms[term]:
and_list.append(
{'$or': [{term: {'$in': assessment_offered_query._query_terms[term]['$in']}},
{term: {'$nin': assessment_offered_query._query_terms[term]['$nin']}}]})
else:
and_list.append({term: assessment_offered_query._query_terms[term]})
for term in assessment_offered_query._keyword_terms:
or_list.append({term: assessment_offered_query._keyword_terms[term]})
if or_list:
and_list.append({'$or': or_list})
view_filter = self._view_filter()
if view_filter:
and_list.append(view_filter)
if and_list:
query_terms = {'$and': and_list}
collection = JSONClientValidated('assessment',
collection='AssessmentOffered',
runtime=self._runtime)
result = collection.find(query_terms).sort('_id', DESCENDING)
else:
result = []
return objects.AssessmentOfferedList(result, runtime=self._runtime, proxy=self._proxy) | [
"def",
"get_assessments_offered_by_query",
"(",
"self",
",",
"assessment_offered_query",
")",
":",
"# Implemented from template for",
"# osid.resource.ResourceQuerySession.get_resources_by_query",
"and_list",
"=",
"list",
"(",
")",
"or_list",
"=",
"list",
"(",
")",
"for",
"term",
"in",
"assessment_offered_query",
".",
"_query_terms",
":",
"if",
"'$in'",
"in",
"assessment_offered_query",
".",
"_query_terms",
"[",
"term",
"]",
"and",
"'$nin'",
"in",
"assessment_offered_query",
".",
"_query_terms",
"[",
"term",
"]",
":",
"and_list",
".",
"append",
"(",
"{",
"'$or'",
":",
"[",
"{",
"term",
":",
"{",
"'$in'",
":",
"assessment_offered_query",
".",
"_query_terms",
"[",
"term",
"]",
"[",
"'$in'",
"]",
"}",
"}",
",",
"{",
"term",
":",
"{",
"'$nin'",
":",
"assessment_offered_query",
".",
"_query_terms",
"[",
"term",
"]",
"[",
"'$nin'",
"]",
"}",
"}",
"]",
"}",
")",
"else",
":",
"and_list",
".",
"append",
"(",
"{",
"term",
":",
"assessment_offered_query",
".",
"_query_terms",
"[",
"term",
"]",
"}",
")",
"for",
"term",
"in",
"assessment_offered_query",
".",
"_keyword_terms",
":",
"or_list",
".",
"append",
"(",
"{",
"term",
":",
"assessment_offered_query",
".",
"_keyword_terms",
"[",
"term",
"]",
"}",
")",
"if",
"or_list",
":",
"and_list",
".",
"append",
"(",
"{",
"'$or'",
":",
"or_list",
"}",
")",
"view_filter",
"=",
"self",
".",
"_view_filter",
"(",
")",
"if",
"view_filter",
":",
"and_list",
".",
"append",
"(",
"view_filter",
")",
"if",
"and_list",
":",
"query_terms",
"=",
"{",
"'$and'",
":",
"and_list",
"}",
"collection",
"=",
"JSONClientValidated",
"(",
"'assessment'",
",",
"collection",
"=",
"'AssessmentOffered'",
",",
"runtime",
"=",
"self",
".",
"_runtime",
")",
"result",
"=",
"collection",
".",
"find",
"(",
"query_terms",
")",
".",
"sort",
"(",
"'_id'",
",",
"DESCENDING",
")",
"else",
":",
"result",
"=",
"[",
"]",
"return",
"objects",
".",
"AssessmentOfferedList",
"(",
"result",
",",
"runtime",
"=",
"self",
".",
"_runtime",
",",
"proxy",
"=",
"self",
".",
"_proxy",
")"
] | Gets a list of ``AssessmentOffered`` elements matching the given assessment offered query.
arg: assessment_offered_query
(osid.assessment.AssessmentOfferedQuery): the assessment
offered query
return: (osid.assessment.AssessmentOfferedList) - the returned
``AssessmentOfferedList``
raise: NullArgument - ``assessment_offered_query`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure occurred
raise: Unsupported - ``assessment_offered_query`` is not of
this service
*compliance: mandatory -- This method must be implemented.* | [
"Gets",
"a",
"list",
"of",
"AssessmentOffered",
"elements",
"matching",
"the",
"given",
"assessment",
"offered",
"query",
"."
] | python | train |
etingof/pysnmp | pysnmp/proto/rfc1902.py | https://github.com/etingof/pysnmp/blob/cde062dd42f67dfd2d7686286a322d40e9c3a4b7/pysnmp/proto/rfc1902.py#L93-L102 | def withValues(cls, *values):
"""Creates a subclass with discreet values constraint.
"""
class X(cls):
subtypeSpec = cls.subtypeSpec + constraint.SingleValueConstraint(
*values)
X.__name__ = cls.__name__
return X | [
"def",
"withValues",
"(",
"cls",
",",
"*",
"values",
")",
":",
"class",
"X",
"(",
"cls",
")",
":",
"subtypeSpec",
"=",
"cls",
".",
"subtypeSpec",
"+",
"constraint",
".",
"SingleValueConstraint",
"(",
"*",
"values",
")",
"X",
".",
"__name__",
"=",
"cls",
".",
"__name__",
"return",
"X"
] | Creates a subclass with discrete values constraint. | [
"Creates",
"a",
"subclass",
"with",
"discreet",
"values",
"constraint",
"."
] | python | train |
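A hedged sketch of withValues in use; the record does not show which concrete type carries the classmethod, so Integer32 below is an assumption about the surrounding module.

# Assumed base type; any Integer-like type exposing withValues would behave the same.
Severity = Integer32.withValues(1, 2, 3)
ok = Severity(2)   # inside the allowed value set
# Severity(9) would be rejected by the added SingleValueConstraint.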
hydrosquall/tiingo-python | tools/api_key_tool.py | https://github.com/hydrosquall/tiingo-python/blob/9bb98ca9d24f2e4db651cf0590e4b47184546482/tools/api_key_tool.py#L31-L41 | def remove_api_key(file_name):
"""
Change the api key in the Token object to 40*'0'. See issue #86.
:param file: path-to-file to change
"""
with open(file_name, 'r') as fp:
text = fp.read()
text = re.sub(real_api_regex, zero_token_string, text)
with open(file_name, 'w') as fp:
fp.write(text)
return | [
"def",
"remove_api_key",
"(",
"file_name",
")",
":",
"with",
"open",
"(",
"file_name",
",",
"'r'",
")",
"as",
"fp",
":",
"text",
"=",
"fp",
".",
"read",
"(",
")",
"text",
"=",
"re",
".",
"sub",
"(",
"real_api_regex",
",",
"zero_token_string",
",",
"text",
")",
"with",
"open",
"(",
"file_name",
",",
"'w'",
")",
"as",
"fp",
":",
"fp",
".",
"write",
"(",
"text",
")",
"return"
] | Change the api key in the Token object to 40*'0'. See issue #86.
:param file: path-to-file to change | [
"Change",
"the",
"api",
"key",
"in",
"the",
"Token",
"object",
"to",
"40",
"*",
"0",
".",
"See",
"issue",
"#86",
".",
":",
"param",
"file",
":",
"path",
"-",
"to",
"-",
"file",
"to",
"change"
] | python | test |
anlutro/diay.py | diay/__init__.py | https://github.com/anlutro/diay.py/blob/78cfd2b53c8dca3dbac468d620eaa0bb7af08275/diay/__init__.py#L55-L75 | def inject(*args, **kwargs):
"""
Mark a class or function for injection, meaning that a DI container knows
that it should inject dependencies into it.
Normally you won't need this as the injector will inject the required
arguments anyway, but it can be used to inject properties into a class
without having to specify it in the constructor, or to inject arguments
that aren't properly type hinted.
Example:
@diay.inject('foo', MyClass)
class MyOtherClass: pass
assert isinstance(injector.get(MyOtherClass).foo, MyClass)
"""
def wrapper(obj):
if inspect.isclass(obj) or callable(obj):
_inject_object(obj, *args, **kwargs)
return obj
raise DiayException("Don't know how to inject into %r" % obj)
return wrapper | [
"def",
"inject",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"def",
"wrapper",
"(",
"obj",
")",
":",
"if",
"inspect",
".",
"isclass",
"(",
"obj",
")",
"or",
"callable",
"(",
"obj",
")",
":",
"_inject_object",
"(",
"obj",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"obj",
"raise",
"DiayException",
"(",
"\"Don't know how to inject into %r\"",
"%",
"obj",
")",
"return",
"wrapper"
] | Mark a class or function for injection, meaning that a DI container knows
that it should inject dependencies into it.
Normally you won't need this as the injector will inject the required
arguments anyway, but it can be used to inject properties into a class
without having to specify it in the constructor, or to inject arguments
that aren't properly type hinted.
Example:
@diay.inject('foo', MyClass)
class MyOtherClass: pass
assert isinstance(injector.get(MyOtherClass).foo, MyClass) | [
"Mark",
"a",
"class",
"or",
"function",
"for",
"injection",
"meaning",
"that",
"a",
"DI",
"container",
"knows",
"that",
"it",
"should",
"inject",
"dependencies",
"into",
"it",
"."
] | python | train |
reportportal/client-Python | reportportal_client/service.py | https://github.com/reportportal/client-Python/blob/8d22445d0de73f46fb23d0c0e49ac309335173ce/reportportal_client/service.py#L250-L312 | def log_batch(self, log_data):
"""Logs batch of messages with attachment.
Args:
log_data: list of log records.
log record is a dict of;
time, message, level, attachment
attachment is a dict of:
name: name of attachment
data: fileobj or content
mime: content type for attachment
"""
url = uri_join(self.base_url, "log")
attachments = []
for log_item in log_data:
log_item["item_id"] = self.stack[-1]
attachment = log_item.get("attachment", None)
if "attachment" in log_item:
del log_item["attachment"]
if attachment:
if not isinstance(attachment, collections.Mapping):
attachment = {"data": attachment}
name = attachment.get("name", str(uuid.uuid4()))
log_item["file"] = {"name": name}
attachments.append(("file", (
name,
attachment["data"],
attachment.get("mime", "application/octet-stream")
)))
files = [(
"json_request_part", (
None,
json.dumps(log_data),
"application/json"
)
)]
files.extend(attachments)
from reportportal_client import POST_LOGBATCH_RETRY_COUNT
for i in range(POST_LOGBATCH_RETRY_COUNT):
try:
r = self.session.post(
url=url,
files=files,
verify=self.verify_ssl
)
except KeyError:
if i < POST_LOGBATCH_RETRY_COUNT - 1:
continue
else:
raise
break
logger.debug("log_batch - Stack: %s", self.stack)
logger.debug("log_batch response: %s", r.text)
return _get_data(r) | [
"def",
"log_batch",
"(",
"self",
",",
"log_data",
")",
":",
"url",
"=",
"uri_join",
"(",
"self",
".",
"base_url",
",",
"\"log\"",
")",
"attachments",
"=",
"[",
"]",
"for",
"log_item",
"in",
"log_data",
":",
"log_item",
"[",
"\"item_id\"",
"]",
"=",
"self",
".",
"stack",
"[",
"-",
"1",
"]",
"attachment",
"=",
"log_item",
".",
"get",
"(",
"\"attachment\"",
",",
"None",
")",
"if",
"\"attachment\"",
"in",
"log_item",
":",
"del",
"log_item",
"[",
"\"attachment\"",
"]",
"if",
"attachment",
":",
"if",
"not",
"isinstance",
"(",
"attachment",
",",
"collections",
".",
"Mapping",
")",
":",
"attachment",
"=",
"{",
"\"data\"",
":",
"attachment",
"}",
"name",
"=",
"attachment",
".",
"get",
"(",
"\"name\"",
",",
"str",
"(",
"uuid",
".",
"uuid4",
"(",
")",
")",
")",
"log_item",
"[",
"\"file\"",
"]",
"=",
"{",
"\"name\"",
":",
"name",
"}",
"attachments",
".",
"append",
"(",
"(",
"\"file\"",
",",
"(",
"name",
",",
"attachment",
"[",
"\"data\"",
"]",
",",
"attachment",
".",
"get",
"(",
"\"mime\"",
",",
"\"application/octet-stream\"",
")",
")",
")",
")",
"files",
"=",
"[",
"(",
"\"json_request_part\"",
",",
"(",
"None",
",",
"json",
".",
"dumps",
"(",
"log_data",
")",
",",
"\"application/json\"",
")",
")",
"]",
"files",
".",
"extend",
"(",
"attachments",
")",
"from",
"reportportal_client",
"import",
"POST_LOGBATCH_RETRY_COUNT",
"for",
"i",
"in",
"range",
"(",
"POST_LOGBATCH_RETRY_COUNT",
")",
":",
"try",
":",
"r",
"=",
"self",
".",
"session",
".",
"post",
"(",
"url",
"=",
"url",
",",
"files",
"=",
"files",
",",
"verify",
"=",
"self",
".",
"verify_ssl",
")",
"except",
"KeyError",
":",
"if",
"i",
"<",
"POST_LOGBATCH_RETRY_COUNT",
"-",
"1",
":",
"continue",
"else",
":",
"raise",
"break",
"logger",
".",
"debug",
"(",
"\"log_batch - Stack: %s\"",
",",
"self",
".",
"stack",
")",
"logger",
".",
"debug",
"(",
"\"log_batch response: %s\"",
",",
"r",
".",
"text",
")",
"return",
"_get_data",
"(",
"r",
")"
] | Logs batch of messages with attachment.
Args:
log_data: list of log records.
log record is a dict of;
time, message, level, attachment
attachment is a dict of:
name: name of attachment
data: fileobj or content
mime: content type for attachment | [
"Logs",
"batch",
"of",
"messages",
"with",
"attachment",
"."
] | python | train |
moonso/loqusdb | loqusdb/utils/profiling.py | https://github.com/moonso/loqusdb/blob/792dcd0d461aff5adc703c49eebf58964913a513/loqusdb/utils/profiling.py#L78-L124 | def profile_match(adapter, profiles, hard_threshold=0.95, soft_threshold=0.9):
"""
given a dict of profiles, searches through all the samples in the DB
for a match. If a matching sample is found an exception is raised,
and the variants will not be loaded into the database.
Args:
adapter (MongoAdapter): Adapter to mongodb
profiles (dict(str)): The profiles (given as strings) for each sample in vcf.
hard_threshold(float): Rejects load if hamming distance above this is found
soft_threshold(float): Stores similar samples if hamming distance above this is found
Returns:
matches(dict(list)): list of similar samples for each sample in vcf.
"""
matches = {sample: [] for sample in profiles.keys()}
for case in adapter.cases():
for individual in case['individuals']:
for sample in profiles.keys():
if individual.get('profile'):
similarity = compare_profiles(
profiles[sample], individual['profile']
)
if similarity >= hard_threshold:
msg = (
f"individual {sample} has a {similarity} similarity "
f"with individual {individual['ind_id']} in case "
f"{case['case_id']}"
)
LOG.critical(msg)
#Raise some exception
raise ProfileError
if similarity >= soft_threshold:
match = f"{case['case_id']}.{individual['ind_id']}"
matches[sample].append(match)
return matches | [
"def",
"profile_match",
"(",
"adapter",
",",
"profiles",
",",
"hard_threshold",
"=",
"0.95",
",",
"soft_threshold",
"=",
"0.9",
")",
":",
"matches",
"=",
"{",
"sample",
":",
"[",
"]",
"for",
"sample",
"in",
"profiles",
".",
"keys",
"(",
")",
"}",
"for",
"case",
"in",
"adapter",
".",
"cases",
"(",
")",
":",
"for",
"individual",
"in",
"case",
"[",
"'individuals'",
"]",
":",
"for",
"sample",
"in",
"profiles",
".",
"keys",
"(",
")",
":",
"if",
"individual",
".",
"get",
"(",
"'profile'",
")",
":",
"similarity",
"=",
"compare_profiles",
"(",
"profiles",
"[",
"sample",
"]",
",",
"individual",
"[",
"'profile'",
"]",
")",
"if",
"similarity",
">=",
"hard_threshold",
":",
"msg",
"=",
"(",
"f\"individual {sample} has a {similarity} similarity \"",
"f\"with individual {individual['ind_id']} in case \"",
"f\"{case['case_id']}\"",
")",
"LOG",
".",
"critical",
"(",
"msg",
")",
"#Raise some exception",
"raise",
"ProfileError",
"if",
"similarity",
">=",
"soft_threshold",
":",
"match",
"=",
"f\"{case['case_id']}.{individual['ind_id']}\"",
"matches",
"[",
"sample",
"]",
".",
"append",
"(",
"match",
")",
"return",
"matches"
] | given a dict of profiles, searches through all the samples in the DB
for a match. If a matching sample is found an exception is raised,
and the variants will not be loaded into the database.
Args:
adapter (MongoAdapter): Adapter to mongodb
profiles (dict(str)): The profiles (given as strings) for each sample in vcf.
hard_threshold(float): Rejects load if hamming distance above this is found
soft_threshold(float): Stores similar samples if hamming distance above this is found
Returns:
matches(dict(list)): list of similar samples for each sample in vcf. | [
"given",
"a",
"dict",
"of",
"profiles",
"searches",
"through",
"all",
"the",
"samples",
"in",
"the",
"DB",
"for",
"a",
"match",
".",
"If",
"a",
"matching",
"sample",
"is",
"found",
"an",
"exception",
"is",
"raised",
"and",
"the",
"variants",
"will",
"not",
"be",
"loaded",
"into",
"the",
"database",
"."
] | python | train |
idlesign/django-sitecats | sitecats/toolbox.py | https://github.com/idlesign/django-sitecats/blob/9b45e91fc0dcb63a0011780437fe28145e3ecce9/sitecats/toolbox.py#L16-L26 | def get_category_aliases_under(parent_alias=None):
"""Returns a list of category aliases under the given parent.
Could be useful to pass to `ModelWithCategory.enable_category_lists_editor`
in `additional_parents_aliases` parameter.
:param str|None parent_alias: Parent alias or None to categories under root
:rtype: list
:return: a list of category aliases
"""
return [ch.alias for ch in get_cache().get_children_for(parent_alias, only_with_aliases=True)] | [
"def",
"get_category_aliases_under",
"(",
"parent_alias",
"=",
"None",
")",
":",
"return",
"[",
"ch",
".",
"alias",
"for",
"ch",
"in",
"get_cache",
"(",
")",
".",
"get_children_for",
"(",
"parent_alias",
",",
"only_with_aliases",
"=",
"True",
")",
"]"
] | Returns a list of category aliases under the given parent.
Could be useful to pass to `ModelWithCategory.enable_category_lists_editor`
in `additional_parents_aliases` parameter.
:param str|None parent_alias: Parent alias or None to categories under root
:rtype: list
:return: a list of category aliases | [
"Returns",
"a",
"list",
"of",
"category",
"aliases",
"under",
"the",
"given",
"parent",
"."
] | python | train |
hsolbrig/PyShEx | pyshex/shape_expressions_language/p3_terminology.py | https://github.com/hsolbrig/PyShEx/blob/9d659cc36e808afd66d4a6d60e8ea21cb12eb744/pyshex/shape_expressions_language/p3_terminology.py#L23-L25 | def predicatesOut(G: Graph, n: Node) -> Set[TriplePredicate]:
""" predicatesOut(G, n) is the set of predicates in arcsOut(G, n). """
return {p for p, _ in G.predicate_objects(n)} | [
"def",
"predicatesOut",
"(",
"G",
":",
"Graph",
",",
"n",
":",
"Node",
")",
"->",
"Set",
"[",
"TriplePredicate",
"]",
":",
"return",
"{",
"p",
"for",
"p",
",",
"_",
"in",
"G",
".",
"predicate_objects",
"(",
"n",
")",
"}"
] | predicatesOut(G, n) is the set of predicates in arcsOut(G, n). | [
"predicatesOut",
"(",
"G",
"n",
")",
"is",
"the",
"set",
"of",
"predicates",
"in",
"arcsOut",
"(",
"G",
"n",
")",
"."
] | python | train |
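A tiny rdflib example of predicatesOut; the graph and URIs below are made up for illustration.

from rdflib import Graph, URIRef

g = Graph()
s, p, o = (URIRef('http://example.org/' + t) for t in ('s', 'p', 'o'))
g.add((s, p, o))
assert predicatesOut(g, s) == {p}   # the only outgoing predicate of s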
KelSolaar/Umbra | umbra/ui/common.py | https://github.com/KelSolaar/Umbra/blob/66f45f08d9d723787f1191989f8b0dda84b412ce/umbra/ui/common.py#L143-L167 | def get_resource_path(name, raise_exception=False):
"""
Returns the resource file path matching the given name.
:param name: Resource name.
:type name: unicode
:param raise_exception: Raise the exception.
:type raise_exception: bool
:return: Resource path.
:rtype: unicode
"""
if not RuntimeGlobals.resources_directories:
RuntimeGlobals.resources_directories.append(
os.path.normpath(os.path.join(umbra.__path__[0], Constants.resources_directory)))
for path in RuntimeGlobals.resources_directories:
path = os.path.join(path, name)
if foundations.common.path_exists(path):
LOGGER.debug("> '{0}' resource path: '{1}'.".format(name, path))
return path
if raise_exception:
raise umbra.exceptions.ResourceExistsError(
"{0} | No resource file path found for '{1}' name!".format(__name__, name)) | [
"def",
"get_resource_path",
"(",
"name",
",",
"raise_exception",
"=",
"False",
")",
":",
"if",
"not",
"RuntimeGlobals",
".",
"resources_directories",
":",
"RuntimeGlobals",
".",
"resources_directories",
".",
"append",
"(",
"os",
".",
"path",
".",
"normpath",
"(",
"os",
".",
"path",
".",
"join",
"(",
"umbra",
".",
"__path__",
"[",
"0",
"]",
",",
"Constants",
".",
"resources_directory",
")",
")",
")",
"for",
"path",
"in",
"RuntimeGlobals",
".",
"resources_directories",
":",
"path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"name",
")",
"if",
"foundations",
".",
"common",
".",
"path_exists",
"(",
"path",
")",
":",
"LOGGER",
".",
"debug",
"(",
"\"> '{0}' resource path: '{1}'.\"",
".",
"format",
"(",
"name",
",",
"path",
")",
")",
"return",
"path",
"if",
"raise_exception",
":",
"raise",
"umbra",
".",
"exceptions",
".",
"ResourceExistsError",
"(",
"\"{0} | No resource file path found for '{1}' name!\"",
".",
"format",
"(",
"__name__",
",",
"name",
")",
")"
] | Returns the resource file path matching the given name.
:param name: Resource name.
:type name: unicode
:param raise_exception: Raise the exception.
:type raise_exception: bool
:return: Resource path.
:rtype: unicode | [
"Returns",
"the",
"resource",
"file",
"path",
"matching",
"the",
"given",
"name",
"."
] | python | train |
odlgroup/odl | odl/tomo/geometry/parallel.py | https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/tomo/geometry/parallel.py#L1471-L1587 | def parallel_beam_geometry(space, num_angles=None, det_shape=None):
r"""Create default parallel beam geometry from ``space``.
This is intended for simple test cases where users do not need the full
flexibility of the geometries, but simply want a geometry that works.
This default geometry gives a fully sampled sinogram according to the
Nyquist criterion, which in general results in a very large number of
samples. In particular, a ``space`` that is not centered at the origin
can result in very large detectors.
Parameters
----------
space : `DiscreteLp`
Reconstruction space, the space of the volumetric data to be projected.
Needs to be 2d or 3d.
num_angles : int, optional
Number of angles.
Default: Enough to fully sample the data, see Notes.
det_shape : int or sequence of int, optional
Number of detector pixels.
Default: Enough to fully sample the data, see Notes.
Returns
-------
geometry : `ParallelBeamGeometry`
If ``space`` is 2d, return a `Parallel2dGeometry`.
If ``space`` is 3d, return a `Parallel3dAxisGeometry`.
Examples
--------
Create a parallel beam geometry from a 2d space:
>>> space = odl.uniform_discr([-1, -1], [1, 1], (20, 20))
>>> geometry = parallel_beam_geometry(space)
>>> geometry.angles.size
45
>>> geometry.detector.size
31
Notes
-----
According to [NW2001]_, pages 72--74, a function
:math:`f : \mathbb{R}^2 \to \mathbb{R}` that has compact support
.. math::
\| x \| > \rho \implies f(x) = 0,
and is essentially bandlimited
.. math::
\| \xi \| > \Omega \implies \hat{f}(\xi) \approx 0,
can be fully reconstructed from a parallel beam ray transform
if (1) the projection angles are sampled with a spacing of
:math:`\Delta \psi` such that
.. math::
\Delta \psi \leq \frac{\pi}{\rho \Omega},
and (2) the detector is sampled with an interval :math:`\Delta s`
that satisfies
.. math::
\Delta s \leq \frac{\pi}{\Omega}.
The geometry returned by this function satisfies these conditions exactly.
If the domain is 3-dimensional, the geometry is "separable", in that each
    slice along the z-dimension of the data is treated as independent 2d data.
References
----------
.. [NW2001] Natterer, F and Wuebbeling, F.
*Mathematical Methods in Image Reconstruction*.
SIAM, 2001.
https://dx.doi.org/10.1137/1.9780898718324
"""
# Find maximum distance from rotation axis
corners = space.domain.corners()[:, :2]
rho = np.max(np.linalg.norm(corners, axis=1))
# Find default values according to Nyquist criterion.
# We assume that the function is bandlimited by a wave along the x or y
# axis. The highest frequency we can measure is then a standing wave with
# period of twice the inter-node distance.
min_side = min(space.partition.cell_sides[:2])
omega = np.pi / min_side
num_px_horiz = 2 * int(np.ceil(rho * omega / np.pi)) + 1
if space.ndim == 2:
det_min_pt = -rho
det_max_pt = rho
if det_shape is None:
det_shape = num_px_horiz
elif space.ndim == 3:
num_px_vert = space.shape[2]
min_h = space.domain.min_pt[2]
max_h = space.domain.max_pt[2]
det_min_pt = [-rho, min_h]
det_max_pt = [rho, max_h]
if det_shape is None:
det_shape = [num_px_horiz, num_px_vert]
if num_angles is None:
num_angles = int(np.ceil(omega * rho))
angle_partition = uniform_partition(0, np.pi, num_angles)
det_partition = uniform_partition(det_min_pt, det_max_pt, det_shape)
if space.ndim == 2:
return Parallel2dGeometry(angle_partition, det_partition)
elif space.ndim == 3:
return Parallel3dAxisGeometry(angle_partition, det_partition)
else:
raise ValueError('``space.ndim`` must be 2 or 3.') | [
"def",
"parallel_beam_geometry",
"(",
"space",
",",
"num_angles",
"=",
"None",
",",
"det_shape",
"=",
"None",
")",
":",
"# Find maximum distance from rotation axis",
"corners",
"=",
"space",
".",
"domain",
".",
"corners",
"(",
")",
"[",
":",
",",
":",
"2",
"]",
"rho",
"=",
"np",
".",
"max",
"(",
"np",
".",
"linalg",
".",
"norm",
"(",
"corners",
",",
"axis",
"=",
"1",
")",
")",
"# Find default values according to Nyquist criterion.",
"# We assume that the function is bandlimited by a wave along the x or y",
"# axis. The highest frequency we can measure is then a standing wave with",
"# period of twice the inter-node distance.",
"min_side",
"=",
"min",
"(",
"space",
".",
"partition",
".",
"cell_sides",
"[",
":",
"2",
"]",
")",
"omega",
"=",
"np",
".",
"pi",
"/",
"min_side",
"num_px_horiz",
"=",
"2",
"*",
"int",
"(",
"np",
".",
"ceil",
"(",
"rho",
"*",
"omega",
"/",
"np",
".",
"pi",
")",
")",
"+",
"1",
"if",
"space",
".",
"ndim",
"==",
"2",
":",
"det_min_pt",
"=",
"-",
"rho",
"det_max_pt",
"=",
"rho",
"if",
"det_shape",
"is",
"None",
":",
"det_shape",
"=",
"num_px_horiz",
"elif",
"space",
".",
"ndim",
"==",
"3",
":",
"num_px_vert",
"=",
"space",
".",
"shape",
"[",
"2",
"]",
"min_h",
"=",
"space",
".",
"domain",
".",
"min_pt",
"[",
"2",
"]",
"max_h",
"=",
"space",
".",
"domain",
".",
"max_pt",
"[",
"2",
"]",
"det_min_pt",
"=",
"[",
"-",
"rho",
",",
"min_h",
"]",
"det_max_pt",
"=",
"[",
"rho",
",",
"max_h",
"]",
"if",
"det_shape",
"is",
"None",
":",
"det_shape",
"=",
"[",
"num_px_horiz",
",",
"num_px_vert",
"]",
"if",
"num_angles",
"is",
"None",
":",
"num_angles",
"=",
"int",
"(",
"np",
".",
"ceil",
"(",
"omega",
"*",
"rho",
")",
")",
"angle_partition",
"=",
"uniform_partition",
"(",
"0",
",",
"np",
".",
"pi",
",",
"num_angles",
")",
"det_partition",
"=",
"uniform_partition",
"(",
"det_min_pt",
",",
"det_max_pt",
",",
"det_shape",
")",
"if",
"space",
".",
"ndim",
"==",
"2",
":",
"return",
"Parallel2dGeometry",
"(",
"angle_partition",
",",
"det_partition",
")",
"elif",
"space",
".",
"ndim",
"==",
"3",
":",
"return",
"Parallel3dAxisGeometry",
"(",
"angle_partition",
",",
"det_partition",
")",
"else",
":",
"raise",
"ValueError",
"(",
"'``space.ndim`` must be 2 or 3.'",
")"
] | r"""Create default parallel beam geometry from ``space``.
This is intended for simple test cases where users do not need the full
flexibility of the geometries, but simply want a geometry that works.
This default geometry gives a fully sampled sinogram according to the
Nyquist criterion, which in general results in a very large number of
samples. In particular, a ``space`` that is not centered at the origin
can result in very large detectors.
Parameters
----------
space : `DiscreteLp`
Reconstruction space, the space of the volumetric data to be projected.
Needs to be 2d or 3d.
num_angles : int, optional
Number of angles.
Default: Enough to fully sample the data, see Notes.
det_shape : int or sequence of int, optional
Number of detector pixels.
Default: Enough to fully sample the data, see Notes.
Returns
-------
geometry : `ParallelBeamGeometry`
If ``space`` is 2d, return a `Parallel2dGeometry`.
If ``space`` is 3d, return a `Parallel3dAxisGeometry`.
Examples
--------
Create a parallel beam geometry from a 2d space:
>>> space = odl.uniform_discr([-1, -1], [1, 1], (20, 20))
>>> geometry = parallel_beam_geometry(space)
>>> geometry.angles.size
45
>>> geometry.detector.size
31
Notes
-----
According to [NW2001]_, pages 72--74, a function
:math:`f : \mathbb{R}^2 \to \mathbb{R}` that has compact support
.. math::
\| x \| > \rho \implies f(x) = 0,
and is essentially bandlimited
.. math::
\| \xi \| > \Omega \implies \hat{f}(\xi) \approx 0,
can be fully reconstructed from a parallel beam ray transform
if (1) the projection angles are sampled with a spacing of
:math:`\Delta \psi` such that
.. math::
\Delta \psi \leq \frac{\pi}{\rho \Omega},
and (2) the detector is sampled with an interval :math:`\Delta s`
that satisfies
.. math::
\Delta s \leq \frac{\pi}{\Omega}.
The geometry returned by this function satisfies these conditions exactly.
If the domain is 3-dimensional, the geometry is "separable", in that each
    slice along the z-dimension of the data is treated as independent 2d data.
References
----------
.. [NW2001] Natterer, F and Wuebbeling, F.
*Mathematical Methods in Image Reconstruction*.
SIAM, 2001.
https://dx.doi.org/10.1137/1.9780898718324 | [
"r",
"Create",
"default",
"parallel",
"beam",
"geometry",
"from",
"space",
"."
] | python | train |
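Working the Nyquist arithmetic from the notes above by hand, independently of ODL itself, reproduces the detector and angle counts quoted in the docstring example (20x20 cells on [-1, 1] x [-1, 1]).

import numpy as np

min_side = 2.0 / 20                                  # cell side 0.1
omega = np.pi / min_side                             # ~31.4
rho = np.sqrt(2.0)                                   # farthest corner of the square
det_px = 2 * int(np.ceil(rho * omega / np.pi)) + 1   # 2 * 15 + 1 = 31
n_angles = int(np.ceil(omega * rho))                 # ceil(44.4) = 45
assert (det_px, n_angles) == (31, 45)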
softwarefactory-project/distroinfo | scripts/di.py | https://github.com/softwarefactory-project/distroinfo/blob/86a7419232a3376157c06e70528ec627e03ff82a/scripts/di.py#L95-L132 | def distroinfo(cargs, version=__version__):
"""
distroinfo Command-Line Interface
"""
code = 1
args = docopt(__doc__, argv=cargs)
try:
if args['--version']:
if not version:
version = 'N/A'
print(version)
code = 0
elif args['fetch']:
code = fetch(
info_url=args['<info-url>'],
info_files=args['<info-file>'],
cache_dir=args['--cache-dir'],
fetcher=args['--fetcher'],
)
elif args['dump']:
code = dump(
info_url=args['<info-url>'],
info_files=args['<info-file>'],
yaml_out=args['--yaml-out'],
json_out=args['--json-out'],
cache_dir=args['--cache-dir'],
fetcher=args['--fetcher'],
)
except (
exception.InvalidInfoFormat,
KeyboardInterrupt,
) as ex:
code = getattr(ex, 'exit_code', code)
print("")
print(str(ex) or type(ex).__name__)
return code | [
"def",
"distroinfo",
"(",
"cargs",
",",
"version",
"=",
"__version__",
")",
":",
"code",
"=",
"1",
"args",
"=",
"docopt",
"(",
"__doc__",
",",
"argv",
"=",
"cargs",
")",
"try",
":",
"if",
"args",
"[",
"'--version'",
"]",
":",
"if",
"not",
"version",
":",
"version",
"=",
"'N/A'",
"print",
"(",
"version",
")",
"code",
"=",
"0",
"elif",
"args",
"[",
"'fetch'",
"]",
":",
"code",
"=",
"fetch",
"(",
"info_url",
"=",
"args",
"[",
"'<info-url>'",
"]",
",",
"info_files",
"=",
"args",
"[",
"'<info-file>'",
"]",
",",
"cache_dir",
"=",
"args",
"[",
"'--cache-dir'",
"]",
",",
"fetcher",
"=",
"args",
"[",
"'--fetcher'",
"]",
",",
")",
"elif",
"args",
"[",
"'dump'",
"]",
":",
"code",
"=",
"dump",
"(",
"info_url",
"=",
"args",
"[",
"'<info-url>'",
"]",
",",
"info_files",
"=",
"args",
"[",
"'<info-file>'",
"]",
",",
"yaml_out",
"=",
"args",
"[",
"'--yaml-out'",
"]",
",",
"json_out",
"=",
"args",
"[",
"'--json-out'",
"]",
",",
"cache_dir",
"=",
"args",
"[",
"'--cache-dir'",
"]",
",",
"fetcher",
"=",
"args",
"[",
"'--fetcher'",
"]",
",",
")",
"except",
"(",
"exception",
".",
"InvalidInfoFormat",
",",
"KeyboardInterrupt",
",",
")",
"as",
"ex",
":",
"code",
"=",
"getattr",
"(",
"ex",
",",
"'exit_code'",
",",
"code",
")",
"print",
"(",
"\"\"",
")",
"print",
"(",
"str",
"(",
"ex",
")",
"or",
"type",
"(",
"ex",
")",
".",
"__name__",
")",
"return",
"code"
] | distroinfo Command-Line Interface | [
"distroinfo",
"Command",
"-",
"Line",
"Interface"
] | python | train |
samastur/pyimagediet | pyimagediet/cli.py | https://github.com/samastur/pyimagediet/blob/480c6e171577df36e166590b031bc8891b3c9e7b/pyimagediet/cli.py#L22-L26 | def diet(file, configuration, check):
"""Simple program that either print config customisations for your
environment or compresses file FILE."""
config = process.read_yaml_configuration(configuration)
process.diet(file, config) | [
"def",
"diet",
"(",
"file",
",",
"configuration",
",",
"check",
")",
":",
"config",
"=",
"process",
".",
"read_yaml_configuration",
"(",
"configuration",
")",
"process",
".",
"diet",
"(",
"file",
",",
"config",
")"
] | Simple program that either prints config customisations for your
environment or compresses file FILE. | [
"Simple",
"program",
"that",
"either",
"print",
"config",
"customisations",
"for",
"your",
"environment",
"or",
"compresses",
"file",
"FILE",
"."
] | python | train |
jenanwise/codequality | codequality/checkers.py | https://github.com/jenanwise/codequality/blob/8a2bd767fd73091c49a5318fdbfb2b4fff77533d/codequality/checkers.py#L40-L57 | def check(self, paths):
"""
Return list of error dicts for all found errors in paths.
The default implementation expects `tool`, and `tool_err_re` to be
defined.
tool: external binary to use for checking.
tool_err_re: regexp that can match output of `tool` -- must provide
a groupdict with at least "filename", "lineno", "colno",
and "msg" keys. See example checkers.
"""
if not paths:
return ()
cmd_pieces = [self.tool]
cmd_pieces.extend(self.tool_args)
return self._check_std(paths, cmd_pieces) | [
"def",
"check",
"(",
"self",
",",
"paths",
")",
":",
"if",
"not",
"paths",
":",
"return",
"(",
")",
"cmd_pieces",
"=",
"[",
"self",
".",
"tool",
"]",
"cmd_pieces",
".",
"extend",
"(",
"self",
".",
"tool_args",
")",
"return",
"self",
".",
"_check_std",
"(",
"paths",
",",
"cmd_pieces",
")"
] | Return list of error dicts for all found errors in paths.
The default implementation expects `tool`, and `tool_err_re` to be
defined.
tool: external binary to use for checking.
tool_err_re: regexp that can match output of `tool` -- must provide
a groupdict with at least "filename", "lineno", "colno",
and "msg" keys. See example checkers. | [
"Return",
"list",
"of",
"error",
"dicts",
"for",
"all",
"found",
"errors",
"in",
"paths",
"."
] | python | train |
iamteem/redisco | redisco/models/base.py | https://github.com/iamteem/redisco/blob/a7ba19ff3c38061d6d8bc0c10fa754baadcfeb91/redisco/models/base.py#L415-L418 | def _add_to_indices(self, pipeline):
"""Adds the base64 encoded values of the indices."""
for att in self.indices:
self._add_to_index(att, pipeline=pipeline) | [
"def",
"_add_to_indices",
"(",
"self",
",",
"pipeline",
")",
":",
"for",
"att",
"in",
"self",
".",
"indices",
":",
"self",
".",
"_add_to_index",
"(",
"att",
",",
"pipeline",
"=",
"pipeline",
")"
] | Adds the base64 encoded values of the indices. | [
"Adds",
"the",
"base64",
"encoded",
"values",
"of",
"the",
"indices",
"."
] | python | train |
angr/angr | angr/analyses/cfg/cfg_fast.py | https://github.com/angr/angr/blob/4e2f97d56af5419ee73bdb30482c8dd8ff5f3e40/angr/analyses/cfg/cfg_fast.py#L1286-L1311 | def _scan_block(self, cfg_job):
"""
Scan a basic block starting at a specific address
:param CFGJob cfg_job: The CFGJob instance.
:return: a list of successors
:rtype: list
"""
addr = cfg_job.addr
current_func_addr = cfg_job.func_addr
# Fix the function address
# This is for rare cases where we cannot successfully determine the end boundary of a previous function, and
# as a consequence, our analysis mistakenly thinks the previous function goes all the way across the boundary,
# resulting the missing of the second function in function manager.
if addr in self._function_addresses_from_symbols:
current_func_addr = addr
if self._addr_hooked_or_syscall(addr):
entries = self._scan_procedure(cfg_job, current_func_addr)
else:
entries = self._scan_irsb(cfg_job, current_func_addr)
return entries | [
"def",
"_scan_block",
"(",
"self",
",",
"cfg_job",
")",
":",
"addr",
"=",
"cfg_job",
".",
"addr",
"current_func_addr",
"=",
"cfg_job",
".",
"func_addr",
"# Fix the function address",
"# This is for rare cases where we cannot successfully determine the end boundary of a previous function, and",
"# as a consequence, our analysis mistakenly thinks the previous function goes all the way across the boundary,",
"# resulting the missing of the second function in function manager.",
"if",
"addr",
"in",
"self",
".",
"_function_addresses_from_symbols",
":",
"current_func_addr",
"=",
"addr",
"if",
"self",
".",
"_addr_hooked_or_syscall",
"(",
"addr",
")",
":",
"entries",
"=",
"self",
".",
"_scan_procedure",
"(",
"cfg_job",
",",
"current_func_addr",
")",
"else",
":",
"entries",
"=",
"self",
".",
"_scan_irsb",
"(",
"cfg_job",
",",
"current_func_addr",
")",
"return",
"entries"
] | Scan a basic block starting at a specific address
:param CFGJob cfg_job: The CFGJob instance.
:return: a list of successors
:rtype: list | [
"Scan",
"a",
"basic",
"block",
"starting",
"at",
"a",
"specific",
"address"
] | python | train |
inasafe/inasafe | safe/gui/tools/wizard/step_fc90_analysis.py | https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/safe/gui/tools/wizard/step_fc90_analysis.py#L321-L346 | def progress_callback(self, current_value, maximum_value, message=None):
"""GUI based callback implementation for showing progress.
:param current_value: Current progress.
:type current_value: int
        :param maximum_value: Maximum range (point at which task is complete).
:type maximum_value: int
:param message: Optional message dictionary to containing content
we can display to the user. See safe.definitions.analysis_steps
for an example of the expected format
:type message: dict
"""
report = m.Message()
report.add(LOGO_ELEMENT)
report.add(m.Heading(
tr('Analysis status'), **INFO_STYLE))
if message is not None:
report.add(m.ImportantText(message['name']))
report.add(m.Paragraph(message['description']))
report.add(self.impact_function.performance_log_message())
send_static_message(self, report)
self.progress_bar.setMaximum(maximum_value)
self.progress_bar.setValue(current_value)
QgsApplication.processEvents() | [
"def",
"progress_callback",
"(",
"self",
",",
"current_value",
",",
"maximum_value",
",",
"message",
"=",
"None",
")",
":",
"report",
"=",
"m",
".",
"Message",
"(",
")",
"report",
".",
"add",
"(",
"LOGO_ELEMENT",
")",
"report",
".",
"add",
"(",
"m",
".",
"Heading",
"(",
"tr",
"(",
"'Analysis status'",
")",
",",
"*",
"*",
"INFO_STYLE",
")",
")",
"if",
"message",
"is",
"not",
"None",
":",
"report",
".",
"add",
"(",
"m",
".",
"ImportantText",
"(",
"message",
"[",
"'name'",
"]",
")",
")",
"report",
".",
"add",
"(",
"m",
".",
"Paragraph",
"(",
"message",
"[",
"'description'",
"]",
")",
")",
"report",
".",
"add",
"(",
"self",
".",
"impact_function",
".",
"performance_log_message",
"(",
")",
")",
"send_static_message",
"(",
"self",
",",
"report",
")",
"self",
".",
"progress_bar",
".",
"setMaximum",
"(",
"maximum_value",
")",
"self",
".",
"progress_bar",
".",
"setValue",
"(",
"current_value",
")",
"QgsApplication",
".",
"processEvents",
"(",
")"
] | GUI based callback implementation for showing progress.
:param current_value: Current progress.
:type current_value: int
        :param maximum_value: Maximum range (point at which task is complete).
:type maximum_value: int
:param message: Optional message dictionary to containing content
we can display to the user. See safe.definitions.analysis_steps
for an example of the expected format
:type message: dict | [
"GUI",
"based",
"callback",
"implementation",
"for",
"showing",
"progress",
"."
] | python | train |
KelSolaar/Umbra | umbra/components/factory/script_editor/editor.py | https://github.com/KelSolaar/Umbra/blob/66f45f08d9d723787f1191989f8b0dda84b412ce/umbra/components/factory/script_editor/editor.py#L496-L517 | def load_document(self, document, file=None, language=None):
"""
Loads given document into the editor.
:param document: Document to load.
:type document: QTextDocument
:param file: File.
:type file: unicode
:param language: Editor language.
:type language: unicode
:return: Method success.
:rtype: bool
"""
document.setDocumentLayout(QPlainTextDocumentLayout(document))
self.setDocument(document)
self.set_file(file)
self.set_language(language)
self.__set_document_signals()
self.file_loaded.emit()
return True | [
"def",
"load_document",
"(",
"self",
",",
"document",
",",
"file",
"=",
"None",
",",
"language",
"=",
"None",
")",
":",
"document",
".",
"setDocumentLayout",
"(",
"QPlainTextDocumentLayout",
"(",
"document",
")",
")",
"self",
".",
"setDocument",
"(",
"document",
")",
"self",
".",
"set_file",
"(",
"file",
")",
"self",
".",
"set_language",
"(",
"language",
")",
"self",
".",
"__set_document_signals",
"(",
")",
"self",
".",
"file_loaded",
".",
"emit",
"(",
")",
"return",
"True"
] | Loads given document into the editor.
:param document: Document to load.
:type document: QTextDocument
:param file: File.
:type file: unicode
:param language: Editor language.
:type language: unicode
:return: Method success.
:rtype: bool | [
"Loads",
"given",
"document",
"into",
"the",
"editor",
"."
] | python | train |
Toilal/rebulk | rebulk/match.py | https://github.com/Toilal/rebulk/blob/7511a4671f2fd9493e3df1e5177b7656789069e8/rebulk/match.py#L379-L433 | def holes(self, start=0, end=None, formatter=None, ignore=None, seps=None, predicate=None,
index=None): # pylint: disable=too-many-branches,too-many-locals
"""
Retrieves a set of Match objects that are not defined in given range.
:param start:
:type start:
:param end:
:type end:
:param formatter:
:type formatter:
:param ignore:
:type ignore:
:param seps:
:type seps:
:param predicate:
:type predicate:
:param index:
:type index:
:return:
:rtype:
"""
assert self.input_string if seps else True, "input_string must be defined when using seps parameter"
if end is None:
end = self.max_end
else:
end = min(self.max_end, end)
ret = _BaseMatches._base()
hole = False
rindex = start
loop_start = self._hole_start(start, ignore)
for rindex in range(loop_start, end):
current = []
for at_index in self.at_index(rindex):
if not ignore or not ignore(at_index):
current.append(at_index)
if seps and hole and self.input_string and self.input_string[rindex] in seps:
hole = False
ret[-1].end = rindex
else:
if not current and not hole:
# Open a new hole match
hole = True
ret.append(Match(max(rindex, start), None, input_string=self.input_string, formatter=formatter))
elif current and hole:
# Close current hole match
hole = False
ret[-1].end = rindex
if ret and hole:
# go the the next starting element ...
ret[-1].end = min(self._hole_end(rindex, ignore), end)
return filter_index(ret, predicate, index) | [
"def",
"holes",
"(",
"self",
",",
"start",
"=",
"0",
",",
"end",
"=",
"None",
",",
"formatter",
"=",
"None",
",",
"ignore",
"=",
"None",
",",
"seps",
"=",
"None",
",",
"predicate",
"=",
"None",
",",
"index",
"=",
"None",
")",
":",
"# pylint: disable=too-many-branches,too-many-locals",
"assert",
"self",
".",
"input_string",
"if",
"seps",
"else",
"True",
",",
"\"input_string must be defined when using seps parameter\"",
"if",
"end",
"is",
"None",
":",
"end",
"=",
"self",
".",
"max_end",
"else",
":",
"end",
"=",
"min",
"(",
"self",
".",
"max_end",
",",
"end",
")",
"ret",
"=",
"_BaseMatches",
".",
"_base",
"(",
")",
"hole",
"=",
"False",
"rindex",
"=",
"start",
"loop_start",
"=",
"self",
".",
"_hole_start",
"(",
"start",
",",
"ignore",
")",
"for",
"rindex",
"in",
"range",
"(",
"loop_start",
",",
"end",
")",
":",
"current",
"=",
"[",
"]",
"for",
"at_index",
"in",
"self",
".",
"at_index",
"(",
"rindex",
")",
":",
"if",
"not",
"ignore",
"or",
"not",
"ignore",
"(",
"at_index",
")",
":",
"current",
".",
"append",
"(",
"at_index",
")",
"if",
"seps",
"and",
"hole",
"and",
"self",
".",
"input_string",
"and",
"self",
".",
"input_string",
"[",
"rindex",
"]",
"in",
"seps",
":",
"hole",
"=",
"False",
"ret",
"[",
"-",
"1",
"]",
".",
"end",
"=",
"rindex",
"else",
":",
"if",
"not",
"current",
"and",
"not",
"hole",
":",
"# Open a new hole match",
"hole",
"=",
"True",
"ret",
".",
"append",
"(",
"Match",
"(",
"max",
"(",
"rindex",
",",
"start",
")",
",",
"None",
",",
"input_string",
"=",
"self",
".",
"input_string",
",",
"formatter",
"=",
"formatter",
")",
")",
"elif",
"current",
"and",
"hole",
":",
"# Close current hole match",
"hole",
"=",
"False",
"ret",
"[",
"-",
"1",
"]",
".",
"end",
"=",
"rindex",
"if",
"ret",
"and",
"hole",
":",
"# go the the next starting element ...",
"ret",
"[",
"-",
"1",
"]",
".",
"end",
"=",
"min",
"(",
"self",
".",
"_hole_end",
"(",
"rindex",
",",
"ignore",
")",
",",
"end",
")",
"return",
"filter_index",
"(",
"ret",
",",
"predicate",
",",
"index",
")"
] | Retrieves a set of Match objects that are not defined in given range.
:param start:
:type start:
:param end:
:type end:
:param formatter:
:type formatter:
:param ignore:
:type ignore:
:param seps:
:type seps:
:param predicate:
:type predicate:
:param index:
:type index:
:return:
:rtype: | [
"Retrieves",
"a",
"set",
"of",
"Match",
"objects",
"that",
"are",
"not",
"defined",
"in",
"given",
"range",
".",
":",
"param",
"start",
":",
":",
"type",
"start",
":",
":",
"param",
"end",
":",
":",
"type",
"end",
":",
":",
"param",
"formatter",
":",
":",
"type",
"formatter",
":",
":",
"param",
"ignore",
":",
":",
"type",
"ignore",
":",
":",
"param",
"seps",
":",
":",
"type",
"seps",
":",
":",
"param",
"predicate",
":",
":",
"type",
"predicate",
":",
":",
"param",
"index",
":",
":",
"type",
"index",
":",
":",
"return",
":",
":",
"rtype",
":"
] | python | train |
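A minimal usage sketch for `holes`, assuming rebulk's public `Rebulk` builder and the `span`/`value` attributes of its `Match` objects:

```python
from rebulk import Rebulk

# Register a pattern, run it over an input string, then ask for the
# unmatched spans ("holes") between the resulting matches.
matches = Rebulk().string("world").matches("hello world !")
for hole in matches.holes(seps=" "):
    print(hole.span, hole.value)  # text not covered by any match
```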
quantumlib/Cirq | cirq/google/programs.py | https://github.com/quantumlib/Cirq/blob/0827da80dd7880e5b923eb69407e980ed9bc0bd2/cirq/google/programs.py#L171-L190 | def schedule_from_proto_dicts(
device: 'xmon_device.XmonDevice',
ops: Iterable[Dict],
) -> Schedule:
"""Convert proto dictionaries into a Schedule for the given device."""
scheduled_ops = []
last_time_picos = 0
for op in ops:
delay_picos = 0
if 'incremental_delay_picoseconds' in op:
delay_picos = op['incremental_delay_picoseconds']
time_picos = last_time_picos + delay_picos
last_time_picos = time_picos
xmon_op = xmon_op_from_proto_dict(op)
scheduled_ops.append(ScheduledOperation.op_at_on(
operation=xmon_op,
time=Timestamp(picos=time_picos),
device=device,
))
return Schedule(device, scheduled_ops) | [
"def",
"schedule_from_proto_dicts",
"(",
"device",
":",
"'xmon_device.XmonDevice'",
",",
"ops",
":",
"Iterable",
"[",
"Dict",
"]",
",",
")",
"->",
"Schedule",
":",
"scheduled_ops",
"=",
"[",
"]",
"last_time_picos",
"=",
"0",
"for",
"op",
"in",
"ops",
":",
"delay_picos",
"=",
"0",
"if",
"'incremental_delay_picoseconds'",
"in",
"op",
":",
"delay_picos",
"=",
"op",
"[",
"'incremental_delay_picoseconds'",
"]",
"time_picos",
"=",
"last_time_picos",
"+",
"delay_picos",
"last_time_picos",
"=",
"time_picos",
"xmon_op",
"=",
"xmon_op_from_proto_dict",
"(",
"op",
")",
"scheduled_ops",
".",
"append",
"(",
"ScheduledOperation",
".",
"op_at_on",
"(",
"operation",
"=",
"xmon_op",
",",
"time",
"=",
"Timestamp",
"(",
"picos",
"=",
"time_picos",
")",
",",
"device",
"=",
"device",
",",
")",
")",
"return",
"Schedule",
"(",
"device",
",",
"scheduled_ops",
")"
] | Convert proto dictionaries into a Schedule for the given device. | [
"Convert",
"proto",
"dictionaries",
"into",
"a",
"Schedule",
"for",
"the",
"given",
"device",
"."
] | python | train |
hydpy-dev/hydpy | hydpy/models/arma/arma_model.py | https://github.com/hydpy-dev/hydpy/blob/1bc6a82cf30786521d86b36e27900c6717d3348d/hydpy/models/arma/arma_model.py#L390-L423 | def calc_qout_v1(self):
"""Sum up the results of the different response functions.
Required derived parameter:
|Nmb|
Required flux sequences:
|QPOut|
Calculated flux sequence:
|QOut|
Examples:
Initialize an arma model with three different response functions:
>>> from hydpy.models.arma import *
>>> parameterstep()
>>> derived.nmb(3)
>>> fluxes.qpout.shape = 3
Define the output values of the three response functions and
apply method |calc_qout_v1|:
>>> fluxes.qpout = 1.0, 2.0, 3.0
>>> model.calc_qout_v1()
>>> fluxes.qout
qout(6.0)
"""
der = self.parameters.derived.fastaccess
flu = self.sequences.fluxes.fastaccess
flu.qout = 0.
for idx in range(der.nmb):
flu.qout += flu.qpout[idx] | [
"def",
"calc_qout_v1",
"(",
"self",
")",
":",
"der",
"=",
"self",
".",
"parameters",
".",
"derived",
".",
"fastaccess",
"flu",
"=",
"self",
".",
"sequences",
".",
"fluxes",
".",
"fastaccess",
"flu",
".",
"qout",
"=",
"0.",
"for",
"idx",
"in",
"range",
"(",
"der",
".",
"nmb",
")",
":",
"flu",
".",
"qout",
"+=",
"flu",
".",
"qpout",
"[",
"idx",
"]"
] | Sum up the results of the different response functions.
Required derived parameter:
|Nmb|
Required flux sequences:
|QPOut|
Calculated flux sequence:
|QOut|
Examples:
Initialize an arma model with three different response functions:
>>> from hydpy.models.arma import *
>>> parameterstep()
>>> derived.nmb(3)
>>> fluxes.qpout.shape = 3
Define the output values of the three response functions and
apply method |calc_qout_v1|:
>>> fluxes.qpout = 1.0, 2.0, 3.0
>>> model.calc_qout_v1()
>>> fluxes.qout
qout(6.0) | [
"Sum",
"up",
"the",
"results",
"of",
"the",
"different",
"response",
"functions",
"."
] | python | train |
moonlitesolutions/SolrClient | SolrClient/solrresp.py | https://github.com/moonlitesolutions/SolrClient/blob/19c5280c9f8e97ee104d22ae883c4ccfd7c4f43b/SolrClient/solrresp.py#L182-L207 | def get_facet_pivot(self):
'''
Parses facet pivot response. Example::
>>> res = solr.query('SolrClient_unittest',{
'q':'*:*',
'fq':'price:[50 TO *]',
'facet':True,
'facet.pivot':'facet_test,price' #Note how there is no space between fields. They are just separated by commas
})
>>> res.get_facet_pivot()
{'facet_test,price': {'Lorem': {89: 1, 75: 1}, 'ipsum': {53: 1, 70: 1, 55: 1, 89: 1, 74: 1, 93: 1, 79: 1}, 'dolor': {61: 1, 94: 1}, 'sit': {99: 1, 50: 1, 67: 1, 52: 1, 54: 1, 71: 1, 72: 1, 84: 1, 62: 1}, 'amet,': {68: 1}}}
This method has built in recursion and can support indefinite number of facets. However, note that the output format is significantly massaged since Solr by default outputs a list of fields in each pivot field.
'''
if not hasattr(self,'facet_pivot'):
self.facet_pivot = {}
if 'facet_counts' in self.data.keys():
pivots = self.data['facet_counts']['facet_pivot']
for fieldset in pivots:
self.facet_pivot[fieldset] = {}
for sub_field_set in pivots[fieldset]:
res = self._rec_subfield(sub_field_set)
self.facet_pivot[fieldset].update(res)
return self.facet_pivot
else:
return self.facet_pivot | [
"def",
"get_facet_pivot",
"(",
"self",
")",
":",
"if",
"not",
"hasattr",
"(",
"self",
",",
"'facet_pivot'",
")",
":",
"self",
".",
"facet_pivot",
"=",
"{",
"}",
"if",
"'facet_counts'",
"in",
"self",
".",
"data",
".",
"keys",
"(",
")",
":",
"pivots",
"=",
"self",
".",
"data",
"[",
"'facet_counts'",
"]",
"[",
"'facet_pivot'",
"]",
"for",
"fieldset",
"in",
"pivots",
":",
"self",
".",
"facet_pivot",
"[",
"fieldset",
"]",
"=",
"{",
"}",
"for",
"sub_field_set",
"in",
"pivots",
"[",
"fieldset",
"]",
":",
"res",
"=",
"self",
".",
"_rec_subfield",
"(",
"sub_field_set",
")",
"self",
".",
"facet_pivot",
"[",
"fieldset",
"]",
".",
"update",
"(",
"res",
")",
"return",
"self",
".",
"facet_pivot",
"else",
":",
"return",
"self",
".",
"facet_pivot"
] | Parses facet pivot response. Example::
>>> res = solr.query('SolrClient_unittest',{
'q':'*:*',
'fq':'price:[50 TO *]',
'facet':True,
'facet.pivot':'facet_test,price' #Note how there is no space between fields. They are just separated by commas
})
>>> res.get_facet_pivot()
{'facet_test,price': {'Lorem': {89: 1, 75: 1}, 'ipsum': {53: 1, 70: 1, 55: 1, 89: 1, 74: 1, 93: 1, 79: 1}, 'dolor': {61: 1, 94: 1}, 'sit': {99: 1, 50: 1, 67: 1, 52: 1, 54: 1, 71: 1, 72: 1, 84: 1, 62: 1}, 'amet,': {68: 1}}}
This method has built-in recursion and can support an indefinite number of facets. However, note that the output format is significantly massaged since Solr by default outputs a list of fields in each pivot field. | [
"Parses",
"facet",
"pivot",
"response",
".",
"Example",
"::",
">>>",
"res",
"=",
"solr",
".",
"query",
"(",
"SolrClient_unittest",
"{",
"q",
":",
"*",
":",
"*",
"fq",
":",
"price",
":",
"[",
"50",
"TO",
"*",
"]",
"facet",
":",
"True",
"facet",
".",
"pivot",
":",
"facet_test",
"price",
"#Note",
"how",
"there",
"is",
"no",
"space",
"between",
"fields",
".",
"They",
"are",
"just",
"separated",
"by",
"commas",
"}",
")",
">>>",
"res",
".",
"get_facet_pivot",
"()",
"{",
"facet_test",
"price",
":",
"{",
"Lorem",
":",
"{",
"89",
":",
"1",
"75",
":",
"1",
"}",
"ipsum",
":",
"{",
"53",
":",
"1",
"70",
":",
"1",
"55",
":",
"1",
"89",
":",
"1",
"74",
":",
"1",
"93",
":",
"1",
"79",
":",
"1",
"}",
"dolor",
":",
"{",
"61",
":",
"1",
"94",
":",
"1",
"}",
"sit",
":",
"{",
"99",
":",
"1",
"50",
":",
"1",
"67",
":",
"1",
"52",
":",
"1",
"54",
":",
"1",
"71",
":",
"1",
"72",
":",
"1",
"84",
":",
"1",
"62",
":",
"1",
"}",
"amet",
":",
"{",
"68",
":",
"1",
"}}}"
] | python | train |
typemytype/booleanOperations | Lib/booleanOperations/flatten.py | https://github.com/typemytype/booleanOperations/blob/b7d9fc95c155824662f4a0020e653c77b7723d24/Lib/booleanOperations/flatten.py#L211-L237 | def split(self, tValues):
"""
Split the segment according to the t values
"""
if self.segmentType == "curve":
on1 = self.previousOnCurve
off1 = self.points[0].coordinates
off2 = self.points[1].coordinates
on2 = self.points[2].coordinates
return bezierTools.splitCubicAtT(on1, off1, off2, on2, *tValues)
elif self.segmentType == "line":
segments = []
x1, y1 = self.previousOnCurve
x2, y2 = self.points[0].coordinates
dx = x2 - x1
dy = y2 - y1
pp = x1, y1
for t in tValues:
np = (x1+dx*t, y1+dy*t)
segments.append([pp, np])
pp = np
segments.append([pp, (x2, y2)])
return segments
elif self.segmentType == "qcurve":
raise NotImplementedError
else:
raise NotImplementedError | [
"def",
"split",
"(",
"self",
",",
"tValues",
")",
":",
"if",
"self",
".",
"segmentType",
"==",
"\"curve\"",
":",
"on1",
"=",
"self",
".",
"previousOnCurve",
"off1",
"=",
"self",
".",
"points",
"[",
"0",
"]",
".",
"coordinates",
"off2",
"=",
"self",
".",
"points",
"[",
"1",
"]",
".",
"coordinates",
"on2",
"=",
"self",
".",
"points",
"[",
"2",
"]",
".",
"coordinates",
"return",
"bezierTools",
".",
"splitCubicAtT",
"(",
"on1",
",",
"off1",
",",
"off2",
",",
"on2",
",",
"*",
"tValues",
")",
"elif",
"self",
".",
"segmentType",
"==",
"\"line\"",
":",
"segments",
"=",
"[",
"]",
"x1",
",",
"y1",
"=",
"self",
".",
"previousOnCurve",
"x2",
",",
"y2",
"=",
"self",
".",
"points",
"[",
"0",
"]",
".",
"coordinates",
"dx",
"=",
"x2",
"-",
"x1",
"dy",
"=",
"y2",
"-",
"y1",
"pp",
"=",
"x1",
",",
"y1",
"for",
"t",
"in",
"tValues",
":",
"np",
"=",
"(",
"x1",
"+",
"dx",
"*",
"t",
",",
"y1",
"+",
"dy",
"*",
"t",
")",
"segments",
".",
"append",
"(",
"[",
"pp",
",",
"np",
"]",
")",
"pp",
"=",
"np",
"segments",
".",
"append",
"(",
"[",
"pp",
",",
"(",
"x2",
",",
"y2",
")",
"]",
")",
"return",
"segments",
"elif",
"self",
".",
"segmentType",
"==",
"\"qcurve\"",
":",
"raise",
"NotImplementedError",
"else",
":",
"raise",
"NotImplementedError"
] | Split the segment according to the t values | [
"Split",
"the",
"segment",
"according",
"the",
"t",
"values"
] | python | train |
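A hedged standalone illustration of the `"line"` branch above; the real method reads its endpoints from the segment object, so only the interpolation arithmetic is reproduced here:

```python
# Split the line (0, 0) -> (10, 0) at t = 0.25 and t = 0.5, mirroring the
# dx/dy interpolation used in the "line" branch.
x1, y1 = 0.0, 0.0
x2, y2 = 10.0, 0.0
dx, dy = x2 - x1, y2 - y1

segments = []
pp = (x1, y1)
for t in (0.25, 0.5):
    np_ = (x1 + dx * t, y1 + dy * t)
    segments.append([pp, np_])
    pp = np_
segments.append([pp, (x2, y2)])
print(segments)  # three consecutive sub-segments covering (0, 0) -> (10, 0)
```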
obriencj/python-javatools | javatools/report.py | https://github.com/obriencj/python-javatools/blob/9e2332b452ddc508bed0615937dddcb2cf051557/javatools/report.py#L106-L120 | def subreporter(self, subpath, entry):
"""
create a reporter for a sub-report, with updated breadcrumbs and
the same output formats
"""
newbase = join(self.basedir, subpath)
r = Reporter(newbase, entry, self.options)
crumbs = list(self.breadcrumbs)
crumbs.append((self.basedir, self.entry))
r.breadcrumbs = crumbs
r.formats = set(self.formats)
return r | [
"def",
"subreporter",
"(",
"self",
",",
"subpath",
",",
"entry",
")",
":",
"newbase",
"=",
"join",
"(",
"self",
".",
"basedir",
",",
"subpath",
")",
"r",
"=",
"Reporter",
"(",
"newbase",
",",
"entry",
",",
"self",
".",
"options",
")",
"crumbs",
"=",
"list",
"(",
"self",
".",
"breadcrumbs",
")",
"crumbs",
".",
"append",
"(",
"(",
"self",
".",
"basedir",
",",
"self",
".",
"entry",
")",
")",
"r",
".",
"breadcrumbs",
"=",
"crumbs",
"r",
".",
"formats",
"=",
"set",
"(",
"self",
".",
"formats",
")",
"return",
"r"
] | create a reporter for a sub-report, with updated breadcrumbs and
the same output formats | [
"create",
"a",
"reporter",
"for",
"a",
"sub",
"-",
"report",
"with",
"updated",
"breadcrumbs",
"and",
"the",
"same",
"output",
"formats"
] | python | train |
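A hedged usage sketch; the `Reporter(basedir, entry, options)` signature is inferred from how `subreporter` constructs the child, and `None` merely stands in for a real options object:

```python
from javatools.report import Reporter

parent = Reporter("reports", "my-project", None)
child = parent.subreporter("module-a", "module-a-entry")
# child inherits the parent's output formats, and its breadcrumbs end with
# ("reports", "my-project"), pointing back at the parent report.
```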
resync/resync | resync/resource_container.py | https://github.com/resync/resync/blob/98292c17b2c00f2d6f5191c6ab51fef8c292a018/resync/resource_container.py#L224-L253 | def prune_dupes(self):
"""Remove all but the last entry for a given resource URI.
Returns the number of entries removed. Also removes all entries for a
given URI where the first entry is a create and the last entry is a
delete.
"""
n = 0
pruned1 = []
seen = set()
deletes = {}
for r in reversed(self.resources):
if (r.uri in seen):
n += 1
if (r.uri in deletes):
deletes[r.uri] = r.change
else:
pruned1.append(r)
seen.add(r.uri)
if (r.change == 'deleted'):
deletes[r.uri] = r.change
# go through all deletes and prune if first was create
pruned2 = []
for r in reversed(pruned1):
if (r.uri in deletes and deletes[r.uri] == 'created'):
n += 1
else:
pruned2.append(r)
self.resources = pruned2
return(n) | [
"def",
"prune_dupes",
"(",
"self",
")",
":",
"n",
"=",
"0",
"pruned1",
"=",
"[",
"]",
"seen",
"=",
"set",
"(",
")",
"deletes",
"=",
"{",
"}",
"for",
"r",
"in",
"reversed",
"(",
"self",
".",
"resources",
")",
":",
"if",
"(",
"r",
".",
"uri",
"in",
"seen",
")",
":",
"n",
"+=",
"1",
"if",
"(",
"r",
".",
"uri",
"in",
"deletes",
")",
":",
"deletes",
"[",
"r",
".",
"uri",
"]",
"=",
"r",
".",
"change",
"else",
":",
"pruned1",
".",
"append",
"(",
"r",
")",
"seen",
".",
"add",
"(",
"r",
".",
"uri",
")",
"if",
"(",
"r",
".",
"change",
"==",
"'deleted'",
")",
":",
"deletes",
"[",
"r",
".",
"uri",
"]",
"=",
"r",
".",
"change",
"# go through all deletes and prune if first was create",
"pruned2",
"=",
"[",
"]",
"for",
"r",
"in",
"reversed",
"(",
"pruned1",
")",
":",
"if",
"(",
"r",
".",
"uri",
"in",
"deletes",
"and",
"deletes",
"[",
"r",
".",
"uri",
"]",
"==",
"'created'",
")",
":",
"n",
"+=",
"1",
"else",
":",
"pruned2",
".",
"append",
"(",
"r",
")",
"self",
".",
"resources",
"=",
"pruned2",
"return",
"(",
"n",
")"
] | Remove all but the last entry for a given resource URI.
Returns the number of entries removed. Also removes all entries for a
given URI where the first entry is a create and the last entry is a
delete. | [
"Remove",
"all",
"but",
"the",
"last",
"entry",
"for",
"a",
"given",
"resource",
"URI",
"."
] | python | train |
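A hedged usage sketch; the `Resource`/`ChangeList` import paths and the `add()` call are assumptions based on the resync package layout (`prune_dupes` itself lives on their shared container base class):

```python
from resync.resource import Resource
from resync.change_list import ChangeList

cl = ChangeList()
cl.add(Resource(uri="http://example.org/a", change="created"))
cl.add(Resource(uri="http://example.org/a", change="updated"))
cl.add(Resource(uri="http://example.org/b", change="created"))
cl.add(Resource(uri="http://example.org/b", change="deleted"))

removed = cl.prune_dupes()
# Only the last entry per URI survives, /b (created then deleted) is dropped
# entirely, and `removed` counts the pruned entries.
```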
quantmind/pulsar | pulsar/apps/http/client.py | https://github.com/quantmind/pulsar/blob/fee44e871954aa6ca36d00bb5a3739abfdb89b26/pulsar/apps/http/client.py#L331-L336 | def remove_header(self, header_name):
"""Remove ``header_name`` from this request.
"""
val1 = self.headers.pop(header_name, None)
val2 = self.unredirected_headers.pop(header_name, None)
return val1 or val2 | [
"def",
"remove_header",
"(",
"self",
",",
"header_name",
")",
":",
"val1",
"=",
"self",
".",
"headers",
".",
"pop",
"(",
"header_name",
",",
"None",
")",
"val2",
"=",
"self",
".",
"unredirected_headers",
".",
"pop",
"(",
"header_name",
",",
"None",
")",
"return",
"val1",
"or",
"val2"
] | Remove ``header_name`` from this request. | [
"Remove",
"header_name",
"from",
"this",
"request",
"."
] | python | train |
pallets/werkzeug | src/werkzeug/routing.py | https://github.com/pallets/werkzeug/blob/a220671d66755a94630a212378754bb432811158/src/werkzeug/routing.py#L1523-L1541 | def is_endpoint_expecting(self, endpoint, *arguments):
"""Iterate over all rules and check if the endpoint expects
the arguments provided. This is for example useful if you have
some URLs that expect a language code and others that do not and
you want to wrap the builder a bit so that the current language
code is automatically added if not provided but endpoints expect
it.
:param endpoint: the endpoint to check.
:param arguments: this function accepts one or more arguments
as positional arguments. Each one of them is
checked.
"""
self.update()
arguments = set(arguments)
for rule in self._rules_by_endpoint[endpoint]:
if arguments.issubset(rule.arguments):
return True
return False | [
"def",
"is_endpoint_expecting",
"(",
"self",
",",
"endpoint",
",",
"*",
"arguments",
")",
":",
"self",
".",
"update",
"(",
")",
"arguments",
"=",
"set",
"(",
"arguments",
")",
"for",
"rule",
"in",
"self",
".",
"_rules_by_endpoint",
"[",
"endpoint",
"]",
":",
"if",
"arguments",
".",
"issubset",
"(",
"rule",
".",
"arguments",
")",
":",
"return",
"True",
"return",
"False"
] | Iterate over all rules and check if the endpoint expects
the arguments provided. This is for example useful if you have
some URLs that expect a language code and others that do not and
you want to wrap the builder a bit so that the current language
code is automatically added if not provided but endpoints expect
it.
:param endpoint: the endpoint to check.
:param arguments: this function accepts one or more arguments
as positional arguments. Each one of them is
checked. | [
"Iterate",
"over",
"all",
"rules",
"and",
"check",
"if",
"the",
"endpoint",
"expects",
"the",
"arguments",
"provided",
".",
"This",
"is",
"for",
"example",
"useful",
"if",
"you",
"have",
"some",
"URLs",
"that",
"expect",
"a",
"language",
"code",
"and",
"others",
"that",
"do",
"not",
"and",
"you",
"want",
"to",
"wrap",
"the",
"builder",
"a",
"bit",
"so",
"that",
"the",
"current",
"language",
"code",
"is",
"automatically",
"added",
"if",
"not",
"provided",
"but",
"endpoints",
"expect",
"it",
"."
] | python | train |
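A small usage sketch with werkzeug's documented `Map`/`Rule` API:

```python
from werkzeug.routing import Map, Rule

url_map = Map([
    Rule("/post/<int:year>/<slug>", endpoint="blog/show"),
    Rule("/about", endpoint="about"),
])

print(url_map.is_endpoint_expecting("blog/show", "year"))  # True
print(url_map.is_endpoint_expecting("about", "year"))      # False
```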
capitalone/giraffez | giraffez/types.py | https://github.com/capitalone/giraffez/blob/6b4d27eb1a1eaf188c6885c7364ef27e92b1b957/giraffez/types.py#L204-L215 | def get(self, column_name):
"""
Retrieve a column from the list with name value :code:`column_name`
:param str column_name: The name of the column to get
:return: :class:`~giraffez.types.Column` with the specified name, or :code:`None` if it does not exist.
"""
column_name = column_name.lower()
for c in self.columns:
if c.name == column_name:
return c
return None | [
"def",
"get",
"(",
"self",
",",
"column_name",
")",
":",
"column_name",
"=",
"column_name",
".",
"lower",
"(",
")",
"for",
"c",
"in",
"self",
".",
"columns",
":",
"if",
"c",
".",
"name",
"==",
"column_name",
":",
"return",
"c",
"return",
"None"
] | Retrieve a column from the list with name value :code:`column_name`
:param str column_name: The name of the column to get
:return: :class:`~giraffez.types.Column` with the specified name, or :code:`None` if it does not exist. | [
"Retrieve",
"a",
"column",
"from",
"the",
"list",
"with",
"name",
"value",
":",
"code",
":",
"column_name"
] | python | test |
jarun/Buku | bukuserver/server.py | https://github.com/jarun/Buku/blob/5f101363cf68f7666d4f5b28f0887ee07e916054/bukuserver/server.py#L42-L52 | def get_tags():
"""get tags."""
tags = getattr(flask.g, 'bukudb', get_bukudb()).get_tag_all()
result = {
'tags': tags[0]
}
if request.path.startswith('/api/'):
res = jsonify(result)
else:
res = render_template('bukuserver/tags.html', result=result)
return res | [
"def",
"get_tags",
"(",
")",
":",
"tags",
"=",
"getattr",
"(",
"flask",
".",
"g",
",",
"'bukudb'",
",",
"get_bukudb",
"(",
")",
")",
".",
"get_tag_all",
"(",
")",
"result",
"=",
"{",
"'tags'",
":",
"tags",
"[",
"0",
"]",
"}",
"if",
"request",
".",
"path",
".",
"startswith",
"(",
"'/api/'",
")",
":",
"res",
"=",
"jsonify",
"(",
"result",
")",
"else",
":",
"res",
"=",
"render_template",
"(",
"'bukuserver/tags.html'",
",",
"result",
"=",
"result",
")",
"return",
"res"
] | get tags. | [
"get",
"tags",
"."
] | python | train |
lago-project/lago | lago/templates.py | https://github.com/lago-project/lago/blob/5b8970f7687e063e4619066d5b8093ca997678c9/lago/templates.py#L230-L247 | def get_metadata(self, handle):
"""
Returns the associated metadata info for the given handle, the metadata
file must exist (``handle + '.metadata'``). If the given handle has an
``.xz`` extension, it will get removed when calculating the handle
metadata path
Args:
handle (str): Path to the template to get the metadata from
Returns:
dict: Metadata for the given handle
"""
response = self.open_url(url=handle, suffix='.metadata')
try:
return json.load(response)
finally:
response.close() | [
"def",
"get_metadata",
"(",
"self",
",",
"handle",
")",
":",
"response",
"=",
"self",
".",
"open_url",
"(",
"url",
"=",
"handle",
",",
"suffix",
"=",
"'.metadata'",
")",
"try",
":",
"return",
"json",
".",
"load",
"(",
"response",
")",
"finally",
":",
"response",
".",
"close",
"(",
")"
] | Returns the associated metadata info for the given handle, the metadata
file must exist (``handle + '.metadata'``). If the given handle has an
``.xz`` extension, it will get removed when calculating the handle
metadata path
Args:
handle (str): Path to the template to get the metadata from
Returns:
dict: Metadata for the given handle | [
"Returns",
"the",
"associated",
"metadata",
"info",
"for",
"the",
"given",
"handle",
"the",
"metadata",
"file",
"must",
"exist",
"(",
"handle",
"+",
".",
"metadata",
")",
".",
"If",
"the",
"given",
"handle",
"has",
"an",
".",
"xz",
"extension",
"it",
"will",
"get",
"removed",
"when",
"calculating",
"the",
"handle",
"metadata",
"path"
] | python | train |
spacetelescope/stsci.tools | lib/stsci/tools/fileutil.py | https://github.com/spacetelescope/stsci.tools/blob/9a022503ad24ca54ce83331482dfa3ff6de9f403/lib/stsci/tools/fileutil.py#L1076-L1081 | def copyFile(input, output, replace=None):
"""Copy a file whole from input to output."""
_found = findFile(output)
if not _found or (_found and replace):
shutil.copy2(input, output) | [
"def",
"copyFile",
"(",
"input",
",",
"output",
",",
"replace",
"=",
"None",
")",
":",
"_found",
"=",
"findFile",
"(",
"output",
")",
"if",
"not",
"_found",
"or",
"(",
"_found",
"and",
"replace",
")",
":",
"shutil",
".",
"copy2",
"(",
"input",
",",
"output",
")"
] | Copy a file whole from input to output. | [
"Copy",
"a",
"file",
"whole",
"from",
"input",
"to",
"output",
"."
] | python | train |
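A usage sketch (the file names are illustrative):

```python
from stsci.tools import fileutil

# Copies only when the destination does not already exist ...
fileutil.copyFile("input.fits", "backup.fits")
# ... unless replace is truthy, which forces the overwrite.
fileutil.copyFile("input.fits", "backup.fits", replace=True)
```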
Duke-GCB/DukeDSClient | ddsc/cmdparser.py | https://github.com/Duke-GCB/DukeDSClient/blob/117f68fb9bae82e4c81ea487ad5d61ac350f3726/ddsc/cmdparser.py#L421-L443 | def register_deliver_command(self, deliver_func):
"""
Add 'deliver' command for transferring a project to another user.
:param deliver_func: function to run when user chooses this option
"""
description = "Initiate delivery of a project to another user. Removes other user's current permissions. " \
"Send message to D4S2 service to send email and allow access to the project once user " \
"acknowledges receiving the data."
deliver_parser = self.subparsers.add_parser('deliver', description=description)
add_project_name_or_id_arg(deliver_parser)
user_or_email = deliver_parser.add_mutually_exclusive_group(required=True)
add_user_arg(user_or_email)
add_email_arg(user_or_email)
add_share_usernames_arg(deliver_parser)
add_share_emails_arg(deliver_parser)
_add_copy_project_arg(deliver_parser)
_add_resend_arg(deliver_parser, "Resend delivery")
include_or_exclude = deliver_parser.add_mutually_exclusive_group(required=False)
_add_include_arg(include_or_exclude)
_add_exclude_arg(include_or_exclude)
_add_message_file(deliver_parser, "Filename containing a message to be sent with the delivery. "
"Pass - to read from stdin.")
deliver_parser.set_defaults(func=deliver_func) | [
"def",
"register_deliver_command",
"(",
"self",
",",
"deliver_func",
")",
":",
"description",
"=",
"\"Initiate delivery of a project to another user. Removes other user's current permissions. \"",
"\"Send message to D4S2 service to send email and allow access to the project once user \"",
"\"acknowledges receiving the data.\"",
"deliver_parser",
"=",
"self",
".",
"subparsers",
".",
"add_parser",
"(",
"'deliver'",
",",
"description",
"=",
"description",
")",
"add_project_name_or_id_arg",
"(",
"deliver_parser",
")",
"user_or_email",
"=",
"deliver_parser",
".",
"add_mutually_exclusive_group",
"(",
"required",
"=",
"True",
")",
"add_user_arg",
"(",
"user_or_email",
")",
"add_email_arg",
"(",
"user_or_email",
")",
"add_share_usernames_arg",
"(",
"deliver_parser",
")",
"add_share_emails_arg",
"(",
"deliver_parser",
")",
"_add_copy_project_arg",
"(",
"deliver_parser",
")",
"_add_resend_arg",
"(",
"deliver_parser",
",",
"\"Resend delivery\"",
")",
"include_or_exclude",
"=",
"deliver_parser",
".",
"add_mutually_exclusive_group",
"(",
"required",
"=",
"False",
")",
"_add_include_arg",
"(",
"include_or_exclude",
")",
"_add_exclude_arg",
"(",
"include_or_exclude",
")",
"_add_message_file",
"(",
"deliver_parser",
",",
"\"Filename containing a message to be sent with the delivery. \"",
"\"Pass - to read from stdin.\"",
")",
"deliver_parser",
".",
"set_defaults",
"(",
"func",
"=",
"deliver_func",
")"
] | Add 'deliver' command for transferring a project to another user.
:param deliver_func: function to run when user chooses this option | [
"Add",
"deliver",
"command",
"for",
"transferring",
"a",
"project",
"to",
"another",
"user",
".",
":",
"param",
"deliver_func",
":",
"function",
"to",
"run",
"when",
"user",
"choses",
"this",
"option"
] | python | train |
rorr73/LifeSOSpy | lifesospy/device.py | https://github.com/rorr73/LifeSOSpy/blob/62360fbab2e90bf04d52b547093bdab2d4e389b4/lifesospy/device.py#L374-L376 | def current_reading(self) -> Optional[Union[int, float]]:
"""Current reading for a special sensor."""
return self._get_field_value(SpecialDevice.PROP_CURRENT_READING) | [
"def",
"current_reading",
"(",
"self",
")",
"->",
"Optional",
"[",
"Union",
"[",
"int",
",",
"float",
"]",
"]",
":",
"return",
"self",
".",
"_get_field_value",
"(",
"SpecialDevice",
".",
"PROP_CURRENT_READING",
")"
] | Current reading for a special sensor. | [
"Current",
"reading",
"for",
"a",
"special",
"sensor",
"."
] | python | train |
rocky/python3-trepan | trepan/lib/sighandler.py | https://github.com/rocky/python3-trepan/blob/14e91bc0acce090d67be145b1ac040cab92ac5f3/trepan/lib/sighandler.py#L49-L60 | def lookup_signum(name):
"""Find the corresponding signal number for 'name'. Return None
if 'name' is invalid."""
uname = name.upper()
if (uname.startswith('SIG') and hasattr(signal, uname)):
return getattr(signal, uname)
else:
uname = "SIG"+uname
if hasattr(signal, uname):
return getattr(signal, uname)
return None
return | [
"def",
"lookup_signum",
"(",
"name",
")",
":",
"uname",
"=",
"name",
".",
"upper",
"(",
")",
"if",
"(",
"uname",
".",
"startswith",
"(",
"'SIG'",
")",
"and",
"hasattr",
"(",
"signal",
",",
"uname",
")",
")",
":",
"return",
"getattr",
"(",
"signal",
",",
"uname",
")",
"else",
":",
"uname",
"=",
"\"SIG\"",
"+",
"uname",
"if",
"hasattr",
"(",
"signal",
",",
"uname",
")",
":",
"return",
"getattr",
"(",
"signal",
",",
"uname",
")",
"return",
"None",
"return"
] | Find the corresponding signal number for 'name'. Return None
if 'name' is invalid. | [
"Find",
"the",
"corresponding",
"signal",
"number",
"for",
"name",
".",
"Return",
"None",
"if",
"name",
"is",
"invalid",
"."
] | python | test |
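A usage sketch, importing from the module path shown above:

```python
import signal
from trepan.lib.sighandler import lookup_signum

print(lookup_signum("INT") == signal.SIGINT)       # True - the SIG prefix is added
print(lookup_signum("SIGTERM") == signal.SIGTERM)  # True
print(lookup_signum("not-a-signal"))               # None
```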
bcbio/bcbio-nextgen | bcbio/structural/shared.py | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/shared.py#L270-L283 | def get_cur_batch(items):
"""Retrieve name of the batch shared between all items in a group.
"""
batches = []
for data in items:
batch = tz.get_in(["metadata", "batch"], data, [])
batches.append(set(batch) if isinstance(batch, (list, tuple)) else set([batch]))
combo_batches = reduce(lambda b1, b2: b1.intersection(b2), batches)
if len(combo_batches) == 1:
return combo_batches.pop()
elif len(combo_batches) == 0:
return None
else:
raise ValueError("Found multiple overlapping batches: %s -- %s" % (combo_batches, batches)) | [
"def",
"get_cur_batch",
"(",
"items",
")",
":",
"batches",
"=",
"[",
"]",
"for",
"data",
"in",
"items",
":",
"batch",
"=",
"tz",
".",
"get_in",
"(",
"[",
"\"metadata\"",
",",
"\"batch\"",
"]",
",",
"data",
",",
"[",
"]",
")",
"batches",
".",
"append",
"(",
"set",
"(",
"batch",
")",
"if",
"isinstance",
"(",
"batch",
",",
"(",
"list",
",",
"tuple",
")",
")",
"else",
"set",
"(",
"[",
"batch",
"]",
")",
")",
"combo_batches",
"=",
"reduce",
"(",
"lambda",
"b1",
",",
"b2",
":",
"b1",
".",
"intersection",
"(",
"b2",
")",
",",
"batches",
")",
"if",
"len",
"(",
"combo_batches",
")",
"==",
"1",
":",
"return",
"combo_batches",
".",
"pop",
"(",
")",
"elif",
"len",
"(",
"combo_batches",
")",
"==",
"0",
":",
"return",
"None",
"else",
":",
"raise",
"ValueError",
"(",
"\"Found multiple overlapping batches: %s -- %s\"",
"%",
"(",
"combo_batches",
",",
"batches",
")",
")"
] | Retrieve name of the batch shared between all items in a group. | [
"Retrieve",
"name",
"of",
"the",
"batch",
"shared",
"between",
"all",
"items",
"in",
"a",
"group",
"."
] | python | train |
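A hedged sketch; the dicts below stand in for bcbio sample items, of which only `metadata.batch` is read here:

```python
from bcbio.structural.shared import get_cur_batch

items = [
    {"metadata": {"batch": "batch1"}},
    {"metadata": {"batch": ["batch1", "batch2"]}},
]
print(get_cur_batch(items))  # 'batch1' - the single batch shared by every item
```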
timothydmorton/isochrones | isochrones/observation.py | https://github.com/timothydmorton/isochrones/blob/d84495573044c66db2fd6b959fe69e370757ea14/isochrones/observation.py#L859-L884 | def load_hdf(cls, filename, path='', ic=None):
"""
Loads stored ObservationTree from file.
You can provide the isochrone to use; or it will default to MIST
TODO: saving and loading must be fixed! save ic type, bands, etc.
"""
store = pd.HDFStore(filename)
try:
samples = store[path+'/df']
attrs = store.get_storer(path+'/df').attrs
except:
store.close()
raise
df = store[path+'/df']
new = cls.from_df(df)
if ic is None:
ic = get_ichrone('mist')
new.define_models(ic, N=attrs.N, index=attrs.index)
new.spectroscopy = attrs.spectroscopy
new.parallax = attrs.parallax
store.close()
return new | [
"def",
"load_hdf",
"(",
"cls",
",",
"filename",
",",
"path",
"=",
"''",
",",
"ic",
"=",
"None",
")",
":",
"store",
"=",
"pd",
".",
"HDFStore",
"(",
"filename",
")",
"try",
":",
"samples",
"=",
"store",
"[",
"path",
"+",
"'/df'",
"]",
"attrs",
"=",
"store",
".",
"get_storer",
"(",
"path",
"+",
"'/df'",
")",
".",
"attrs",
"except",
":",
"store",
".",
"close",
"(",
")",
"raise",
"df",
"=",
"store",
"[",
"path",
"+",
"'/df'",
"]",
"new",
"=",
"cls",
".",
"from_df",
"(",
"df",
")",
"if",
"ic",
"is",
"None",
":",
"ic",
"=",
"get_ichrone",
"(",
"'mist'",
")",
"new",
".",
"define_models",
"(",
"ic",
",",
"N",
"=",
"attrs",
".",
"N",
",",
"index",
"=",
"attrs",
".",
"index",
")",
"new",
".",
"spectroscopy",
"=",
"attrs",
".",
"spectroscopy",
"new",
".",
"parallax",
"=",
"attrs",
".",
"parallax",
"store",
".",
"close",
"(",
")",
"return",
"new"
] | Loads stored ObservationTree from file.
You can provide the isochrone to use; or it will default to MIST
TODO: saving and loading must be fixed! save ic type, bands, etc. | [
"Loads",
"stored",
"ObservationTree",
"from",
"file",
"."
] | python | train |
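A hedged usage sketch; `ObservationTree` is the class named in the docstring, the file name and `path` value are illustrative, and MIST is used when `ic` is omitted (as the body above shows):

```python
from isochrones.observation import ObservationTree

obs = ObservationTree.load_hdf("star_observations.h5", path="obs")
```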
rraadd88/rohan | rohan/dandage/align/align_annot.py | https://github.com/rraadd88/rohan/blob/b0643a3582a2fffc0165ace69fb80880d92bfb10/rohan/dandage/align/align_annot.py#L272-L332 | def dannots2dalignbed2dannotsagg(cfg):
"""
Aggregate annotations per query
step#8
:param cfg: configuration dict
"""
datatmpd=cfg['datatmpd']
daannotp=f'{datatmpd}/08_dannot.tsv'
cfg['daannotp']=daannotp
dannotsaggp=cfg['dannotsaggp']
logging.info(basename(daannotp))
if ((not exists(daannotp)) and (not exists(dannotsaggp))) or cfg['force']:
gff_renamed_cols=[c+' annotation' if c in set(bed_colns).intersection(gff_colns) else c for c in gff_colns]
dannots=pd.read_csv(cfg['annotationsbedp'],sep='\t',
names=bed_colns+gff_renamed_cols,
low_memory=False)
dannots=del_Unnamed(dannots)
dannots=dannots.set_index('id')
dannots['annotations count']=1
# separate ids from attribute columns
dannots=lambda2cols(dannots,lambdaf=gffatributes2ids,
in_coln='attributes',
to_colns=['gene name','gene id','transcript id','protein id','exon id'])
dannots=dannots.drop(['attributes']+[c for c in gff_renamed_cols if 'annotation' in c],axis=1)
logging.debug('or this step takes more time?')
to_table(dannots,daannotp)
# to_table_pqt(dannots,daannotp)
else:
# dannots=read_table_pqt(daannotp)
dannots=read_table(daannotp)
dannots=del_Unnamed(dannots)
logging.info(basename(dannotsaggp))
if not exists(dannotsaggp) or cfg['force']:
if not 'dannots' in locals():
# dannots=read_table_pqt(daannotp)
dannots=pd.read_table(daannotp,low_memory=False)
dannots=del_Unnamed(dannots)
dannots=dannots.reset_index()
logging.debug('aggregating the annotations')
from rohan.dandage.io_sets import unique
cols2aggf={'annotations count':np.sum,
'type': unique,
'gene name': unique,
'gene id': unique,
'transcript id': unique,
'protein id': unique,
'exon id': unique}
dannotsagg=dannots.groupby('id').agg(cols2aggf)
dannotsagg['annotations count']=dannotsagg['annotations count']-1
dannotsagg.loc[dannotsagg['annotations count']==0,'region']='intergenic'
dannotsagg.loc[dannotsagg['annotations count']!=0,'region']='genic'
logging.debug('end of the slowest step')
del dannots
dannotsagg=dannotsagg.reset_index()
# to_table_pqt(dannotsagg,dannotsaggp)
dannotsagg.to_csv(dannotsaggp,sep='\t')
return cfg | [
"def",
"dannots2dalignbed2dannotsagg",
"(",
"cfg",
")",
":",
"datatmpd",
"=",
"cfg",
"[",
"'datatmpd'",
"]",
"daannotp",
"=",
"f'{datatmpd}/08_dannot.tsv'",
"cfg",
"[",
"'daannotp'",
"]",
"=",
"daannotp",
"dannotsaggp",
"=",
"cfg",
"[",
"'dannotsaggp'",
"]",
"logging",
".",
"info",
"(",
"basename",
"(",
"daannotp",
")",
")",
"if",
"(",
"(",
"not",
"exists",
"(",
"daannotp",
")",
")",
"and",
"(",
"not",
"exists",
"(",
"dannotsaggp",
")",
")",
")",
"or",
"cfg",
"[",
"'force'",
"]",
":",
"gff_renamed_cols",
"=",
"[",
"c",
"+",
"' annotation'",
"if",
"c",
"in",
"set",
"(",
"bed_colns",
")",
".",
"intersection",
"(",
"gff_colns",
")",
"else",
"c",
"for",
"c",
"in",
"gff_colns",
"]",
"dannots",
"=",
"pd",
".",
"read_csv",
"(",
"cfg",
"[",
"'annotationsbedp'",
"]",
",",
"sep",
"=",
"'\\t'",
",",
"names",
"=",
"bed_colns",
"+",
"gff_renamed_cols",
",",
"low_memory",
"=",
"False",
")",
"dannots",
"=",
"del_Unnamed",
"(",
"dannots",
")",
"dannots",
"=",
"dannots",
".",
"set_index",
"(",
"'id'",
")",
"dannots",
"[",
"'annotations count'",
"]",
"=",
"1",
"# separate ids from attribute columns",
"dannots",
"=",
"lambda2cols",
"(",
"dannots",
",",
"lambdaf",
"=",
"gffatributes2ids",
",",
"in_coln",
"=",
"'attributes'",
",",
"to_colns",
"=",
"[",
"'gene name'",
",",
"'gene id'",
",",
"'transcript id'",
",",
"'protein id'",
",",
"'exon id'",
"]",
")",
"dannots",
"=",
"dannots",
".",
"drop",
"(",
"[",
"'attributes'",
"]",
"+",
"[",
"c",
"for",
"c",
"in",
"gff_renamed_cols",
"if",
"'annotation'",
"in",
"c",
"]",
",",
"axis",
"=",
"1",
")",
"logging",
".",
"debug",
"(",
"'or this step takes more time?'",
")",
"to_table",
"(",
"dannots",
",",
"daannotp",
")",
"# to_table_pqt(dannots,daannotp)",
"else",
":",
"# dannots=read_table_pqt(daannotp)",
"dannots",
"=",
"read_table",
"(",
"daannotp",
")",
"dannots",
"=",
"del_Unnamed",
"(",
"dannots",
")",
"logging",
".",
"info",
"(",
"basename",
"(",
"dannotsaggp",
")",
")",
"if",
"not",
"exists",
"(",
"dannotsaggp",
")",
"or",
"cfg",
"[",
"'force'",
"]",
":",
"if",
"not",
"'dannots'",
"in",
"locals",
"(",
")",
":",
"# dannots=read_table_pqt(daannotp)",
"dannots",
"=",
"pd",
".",
"read_table",
"(",
"daannotp",
",",
"low_memory",
"=",
"False",
")",
"dannots",
"=",
"del_Unnamed",
"(",
"dannots",
")",
"dannots",
"=",
"dannots",
".",
"reset_index",
"(",
")",
"logging",
".",
"debug",
"(",
"'aggregating the annotations'",
")",
"from",
"rohan",
".",
"dandage",
".",
"io_sets",
"import",
"unique",
"cols2aggf",
"=",
"{",
"'annotations count'",
":",
"np",
".",
"sum",
",",
"'type'",
":",
"unique",
",",
"'gene name'",
":",
"unique",
",",
"'gene id'",
":",
"unique",
",",
"'transcript id'",
":",
"unique",
",",
"'protein id'",
":",
"unique",
",",
"'exon id'",
":",
"unique",
"}",
"dannotsagg",
"=",
"dannots",
".",
"groupby",
"(",
"'id'",
")",
".",
"agg",
"(",
"cols2aggf",
")",
"dannotsagg",
"[",
"'annotations count'",
"]",
"=",
"dannotsagg",
"[",
"'annotations count'",
"]",
"-",
"1",
"dannotsagg",
".",
"loc",
"[",
"dannotsagg",
"[",
"'annotations count'",
"]",
"==",
"0",
",",
"'region'",
"]",
"=",
"'intergenic'",
"dannotsagg",
".",
"loc",
"[",
"dannotsagg",
"[",
"'annotations count'",
"]",
"!=",
"0",
",",
"'region'",
"]",
"=",
"'genic'",
"logging",
".",
"debug",
"(",
"'end of the slowest step'",
")",
"del",
"dannots",
"dannotsagg",
"=",
"dannotsagg",
".",
"reset_index",
"(",
")",
"# to_table_pqt(dannotsagg,dannotsaggp)",
"dannotsagg",
".",
"to_csv",
"(",
"dannotsaggp",
",",
"sep",
"=",
"'\\t'",
")",
"return",
"cfg"
] | Aggregate annotations per query
step#8
:param cfg: configuration dict | [
"Aggregate",
"annotations",
"per",
"query",
"step#8"
] | python | train |
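A hedged sketch of the configuration keys this step reads and writes, taken from the body above; all paths are hypothetical:

```python
from rohan.dandage.align.align_annot import dannots2dalignbed2dannotsagg

cfg = {
    "datatmpd": "data/tmp",                            # working dir for step files
    "annotationsbedp": "data/tmp/07_annotations.bed",  # upstream intersect output
    "dannotsaggp": "data/tmp/09_dannotsagg.tsv",       # aggregated output path
    "force": False,                                    # rerun even if outputs exist
}
cfg = dannots2dalignbed2dannotsagg(cfg)
# adds cfg['daannotp'] (08_dannot.tsv) and writes the per-query aggregate table
```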
EelcoHoogendoorn/Numpy_arraysetops_EP | numpy_indexed/funcs.py | https://github.com/EelcoHoogendoorn/Numpy_arraysetops_EP/blob/84dc8114bf8a79c3acb3f7f59128247b9fc97243/numpy_indexed/funcs.py#L19-L39 | def count(keys, axis=semantics.axis_default):
"""count the number of times each key occurs in the input set
Arguments
---------
keys : indexable object
Returns
-------
unique : ndarray, [groups, ...]
unique keys
count : ndarray, [groups], int
the number of times each key occurs in the input set
Notes
-----
Can be seen as numpy work-alike of collections.Counter
Alternatively, as sparse equivalent of count_table
"""
index = as_index(keys, axis, base=True)
return index.unique, index.count | [
"def",
"count",
"(",
"keys",
",",
"axis",
"=",
"semantics",
".",
"axis_default",
")",
":",
"index",
"=",
"as_index",
"(",
"keys",
",",
"axis",
",",
"base",
"=",
"True",
")",
"return",
"index",
".",
"unique",
",",
"index",
".",
"count"
] | count the number of times each key occurs in the input set
Arguments
---------
keys : indexable object
Returns
-------
unique : ndarray, [groups, ...]
unique keys
count : ndarray, [groups], int
the number of times each key occurs in the input set
Notes
-----
Can be seen as numpy work-alike of collections.Counter
Alternatively, as sparse equivalent of count_table | [
"count",
"the",
"number",
"of",
"times",
"each",
"key",
"occurs",
"in",
"the",
"input",
"set"
] | python | train |
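A usage sketch:

```python
import numpy as np
from numpy_indexed.funcs import count

unique, counts = count(np.array(["a", "b", "a", "c", "a"]))
print(unique)  # unique keys, e.g. ['a' 'b' 'c']
print(counts)  # how often each occurs, e.g. [3 1 1]
```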
cytoscape/py2cytoscape | py2cytoscape/cyrest/cyndex2.py | https://github.com/cytoscape/py2cytoscape/blob/dd34de8d028f512314d0057168df7fef7c5d5195/py2cytoscape/cyrest/cyndex2.py#L85-L99 | def updateCurrentNetworkInNdex(self, body, verbose=None):
"""
Update current network's record in NDEx
:param body: Properties required to update a network record in NDEx.
:param verbose: print more
:returns: 200: successful operation; 404: Network does not exist
"""
surl=self.___url
sv=surl.split('/')[-1]
surl=surl.rstrip(sv+'/')
response=api(url=surl+'/cyndex2/'+sv+'/networks/current', method="PUT", body=body, verbose=verbose)
return response | [
"def",
"updateCurrentNetworkInNdex",
"(",
"self",
",",
"body",
",",
"verbose",
"=",
"None",
")",
":",
"surl",
"=",
"self",
".",
"___url",
"sv",
"=",
"surl",
".",
"split",
"(",
"'/'",
")",
"[",
"-",
"1",
"]",
"surl",
"=",
"surl",
".",
"rstrip",
"(",
"sv",
"+",
"'/'",
")",
"response",
"=",
"api",
"(",
"url",
"=",
"surl",
"+",
"'/cyndex2/'",
"+",
"sv",
"+",
"'/networks/current'",
",",
"method",
"=",
"\"PUT\"",
",",
"body",
"=",
"body",
",",
"verbose",
"=",
"verbose",
")",
"return",
"response"
] | Update current network's record in NDEx
:param body: Properties required to update a network record in NDEx.
:param verbose: print more
:returns: 200: successful operation; 404: Network does not exist | [
"Update",
"current",
"network",
"s",
"record",
"in",
"NDEx"
] | python | train |
RedFantom/ttkwidgets | ttkwidgets/font/chooser.py | https://github.com/RedFantom/ttkwidgets/blob/02150322060f867b6e59a175522ef84b09168019/ttkwidgets/font/chooser.py#L127-L142 | def font(self):
"""
Selected font.
:return: font tuple (family_name, size, \*options), :class:`~font.Font` object
"""
if self._family is None:
return None, None
else:
font_tuple = self.__generate_font_tuple()
font_obj = font.Font(family=self._family, size=self._size,
weight=font.BOLD if self._bold else font.NORMAL,
slant=font.ITALIC if self._italic else font.ROMAN,
underline=1 if self._underline else 0,
overstrike=1 if self._overstrike else 0)
return font_tuple, font_obj | [
"def",
"font",
"(",
"self",
")",
":",
"if",
"self",
".",
"_family",
"is",
"None",
":",
"return",
"None",
",",
"None",
"else",
":",
"font_tuple",
"=",
"self",
".",
"__generate_font_tuple",
"(",
")",
"font_obj",
"=",
"font",
".",
"Font",
"(",
"family",
"=",
"self",
".",
"_family",
",",
"size",
"=",
"self",
".",
"_size",
",",
"weight",
"=",
"font",
".",
"BOLD",
"if",
"self",
".",
"_bold",
"else",
"font",
".",
"NORMAL",
",",
"slant",
"=",
"font",
".",
"ITALIC",
"if",
"self",
".",
"_italic",
"else",
"font",
".",
"ROMAN",
",",
"underline",
"=",
"1",
"if",
"self",
".",
"_underline",
"else",
"0",
",",
"overstrike",
"=",
"1",
"if",
"self",
".",
"_overstrike",
"else",
"0",
")",
"return",
"font_tuple",
",",
"font_obj"
] | Selected font.
:return: font tuple (family_name, size, \*options), :class:`~font.Font` object | [
"Selected",
"font",
".",
":",
"return",
":",
"font",
"tuple",
"(",
"family_name",
"size",
"\\",
"*",
"options",
")",
":",
"class",
":",
"~font",
".",
"Font",
"object"
] | python | train |
bcbio/bcbio-nextgen | bcbio/pipeline/shared.py | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/shared.py#L246-L259 | def remove_exclude_regions(f):
"""Remove regions to exclude based on configuration: polyA, LCR, high depth.
"""
exclude_fns = {"lcr": remove_lcr_regions, "highdepth": remove_highdepth_regions,
"polyx": remove_polyx_regions}
@functools.wraps(f)
def wrapper(variant_regions, region, out_file, items=None, do_merge=True, data=None):
region_bed = f(variant_regions, region, out_file, items, do_merge, data)
if region_bed and isinstance(region_bed, six.string_types) and os.path.exists(region_bed) and items:
for e in get_exclude_regions(items):
if e in exclude_fns:
region_bed = exclude_fns[e](region_bed, items)
return region_bed
return wrapper | [
"def",
"remove_exclude_regions",
"(",
"f",
")",
":",
"exclude_fns",
"=",
"{",
"\"lcr\"",
":",
"remove_lcr_regions",
",",
"\"highdepth\"",
":",
"remove_highdepth_regions",
",",
"\"polyx\"",
":",
"remove_polyx_regions",
"}",
"@",
"functools",
".",
"wraps",
"(",
"f",
")",
"def",
"wrapper",
"(",
"variant_regions",
",",
"region",
",",
"out_file",
",",
"items",
"=",
"None",
",",
"do_merge",
"=",
"True",
",",
"data",
"=",
"None",
")",
":",
"region_bed",
"=",
"f",
"(",
"variant_regions",
",",
"region",
",",
"out_file",
",",
"items",
",",
"do_merge",
",",
"data",
")",
"if",
"region_bed",
"and",
"isinstance",
"(",
"region_bed",
",",
"six",
".",
"string_types",
")",
"and",
"os",
".",
"path",
".",
"exists",
"(",
"region_bed",
")",
"and",
"items",
":",
"for",
"e",
"in",
"get_exclude_regions",
"(",
"items",
")",
":",
"if",
"e",
"in",
"exclude_fns",
":",
"region_bed",
"=",
"exclude_fns",
"[",
"e",
"]",
"(",
"region_bed",
",",
"items",
")",
"return",
"region_bed",
"return",
"wrapper"
] | Remove regions to exclude based on configuration: polyA, LCR, high depth. | [
"Remove",
"regions",
"to",
"exclude",
"based",
"on",
"configuration",
":",
"polyA",
"LCR",
"high",
"depth",
"."
] | python | train |
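A hedged sketch of applying the decorator; the decorated function below is hypothetical but follows the wrapper's expected signature:

```python
from bcbio.pipeline.shared import remove_exclude_regions

@remove_exclude_regions
def make_region_bed(variant_regions, region, out_file, items=None, do_merge=True, data=None):
    # hypothetical producer of a BED file of callable regions
    return out_file

# When called with items, any configured lcr/highdepth/polyx exclude regions
# are stripped from the returned BED before it is handed back to the caller.
```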
Grunny/zap-cli | zapcli/cli.py | https://github.com/Grunny/zap-cli/blob/d58d4850ecfc5467badfac5e5bcc841d064bd419/zapcli/cli.py#L267-L276 | def report(zap_helper, output, output_format):
"""Generate XML, MD or HTML report."""
if output_format == 'html':
zap_helper.html_report(output)
elif output_format == 'md':
zap_helper.md_report(output)
else:
zap_helper.xml_report(output)
console.info('Report saved to "{0}"'.format(output)) | [
"def",
"report",
"(",
"zap_helper",
",",
"output",
",",
"output_format",
")",
":",
"if",
"output_format",
"==",
"'html'",
":",
"zap_helper",
".",
"html_report",
"(",
"output",
")",
"elif",
"output_format",
"==",
"'md'",
":",
"zap_helper",
".",
"md_report",
"(",
"output",
")",
"else",
":",
"zap_helper",
".",
"xml_report",
"(",
"output",
")",
"console",
".",
"info",
"(",
"'Report saved to \"{0}\"'",
".",
"format",
"(",
"output",
")",
")"
] | Generate XML, MD or HTML report. | [
"Generate",
"XML",
"MD",
"or",
"HTML",
"report",
"."
] | python | train |
pytroll/posttroll | posttroll/message.py | https://github.com/pytroll/posttroll/blob/8e811a0544b5182c4a72aed074b2ff8c4324e94d/posttroll/message.py#L85-L94 | def is_valid_data(obj):
"""Check if data is JSON serializable.
"""
if obj:
try:
tmp = json.dumps(obj, default=datetime_encoder)
del tmp
except (TypeError, UnicodeDecodeError):
return False
return True | [
"def",
"is_valid_data",
"(",
"obj",
")",
":",
"if",
"obj",
":",
"try",
":",
"tmp",
"=",
"json",
".",
"dumps",
"(",
"obj",
",",
"default",
"=",
"datetime_encoder",
")",
"del",
"tmp",
"except",
"(",
"TypeError",
",",
"UnicodeDecodeError",
")",
":",
"return",
"False",
"return",
"True"
] | Check if data is JSON serializable. | [
"Check",
"if",
"data",
"is",
"JSON",
"serializable",
"."
] | python | train |
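A usage sketch; the second call assumes the module's `datetime_encoder` only knows how to encode datetimes:

```python
from datetime import datetime
from posttroll.message import is_valid_data

print(is_valid_data({"platform": "NOAA-19", "start_time": datetime.utcnow()}))  # True
print(is_valid_data({"handle": object()}))                                      # False
```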