nwo | sha | path | language | identifier | parameters | argument_list | return_statement | docstring | docstring_summary | docstring_tokens | function | function_tokens | url |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|
openedx/edx-platform
|
68dd185a0ab45862a2a61e0f803d7e03d2be71b5
|
lms/djangoapps/discussion/rest_api/pagination.py
|
python
|
DiscussionAPIPagination.get_previous_link
|
(self)
|
return previous_url
|
Returns absolute url of the previous page if there's a previous page available,
otherwise returns None
|
Returns absolute url of the previous page if there's a previous page available,
otherwise returns None
|
[
"Returns",
"absolute",
"url",
"of",
"the",
"previous",
"page",
"if",
"there",
"s",
"a",
"previous",
"page",
"available",
"otherwise",
"returns",
"None"
] |
def get_previous_link(self):
    """
    Returns absolute url of the previous page if there's a previous page available,
    otherwise returns None
    """
    previous_url = None
    if self.page.has_previous():
        previous_url = replace_query_param(self.base_url, "page", self.page.previous_page_number())
    return previous_url
|
[
"def",
"get_previous_link",
"(",
"self",
")",
":",
"previous_url",
"=",
"None",
"if",
"self",
".",
"page",
".",
"has_previous",
"(",
")",
":",
"previous_url",
"=",
"replace_query_param",
"(",
"self",
".",
"base_url",
",",
"\"page\"",
",",
"self",
".",
"page",
".",
"previous_page_number",
"(",
")",
")",
"return",
"previous_url"
] |
https://github.com/openedx/edx-platform/blob/68dd185a0ab45862a2a61e0f803d7e03d2be71b5/lms/djangoapps/discussion/rest_api/pagination.py#L78-L86
|
|
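For context, the `replace_query_param` helper that `get_previous_link` relies on (in DRF it lives in `rest_framework.utils.urls`) just swaps one query parameter in the base URL. A minimal standalone sketch, assuming nothing beyond the standard library (names here are illustrative, not the DRF implementation):

```python
# Hypothetical stand-in for DRF's replace_query_param, for illustration only.
from urllib.parse import urlencode, urlparse, parse_qs, urlunparse

def replace_query_param_sketch(url, key, value):
    """Return `url` with the query parameter `key` set to `value`."""
    parts = urlparse(url)
    query = parse_qs(parts.query)
    query[key] = [str(value)]  # overwrite (or add) the parameter
    return urlunparse(parts._replace(query=urlencode(query, doseq=True)))

print(replace_query_param_sketch("https://lms.example/api/threads?page=2", "page", 1))
# https://lms.example/api/threads?page=1
```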
Amulet-Team/Amulet-Map-Editor
|
e99619ba6aab855173b9f7c203455944ab97f89a
|
amulet_map_editor/programs/edit/api/ui/tool_manager.py
|
python
|
ToolManagerSizer.disable
|
(self)
|
Disable the active tool.
|
Disable the active tool.
|
[
"Disable",
"the",
"active",
"tool",
"."
] |
def disable(self):
    """Disable the active tool."""
    if self._active_tool is not None:
        self._active_tool.disable()
|
[
"def",
"disable",
"(",
"self",
")",
":",
"if",
"self",
".",
"_active_tool",
"is",
"not",
"None",
":",
"self",
".",
"_active_tool",
".",
"disable",
"(",
")"
] |
https://github.com/Amulet-Team/Amulet-Map-Editor/blob/e99619ba6aab855173b9f7c203455944ab97f89a/amulet_map_editor/programs/edit/api/ui/tool_manager.py#L88-L91
|
||
Cog-Creators/Red-DiscordBot
|
b05933274a11fb097873ab0d1b246d37b06aa306
|
redbot/core/commands/commands.py
|
python
|
Command.error
|
(self, coro)
|
return super().error(coro)
|
A decorator that registers a coroutine as a local error handler.
A local error handler is an :func:`.on_command_error` event limited to
a single command.
The on_command_error event is still dispatched
for commands with a dedicated error handler.
Red's global error handler will ignore commands with a registered error handler.
To have Red handle specific errors with the default behavior,
call ``Red.on_command_error`` with ``unhandled_by_cog`` set to True.
Due to how discord.py wraps exceptions, the exception you are expecting here
is likely in ``error.original``, even though the normal event handler for
bot-wide command error handling has no such wrapping.
For example:
.. code-block:: python

    @a_command.error
    async def a_command_error_handler(self, ctx, error):
        if isinstance(error.original, MyErrorType):
            self.log_exception(error.original)
        else:
            await ctx.bot.on_command_error(ctx, error.original, unhandled_by_cog=True)
Parameters
-----------
coro : :term:`coroutine function`
    The coroutine to register as the local error handler.
Raises
-------
discord.ClientException
    The coroutine is not actually a coroutine.
|
A decorator that registers a coroutine as a local error handler.
|
[
"A",
"decorator",
"that",
"registers",
"a",
"coroutine",
"as",
"a",
"local",
"error",
"handler",
"."
] |
def error(self, coro):
    """
    A decorator that registers a coroutine as a local error handler.

    A local error handler is an :func:`.on_command_error` event limited to
    a single command. The on_command_error event is still dispatched
    for commands with a dedicated error handler.

    Red's global error handler will ignore commands with a registered error handler.
    To have Red handle specific errors with the default behavior,
    call ``Red.on_command_error`` with ``unhandled_by_cog`` set to True.

    Due to how discord.py wraps exceptions, the exception you are expecting here
    is likely in ``error.original``, even though the normal event handler for
    bot-wide command error handling has no such wrapping.

    For example:

    .. code-block:: python

        @a_command.error
        async def a_command_error_handler(self, ctx, error):
            if isinstance(error.original, MyErrorType):
                self.log_exception(error.original)
            else:
                await ctx.bot.on_command_error(ctx, error.original, unhandled_by_cog=True)

    Parameters
    -----------
    coro : :term:`coroutine function`
        The coroutine to register as the local error handler.

    Raises
    -------
    discord.ClientException
        The coroutine is not actually a coroutine.
    """
    return super().error(coro)
|
[
"def",
"error",
"(",
"self",
",",
"coro",
")",
":",
"return",
"super",
"(",
")",
".",
"error",
"(",
"coro",
")"
] |
https://github.com/Cog-Creators/Red-DiscordBot/blob/b05933274a11fb097873ab0d1b246d37b06aa306/redbot/core/commands/commands.py#L639-L680
|
|
topydo/topydo
|
57d7577c987515d4b49d5500f666da29080ca3c2
|
topydo/lib/TodoListBase.py
|
python
|
TodoListBase.replace
|
(self, p_todos)
|
Replaces whole todolist with todo objects supplied as p_todos.
|
Replaces whole todolist with todo objects supplied as p_todos.
|
[
"Replaces",
"whole",
"todolist",
"with",
"todo",
"objects",
"supplied",
"as",
"p_todos",
"."
] |
def replace(self, p_todos):
    """ Replaces whole todolist with todo objects supplied as p_todos. """
    self.erase()
    self.add_todos(p_todos)
    self.dirty = True
|
[
"def",
"replace",
"(",
"self",
",",
"p_todos",
")",
":",
"self",
".",
"erase",
"(",
")",
"self",
".",
"add_todos",
"(",
"p_todos",
")",
"self",
".",
"dirty",
"=",
"True"
] |
https://github.com/topydo/topydo/blob/57d7577c987515d4b49d5500f666da29080ca3c2/topydo/lib/TodoListBase.py#L190-L194
|
||
misterch0c/shadowbroker
|
e3a069bea47a2c1009697941ac214adc6f90aa8d
|
windows/Resources/Python/Core/Lib/lib-tk/Tkinter.py
|
python
|
Canvas.scan_dragto
|
(self, x, y, gain=10)
|
Adjust the view of the canvas to GAIN times the
difference between X and Y and the coordinates given in
scan_mark.
|
Adjust the view of the canvas to GAIN times the
difference between X and Y and the coordinates given in
scan_mark.
|
[
"Adjust",
"the",
"view",
"of",
"the",
"canvas",
"to",
"GAIN",
"times",
"the",
"difference",
"between",
"X",
"and",
"Y",
"and",
"the",
"coordinates",
"given",
"in",
"scan_mark",
"."
] |
def scan_dragto(self, x, y, gain=10):
    """Adjust the view of the canvas to GAIN times the
    difference between X and Y and the coordinates given in
    scan_mark."""
    self.tk.call(self._w, 'scan', 'dragto', x, y, gain)
|
[
"def",
"scan_dragto",
"(",
"self",
",",
"x",
",",
"y",
",",
"gain",
"=",
"10",
")",
":",
"self",
".",
"tk",
".",
"call",
"(",
"self",
".",
"_w",
",",
"'scan'",
",",
"'dragto'",
",",
"x",
",",
"y",
",",
"gain",
")"
] |
https://github.com/misterch0c/shadowbroker/blob/e3a069bea47a2c1009697941ac214adc6f90aa8d/windows/Resources/Python/Core/Lib/lib-tk/Tkinter.py#L2576-L2580
|
||
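`scan_dragto` is normally paired with `scan_mark` to implement click-and-drag panning. A usage sketch against the standard Tkinter Canvas API (Python 2 here, matching the `Tkinter.py` source above; widget names are illustrative):

```python
import Tkinter as tk  # Python 3: import tkinter as tk

root = tk.Tk()
canvas = tk.Canvas(root, width=300, height=200, scrollregion=(0, 0, 1000, 1000))
canvas.pack()

# Remember the press position, then let scan_dragto pan relative to it.
canvas.bind("<ButtonPress-1>", lambda e: canvas.scan_mark(e.x, e.y))
canvas.bind("<B1-Motion>", lambda e: canvas.scan_dragto(e.x, e.y, gain=1))

root.mainloop()
```

`gain=1` gives 1:1 dragging; the default `gain=10` scrolls ten pixels per pixel of mouse motion.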
buke/GreenOdoo
|
3d8c55d426fb41fdb3f2f5a1533cfe05983ba1df
|
runtime/python/lib/python2.7/site-packages/psutil-0.6.1-py2.7-linux-x86_64.egg/psutil/_psosx.py
|
python
|
Process.get_process_threads
|
(self)
|
return retlist
|
Return the threads belonging to the process as a list of namedtuples.
|
Return the threads belonging to the process as a list of namedtuples.
|
[
"Return",
"the",
"threads",
"belonging",
"to",
"the",
"process",
"as",
"a",
"list",
"of",
"namedtuples",
"."
] |
def get_process_threads(self):
    """Return the threads belonging to the process as a list of namedtuples."""
    rawlist = _psutil_osx.get_process_threads(self.pid)
    retlist = []
    for thread_id, utime, stime in rawlist:
        ntuple = nt_thread(thread_id, utime, stime)
        retlist.append(ntuple)
    return retlist
|
[
"def",
"get_process_threads",
"(",
"self",
")",
":",
"rawlist",
"=",
"_psutil_osx",
".",
"get_process_threads",
"(",
"self",
".",
"pid",
")",
"retlist",
"=",
"[",
"]",
"for",
"thread_id",
",",
"utime",
",",
"stime",
"in",
"rawlist",
":",
"ntuple",
"=",
"nt_thread",
"(",
"thread_id",
",",
"utime",
",",
"stime",
")",
"retlist",
".",
"append",
"(",
"ntuple",
")",
"return",
"retlist"
] |
https://github.com/buke/GreenOdoo/blob/3d8c55d426fb41fdb3f2f5a1533cfe05983ba1df/runtime/python/lib/python2.7/site-packages/psutil-0.6.1-py2.7-linux-x86_64.egg/psutil/_psosx.py#L277-L284
|
|
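The method above is the OSX platform backend from psutil 0.6.x; today the same data is exposed publicly as `Process.threads()`. A usage sketch with a modern psutil (the exact method name differs across versions, so treat this as an assumption):

```python
import os
import psutil

proc = psutil.Process(os.getpid())
for thread in proc.threads():  # each entry is an (id, user_time, system_time) namedtuple
    print(thread.id, thread.user_time, thread.system_time)
```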
ayoolaolafenwa/PixelLib
|
ae56003c416a98780141a1170c9d888fe9a31317
|
pixellib/torchbackend/instance/engine/hooks.py
|
python
|
IterationTimer.__init__
|
(self, warmup_iter=3)
|
Args:
warmup_iter (int): the number of iterations at the beginning to exclude
from timing.
|
Args:
warmup_iter (int): the number of iterations at the beginning to exclude
from timing.
|
[
"Args",
":",
"warmup_iter",
"(",
"int",
")",
":",
"the",
"number",
"of",
"iterations",
"at",
"the",
"beginning",
"to",
"exclude",
"from",
"timing",
"."
] |
def __init__(self, warmup_iter=3):
    """
    Args:
        warmup_iter (int): the number of iterations at the beginning to exclude
            from timing.
    """
    self._warmup_iter = warmup_iter
    self._step_timer = Timer()
    self._start_time = time.perf_counter()
    self._total_timer = Timer()
|
[
"def",
"__init__",
"(",
"self",
",",
"warmup_iter",
"=",
"3",
")",
":",
"self",
".",
"_warmup_iter",
"=",
"warmup_iter",
"self",
".",
"_step_timer",
"=",
"Timer",
"(",
")",
"self",
".",
"_start_time",
"=",
"time",
".",
"perf_counter",
"(",
")",
"self",
".",
"_total_timer",
"=",
"Timer",
"(",
")"
] |
https://github.com/ayoolaolafenwa/PixelLib/blob/ae56003c416a98780141a1170c9d888fe9a31317/pixellib/torchbackend/instance/engine/hooks.py#L90-L99
|
||
toxygen-project/toxygen
|
0a54012cf5ee72434b923bcde7d8f1a4e575ce2f
|
toxygen/profile.py
|
python
|
Profile.update_filtration
|
(self)
|
Update the list of contacts when one of the friends changes connection status
|
Update the list of contacts when one of the friends changes connection status
|
[
"Update",
"the",
"list",
"of",
"contacts",
"when",
"one",
"of",
"the",
"friends",
"changes",
"connection",
"status"
] |
def update_filtration(self):
    """
    Update the list of contacts when one of the friends changes connection status
    """
    self.filtration_and_sorting(self._sorting, self._filter_string)
|
[
"def",
"update_filtration",
"(",
"self",
")",
":",
"self",
".",
"filtration_and_sorting",
"(",
"self",
".",
"_sorting",
",",
"self",
".",
"_filter_string",
")"
] |
https://github.com/toxygen-project/toxygen/blob/0a54012cf5ee72434b923bcde7d8f1a4e575ce2f/toxygen/profile.py#L174-L178
|
||
SteveDoyle2/pyNastran
|
eda651ac2d4883d95a34951f8a002ff94f642a1a
|
pyNastran/dev/bdf_vectorized/bdf.py
|
python
|
BDF._prepare_dmix
|
(self, class_obj, add_method, card_obj, comment='')
|
adds a DMIx
|
adds a DMIx
|
[
"adds",
"a",
"DMIx"
] |
def _prepare_dmix(self, class_obj, add_method, card_obj, comment=''):
    """adds a DMIx"""
    #elif card_name in ['DMI', 'DMIJ', 'DMIJI', 'DMIK']:
    field2 = integer(card_obj, 2, 'flag')
    if field2 == 0:
        add_method(class_obj(card_obj, comment=comment))
    else:
        name = string(card_obj, 1, 'name')
        self._dmig_temp[name].append((card_obj, comment))
|
[
"def",
"_prepare_dmix",
"(",
"self",
",",
"class_obj",
",",
"add_method",
",",
"card_obj",
",",
"comment",
"=",
"''",
")",
":",
"#elif card_name in ['DMI', 'DMIJ', 'DMIJI', 'DMIK']:",
"field2",
"=",
"integer",
"(",
"card_obj",
",",
"2",
",",
"'flag'",
")",
"if",
"field2",
"==",
"0",
":",
"add_method",
"(",
"class_obj",
"(",
"card_obj",
",",
"comment",
"=",
"comment",
")",
")",
"else",
":",
"name",
"=",
"string",
"(",
"card_obj",
",",
"1",
",",
"'name'",
")",
"self",
".",
"_dmig_temp",
"[",
"name",
"]",
".",
"append",
"(",
"(",
"card_obj",
",",
"comment",
")",
")"
] |
https://github.com/SteveDoyle2/pyNastran/blob/eda651ac2d4883d95a34951f8a002ff94f642a1a/pyNastran/dev/bdf_vectorized/bdf.py#L2092-L2100
|
||
MrH0wl/Cloudmare
|
65e5bc9888f9d362ab2abfb103ea6c1e869d67aa
|
thirdparty/xlsxwriter/worksheet.py
|
python
|
Worksheet.write_row
|
(self, row, col, data, cell_format=None)
|
return 0
|
Write a row of data starting from (row, col).
Args:
row: The cell row (zero indexed).
col: The cell column (zero indexed).
data: A list of tokens to be written with write().
format: An optional cell Format object.
Returns:
0: Success.
other: Return value of write() method.
|
Write a row of data starting from (row, col).
|
[
"Write",
"a",
"row",
"of",
"data",
"starting",
"from",
"(",
"row",
"col",
")",
"."
] |
def write_row(self, row, col, data, cell_format=None):
    """
    Write a row of data starting from (row, col).

    Args:
        row: The cell row (zero indexed).
        col: The cell column (zero indexed).
        data: A list of tokens to be written with write().
        format: An optional cell Format object.

    Returns:
        0: Success.
        other: Return value of write() method.
    """
    for token in data:
        error = self._write(row, col, token, cell_format)
        if error:
            return error
        col += 1
    return 0
|
[
"def",
"write_row",
"(",
"self",
",",
"row",
",",
"col",
",",
"data",
",",
"cell_format",
"=",
"None",
")",
":",
"for",
"token",
"in",
"data",
":",
"error",
"=",
"self",
".",
"_write",
"(",
"row",
",",
"col",
",",
"token",
",",
"cell_format",
")",
"if",
"error",
":",
"return",
"error",
"col",
"+=",
"1",
"return",
"0"
] |
https://github.com/MrH0wl/Cloudmare/blob/65e5bc9888f9d362ab2abfb103ea6c1e869d67aa/thirdparty/xlsxwriter/worksheet.py#L1126-L1146
|
|
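A short usage sketch for `write_row`, following the public XlsxWriter API (the file name is illustrative):

```python
import xlsxwriter

workbook = xlsxwriter.Workbook("demo.xlsx")
worksheet = workbook.add_worksheet()

# Write ["Name", "Qty", "Price"] across row 0, starting at column 0.
error = worksheet.write_row(0, 0, ["Name", "Qty", "Price"])
assert error == 0  # 0 signals success, per the docstring above

workbook.close()
```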
AutodeskRoboticsLab/Mimic
|
85447f0d346be66988303a6a054473d92f1ed6f4
|
mimic/scripts/extern/pyqtgraph_0_11_0_dev0/pyqtgraph/graphicsItems/ViewBox/ViewBox.py
|
python
|
ViewBox.setXLink
|
(self, view)
|
Link this view's X axis to another view. (see LinkView)
|
Link this view's X axis to another view. (see LinkView)
|
[
"Link",
"this",
"view",
"s",
"X",
"axis",
"to",
"another",
"view",
".",
"(",
"see",
"LinkView",
")"
] |
def setXLink(self, view):
"""Link this view's X axis to another view. (see LinkView)"""
self.linkView(self.XAxis, view)
|
[
"def",
"setXLink",
"(",
"self",
",",
"view",
")",
":",
"self",
".",
"linkView",
"(",
"self",
".",
"XAxis",
",",
"view",
")"
] |
https://github.com/AutodeskRoboticsLab/Mimic/blob/85447f0d346be66988303a6a054473d92f1ed6f4/mimic/scripts/extern/pyqtgraph_0_11_0_dev0/pyqtgraph/graphicsItems/ViewBox/ViewBox.py#L860-L862
|
||
TencentCloud/tencentcloud-sdk-python
|
3677fd1cdc8c5fd626ce001c13fd3b59d1f279d2
|
tencentcloud/tia/v20180226/models.py
|
python
|
QueryLogsResponse.__init__
|
(self)
|
r"""
:param Context: Log query context, used to load more logs
:type Context: str
:param Logs: List of log contents
:type Logs: list of Log
:param Listover: Whether all matching logs have been returned
:type Listover: bool
:param RequestId: Unique request ID, returned with every request. The RequestId of the request is needed to troubleshoot issues.
:type RequestId: str
|
r"""
:param Context: Log query context, used to load more logs
:type Context: str
:param Logs: List of log contents
:type Logs: list of Log
:param Listover: Whether all matching logs have been returned
:type Listover: bool
:param RequestId: Unique request ID, returned with every request. The RequestId of the request is needed to troubleshoot issues.
:type RequestId: str
|
[
"r",
":",
"param",
"Context",
":",
"Log",
"query",
"context",
"used",
"to",
"load",
"more",
"logs",
":",
"type",
"Context",
":",
"str",
":",
"param",
"Logs",
":",
"List",
"of",
"log",
"contents",
":",
"type",
"Logs",
":",
"list",
"of",
"Log",
":",
"param",
"Listover",
":",
"Whether",
"all",
"matching",
"logs",
"have",
"been",
"returned",
":",
"type",
"Listover",
":",
"bool",
":",
"param",
"RequestId",
":",
"Unique",
"request",
"ID",
"returned",
"with",
"every",
"request",
".",
"The",
"RequestId",
"of",
"the",
"request",
"is",
"needed",
"to",
"troubleshoot",
"issues",
".",
":",
"type",
"RequestId",
":",
"str"
] |
def __init__(self):
    r"""
    :param Context: Log query context, used to load more logs
    :type Context: str
    :param Logs: List of log contents
    :type Logs: list of Log
    :param Listover: Whether all matching logs have been returned
    :type Listover: bool
    :param RequestId: Unique request ID, returned with every request. The RequestId of the request is needed to troubleshoot issues.
    :type RequestId: str
    """
    self.Context = None
    self.Logs = None
    self.Listover = None
    self.RequestId = None
|
[
"def",
"__init__",
"(",
"self",
")",
":",
"self",
".",
"Context",
"=",
"None",
"self",
".",
"Logs",
"=",
"None",
"self",
".",
"Listover",
"=",
"None",
"self",
".",
"RequestId",
"=",
"None"
] |
https://github.com/TencentCloud/tencentcloud-sdk-python/blob/3677fd1cdc8c5fd626ce001c13fd3b59d1f279d2/tencentcloud/tia/v20180226/models.py#L874-L888
|
||
Netflix/metaflow
|
25887ae77494f818d76bfa5f1560cf103cd15604
|
metaflow/datastore/content_addressed_store.py
|
python
|
ContentAddressedStore.save_blobs
|
(self, blob_iter, raw=False, len_hint=0)
|
return results
|
Saves blobs of data to the datastore
The blobs of data are saved as is if raw is True. If raw is False, the
datastore may process the blobs and they should then only be loaded
using load_blob
NOTE: The idea here is that there are two modes to access the file once
it is saved to the datastore:
- if raw is True, you would be able to access it directly using the
URI returned; the bytes that are passed in as 'blob' would be
returned directly by reading the object at that URI. You would also
be able to access it using load_blob passing the key returned
- if raw is False, no URI would be returned (the URI would be None)
and you would only be able to access the object using load_blob.
- The API also specifically takes a list to allow for parallel writes
if available in the datastore. We could also make a single
save_blob API alongside save_blobs but this seems superfluous
Parameters
----------
blob_iter : Iterator over bytes objects to save
raw : bool, optional
Whether to save the bytes directly or process them, by default False
len_hint : Hint of the number of blobs that will be produced by the
iterator, by default 0
Returns
-------
List of save_blobs_result:
The list order is the same as the blobs passed in. The URI will be
None if raw is False.
|
Saves blobs of data to the datastore
|
[
"Saves",
"blobs",
"of",
"data",
"to",
"the",
"datastore"
] |
def save_blobs(self, blob_iter, raw=False, len_hint=0):
    """
    Saves blobs of data to the datastore

    The blobs of data are saved as is if raw is True. If raw is False, the
    datastore may process the blobs and they should then only be loaded
    using load_blob

    NOTE: The idea here is that there are two modes to access the file once
    it is saved to the datastore:
      - if raw is True, you would be able to access it directly using the
        URI returned; the bytes that are passed in as 'blob' would be
        returned directly by reading the object at that URI. You would also
        be able to access it using load_blob passing the key returned
      - if raw is False, no URI would be returned (the URI would be None)
        and you would only be able to access the object using load_blob.
      - The API also specifically takes a list to allow for parallel writes
        if available in the datastore. We could also make a single
        save_blob API alongside save_blobs but this seems superfluous

    Parameters
    ----------
    blob_iter : Iterator over bytes objects to save
    raw : bool, optional
        Whether to save the bytes directly or process them, by default False
    len_hint : Hint of the number of blobs that will be produced by the
        iterator, by default 0

    Returns
    -------
    List of save_blobs_result:
        The list order is the same as the blobs passed in. The URI will be
        None if raw is False.
    """
    results = []

    def packing_iter():
        for blob in blob_iter:
            sha = sha1(blob).hexdigest()
            path = self._storage_impl.path_join(self._prefix, sha[:2], sha)
            results.append(
                self.save_blobs_result(
                    uri=self._storage_impl.full_uri(path) if raw else None,
                    key=sha,
                )
            )
            if not self._storage_impl.is_file([path])[0]:
                # only process blobs that don't exist already in the
                # backing datastore
                meta = {"cas_raw": raw, "cas_version": 1}
                if raw:
                    yield path, (BytesIO(blob), meta)
                else:
                    yield path, (self._pack_v1(blob), meta)

    # We don't actually want to overwrite but by saying =True, we avoid
    # checking again saving some operations. We are already sure we are not
    # sending duplicate files since we already checked.
    self._storage_impl.save_bytes(packing_iter(), overwrite=True, len_hint=len_hint)
    return results
|
[
"def",
"save_blobs",
"(",
"self",
",",
"blob_iter",
",",
"raw",
"=",
"False",
",",
"len_hint",
"=",
"0",
")",
":",
"results",
"=",
"[",
"]",
"def",
"packing_iter",
"(",
")",
":",
"for",
"blob",
"in",
"blob_iter",
":",
"sha",
"=",
"sha1",
"(",
"blob",
")",
".",
"hexdigest",
"(",
")",
"path",
"=",
"self",
".",
"_storage_impl",
".",
"path_join",
"(",
"self",
".",
"_prefix",
",",
"sha",
"[",
":",
"2",
"]",
",",
"sha",
")",
"results",
".",
"append",
"(",
"self",
".",
"save_blobs_result",
"(",
"uri",
"=",
"self",
".",
"_storage_impl",
".",
"full_uri",
"(",
"path",
")",
"if",
"raw",
"else",
"None",
",",
"key",
"=",
"sha",
",",
")",
")",
"if",
"not",
"self",
".",
"_storage_impl",
".",
"is_file",
"(",
"[",
"path",
"]",
")",
"[",
"0",
"]",
":",
"# only process blobs that don't exist already in the",
"# backing datastore",
"meta",
"=",
"{",
"\"cas_raw\"",
":",
"raw",
",",
"\"cas_version\"",
":",
"1",
"}",
"if",
"raw",
":",
"yield",
"path",
",",
"(",
"BytesIO",
"(",
"blob",
")",
",",
"meta",
")",
"else",
":",
"yield",
"path",
",",
"(",
"self",
".",
"_pack_v1",
"(",
"blob",
")",
",",
"meta",
")",
"# We don't actually want to overwrite but by saying =True, we avoid",
"# checking again saving some operations. We are already sure we are not",
"# sending duplicate files since we already checked.",
"self",
".",
"_storage_impl",
".",
"save_bytes",
"(",
"packing_iter",
"(",
")",
",",
"overwrite",
"=",
"True",
",",
"len_hint",
"=",
"len_hint",
")",
"return",
"results"
] |
https://github.com/Netflix/metaflow/blob/25887ae77494f818d76bfa5f1560cf103cd15604/metaflow/datastore/content_addressed_store.py#L41-L101
|
|
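A minimal sketch of the content-addressed layout `save_blobs` builds: each blob is keyed by its SHA1 digest and sharded by the first two hex characters. The prefix is illustrative; Metaflow's real store layers packing and metadata on top of this:

```python
from hashlib import sha1

def blob_path(prefix, blob):
    """Return the sharded storage path and content key for `blob`."""
    digest = sha1(blob).hexdigest()
    return "/".join([prefix, digest[:2], digest]), digest

path, key = blob_path("mf.blobs", b"hello world")
print(key)   # 2aae6c35c94fcfb415dbe95f408b9ce91ee846ed
print(path)  # mf.blobs/2a/2aae6c35c94fcfb415dbe95f408b9ce91ee846ed
```

Because the key is derived from the content, re-saving identical bytes maps to the same path, which is what lets `save_blobs` skip blobs that already exist.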
caiiiac/Machine-Learning-with-Python
|
1a26c4467da41ca4ebc3d5bd789ea942ef79422f
|
MachineLearning/venv/lib/python3.5/site-packages/sklearn/feature_extraction/image.py
|
python
|
reconstruct_from_patches_2d
|
(patches, image_size)
|
return img
|
Reconstruct the image from all of its patches.
Patches are assumed to overlap and the image is constructed by filling in
the patches from left to right, top to bottom, averaging the overlapping
regions.
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
patches : array, shape = (n_patches, patch_height, patch_width) or
(n_patches, patch_height, patch_width, n_channels)
The complete set of patches. If the patches contain colour information,
channels are indexed along the last dimension: RGB patches would
have `n_channels=3`.
image_size : tuple of ints (image_height, image_width) or
(image_height, image_width, n_channels)
the size of the image that will be reconstructed
Returns
-------
image : array, shape = image_size
the reconstructed image
|
Reconstruct the image from all of its patches.
|
[
"Reconstruct",
"the",
"image",
"from",
"all",
"of",
"its",
"patches",
"."
] |
def reconstruct_from_patches_2d(patches, image_size):
    """Reconstruct the image from all of its patches.

    Patches are assumed to overlap and the image is constructed by filling in
    the patches from left to right, top to bottom, averaging the overlapping
    regions.

    Read more in the :ref:`User Guide <image_feature_extraction>`.

    Parameters
    ----------
    patches : array, shape = (n_patches, patch_height, patch_width) or
        (n_patches, patch_height, patch_width, n_channels)
        The complete set of patches. If the patches contain colour information,
        channels are indexed along the last dimension: RGB patches would
        have `n_channels=3`.
    image_size : tuple of ints (image_height, image_width) or
        (image_height, image_width, n_channels)
        the size of the image that will be reconstructed

    Returns
    -------
    image : array, shape = image_size
        the reconstructed image
    """
    i_h, i_w = image_size[:2]
    p_h, p_w = patches.shape[1:3]
    img = np.zeros(image_size)
    # compute the dimensions of the patches array
    n_h = i_h - p_h + 1
    n_w = i_w - p_w + 1
    for p, (i, j) in zip(patches, product(range(n_h), range(n_w))):
        img[i:i + p_h, j:j + p_w] += p
    for i in range(i_h):
        for j in range(i_w):
            # divide by the amount of overlap
            # XXX: is this the most efficient way? memory-wise yes, cpu wise?
            img[i, j] /= float(min(i + 1, p_h, i_h - i) *
                               min(j + 1, p_w, i_w - j))
    return img
|
[
"def",
"reconstruct_from_patches_2d",
"(",
"patches",
",",
"image_size",
")",
":",
"i_h",
",",
"i_w",
"=",
"image_size",
"[",
":",
"2",
"]",
"p_h",
",",
"p_w",
"=",
"patches",
".",
"shape",
"[",
"1",
":",
"3",
"]",
"img",
"=",
"np",
".",
"zeros",
"(",
"image_size",
")",
"# compute the dimensions of the patches array",
"n_h",
"=",
"i_h",
"-",
"p_h",
"+",
"1",
"n_w",
"=",
"i_w",
"-",
"p_w",
"+",
"1",
"for",
"p",
",",
"(",
"i",
",",
"j",
")",
"in",
"zip",
"(",
"patches",
",",
"product",
"(",
"range",
"(",
"n_h",
")",
",",
"range",
"(",
"n_w",
")",
")",
")",
":",
"img",
"[",
"i",
":",
"i",
"+",
"p_h",
",",
"j",
":",
"j",
"+",
"p_w",
"]",
"+=",
"p",
"for",
"i",
"in",
"range",
"(",
"i_h",
")",
":",
"for",
"j",
"in",
"range",
"(",
"i_w",
")",
":",
"# divide by the amount of overlap",
"# XXX: is this the most efficient way? memory-wise yes, cpu wise?",
"img",
"[",
"i",
",",
"j",
"]",
"/=",
"float",
"(",
"min",
"(",
"i",
"+",
"1",
",",
"p_h",
",",
"i_h",
"-",
"i",
")",
"*",
"min",
"(",
"j",
"+",
"1",
",",
"p_w",
",",
"i_w",
"-",
"j",
")",
")",
"return",
"img"
] |
https://github.com/caiiiac/Machine-Learning-with-Python/blob/1a26c4467da41ca4ebc3d5bd789ea942ef79422f/MachineLearning/venv/lib/python3.5/site-packages/sklearn/feature_extraction/image.py#L393-L435
|
|
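A round-trip sketch using the scikit-learn module this function belongs to: extract every overlapping patch, then reconstruct. With the complete patch set, averaging the overlaps recovers the image exactly:

```python
import numpy as np
from sklearn.feature_extraction.image import (
    extract_patches_2d, reconstruct_from_patches_2d)

image = np.arange(30.0).reshape(5, 6)
patches = extract_patches_2d(image, patch_size=(2, 2))
restored = reconstruct_from_patches_2d(patches, image.shape)
print(np.allclose(image, restored))  # True
```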
Komodo/KomodoEdit
|
61edab75dce2bdb03943b387b0608ea36f548e8e
|
contrib/ecdsa/ecdsa/numbertheory.py
|
python
|
gcd
|
( *a )
|
return a[0]
|
Greatest common divisor.
Usage: gcd( [ 2, 4, 6 ] )
or: gcd( 2, 4, 6 )
|
Greatest common divisor.
|
[
"Greatest",
"common",
"divisor",
"."
] |
def gcd( *a ):
    """Greatest common divisor.
    Usage: gcd( [ 2, 4, 6 ] )
    or: gcd( 2, 4, 6 )
    """
    if len( a ) > 1: return reduce( gcd2, a )
    if hasattr( a[0], "__iter__" ): return reduce( gcd2, a[0] )
    return a[0]
|
[
"def",
"gcd",
"(",
"*",
"a",
")",
":",
"if",
"len",
"(",
"a",
")",
">",
"1",
":",
"return",
"reduce",
"(",
"gcd2",
",",
"a",
")",
"if",
"hasattr",
"(",
"a",
"[",
"0",
"]",
",",
"\"__iter__\"",
")",
":",
"return",
"reduce",
"(",
"gcd2",
",",
"a",
"[",
"0",
"]",
")",
"return",
"a",
"[",
"0",
"]"
] |
https://github.com/Komodo/KomodoEdit/blob/61edab75dce2bdb03943b387b0608ea36f548e8e/contrib/ecdsa/ecdsa/numbertheory.py#L214-L223
|
|
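A runnable sketch of the variadic `gcd` above. The two-argument `gcd2` it reduces over is defined elsewhere in `numbertheory.py`; the Euclidean stand-in below is an assumption:

```python
from functools import reduce  # builtin in Python 2, where this file lives

def gcd2(a, b):
    """Stand-in for numbertheory.gcd2: Euclidean two-argument GCD."""
    while b:
        a, b = b, a % b
    return a

def gcd(*a):
    """Greatest common divisor. Usage: gcd([2, 4, 6]) or gcd(2, 4, 6)."""
    if len(a) > 1: return reduce(gcd2, a)
    if hasattr(a[0], "__iter__"): return reduce(gcd2, a[0])
    return a[0]

print(gcd(12, 18, 24))    # 6
print(gcd([12, 18, 24]))  # 6
```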
emesene/emesene
|
4548a4098310e21b16437bb36223a7f632a4f7bc
|
emesene/e3/xmpp/Session.py
|
python
|
Session.set_social_token
|
(self, raw_token)
|
store the social service token.
raw_token is the raw uri to be processed internally
|
store the social service token.
raw_token is the raw uri to be processed internally
|
[
"store",
"the",
"social",
"service",
"token",
".",
"raw_token",
"is",
"the",
"raw",
"uri",
"to",
"be",
"processed",
"internally"
] |
def set_social_token(self, raw_token):
    '''store the social service token.
    raw_token is the raw uri to be processed internally'''
    def get_token(token_url):
        '''strips the access token from an url'''
        if token_url is None:
            return token_url
        if token_url.find("#access_token=") == -1:
            return None
        token_start = "#access_token="
        start_token = token_url.find(token_start) + len(token_start)
        end_token = token_url.find("&expires_in")
        return token_url[start_token:end_token]
    self.config.facebook_token = get_token(raw_token)
    #only activate service if we have an access token
    activate = bool(self.config.facebook_token is not None)
    self.activate_social_services(activate)
|
[
"def",
"set_social_token",
"(",
"self",
",",
"raw_token",
")",
":",
"def",
"get_token",
"(",
"token_url",
")",
":",
"'''strips the access token from an url'''",
"if",
"token_url",
"is",
"None",
":",
"return",
"token_url",
"if",
"token_url",
".",
"find",
"(",
"\"#access_token=\"",
")",
"==",
"-",
"1",
":",
"return",
"None",
"token_start",
"=",
"\"#access_token=\"",
"start_token",
"=",
"token_url",
".",
"find",
"(",
"token_start",
")",
"+",
"len",
"(",
"token_start",
")",
"end_token",
"=",
"token_url",
".",
"find",
"(",
"\"&expires_in\"",
")",
"return",
"token_url",
"[",
"start_token",
":",
"end_token",
"]",
"self",
".",
"config",
".",
"facebook_token",
"=",
"get_token",
"(",
"raw_token",
")",
"#only activate service if we have an access token",
"activate",
"=",
"bool",
"(",
"self",
".",
"config",
".",
"facebook_token",
"is",
"not",
"None",
")",
"self",
".",
"activate_social_services",
"(",
"activate",
")"
] |
https://github.com/emesene/emesene/blob/4548a4098310e21b16437bb36223a7f632a4f7bc/emesene/e3/xmpp/Session.py#L170-L189
|
||
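The inner `get_token` is just fragment parsing. A standalone sketch of the same extraction using the standard library (Python 3 names; the example URI is made up):

```python
from urllib.parse import urlparse, parse_qs

def parse_access_token(raw_uri):
    """Pull "access_token" out of an OAuth-style redirect URI fragment."""
    if raw_uri is None:
        return None
    fragment = urlparse(raw_uri).fragment
    return parse_qs(fragment).get("access_token", [None])[0]

uri = "https://example.com/cb#access_token=abc123&expires_in=3600"
print(parse_access_token(uri))  # abc123
```

Parsing the fragment with `parse_qs` avoids the manual `find`/slice arithmetic and also works when the parameter order changes.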
bruderstein/PythonScript
|
df9f7071ddf3a079e3a301b9b53a6dc78cf1208f
|
PythonLib/min/platform.py
|
python
|
uname_result.__reduce__
|
(self)
|
return uname_result, tuple(self)[:len(self._fields)]
|
[] |
def __reduce__(self):
    return uname_result, tuple(self)[:len(self._fields)]
|
[
"def",
"__reduce__",
"(",
"self",
")",
":",
"return",
"uname_result",
",",
"tuple",
"(",
"self",
")",
"[",
":",
"len",
"(",
"self",
".",
"_fields",
")",
"]"
] |
https://github.com/bruderstein/PythonScript/blob/df9f7071ddf3a079e3a301b9b53a6dc78cf1208f/PythonLib/min/platform.py#L805-L806
|
|||
galaxyproject/galaxy
|
4c03520f05062e0f4a1b3655dc0b7452fda69943
|
lib/galaxy/managers/collections.py
|
python
|
DatasetCollectionManager.copy
|
(self, trans, parent, source, encoded_source_id, copy_elements=False, dataset_instance_attributes=None)
|
return new_hdca
|
PRECONDITION: security checks on ability to add to parent occurred
during load.
|
PRECONDITION: security checks on ability to add to parent occurred
during load.
|
[
"PRECONDITION",
":",
"security",
"checks",
"on",
"ability",
"to",
"add",
"to",
"parent",
"occurred",
"during",
"load",
"."
] |
def copy(self, trans, parent, source, encoded_source_id, copy_elements=False, dataset_instance_attributes=None):
    """
    PRECONDITION: security checks on ability to add to parent occurred
    during load.
    """
    assert source == "hdca"  # for now
    source_hdca = self.__get_history_collection_instance(trans, encoded_source_id)
    copy_kwds = {}
    if copy_elements:
        copy_kwds["element_destination"] = parent  # e.g. a history
    if dataset_instance_attributes is not None:
        copy_kwds["dataset_instance_attributes"] = dataset_instance_attributes
    new_hdca = source_hdca.copy(flush=False, **copy_kwds)
    new_hdca.copy_tags_from(target_user=trans.get_user(), source=source_hdca)
    if not copy_elements:
        parent.add_dataset_collection(new_hdca)
    trans.sa_session.flush()
    return new_hdca
|
[
"def",
"copy",
"(",
"self",
",",
"trans",
",",
"parent",
",",
"source",
",",
"encoded_source_id",
",",
"copy_elements",
"=",
"False",
",",
"dataset_instance_attributes",
"=",
"None",
")",
":",
"assert",
"source",
"==",
"\"hdca\"",
"# for now",
"source_hdca",
"=",
"self",
".",
"__get_history_collection_instance",
"(",
"trans",
",",
"encoded_source_id",
")",
"copy_kwds",
"=",
"{",
"}",
"if",
"copy_elements",
":",
"copy_kwds",
"[",
"\"element_destination\"",
"]",
"=",
"parent",
"# e.g. a history",
"if",
"dataset_instance_attributes",
"is",
"not",
"None",
":",
"copy_kwds",
"[",
"\"dataset_instance_attributes\"",
"]",
"=",
"dataset_instance_attributes",
"new_hdca",
"=",
"source_hdca",
".",
"copy",
"(",
"flush",
"=",
"False",
",",
"*",
"*",
"copy_kwds",
")",
"new_hdca",
".",
"copy_tags_from",
"(",
"target_user",
"=",
"trans",
".",
"get_user",
"(",
")",
",",
"source",
"=",
"source_hdca",
")",
"if",
"not",
"copy_elements",
":",
"parent",
".",
"add_dataset_collection",
"(",
"new_hdca",
")",
"trans",
".",
"sa_session",
".",
"flush",
"(",
")",
"return",
"new_hdca"
] |
https://github.com/galaxyproject/galaxy/blob/4c03520f05062e0f4a1b3655dc0b7452fda69943/lib/galaxy/managers/collections.py#L339-L356
|
|
uber-research/UPSNet
|
aa8434e5a721ed217849607815304f68dfd7720a
|
upsnet/models/resnet.py
|
python
|
get_params
|
(model, prefixs, suffixes, exclude=None)
|
This generator returns all the parameters of the net except for
the last classification layer. Note that for each batchnorm layer,
requires_grad is set to False in deeplab_resnet.py, therefore this function does not return
any batchnorm parameter
|
This generator returns all the parameters of the net except for
the last classification layer. Note that for each batchnorm layer,
requires_grad is set to False in deeplab_resnet.py, therefore this function does not return
any batchnorm parameter
|
[
"This",
"generator",
"returns",
"all",
"the",
"parameters",
"of",
"the",
"net",
"except",
"for",
"the",
"last",
"classification",
"layer",
".",
"Note",
"that",
"for",
"each",
"batchnorm",
"layer",
"requires_grad",
"is",
"set",
"to",
"False",
"in",
"deeplab_resnet",
".",
"py",
"therefore",
"this",
"function",
"does",
"not",
"return",
"any",
"batchnorm",
"parameter"
] |
def get_params(model, prefixs, suffixes, exclude=None):
    """
    This generator returns all the parameters of the net except for
    the last classification layer. Note that for each batchnorm layer,
    requires_grad is set to False in deeplab_resnet.py, therefore this function does not return
    any batchnorm parameter
    """
    for name, module in model.named_modules():
        for prefix in prefixs:
            if name == prefix:
                for n, p in module.named_parameters():
                    n = '.'.join([name, n])
                    if type(exclude) == list and n in exclude:
                        continue
                    if type(exclude) == str and exclude in n:
                        continue
                    for suffix in suffixes:
                        if (n.split('.')[-1].startswith(suffix) or n.endswith(suffix)) and p.requires_grad:
                            yield p
                            break
|
[
"def",
"get_params",
"(",
"model",
",",
"prefixs",
",",
"suffixes",
",",
"exclude",
"=",
"None",
")",
":",
"for",
"name",
",",
"module",
"in",
"model",
".",
"named_modules",
"(",
")",
":",
"for",
"prefix",
"in",
"prefixs",
":",
"if",
"name",
"==",
"prefix",
":",
"for",
"n",
",",
"p",
"in",
"module",
".",
"named_parameters",
"(",
")",
":",
"n",
"=",
"'.'",
".",
"join",
"(",
"[",
"name",
",",
"n",
"]",
")",
"if",
"type",
"(",
"exclude",
")",
"==",
"list",
"and",
"n",
"in",
"exclude",
":",
"continue",
"if",
"type",
"(",
"exclude",
")",
"==",
"str",
"and",
"exclude",
"in",
"n",
":",
"continue",
"for",
"suffix",
"in",
"suffixes",
":",
"if",
"(",
"n",
".",
"split",
"(",
"'.'",
")",
"[",
"-",
"1",
"]",
".",
"startswith",
"(",
"suffix",
")",
"or",
"n",
".",
"endswith",
"(",
"suffix",
")",
")",
"and",
"p",
".",
"requires_grad",
":",
"yield",
"p",
"break"
] |
https://github.com/uber-research/UPSNet/blob/aa8434e5a721ed217849607815304f68dfd7720a/upsnet/models/resnet.py#L31-L51
|
||
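A usage sketch: feed the generator into a PyTorch optimizer to give different module groups different learning rates. It assumes `get_params` from the listing above is in scope and that torchvision is available; the prefixes match resnet18's module names:

```python
import torch
import torchvision

model = torchvision.models.resnet18()
optimizer = torch.optim.SGD(
    [
        {"params": get_params(model, ["layer4"], ["weight"]), "lr": 1e-2},
        {"params": get_params(model, ["fc"], ["weight", "bias"]), "lr": 1e-1},
    ],
    momentum=0.9,
)
```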
FrancescoCeruti/linux-show-player
|
39aba4674d9a2caa365687906640d192e2b47e0f
|
lisp/core/fade_functions.py
|
python
|
ntime
|
(time, begin, duration)
|
return (time - begin) / (duration - begin)
|
Return normalized time.
|
Return normalized time.
|
[
"Return",
"normalized",
"time",
"."
] |
def ntime(time, begin, duration):
    """Return normalized time."""
    return (time - begin) / (duration - begin)
|
[
"def",
"ntime",
"(",
"time",
",",
"begin",
",",
"duration",
")",
":",
"return",
"(",
"time",
"-",
"begin",
")",
"/",
"(",
"duration",
"-",
"begin",
")"
] |
https://github.com/FrancescoCeruti/linux-show-player/blob/39aba4674d9a2caa365687906640d192e2b47e0f/lisp/core/fade_functions.py#L62-L64
|
|
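A quick check of `ntime`'s contract: it maps `time` in `[begin, duration]` onto `[0, 1]`, which the fade curves in this module then consume:

```python
def ntime(time, begin, duration):
    """Return normalized time."""
    return (time - begin) / (duration - begin)

print(ntime(2.5, begin=0.0, duration=5.0))  # 0.5
print(ntime(5.0, begin=0.0, duration=5.0))  # 1.0
```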
google-research/tensor2robot
|
484a15ee63df412f1f7e53861c936630ad31124b
|
meta_learning/meta_tfdata.py
|
python
|
multi_batch_apply
|
(f, num_batch_dims, *args, **kwargs)
|
return expand_batch_dims(outputs, batch_sizes)
|
Vectorized application of f on tensors with multiple batch dims.
Batch dims must be the same for every tensor in args/kwargs.
Args:
f: Callable, needs only expect one batch dim in input tensors.
num_batch_dims: Integer, the number of batch dims.
*args: Args passed into f (tensors will be reshaped to 1 batch dim).
**kwargs: Kwargs passed into f (tensors will be reshaped to 1 batch dim).
Returns:
The result of calling f on args, kwargs.
|
Vectorized application of f on tensors with multiple batch dims.
|
[
"Vectorized",
"application",
"of",
"f",
"on",
"tensors",
"with",
"multiple",
"batch",
"dims",
"."
] |
def multi_batch_apply(f, num_batch_dims, *args, **kwargs):
    """Vectorized application of f on tensors with multiple batch dims.

    Batch dims must be the same for every tensor in args/kwargs.

    Args:
        f: Callable, needs only expect one batch dim in input tensors.
        num_batch_dims: Integer, the number of batch dims.
        *args: Args passed into f (tensors will be reshaped to 1 batch dim).
        **kwargs: Kwargs passed into f (tensors will be reshaped to 1 batch dim).

    Returns:
        The result of calling f on args, kwargs.
    """
    flattened_inputs = nest.flatten(args) + nest.flatten(kwargs)
    tensor_inputs = [inp for inp in flattened_inputs
                     if isinstance(inp, tf.Tensor)]
    batch_sizes = tf.shape(tensor_inputs[0])[:num_batch_dims]
    merged_args = merge_first_n_dims(args, num_batch_dims)
    merged_kwargs = merge_first_n_dims(kwargs, num_batch_dims)
    outputs = f(*merged_args, **merged_kwargs)
    return expand_batch_dims(outputs, batch_sizes)
|
[
"def",
"multi_batch_apply",
"(",
"f",
",",
"num_batch_dims",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"flattened_inputs",
"=",
"nest",
".",
"flatten",
"(",
"args",
")",
"+",
"nest",
".",
"flatten",
"(",
"kwargs",
")",
"tensor_inputs",
"=",
"[",
"inp",
"for",
"inp",
"in",
"flattened_inputs",
"if",
"isinstance",
"(",
"inp",
",",
"tf",
".",
"Tensor",
")",
"]",
"batch_sizes",
"=",
"tf",
".",
"shape",
"(",
"tensor_inputs",
"[",
"0",
"]",
")",
"[",
":",
"num_batch_dims",
"]",
"merged_args",
"=",
"merge_first_n_dims",
"(",
"args",
",",
"num_batch_dims",
")",
"merged_kwargs",
"=",
"merge_first_n_dims",
"(",
"kwargs",
",",
"num_batch_dims",
")",
"outputs",
"=",
"f",
"(",
"*",
"merged_args",
",",
"*",
"*",
"merged_kwargs",
")",
"return",
"expand_batch_dims",
"(",
"outputs",
",",
"batch_sizes",
")"
] |
https://github.com/google-research/tensor2robot/blob/484a15ee63df412f1f7e53861c936630ad31124b/meta_learning/meta_tfdata.py#L261-L281
|
|
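The merge/apply/expand pattern above depends on two module-local helpers (`merge_first_n_dims`, `expand_batch_dims`). A self-contained sketch of the same reshape trick in plain TF2, for a single tensor argument — an assumption for illustration, not the module's implementation:

```python
import tensorflow as tf

def multi_batch_apply_sketch(f, num_batch_dims, x):
    """Collapse the leading batch dims of x, apply f, then restore them."""
    batch_sizes = tf.shape(x)[:num_batch_dims]
    merged = tf.reshape(x, tf.concat([[-1], tf.shape(x)[num_batch_dims:]], axis=0))
    out = f(merged)
    return tf.reshape(out, tf.concat([batch_sizes, tf.shape(out)[1:]], axis=0))

x = tf.ones([2, 3, 4])            # two batch dims (2, 3), feature dim 4
dense = tf.keras.layers.Dense(8)  # only ever sees one batch dim
y = multi_batch_apply_sketch(dense, 2, x)
print(y.shape)                    # (2, 3, 8)
```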
imatge-upc/skiprnn-2017-telecombcn
|
63f93a539a3f2c7a713089fdd2c38bb7b0c581ca
|
src/rnn_cells/basic_rnn_cells.py
|
python
|
BasicGRUCell.__init__
|
(self, num_units, activation=tf.tanh, layer_norm=False)
|
Initialize the basic GRU cell
:param num_units: int, the number of units in the GRU cell
:param activation: activation function of the inner states
:param layer_norm: bool, whether to use layer normalization
|
Initialize the basic GRU cell
:param num_units: int, the number of units in the GRU cell
:param activation: activation function of the inner states
:param layer_norm: bool, whether to use layer normalization
|
[
"Initialize",
"the",
"basic",
"GRU",
"cell",
":",
"param",
"num_units",
":",
"int",
"the",
"number",
"of",
"units",
"in",
"the",
"GRU",
"cell",
":",
"param",
"activation",
":",
"activation",
"function",
"of",
"the",
"inner",
"states",
":",
"param",
"layer_norm",
":",
"bool",
"whether",
"to",
"use",
"layer",
"normalization"
] |
def __init__(self, num_units, activation=tf.tanh, layer_norm=False):
    """
    Initialize the basic GRU cell
    :param num_units: int, the number of units in the GRU cell
    :param activation: activation function of the inner states
    :param layer_norm: bool, whether to use layer normalization
    """
    self._num_units = num_units
    self._activation = activation
    self._layer_norm = layer_norm
|
[
"def",
"__init__",
"(",
"self",
",",
"num_units",
",",
"activation",
"=",
"tf",
".",
"tanh",
",",
"layer_norm",
"=",
"False",
")",
":",
"self",
".",
"_num_units",
"=",
"num_units",
"self",
".",
"_activation",
"=",
"activation",
"self",
".",
"_layer_norm",
"=",
"layer_norm"
] |
https://github.com/imatge-upc/skiprnn-2017-telecombcn/blob/63f93a539a3f2c7a713089fdd2c38bb7b0c581ca/src/rnn_cells/basic_rnn_cells.py#L93-L102
|
||
rembo10/headphones
|
b3199605be1ebc83a7a8feab6b1e99b64014187c
|
headphones/api.py
|
python
|
Api._getAlbumArt
|
(self, **kwargs)
|
[] |
def _getAlbumArt(self, **kwargs):
    if 'id' not in kwargs:
        self.data = 'Missing parameter: id'
        return
    else:
        self.id = kwargs['id']
    self.data = cache.getArtwork(AlbumID=self.id)
|
[
"def",
"_getAlbumArt",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"'id'",
"not",
"in",
"kwargs",
":",
"self",
".",
"data",
"=",
"'Missing parameter: id'",
"return",
"else",
":",
"self",
".",
"id",
"=",
"kwargs",
"[",
"'id'",
"]",
"self",
".",
"data",
"=",
"cache",
".",
"getArtwork",
"(",
"AlbumID",
"=",
"self",
".",
"id",
")"
] |
https://github.com/rembo10/headphones/blob/b3199605be1ebc83a7a8feab6b1e99b64014187c/headphones/api.py#L380-L388
|
||||
theotherp/nzbhydra
|
4b03d7f769384b97dfc60dade4806c0fc987514e
|
libs/cmd.py
|
python
|
Cmd.complete
|
(self, text, state)
|
Return the next possible completion for 'text'.
If a command has not been entered, then complete against command list.
Otherwise try to call complete_<command> to get list of completions.
|
Return the next possible completion for 'text'.
|
[
"Return",
"the",
"next",
"possible",
"completion",
"for",
"text",
"."
] |
def complete(self, text, state):
    """Return the next possible completion for 'text'.

    If a command has not been entered, then complete against command list.
    Otherwise try to call complete_<command> to get list of completions.
    """
    if state == 0:
        import readline
        origline = readline.get_line_buffer()
        line = origline.lstrip()
        stripped = len(origline) - len(line)
        begidx = readline.get_begidx() - stripped
        endidx = readline.get_endidx() - stripped
        if begidx > 0:
            cmd, args, foo = self.parseline(line)
            if cmd == '':
                compfunc = self.completedefault
            else:
                try:
                    compfunc = getattr(self, 'complete_' + cmd)
                except AttributeError:
                    compfunc = self.completedefault
        else:
            compfunc = self.completenames
        self.completion_matches = compfunc(text, line, begidx, endidx)
    try:
        return self.completion_matches[state]
    except IndexError:
        return None
|
[
"def",
"complete",
"(",
"self",
",",
"text",
",",
"state",
")",
":",
"if",
"state",
"==",
"0",
":",
"import",
"readline",
"origline",
"=",
"readline",
".",
"get_line_buffer",
"(",
")",
"line",
"=",
"origline",
".",
"lstrip",
"(",
")",
"stripped",
"=",
"len",
"(",
"origline",
")",
"-",
"len",
"(",
"line",
")",
"begidx",
"=",
"readline",
".",
"get_begidx",
"(",
")",
"-",
"stripped",
"endidx",
"=",
"readline",
".",
"get_endidx",
"(",
")",
"-",
"stripped",
"if",
"begidx",
">",
"0",
":",
"cmd",
",",
"args",
",",
"foo",
"=",
"self",
".",
"parseline",
"(",
"line",
")",
"if",
"cmd",
"==",
"''",
":",
"compfunc",
"=",
"self",
".",
"completedefault",
"else",
":",
"try",
":",
"compfunc",
"=",
"getattr",
"(",
"self",
",",
"'complete_'",
"+",
"cmd",
")",
"except",
"AttributeError",
":",
"compfunc",
"=",
"self",
".",
"completedefault",
"else",
":",
"compfunc",
"=",
"self",
".",
"completenames",
"self",
".",
"completion_matches",
"=",
"compfunc",
"(",
"text",
",",
"line",
",",
"begidx",
",",
"endidx",
")",
"try",
":",
"return",
"self",
".",
"completion_matches",
"[",
"state",
"]",
"except",
"IndexError",
":",
"return",
"None"
] |
https://github.com/theotherp/nzbhydra/blob/4b03d7f769384b97dfc60dade4806c0fc987514e/libs/cmd.py#L255-L283
|
||
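A usage sketch showing the `complete_<command>` hook that `Cmd.complete` dispatches to; readline then cycles through the returned matches via `state` (the class and data below are illustrative):

```python
import cmd

class Shell(cmd.Cmd):
    FRUIT = ["apple", "apricot", "banana"]

    def do_eat(self, arg):
        print("eating", arg)

    def complete_eat(self, text, line, begidx, endidx):
        # Called by Cmd.complete once a command has been entered.
        return [f for f in self.FRUIT if f.startswith(text)]

if __name__ == "__main__":
    Shell().cmdloop()  # type "eat ap<TAB>" to cycle apple/apricot
```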
LeapBeyond/scrubadub
|
ab199f0b3cc3ca11f646aabb05ebe124d2757ea5
|
scrubadub/scrubbers.py
|
python
|
Scrubber.remove_detector
|
(self, detector: Union[Detector, Type[Detector], str])
|
Remove a ``Detector`` from a Scrubber
You can remove a detector from a ``Scrubber`` by passing one of three objects to this function:
1. the uninitialised class to this function, which removes the initialised detector of the same name.
2. an instance of a ``Detector`` class, which removes the initialised detector of the same name.
3. a string containing the name of the detector, which removes the detector of that name.
.. code:: pycon
>>> import scrubadub
>>> scrubber = scrubadub.Scrubber()
>>> scrubber.remove_detector(scrubadub.detectors.CreditCardDetector)
>>> scrubber.remove_detector('url')
>>> detector = scrubadub.detectors.email.EmailDetector()
>>> scrubber.remove_detector(detector)
:param detector: The ``Detector`` to remove from this scrubber.
:type detector: a Detector class, a Detector instance, or a string with the detector's name
|
Remove a ``Detector`` from a Scrubber
|
[
"Remove",
"a",
"Detector",
"from",
"a",
"Scrubber"
] |
def remove_detector(self, detector: Union[Detector, Type[Detector], str]):
    """Remove a ``Detector`` from a Scrubber

    You can remove a detector from a ``Scrubber`` by passing one of three objects to this function:

    1. the uninitialised class to this function, which removes the initialised detector of the same name.
    2. an instance of a ``Detector`` class, which removes the initialised detector of the same name.
    3. a string containing the name of the detector, which removes the detector of that name.

    .. code:: pycon

        >>> import scrubadub
        >>> scrubber = scrubadub.Scrubber()
        >>> scrubber.remove_detector(scrubadub.detectors.CreditCardDetector)
        >>> scrubber.remove_detector('url')
        >>> detector = scrubadub.detectors.email.EmailDetector()
        >>> scrubber.remove_detector(detector)

    :param detector: The ``Detector`` to remove from this scrubber.
    :type detector: a Detector class, a Detector instance, or a string with the detector's name
    """
    if isinstance(detector, type):
        self._detectors.pop(detector().name)
    elif isinstance(detector, detectors.base.Detector):
        self._detectors.pop(detector.name)
    elif isinstance(detector, str):
        self._detectors.pop(detector)
|
[
"def",
"remove_detector",
"(",
"self",
",",
"detector",
":",
"Union",
"[",
"Detector",
",",
"Type",
"[",
"Detector",
"]",
",",
"str",
"]",
")",
":",
"if",
"isinstance",
"(",
"detector",
",",
"type",
")",
":",
"self",
".",
"_detectors",
".",
"pop",
"(",
"detector",
"(",
")",
".",
"name",
")",
"elif",
"isinstance",
"(",
"detector",
",",
"detectors",
".",
"base",
".",
"Detector",
")",
":",
"self",
".",
"_detectors",
".",
"pop",
"(",
"detector",
".",
"name",
")",
"elif",
"isinstance",
"(",
"detector",
",",
"str",
")",
":",
"self",
".",
"_detectors",
".",
"pop",
"(",
"detector",
")"
] |
https://github.com/LeapBeyond/scrubadub/blob/ab199f0b3cc3ca11f646aabb05ebe124d2757ea5/scrubadub/scrubbers.py#L107-L133
|
||
metabrainz/picard
|
535bf8c7d9363ffc7abb3f69418ec11823c38118
|
picard/ui/options/releases.py
|
python
|
TipSlider.round_value
|
(self, value)
|
[] |
def round_value(self, value):
    step = max(1, int(self._step))
    if step > 1:
        super().setValue(int(value / step) * step)
|
[
"def",
"round_value",
"(",
"self",
",",
"value",
")",
":",
"step",
"=",
"max",
"(",
"1",
",",
"int",
"(",
"self",
".",
"_step",
")",
")",
"if",
"step",
">",
"1",
":",
"super",
"(",
")",
".",
"setValue",
"(",
"int",
"(",
"value",
"/",
"step",
")",
"*",
"step",
")"
] |
https://github.com/metabrainz/picard/blob/535bf8c7d9363ffc7abb3f69418ec11823c38118/picard/ui/options/releases.py#L103-L106
|
||||
BlackLight/platypush
|
a6b552504e2ac327c94f3a28b607061b6b60cf36
|
platypush/plugins/esp/__init__.py
|
python
|
disable_irq
|
(self, **kwargs)
|
return self.execute(code, **kwargs).output
|
Disable interrupt requests.
:param kwargs: Parameters to pass to :meth:`platypush.plugins.esp.EspPlugin.execute`.
|
Disable interrupt requests.
|
[
"Disable",
"interrupt",
"requests",
"."
] |
def disable_irq(self, **kwargs):
    """
    Disable interrupt requests.

    :param kwargs: Parameters to pass to :meth:`platypush.plugins.esp.EspPlugin.execute`.
    """
    code = '''
import machine
machine.disable_irq()
'''
    return self.execute(code, **kwargs).output
|
[
"def",
"disable_irq",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"code",
"=",
"'''\nimport machine\nmachine.disable_irq()\n'''",
"return",
"self",
".",
"execute",
"(",
"code",
",",
"*",
"*",
"kwargs",
")",
".",
"output"
] |
https://github.com/BlackLight/platypush/blob/a6b552504e2ac327c94f3a28b607061b6b60cf36/platypush/plugins/esp/__init__.py#L917-L927
|
|
1040003585/WebScrapingWithPython
|
a770fa5b03894076c8c9539b1ffff34424ffc016
|
portia_examle/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.py
|
python
|
ExFileObject.__iter__
|
(self)
|
Get an iterator over the file's lines.
|
Get an iterator over the file's lines.
|
[
"Get",
"an",
"iterator",
"over",
"the",
"file",
"s",
"lines",
"."
] |
def __iter__(self):
    """Get an iterator over the file's lines.
    """
    while True:
        line = self.readline()
        if not line:
            break
        yield line
|
[
"def",
"__iter__",
"(",
"self",
")",
":",
"while",
"True",
":",
"line",
"=",
"self",
".",
"readline",
"(",
")",
"if",
"not",
"line",
":",
"break",
"yield",
"line"
] |
https://github.com/1040003585/WebScrapingWithPython/blob/a770fa5b03894076c8c9539b1ffff34424ffc016/portia_examle/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.py#L910-L917
|
||
bruderstein/PythonScript
|
df9f7071ddf3a079e3a301b9b53a6dc78cf1208f
|
PythonLib/min/turtle.py
|
python
|
TurtleScreen.onclick
|
(self, fun, btn=1, add=None)
|
Bind fun to mouse-click event on canvas.
Arguments:
fun -- a function with two arguments, the coordinates of the
clicked point on the canvas.
btn -- the number of the mouse-button, defaults to 1
Example (for a TurtleScreen instance named screen)
>>> screen.onclick(goto)
>>> # Subsequently clicking into the TurtleScreen will
>>> # make the turtle move to the clicked point.
>>> screen.onclick(None)
|
Bind fun to mouse-click event on canvas.
|
[
"Bind",
"fun",
"to",
"mouse",
"-",
"click",
"event",
"on",
"canvas",
"."
] |
def onclick(self, fun, btn=1, add=None):
    """Bind fun to mouse-click event on canvas.

    Arguments:
    fun -- a function with two arguments, the coordinates of the
           clicked point on the canvas.
    btn -- the number of the mouse-button, defaults to 1

    Example (for a TurtleScreen instance named screen)

    >>> screen.onclick(goto)
    >>> # Subsequently clicking into the TurtleScreen will
    >>> # make the turtle move to the clicked point.
    >>> screen.onclick(None)
    """
    self._onscreenclick(fun, btn, add)
|
[
"def",
"onclick",
"(",
"self",
",",
"fun",
",",
"btn",
"=",
"1",
",",
"add",
"=",
"None",
")",
":",
"self",
".",
"_onscreenclick",
"(",
"fun",
",",
"btn",
",",
"add",
")"
] |
https://github.com/bruderstein/PythonScript/blob/df9f7071ddf3a079e3a301b9b53a6dc78cf1208f/PythonLib/min/turtle.py#L1350-L1365
|
||
bayespy/bayespy
|
0e6e6130c888a4295cc9421d61d4ad27b2960ebb
|
bayespy/inference/vmp/nodes/gp.py
|
python
|
gp_cov_delta
|
(N)
|
return np.identity(N)
|
[] |
def gp_cov_delta(N):
    return np.identity(N)
|
[
"def",
"gp_cov_delta",
"(",
"N",
")",
":",
"return",
"np",
".",
"identity",
"(",
"N",
")"
] |
https://github.com/bayespy/bayespy/blob/0e6e6130c888a4295cc9421d61d4ad27b2960ebb/bayespy/inference/vmp/nodes/gp.py#L156-L157
|
|||
mozillazg/pypy
|
2ff5cd960c075c991389f842c6d59e71cf0cb7d0
|
lib-python/2.7/dummy_thread.py
|
python
|
allocate_lock
|
()
|
return LockType()
|
Dummy implementation of thread.allocate_lock().
|
Dummy implementation of thread.allocate_lock().
|
[
"Dummy",
"implementation",
"of",
"thread",
".",
"allocate_lock",
"()",
"."
] |
def allocate_lock():
    """Dummy implementation of thread.allocate_lock()."""
    return LockType()
|
[
"def",
"allocate_lock",
"(",
")",
":",
"return",
"LockType",
"(",
")"
] |
https://github.com/mozillazg/pypy/blob/2ff5cd960c075c991389f842c6d59e71cf0cb7d0/lib-python/2.7/dummy_thread.py#L71-L73
|
|
entropy1337/infernal-twin
|
10995cd03312e39a48ade0f114ebb0ae3a711bb8
|
Modules/build/reportlab/build/lib.linux-i686-2.7/reportlab/pdfgen/pdfgeom.py
|
python
|
bezierArc
|
(x1,y1, x2,y2, startAng=0, extent=90)
|
return pointList
|
bezierArc(x1,y1, x2,y2, startAng=0, extent=90) --> List of Bezier
curve control points.
(x1, y1) and (x2, y2) are the corners of the enclosing rectangle. The
coordinate system has coordinates that increase to the right and down.
Angles, measured in degrees, start with 0 to the right (the positive X
axis) and increase counter-clockwise. The arc extends from startAng
to startAng+extent. I.e. startAng=0 and extent=180 yields an open-side-down
semi-circle.
The resulting coordinates are of the form (x1,y1, x2,y2, x3,y3, x4,y4)
such that the curve goes from (x1, y1) to (x4, y4) with (x2, y2) and
(x3, y3) as their respective Bezier control points.
|
bezierArc(x1,y1, x2,y2, startAng=0, extent=90) --> List of Bezier
curve control points.
|
[
"bezierArc",
"(",
"x1",
"y1",
"x2",
"y2",
"startAng",
"=",
"0",
"extent",
"=",
"90",
")",
"--",
">",
"List",
"of",
"Bezier",
"curve",
"control",
"points",
"."
] |
def bezierArc(x1,y1, x2,y2, startAng=0, extent=90):
    """bezierArc(x1,y1, x2,y2, startAng=0, extent=90) --> List of Bezier
    curve control points.

    (x1, y1) and (x2, y2) are the corners of the enclosing rectangle. The
    coordinate system has coordinates that increase to the right and down.
    Angles, measured in degrees, start with 0 to the right (the positive X
    axis) and increase counter-clockwise. The arc extends from startAng
    to startAng+extent. I.e. startAng=0 and extent=180 yields an open-side-down
    semi-circle.

    The resulting coordinates are of the form (x1,y1, x2,y2, x3,y3, x4,y4)
    such that the curve goes from (x1, y1) to (x4, y4) with (x2, y2) and
    (x3, y3) as their respective Bezier control points."""
    x1,y1, x2,y2 = min(x1,x2), max(y1,y2), max(x1,x2), min(y1,y2)
    if abs(extent) <= 90:
        arcList = [startAng]
        fragAngle = float(extent)
        Nfrag = 1
    else:
        arcList = []
        Nfrag = int(ceil(abs(extent)/90.))
        fragAngle = float(extent) / Nfrag
    x_cen = (x1+x2)/2.
    y_cen = (y1+y2)/2.
    rx = (x2-x1)/2.
    ry = (y2-y1)/2.
    halfAng = fragAngle * pi / 360.
    kappa = abs(4. / 3. * (1. - cos(halfAng)) / sin(halfAng))
    if fragAngle < 0:
        sign = -1
    else:
        sign = 1
    pointList = []
    for i in range(Nfrag):
        theta0 = (startAng + i*fragAngle) * pi / 180.
        theta1 = (startAng + (i+1)*fragAngle) * pi / 180.
        if fragAngle > 0:
            pointList.append((x_cen + rx * cos(theta0),
                              y_cen - ry * sin(theta0),
                              x_cen + rx * (cos(theta0) - kappa * sin(theta0)),
                              y_cen - ry * (sin(theta0) + kappa * cos(theta0)),
                              x_cen + rx * (cos(theta1) + kappa * sin(theta1)),
                              y_cen - ry * (sin(theta1) - kappa * cos(theta1)),
                              x_cen + rx * cos(theta1),
                              y_cen - ry * sin(theta1)))
        else:
            pointList.append((x_cen + rx * cos(theta0),
                              y_cen - ry * sin(theta0),
                              x_cen + rx * (cos(theta0) + kappa * sin(theta0)),
                              y_cen - ry * (sin(theta0) - kappa * cos(theta0)),
                              x_cen + rx * (cos(theta1) - kappa * sin(theta1)),
                              y_cen - ry * (sin(theta1) + kappa * cos(theta1)),
                              x_cen + rx * cos(theta1),
                              y_cen - ry * sin(theta1)))
    return pointList
|
[
"def",
"bezierArc",
"(",
"x1",
",",
"y1",
",",
"x2",
",",
"y2",
",",
"startAng",
"=",
"0",
",",
"extent",
"=",
"90",
")",
":",
"x1",
",",
"y1",
",",
"x2",
",",
"y2",
"=",
"min",
"(",
"x1",
",",
"x2",
")",
",",
"max",
"(",
"y1",
",",
"y2",
")",
",",
"max",
"(",
"x1",
",",
"x2",
")",
",",
"min",
"(",
"y1",
",",
"y2",
")",
"if",
"abs",
"(",
"extent",
")",
"<=",
"90",
":",
"arcList",
"=",
"[",
"startAng",
"]",
"fragAngle",
"=",
"float",
"(",
"extent",
")",
"Nfrag",
"=",
"1",
"else",
":",
"arcList",
"=",
"[",
"]",
"Nfrag",
"=",
"int",
"(",
"ceil",
"(",
"abs",
"(",
"extent",
")",
"/",
"90.",
")",
")",
"fragAngle",
"=",
"float",
"(",
"extent",
")",
"/",
"Nfrag",
"x_cen",
"=",
"(",
"x1",
"+",
"x2",
")",
"/",
"2.",
"y_cen",
"=",
"(",
"y1",
"+",
"y2",
")",
"/",
"2.",
"rx",
"=",
"(",
"x2",
"-",
"x1",
")",
"/",
"2.",
"ry",
"=",
"(",
"y2",
"-",
"y1",
")",
"/",
"2.",
"halfAng",
"=",
"fragAngle",
"*",
"pi",
"/",
"360.",
"kappa",
"=",
"abs",
"(",
"4.",
"/",
"3.",
"*",
"(",
"1.",
"-",
"cos",
"(",
"halfAng",
")",
")",
"/",
"sin",
"(",
"halfAng",
")",
")",
"if",
"fragAngle",
"<",
"0",
":",
"sign",
"=",
"-",
"1",
"else",
":",
"sign",
"=",
"1",
"pointList",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"Nfrag",
")",
":",
"theta0",
"=",
"(",
"startAng",
"+",
"i",
"*",
"fragAngle",
")",
"*",
"pi",
"/",
"180.",
"theta1",
"=",
"(",
"startAng",
"+",
"(",
"i",
"+",
"1",
")",
"*",
"fragAngle",
")",
"*",
"pi",
"/",
"180.",
"if",
"fragAngle",
">",
"0",
":",
"pointList",
".",
"append",
"(",
"(",
"x_cen",
"+",
"rx",
"*",
"cos",
"(",
"theta0",
")",
",",
"y_cen",
"-",
"ry",
"*",
"sin",
"(",
"theta0",
")",
",",
"x_cen",
"+",
"rx",
"*",
"(",
"cos",
"(",
"theta0",
")",
"-",
"kappa",
"*",
"sin",
"(",
"theta0",
")",
")",
",",
"y_cen",
"-",
"ry",
"*",
"(",
"sin",
"(",
"theta0",
")",
"+",
"kappa",
"*",
"cos",
"(",
"theta0",
")",
")",
",",
"x_cen",
"+",
"rx",
"*",
"(",
"cos",
"(",
"theta1",
")",
"+",
"kappa",
"*",
"sin",
"(",
"theta1",
")",
")",
",",
"y_cen",
"-",
"ry",
"*",
"(",
"sin",
"(",
"theta1",
")",
"-",
"kappa",
"*",
"cos",
"(",
"theta1",
")",
")",
",",
"x_cen",
"+",
"rx",
"*",
"cos",
"(",
"theta1",
")",
",",
"y_cen",
"-",
"ry",
"*",
"sin",
"(",
"theta1",
")",
")",
")",
"else",
":",
"pointList",
".",
"append",
"(",
"(",
"x_cen",
"+",
"rx",
"*",
"cos",
"(",
"theta0",
")",
",",
"y_cen",
"-",
"ry",
"*",
"sin",
"(",
"theta0",
")",
",",
"x_cen",
"+",
"rx",
"*",
"(",
"cos",
"(",
"theta0",
")",
"+",
"kappa",
"*",
"sin",
"(",
"theta0",
")",
")",
",",
"y_cen",
"-",
"ry",
"*",
"(",
"sin",
"(",
"theta0",
")",
"-",
"kappa",
"*",
"cos",
"(",
"theta0",
")",
")",
",",
"x_cen",
"+",
"rx",
"*",
"(",
"cos",
"(",
"theta1",
")",
"-",
"kappa",
"*",
"sin",
"(",
"theta1",
")",
")",
",",
"y_cen",
"-",
"ry",
"*",
"(",
"sin",
"(",
"theta1",
")",
"+",
"kappa",
"*",
"cos",
"(",
"theta1",
")",
")",
",",
"x_cen",
"+",
"rx",
"*",
"cos",
"(",
"theta1",
")",
",",
"y_cen",
"-",
"ry",
"*",
"sin",
"(",
"theta1",
")",
")",
")",
"return",
"pointList"
] |
https://github.com/entropy1337/infernal-twin/blob/10995cd03312e39a48ade0f114ebb0ae3a711bb8/Modules/build/reportlab/build/lib.linux-i686-2.7/reportlab/pdfgen/pdfgeom.py#L15-L77
|
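Each fragment in the returned list is an 8-tuple (x0, y0, cx1, cy1, cx2, cy2, x1, y1): the start point, the two cubic Bezier control points, and the end point of one arc piece of at most 90 degrees. A minimal usage sketch, assuming a reportlab canvas object `c` (a hypothetical variable here) and the bezierArc above:

segments = bezierArc(100, 100, 300, 200, startAng=0, extent=180)
p = c.beginPath()
p.moveTo(segments[0][0], segments[0][1])        # start of the first fragment
for x0, y0, cx1, cy1, cx2, cy2, x1, y1 in segments:
    p.curveTo(cx1, cy1, cx2, cy2, x1, y1)       # one cubic curve per fragment
c.drawPath(p)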
|
mozilla/zamboni
|
14b1a44658e47b9f048962fa52dbf00a3beaaf30
|
mkt/site/models.py
|
python
|
use_master
|
()
|
Within this context, all queries go to the master.
|
Within this context, all queries go to the master.
|
[
"Within",
"this",
"context",
"all",
"queries",
"go",
"to",
"the",
"master",
"."
] |
def use_master():
"""Within this context, all queries go to the master."""
old = getattr(multidb.pinning._locals, 'pinned', False)
multidb.pinning.pin_this_thread()
try:
yield
finally:
multidb.pinning._locals.pinned = old
|
[
"def",
"use_master",
"(",
")",
":",
"old",
"=",
"getattr",
"(",
"multidb",
".",
"pinning",
".",
"_locals",
",",
"'pinned'",
",",
"False",
")",
"multidb",
".",
"pinning",
".",
"pin_this_thread",
"(",
")",
"try",
":",
"yield",
"finally",
":",
"multidb",
".",
"pinning",
".",
"_locals",
".",
"pinned",
"=",
"old"
] |
https://github.com/mozilla/zamboni/blob/14b1a44658e47b9f048962fa52dbf00a3beaaf30/mkt/site/models.py#L62-L69
|
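The bare `yield` inside try/finally strongly suggests the function is decorated with `contextlib.contextmanager` in the original module (the decorator sits outside the extracted snippet). A usage sketch under that assumption, with a hypothetical Django model `MyModel`:

with use_master():
    obj = MyModel.objects.create(name='x')    # write goes to the master
    fresh = MyModel.objects.get(pk=obj.pk)    # read-after-write also hits the master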
||
stepjam/RLBench
|
3aa9bb3ad534d8fcdeb93d3f5ff1d161ce5c8fe6
|
rlbench/backend/task.py
|
python
|
Task.decorate_observation
|
(self, observation: Observation)
|
return observation
|
Can be used for tasks that want to modify the observations.
Usually not used. Perhaps can be used to model
:param observation: The Observation for this time step.
:return: The modified Observation.
|
Can be used for tasks that want to modify the observations.
|
[
"Can",
"be",
"used",
"for",
"tasks",
"that",
"want",
"to",
"modify",
"the",
"observations",
"."
] |
def decorate_observation(self, observation: Observation) -> Observation:
"""Can be used for tasks that want to modify the observations.
Usually not used. Perhaps can be used to model
:param observation: The Observation for this time step.
:return: The modified Observation.
"""
return observation
|
[
"def",
"decorate_observation",
"(",
"self",
",",
"observation",
":",
"Observation",
")",
"->",
"Observation",
":",
"return",
"observation"
] |
https://github.com/stepjam/RLBench/blob/3aa9bb3ad534d8fcdeb93d3f5ff1d161ce5c8fe6/rlbench/backend/task.py#L150-L158
|
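A task subclass would override this hook to post-process each observation before it reaches the agent. A sketch assuming `Observation` exposes a numpy `joint_positions` attribute (the attribute name is an assumption made for illustration):

import numpy as np

class NoisyTask(Task):
    def decorate_observation(self, observation: Observation) -> Observation:
        # add small Gaussian noise to simulate imperfect proprioception
        noise = np.random.normal(0.0, 0.01, size=observation.joint_positions.shape)
        observation.joint_positions = observation.joint_positions + noise
        return observation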
|
MycroftAI/mycroft-core
|
3d963cee402e232174850f36918313e87313fb13
|
mycroft/util/file_utils.py
|
python
|
_get_cache_entries
|
(directory)
|
return ((stat[ST_MTIME], stat[ST_SIZE], path)
for stat, path in entries if S_ISREG(stat[ST_MODE]))
|
Get information tuple for all regular files in directory.
Args:
directory (str): path to directory to check
Returns:
(tuple) (modification time, size, filepath)
|
Get information tuple for all regular files in directory.
|
[
"Get",
"information",
"tuple",
"for",
"all",
"regular",
"files",
"in",
"directory",
"."
] |
def _get_cache_entries(directory):
"""Get information tuple for all regular files in directory.
Args:
directory (str): path to directory to check
Returns:
(tuple) (modification time, size, filepath)
"""
entries = (os.path.join(directory, fn) for fn in os.listdir(directory))
entries = ((os.stat(path), path) for path in entries)
# leave only regular files, insert modification date
return ((stat[ST_MTIME], stat[ST_SIZE], path)
for stat, path in entries if S_ISREG(stat[ST_MODE]))
|
[
"def",
"_get_cache_entries",
"(",
"directory",
")",
":",
"entries",
"=",
"(",
"os",
".",
"path",
".",
"join",
"(",
"directory",
",",
"fn",
")",
"for",
"fn",
"in",
"os",
".",
"listdir",
"(",
"directory",
")",
")",
"entries",
"=",
"(",
"(",
"os",
".",
"stat",
"(",
"path",
")",
",",
"path",
")",
"for",
"path",
"in",
"entries",
")",
"# leave only regular files, insert modification date",
"return",
"(",
"(",
"stat",
"[",
"ST_MTIME",
"]",
",",
"stat",
"[",
"ST_SIZE",
"]",
",",
"path",
")",
"for",
"stat",
",",
"path",
"in",
"entries",
"if",
"S_ISREG",
"(",
"stat",
"[",
"ST_MODE",
"]",
")",
")"
] |
https://github.com/MycroftAI/mycroft-core/blob/3d963cee402e232174850f36918313e87313fb13/mycroft/util/file_utils.py#L147-L161
|
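Because the tuples lead with the modification time, sorting them yields oldest-first order, which is exactly what a cache-curation pass needs. A sketch built only on the helper above:

import os

def shrink_cache(directory, max_bytes):
    """Delete the oldest files until the directory fits under max_bytes."""
    entries = sorted(_get_cache_entries(directory))   # oldest mtime first
    total = sum(size for _, size, _ in entries)
    for _, size, path in entries:
        if total <= max_bytes:
            break
        os.remove(path)
        total -= size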
|
sassoftware/python-dlpy
|
6082b1eeaab7406cce22715a35a1f4308943d522
|
dlpy/network.py
|
python
|
extract_rpn_layer
|
(layer_table)
|
return layer
|
Extract layer configuration from a Region proposal layer table
Parameters
----------
layer_table : table
Specifies the selection of table containing the information
for the layer.
Returns
-------
:class:`dict`
Options that can be passed to layer definition
|
Extract layer configuration from a Region proposal layer table
|
[
"Extract",
"layer",
"configuration",
"from",
"a",
"Region",
"proposal",
"layer",
"table"
] |
def extract_rpn_layer(layer_table):
'''
Extract layer configuration from a Region proposal layer table
Parameters
----------
layer_table : table
Specifies the selection of table containing the information
for the layer.
Returns
-------
:class:`dict`
Options that can be passed to layer definition
'''
num_keys = ['base_anchor_size', 'max_label_per_image', 'roi_train_sample_num', 'do_RPN_only',
'proposed_roi_num_train', 'proposed_roi_num_score', 'anchor_num_to_sample']
if __dev__:
num_keys += ['preNmsTopNScore', 'preNmsTopNTrain', 'preNmsTopNTrain', 'preNmsTopNScore']
str_key = 'act'
rpn_layer_config = dict()
for key in num_keys:
try:
rpn_layer_config[key] = layer_table['_DLNumVal_'][
layer_table['_DLKey1_'] == 'dlregionproposalopts.' + underscore_to_camelcase(key)].tolist()[0]
except IndexError:
pass
rpn_layer_config[str_key] = layer_table['_DLChrVal_'][
layer_table['_DLKey1_'] == 'dlregionproposalopts.' + underscore_to_camelcase(str_key)].tolist()[0]
num_scale = layer_table[layer_table['_DLChrVal_'] == 'anchorScale'].shape[0]
num_ratio = layer_table[layer_table['_DLChrVal_'] == 'anchorRatio'].shape[0]
rpn_layer_config['anchor_scale'] = []
rpn_layer_config['anchor_ratio'] = []
for i in range(num_scale):
rpn_layer_config['anchor_scale'].append(
layer_table['_DLNumVal_'][layer_table['_DLKey1_'] ==
'dlregionproposalopts.anchorScale.{}'.format(i)].tolist()[0])
for i in range(num_ratio):
rpn_layer_config['anchor_ratio'].append(
layer_table['_DLNumVal_'][layer_table['_DLKey1_'] ==
'dlregionproposalopts.anchorRatio.{}'.format(i)].tolist()[0])
rpn_layer_config['name'] = layer_table['_DLKey0_'].unique()[0]
layer = RegionProposal(**rpn_layer_config)
return layer
|
[
"def",
"extract_rpn_layer",
"(",
"layer_table",
")",
":",
"num_keys",
"=",
"[",
"'base_anchor_size'",
",",
"'max_label_per_image'",
",",
"'roi_train_sample_num'",
",",
"'do_RPN_only'",
",",
"'proposed_roi_num_train'",
",",
"'proposed_roi_num_score'",
",",
"'anchor_num_to_sample'",
"]",
"if",
"__dev__",
":",
"num_keys",
"+=",
"[",
"'preNmsTopNScore'",
",",
"'preNmsTopNTrain'",
",",
"'preNmsTopNTrain'",
",",
"'preNmsTopNScore'",
"]",
"str_key",
"=",
"'act'",
"rpn_layer_config",
"=",
"dict",
"(",
")",
"for",
"key",
"in",
"num_keys",
":",
"try",
":",
"rpn_layer_config",
"[",
"key",
"]",
"=",
"layer_table",
"[",
"'_DLNumVal_'",
"]",
"[",
"layer_table",
"[",
"'_DLKey1_'",
"]",
"==",
"'dlregionproposalopts.'",
"+",
"underscore_to_camelcase",
"(",
"key",
")",
"]",
".",
"tolist",
"(",
")",
"[",
"0",
"]",
"except",
"IndexError",
":",
"pass",
"rpn_layer_config",
"[",
"str_key",
"]",
"=",
"layer_table",
"[",
"'_DLChrVal_'",
"]",
"[",
"layer_table",
"[",
"'_DLKey1_'",
"]",
"==",
"'dlregionproposalopts.'",
"+",
"underscore_to_camelcase",
"(",
"str_key",
")",
"]",
".",
"tolist",
"(",
")",
"[",
"0",
"]",
"num_scale",
"=",
"layer_table",
"[",
"layer_table",
"[",
"'_DLChrVal_'",
"]",
"==",
"'anchorScale'",
"]",
".",
"shape",
"[",
"0",
"]",
"num_ratio",
"=",
"layer_table",
"[",
"layer_table",
"[",
"'_DLChrVal_'",
"]",
"==",
"'anchorRatio'",
"]",
".",
"shape",
"[",
"0",
"]",
"rpn_layer_config",
"[",
"'anchor_scale'",
"]",
"=",
"[",
"]",
"rpn_layer_config",
"[",
"'anchor_ratio'",
"]",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"num_scale",
")",
":",
"rpn_layer_config",
"[",
"'anchor_scale'",
"]",
".",
"append",
"(",
"layer_table",
"[",
"'_DLNumVal_'",
"]",
"[",
"layer_table",
"[",
"'_DLKey1_'",
"]",
"==",
"'dlregionproposalopts.anchorScale.{}'",
".",
"format",
"(",
"i",
")",
"]",
".",
"tolist",
"(",
")",
"[",
"0",
"]",
")",
"for",
"i",
"in",
"range",
"(",
"num_ratio",
")",
":",
"rpn_layer_config",
"[",
"'anchor_ratio'",
"]",
".",
"append",
"(",
"layer_table",
"[",
"'_DLNumVal_'",
"]",
"[",
"layer_table",
"[",
"'_DLKey1_'",
"]",
"==",
"'dlregionproposalopts.anchorRatio.{}'",
".",
"format",
"(",
"i",
")",
"]",
".",
"tolist",
"(",
")",
"[",
"0",
"]",
")",
"rpn_layer_config",
"[",
"'name'",
"]",
"=",
"layer_table",
"[",
"'_DLKey0_'",
"]",
".",
"unique",
"(",
")",
"[",
"0",
"]",
"layer",
"=",
"RegionProposal",
"(",
"*",
"*",
"rpn_layer_config",
")",
"return",
"layer"
] |
https://github.com/sassoftware/python-dlpy/blob/6082b1eeaab7406cce22715a35a1f4308943d522/dlpy/network.py#L2877-L2928
|
|
frappe/erpnext
|
9d36e30ef7043b391b5ed2523b8288bf46c45d18
|
erpnext/manufacturing/report/bom_stock_report/bom_stock_report.py
|
python
|
get_columns
|
()
|
return columns
|
return columns
|
return columns
|
[
"return",
"columns"
] |
def get_columns():
"""return columns"""
columns = [
_("Item") + ":Link/Item:150",
_("Description") + "::300",
_("BOM Qty") + ":Float:160",
_("BOM UoM") + "::160",
_("Required Qty") + ":Float:120",
_("In Stock Qty") + ":Float:120",
_("Enough Parts to Build") + ":Float:200",
]
return columns
|
[
"def",
"get_columns",
"(",
")",
":",
"columns",
"=",
"[",
"_",
"(",
"\"Item\"",
")",
"+",
"\":Link/Item:150\"",
",",
"_",
"(",
"\"Description\"",
")",
"+",
"\"::300\"",
",",
"_",
"(",
"\"BOM Qty\"",
")",
"+",
"\":Float:160\"",
",",
"_",
"(",
"\"BOM UoM\"",
")",
"+",
"\"::160\"",
",",
"_",
"(",
"\"Required Qty\"",
")",
"+",
"\":Float:120\"",
",",
"_",
"(",
"\"In Stock Qty\"",
")",
"+",
"\":Float:120\"",
",",
"_",
"(",
"\"Enough Parts to Build\"",
")",
"+",
"\":Float:200\"",
",",
"]",
"return",
"columns"
] |
https://github.com/frappe/erpnext/blob/9d36e30ef7043b391b5ed2523b8288bf46c45d18/erpnext/manufacturing/report/bom_stock_report/bom_stock_report.py#L18-L30
|
|
CLUEbenchmark/CLUEPretrainedModels
|
b384fd41665a8261f9c689c940cf750b3bc21fce
|
baselines/models/bert/extract_features.py
|
python
|
_truncate_seq_pair
|
(tokens_a, tokens_b, max_length)
|
Truncates a sequence pair in place to the maximum length.
|
Truncates a sequence pair in place to the maximum length.
|
[
"Truncates",
"a",
"sequence",
"pair",
"in",
"place",
"to",
"the",
"maximum",
"length",
"."
] |
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
"""Truncates a sequence pair in place to the maximum length."""
# This is a simple heuristic which will always truncate the longer sequence
# one token at a time. This makes more sense than truncating an equal percent
# of tokens from each, since if one sequence is very short then each token
# that's truncated likely contains more information than a longer sequence.
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
|
[
"def",
"_truncate_seq_pair",
"(",
"tokens_a",
",",
"tokens_b",
",",
"max_length",
")",
":",
"# This is a simple heuristic which will always truncate the longer sequence",
"# one token at a time. This makes more sense than truncating an equal percent",
"# of tokens from each, since if one sequence is very short then each token",
"# that's truncated likely contains more information than a longer sequence.",
"while",
"True",
":",
"total_length",
"=",
"len",
"(",
"tokens_a",
")",
"+",
"len",
"(",
"tokens_b",
")",
"if",
"total_length",
"<=",
"max_length",
":",
"break",
"if",
"len",
"(",
"tokens_a",
")",
">",
"len",
"(",
"tokens_b",
")",
":",
"tokens_a",
".",
"pop",
"(",
")",
"else",
":",
"tokens_b",
".",
"pop",
"(",
")"
] |
https://github.com/CLUEbenchmark/CLUEPretrainedModels/blob/b384fd41665a8261f9c689c940cf750b3bc21fce/baselines/models/bert/extract_features.py#L302-L316
|
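Worked example of the heuristic: the longer sequence loses one trailing token per iteration until the combined length fits.

tokens_a = list('abcdefgh')              # 8 tokens
tokens_b = list('xyz')                   # 3 tokens
_truncate_seq_pair(tokens_a, tokens_b, max_length=8)
print(len(tokens_a), len(tokens_b))      # 5 3 -- only the longer list was trimmed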
||
pgq/skytools-legacy
|
8b7e6c118572a605d28b7a3403c96aeecfd0d272
|
python/skytools/tnetstrings.py
|
python
|
dump
|
(data)
|
[] |
def dump(data):
if type(data) is long or type(data) is int:
out = str(data)
return '%d:%s#' % (len(out), out)
elif type(data) is float:
out = '%f' % data
return '%d:%s^' % (len(out), out)
elif type(data) is str:
return '%d:' % len(data) + data + ','
elif type(data) is dict:
return dump_dict(data)
elif type(data) is list:
return dump_list(data)
elif data == None:
return '0:~'
elif type(data) is bool:
out = repr(data).lower()
return '%d:%s!' % (len(out), out)
else:
assert False, "Can't serialize stuff that's %s." % type(data)
|
[
"def",
"dump",
"(",
"data",
")",
":",
"if",
"type",
"(",
"data",
")",
"is",
"long",
"or",
"type",
"(",
"data",
")",
"is",
"int",
":",
"out",
"=",
"str",
"(",
"data",
")",
"return",
"'%d:%s#'",
"%",
"(",
"len",
"(",
"out",
")",
",",
"out",
")",
"elif",
"type",
"(",
"data",
")",
"is",
"float",
":",
"out",
"=",
"'%f'",
"%",
"data",
"return",
"'%d:%s^'",
"%",
"(",
"len",
"(",
"out",
")",
",",
"out",
")",
"elif",
"type",
"(",
"data",
")",
"is",
"str",
":",
"return",
"'%d:'",
"%",
"len",
"(",
"data",
")",
"+",
"data",
"+",
"','",
"elif",
"type",
"(",
"data",
")",
"is",
"dict",
":",
"return",
"dump_dict",
"(",
"data",
")",
"elif",
"type",
"(",
"data",
")",
"is",
"list",
":",
"return",
"dump_list",
"(",
"data",
")",
"elif",
"data",
"==",
"None",
":",
"return",
"'0:~'",
"elif",
"type",
"(",
"data",
")",
"is",
"bool",
":",
"out",
"=",
"repr",
"(",
"data",
")",
".",
"lower",
"(",
")",
"return",
"'%d:%s!'",
"%",
"(",
"len",
"(",
"out",
")",
",",
"out",
")",
"else",
":",
"assert",
"False",
",",
"\"Can't serialize stuff that's %s.\"",
"%",
"type",
"(",
"data",
")"
] |
https://github.com/pgq/skytools-legacy/blob/8b7e6c118572a605d28b7a3403c96aeecfd0d272/python/skytools/tnetstrings.py#L4-L23
|
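Each value serializes as `<length>:<payload><type char>`, with `#` for integers, `^` for floats, `,` for strings, `~` for null and `!` for booleans. Expected outputs of the function above (the `long` check and comma `except` syntax elsewhere in the module date it to Python 2):

dump(42)         # -> '2:42#'
dump('hello')    # -> '5:hello,'
dump(True)       # -> '4:true!'
dump(None)       # -> '0:~'
dump([1, 'a'])   # delegates to dump_list, defined elsewhere in the module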
||||
triaquae/triaquae
|
bbabf736b3ba56a0c6498e7f04e16c13b8b8f2b9
|
TriAquae/models/Ubuntu_13/paramiko/transport.py
|
python
|
Transport.auth_password
|
(self, username, password, event=None, fallback=True)
|
return None
|
Authenticate to the server using a password. The username and password
are sent over an encrypted link.
If an C{event} is passed in, this method will return immediately, and
the event will be triggered once authentication succeeds or fails. On
success, L{is_authenticated} will return C{True}. On failure, you may
use L{get_exception} to get more detailed error information.
Since 1.1, if no event is passed, this method will block until the
authentication succeeds or fails. On failure, an exception is raised.
Otherwise, the method simply returns.
Since 1.5, if no event is passed and C{fallback} is C{True} (the
default), if the server doesn't support plain password authentication
but does support so-called "keyboard-interactive" mode, an attempt
will be made to authenticate using this interactive mode. If it fails,
the normal exception will be thrown as if the attempt had never been
made. This is useful for some recent Gentoo and Debian distributions,
which turn off plain password authentication in a misguided belief
that interactive authentication is "more secure". (It's not.)
If the server requires multi-step authentication (which is very rare),
this method will return a list of auth types permissible for the next
step. Otherwise, in the normal case, an empty list is returned.
@param username: the username to authenticate as
@type username: str
@param password: the password to authenticate with
@type password: str or unicode
@param event: an event to trigger when the authentication attempt is
complete (whether it was successful or not)
@type event: threading.Event
@param fallback: C{True} if an attempt at an automated "interactive"
password auth should be made if the server doesn't support normal
password auth
@type fallback: bool
@return: list of auth types permissible for the next stage of
authentication (normally empty)
@rtype: list
@raise BadAuthenticationType: if password authentication isn't
allowed by the server for this user (and no event was passed in)
@raise AuthenticationException: if the authentication failed (and no
event was passed in)
@raise SSHException: if there was a network error
|
Authenticate to the server using a password. The username and password
are sent over an encrypted link.
|
[
"Authenticate",
"to",
"the",
"server",
"using",
"a",
"password",
".",
"The",
"username",
"and",
"password",
"are",
"sent",
"over",
"an",
"encrypted",
"link",
"."
] |
def auth_password(self, username, password, event=None, fallback=True):
"""
Authenticate to the server using a password. The username and password
are sent over an encrypted link.
If an C{event} is passed in, this method will return immediately, and
the event will be triggered once authentication succeeds or fails. On
success, L{is_authenticated} will return C{True}. On failure, you may
use L{get_exception} to get more detailed error information.
Since 1.1, if no event is passed, this method will block until the
authentication succeeds or fails. On failure, an exception is raised.
Otherwise, the method simply returns.
Since 1.5, if no event is passed and C{fallback} is C{True} (the
default), if the server doesn't support plain password authentication
but does support so-called "keyboard-interactive" mode, an attempt
will be made to authenticate using this interactive mode. If it fails,
the normal exception will be thrown as if the attempt had never been
made. This is useful for some recent Gentoo and Debian distributions,
which turn off plain password authentication in a misguided belief
that interactive authentication is "more secure". (It's not.)
If the server requires multi-step authentication (which is very rare),
this method will return a list of auth types permissible for the next
step. Otherwise, in the normal case, an empty list is returned.
@param username: the username to authenticate as
@type username: str
@param password: the password to authenticate with
@type password: str or unicode
@param event: an event to trigger when the authentication attempt is
complete (whether it was successful or not)
@type event: threading.Event
@param fallback: C{True} if an attempt at an automated "interactive"
password auth should be made if the server doesn't support normal
password auth
@type fallback: bool
@return: list of auth types permissible for the next stage of
authentication (normally empty)
@rtype: list
@raise BadAuthenticationType: if password authentication isn't
allowed by the server for this user (and no event was passed in)
@raise AuthenticationException: if the authentication failed (and no
event was passed in)
@raise SSHException: if there was a network error
"""
if (not self.active) or (not self.initial_kex_done):
# we should never try to send the password unless we're on a secure link
raise SSHException('No existing session')
if event is None:
my_event = threading.Event()
else:
my_event = event
self.auth_handler = AuthHandler(self)
self.auth_handler.auth_password(username, password, my_event)
if event is not None:
# caller wants to wait for event themselves
return []
try:
return self.auth_handler.wait_for_response(my_event)
except BadAuthenticationType, x:
# if password auth isn't allowed, but keyboard-interactive *is*, try to fudge it
if not fallback or ('keyboard-interactive' not in x.allowed_types):
raise
try:
def handler(title, instructions, fields):
if len(fields) > 1:
raise SSHException('Fallback authentication failed.')
if len(fields) == 0:
# for some reason, at least on os x, a 2nd request will
# be made with zero fields requested. maybe it's just
# to try to fake out automated scripting of the exact
# type we're doing here. *shrug* :)
return []
return [ password ]
return self.auth_interactive(username, handler)
except SSHException, ignored:
# attempt failed; just raise the original exception
raise x
return None
|
[
"def",
"auth_password",
"(",
"self",
",",
"username",
",",
"password",
",",
"event",
"=",
"None",
",",
"fallback",
"=",
"True",
")",
":",
"if",
"(",
"not",
"self",
".",
"active",
")",
"or",
"(",
"not",
"self",
".",
"initial_kex_done",
")",
":",
"# we should never try to send the password unless we're on a secure link",
"raise",
"SSHException",
"(",
"'No existing session'",
")",
"if",
"event",
"is",
"None",
":",
"my_event",
"=",
"threading",
".",
"Event",
"(",
")",
"else",
":",
"my_event",
"=",
"event",
"self",
".",
"auth_handler",
"=",
"AuthHandler",
"(",
"self",
")",
"self",
".",
"auth_handler",
".",
"auth_password",
"(",
"username",
",",
"password",
",",
"my_event",
")",
"if",
"event",
"is",
"not",
"None",
":",
"# caller wants to wait for event themselves",
"return",
"[",
"]",
"try",
":",
"return",
"self",
".",
"auth_handler",
".",
"wait_for_response",
"(",
"my_event",
")",
"except",
"BadAuthenticationType",
",",
"x",
":",
"# if password auth isn't allowed, but keyboard-interactive *is*, try to fudge it",
"if",
"not",
"fallback",
"or",
"(",
"'keyboard-interactive'",
"not",
"in",
"x",
".",
"allowed_types",
")",
":",
"raise",
"try",
":",
"def",
"handler",
"(",
"title",
",",
"instructions",
",",
"fields",
")",
":",
"if",
"len",
"(",
"fields",
")",
">",
"1",
":",
"raise",
"SSHException",
"(",
"'Fallback authentication failed.'",
")",
"if",
"len",
"(",
"fields",
")",
"==",
"0",
":",
"# for some reason, at least on os x, a 2nd request will",
"# be made with zero fields requested. maybe it's just",
"# to try to fake out automated scripting of the exact",
"# type we're doing here. *shrug* :)",
"return",
"[",
"]",
"return",
"[",
"password",
"]",
"return",
"self",
".",
"auth_interactive",
"(",
"username",
",",
"handler",
")",
"except",
"SSHException",
",",
"ignored",
":",
"# attempt failed; just raise the original exception",
"raise",
"x",
"return",
"None"
] |
https://github.com/triaquae/triaquae/blob/bbabf736b3ba56a0c6498e7f04e16c13b8b8f2b9/TriAquae/models/Ubuntu_13/paramiko/transport.py#L1122-L1203
|
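Typical call sequence on a live Transport; this is the Python 2-era paramiko API, as the `except BadAuthenticationType, x` syntax shows. Host and credentials below are placeholders:

import socket

sock = socket.create_connection(('ssh.example.com', 22))
t = Transport(sock)
t.start_client()                       # key exchange first: auth requires a secure link
t.auth_password('alice', 's3cret')     # blocks; raises AuthenticationException on failure
channel = t.open_session()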
|
brython-dev/brython
|
9cba5fb7f43a9b52fff13e89b403e02a1dfaa5f3
|
www/src/Lib/email/utils.py
|
python
|
getaddresses
|
(fieldvalues)
|
return a.addresslist
|
Return a list of (REALNAME, EMAIL) for each fieldvalue.
|
Return a list of (REALNAME, EMAIL) for each fieldvalue.
|
[
"Return",
"a",
"list",
"of",
"(",
"REALNAME",
"EMAIL",
")",
"for",
"each",
"fieldvalue",
"."
] |
def getaddresses(fieldvalues):
"""Return a list of (REALNAME, EMAIL) for each fieldvalue."""
all = COMMASPACE.join(str(v) for v in fieldvalues)
a = _AddressList(all)
return a.addresslist
|
[
"def",
"getaddresses",
"(",
"fieldvalues",
")",
":",
"all",
"=",
"COMMASPACE",
".",
"join",
"(",
"str",
"(",
"v",
")",
"for",
"v",
"in",
"fieldvalues",
")",
"a",
"=",
"_AddressList",
"(",
"all",
")",
"return",
"a",
".",
"addresslist"
] |
https://github.com/brython-dev/brython/blob/9cba5fb7f43a9b52fff13e89b403e02a1dfaa5f3/www/src/Lib/email/utils.py#L110-L114
|
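Worked example on typical To/Cc header values:

fieldvalues = ['Alice <alice@example.com>, bob@example.com',
               '"Carol C." <carol@example.com>']
getaddresses(fieldvalues)
# -> [('Alice', 'alice@example.com'), ('', 'bob@example.com'),
#     ('Carol C.', 'carol@example.com')]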
|
windelbouwman/ppci
|
915c069e0667042c085ec42c78e9e3c9a5295324
|
ppci/lang/c/codegenerator.py
|
python
|
CCodeGenerator.data_layout
|
(self, typ: types.CType)
|
return size, alignment
|
Get size and alignment of the given type.
|
Get size and alignment of the given type.
|
[
"Get",
"size",
"and",
"alignment",
"of",
"the",
"given",
"type",
"."
] |
def data_layout(self, typ: types.CType):
""" Get size and alignment of the given type. """
size = self.sizeof(typ)
alignment = self.context.alignment(typ)
return size, alignment
|
[
"def",
"data_layout",
"(",
"self",
",",
"typ",
":",
"types",
".",
"CType",
")",
":",
"size",
"=",
"self",
".",
"sizeof",
"(",
"typ",
")",
"alignment",
"=",
"self",
".",
"context",
".",
"alignment",
"(",
"typ",
")",
"return",
"size",
",",
"alignment"
] |
https://github.com/windelbouwman/ppci/blob/915c069e0667042c085ec42c78e9e3c9a5295324/ppci/lang/c/codegenerator.py#L1635-L1639
|
|
kevingo/system-design-primer-zh-tw
|
d664bbc3dbcde7e8d399a4a579f28ba7822209c4
|
solutions/system_design/mint/mint_mapreduce.py
|
python
|
SpendingByCategory.extract_year_month
|
(self, timestamp)
|
Return the year and month portions of the timestamp.
|
Return the year and month portions of the timestamp.
|
[
"Return",
"the",
"year",
"and",
"month",
"portions",
"of",
"the",
"timestamp",
"."
] |
def extract_year_month(self, timestamp):
"""Return the year and month portions of the timestamp."""
...
|
[
"def",
"extract_year_month",
"(",
"self",
",",
"timestamp",
")",
":",
"..."
] |
https://github.com/kevingo/system-design-primer-zh-tw/blob/d664bbc3dbcde7e8d399a4a579f28ba7822209c4/solutions/system_design/mint/mint_mapreduce.py#L16-L18
|
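The body is elided (`...`) in the source, as is common in this design-primer repo. A minimal sketch of one possible implementation, assuming the timestamp is epoch seconds (the original may use another representation):

from datetime import datetime, timezone

def extract_year_month(self, timestamp):
    dt = datetime.fromtimestamp(timestamp, tz=timezone.utc)
    return dt.year, dt.month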
||
pypa/setuptools
|
9f37366aab9cd8f6baa23e6a77cfdb8daf97757e
|
pkg_resources/__init__.py
|
python
|
Distribution.has_version
|
(self)
|
return True
|
[] |
def has_version(self):
try:
self.version
except ValueError:
issue_warning("Unbuilt egg for " + repr(self))
return False
return True
|
[
"def",
"has_version",
"(",
"self",
")",
":",
"try",
":",
"self",
".",
"version",
"except",
"ValueError",
":",
"issue_warning",
"(",
"\"Unbuilt egg for \"",
"+",
"repr",
"(",
"self",
")",
")",
"return",
"False",
"return",
"True"
] |
https://github.com/pypa/setuptools/blob/9f37366aab9cd8f6baa23e6a77cfdb8daf97757e/pkg_resources/__init__.py#L2970-L2976
|
|||
wxWidgets/Phoenix
|
b2199e299a6ca6d866aa6f3d0888499136ead9d6
|
wx/lib/agw/ultimatelistctrl.py
|
python
|
UltimateListItem.__init__
|
(self, item=None)
|
Default class constructor.
:param `item`: if not ``None``, another instance of :class:`UltimateListItem`.
|
Default class constructor.
|
[
"Default",
"class",
"constructor",
"."
] |
def __init__(self, item=None):
"""
Default class constructor.
:param `item`: if not ``None``, another instance of :class:`UltimateListItem`.
"""
if not item:
self.Init()
self._attr = None
else:
self._mask = item._mask # Indicates what fields are valid
self._itemId = item._itemId # The zero-based item position
self._col = item._col # Zero-based column, if in report mode
self._state = item._state # The state of the item
self._stateMask = item._stateMask # Which flags of self._state are valid (uses same flags)
self._text = item._text # The label/header text
self._tooltip = item._tooltip # The label/header tooltip text
self._image = item._image[:] # The zero-based indexes into an image list
self._data = item._data # App-defined data
self._pyData = item._pyData # Python-specific data
self._format = item._format # left, right, centre
self._width = item._width # width of column
self._colour = item._colour # item text colour
self._font = item._font # item font
self._checked = item._checked # The checking state for the item (if kind > 0)
self._kind = item._kind # Whether it is a normal, checkbox-like or a radiobutton-like item
self._enabled = item._enabled # Whether the item is enabled or not
self._hypertext = item._hypertext # indicates if the item is hypertext
self._visited = item._visited # visited state for an hypertext item
self._wnd = item._wnd
self._windowenabled = item._windowenabled
self._windowsize = item._windowsize
self._isColumnShown = item._isColumnShown
self._customRenderer = item._customRenderer
self._overFlow = item._overFlow
self._footerChecked = item._footerChecked
self._footerFormat = item._footerFormat
self._footerImage = item._footerImage
self._footerKind = item._footerKind
self._footerText = item._footerText
self._expandWin = item._expandWin
self._attr = None
# copy list item attributes
if item.HasAttributes():
self._attr = item.GetAttributes()[:]
|
[
"def",
"__init__",
"(",
"self",
",",
"item",
"=",
"None",
")",
":",
"if",
"not",
"item",
":",
"self",
".",
"Init",
"(",
")",
"self",
".",
"_attr",
"=",
"None",
"else",
":",
"self",
".",
"_mask",
"=",
"item",
".",
"_mask",
"# Indicates what fields are valid",
"self",
".",
"_itemId",
"=",
"item",
".",
"_itemId",
"# The zero-based item position",
"self",
".",
"_col",
"=",
"item",
".",
"_col",
"# Zero-based column, if in report mode",
"self",
".",
"_state",
"=",
"item",
".",
"_state",
"# The state of the item",
"self",
".",
"_stateMask",
"=",
"item",
".",
"_stateMask",
"# Which flags of self._state are valid (uses same flags)",
"self",
".",
"_text",
"=",
"item",
".",
"_text",
"# The label/header text",
"self",
".",
"_tooltip",
"=",
"item",
".",
"_tooltip",
"# The label/header tooltip text",
"self",
".",
"_image",
"=",
"item",
".",
"_image",
"[",
":",
"]",
"# The zero-based indexes into an image list",
"self",
".",
"_data",
"=",
"item",
".",
"_data",
"# App-defined data",
"self",
".",
"_pyData",
"=",
"item",
".",
"_pyData",
"# Python-specific data",
"self",
".",
"_format",
"=",
"item",
".",
"_format",
"# left, right, centre",
"self",
".",
"_width",
"=",
"item",
".",
"_width",
"# width of column",
"self",
".",
"_colour",
"=",
"item",
".",
"_colour",
"# item text colour",
"self",
".",
"_font",
"=",
"item",
".",
"_font",
"# item font",
"self",
".",
"_checked",
"=",
"item",
".",
"_checked",
"# The checking state for the item (if kind > 0)",
"self",
".",
"_kind",
"=",
"item",
".",
"_kind",
"# Whether it is a normal, checkbox-like or a radiobutton-like item",
"self",
".",
"_enabled",
"=",
"item",
".",
"_enabled",
"# Whether the item is enabled or not",
"self",
".",
"_hypertext",
"=",
"item",
".",
"_hypertext",
"# indicates if the item is hypertext",
"self",
".",
"_visited",
"=",
"item",
".",
"_visited",
"# visited state for an hypertext item",
"self",
".",
"_wnd",
"=",
"item",
".",
"_wnd",
"self",
".",
"_windowenabled",
"=",
"item",
".",
"_windowenabled",
"self",
".",
"_windowsize",
"=",
"item",
".",
"_windowsize",
"self",
".",
"_isColumnShown",
"=",
"item",
".",
"_isColumnShown",
"self",
".",
"_customRenderer",
"=",
"item",
".",
"_customRenderer",
"self",
".",
"_overFlow",
"=",
"item",
".",
"_overFlow",
"self",
".",
"_footerChecked",
"=",
"item",
".",
"_footerChecked",
"self",
".",
"_footerFormat",
"=",
"item",
".",
"_footerFormat",
"self",
".",
"_footerImage",
"=",
"item",
".",
"_footerImage",
"self",
".",
"_footerKind",
"=",
"item",
".",
"_footerKind",
"self",
".",
"_footerText",
"=",
"item",
".",
"_footerText",
"self",
".",
"_expandWin",
"=",
"item",
".",
"_expandWin",
"self",
".",
"_attr",
"=",
"None",
"# copy list item attributes",
"if",
"item",
".",
"HasAttributes",
"(",
")",
":",
"self",
".",
"_attr",
"=",
"item",
".",
"GetAttributes",
"(",
")",
"[",
":",
"]"
] |
https://github.com/wxWidgets/Phoenix/blob/b2199e299a6ca6d866aa6f3d0888499136ead9d6/wx/lib/agw/ultimatelistctrl.py#L1344-L1390
|
||
openstack/magnum
|
fa298eeab19b1d87070d72c7c4fb26cd75b0781e
|
magnum/api/controllers/v1/quota.py
|
python
|
QuotaController.__init__
|
(self)
|
[] |
def __init__(self):
super(QuotaController, self).__init__()
|
[
"def",
"__init__",
"(",
"self",
")",
":",
"super",
"(",
"QuotaController",
",",
"self",
")",
".",
"__init__",
"(",
")"
] |
https://github.com/openstack/magnum/blob/fa298eeab19b1d87070d72c7c4fb26cd75b0781e/magnum/api/controllers/v1/quota.py#L94-L95
|
||||
HypothesisWorks/hypothesis
|
d1bfc4acc86899caa7a40f892322e1a69fbf36f4
|
tooling/src/hypothesistooling/releasemanagement.py
|
python
|
commit_pending_release
|
(project)
|
Create a commit with the new release.
|
Create a commit with the new release.
|
[
"Create",
"a",
"commit",
"with",
"the",
"new",
"release",
"."
] |
def commit_pending_release(project):
"""Create a commit with the new release."""
tools.git("rm", project.RELEASE_FILE)
tools.git("add", "-u", project.BASE_DIR)
tools.git(
"commit",
"-m",
f"Bump {project.PACKAGE_NAME} version to {project.current_version()} "
+ "and update changelog\n\n[skip ci]",
)
|
[
"def",
"commit_pending_release",
"(",
"project",
")",
":",
"tools",
".",
"git",
"(",
"\"rm\"",
",",
"project",
".",
"RELEASE_FILE",
")",
"tools",
".",
"git",
"(",
"\"add\"",
",",
"\"-u\"",
",",
"project",
".",
"BASE_DIR",
")",
"tools",
".",
"git",
"(",
"\"commit\"",
",",
"\"-m\"",
",",
"f\"Bump {project.PACKAGE_NAME} version to {project.current_version()} \"",
"+",
"\"and update changelog\\n\\n[skip ci]\"",
",",
")"
] |
https://github.com/HypothesisWorks/hypothesis/blob/d1bfc4acc86899caa7a40f892322e1a69fbf36f4/tooling/src/hypothesistooling/releasemanagement.py#L169-L179
|
||
dimagi/commcare-hq
|
d67ff1d3b4c51fa050c19e60c3253a79d3452a39
|
corehq/apps/app_manager/models.py
|
python
|
ApplicationBase.get_jar_path
|
(self)
|
return spec
|
[] |
def get_jar_path(self):
spec = {
'nokia/s40': 'Nokia/S40',
'nokia/s60': 'Nokia/S60',
'generic': 'Generic/Default',
'winmo': 'Native/WinMo'
}[self.platform]
if self.platform in ('nokia/s40', 'nokia/s60'):
spec += {
('native',): '-native-input',
('roman',): '-generic',
('custom-keys',): '-custom-keys',
('qwerty',): '-qwerty'
}[(self.text_input,)]
return spec
|
[
"def",
"get_jar_path",
"(",
"self",
")",
":",
"spec",
"=",
"{",
"'nokia/s40'",
":",
"'Nokia/S40'",
",",
"'nokia/s60'",
":",
"'Nokia/S60'",
",",
"'generic'",
":",
"'Generic/Default'",
",",
"'winmo'",
":",
"'Native/WinMo'",
"}",
"[",
"self",
".",
"platform",
"]",
"if",
"self",
".",
"platform",
"in",
"(",
"'nokia/s40'",
",",
"'nokia/s60'",
")",
":",
"spec",
"+=",
"{",
"(",
"'native'",
",",
")",
":",
"'-native-input'",
",",
"(",
"'roman'",
",",
")",
":",
"'-generic'",
",",
"(",
"'custom-keys'",
",",
")",
":",
"'-custom-keys'",
",",
"(",
"'qwerty'",
",",
")",
":",
"'-qwerty'",
"}",
"[",
"(",
"self",
".",
"text_input",
",",
")",
"]",
"return",
"spec"
] |
https://github.com/dimagi/commcare-hq/blob/d67ff1d3b4c51fa050c19e60c3253a79d3452a39/corehq/apps/app_manager/models.py#L4337-L4353
|
|||
djc/couchdb-python
|
459bb1ef24587eef2577ad414e1c070e8b0eaff5
|
couchdb/client.py
|
python
|
Database.save
|
(self, doc, **options)
|
return id, rev
|
Create a new document or update an existing document.
If doc has no _id then the server will allocate a random ID and a new
document will be created. Otherwise the doc's _id will be used to
identify the document to create or update. Trying to update an existing
document with an incorrect _rev will raise a ResourceConflict exception.
Note that it is generally better to avoid saving documents with no _id
and instead generate document IDs on the client side. This is due to
the fact that the underlying HTTP ``POST`` method is not idempotent,
and an automatic retry due to a problem somewhere on the networking
stack may cause multiple documents being created in the database.
To avoid such problems you can generate a UUID on the client side.
Python (since version 2.5) comes with a ``uuid`` module that can be
used for this::
from uuid import uuid4
doc = {'_id': uuid4().hex, 'type': 'person', 'name': 'John Doe'}
db.save(doc)
:param doc: the document to store
:param options: optional args, e.g. batch='ok'
:return: (id, rev) tuple of the save document
:rtype: `tuple`
|
Create a new document or update an existing document.
|
[
"Create",
"a",
"new",
"document",
"or",
"update",
"an",
"existing",
"document",
"."
] |
def save(self, doc, **options):
"""Create a new document or update an existing document.
If doc has no _id then the server will allocate a random ID and a new
document will be created. Otherwise the doc's _id will be used to
identify the document to create or update. Trying to update an existing
document with an incorrect _rev will raise a ResourceConflict exception.
Note that it is generally better to avoid saving documents with no _id
and instead generate document IDs on the client side. This is due to
the fact that the underlying HTTP ``POST`` method is not idempotent,
and an automatic retry due to a problem somewhere on the networking
stack may cause multiple documents being created in the database.
To avoid such problems you can generate a UUID on the client side.
Python (since version 2.5) comes with a ``uuid`` module that can be
used for this::
from uuid import uuid4
doc = {'_id': uuid4().hex, 'type': 'person', 'name': 'John Doe'}
db.save(doc)
:param doc: the document to store
:param options: optional args, e.g. batch='ok'
:return: (id, rev) tuple of the save document
:rtype: `tuple`
"""
if '_id' in doc:
func = _doc_resource(self.resource, doc['_id']).put_json
else:
func = self.resource.post_json
_, _, data = func(body=doc, **options)
id, rev = data['id'], data.get('rev')
doc['_id'] = id
if rev is not None: # Not present for batch='ok'
doc['_rev'] = rev
return id, rev
|
[
"def",
"save",
"(",
"self",
",",
"doc",
",",
"*",
"*",
"options",
")",
":",
"if",
"'_id'",
"in",
"doc",
":",
"func",
"=",
"_doc_resource",
"(",
"self",
".",
"resource",
",",
"doc",
"[",
"'_id'",
"]",
")",
".",
"put_json",
"else",
":",
"func",
"=",
"self",
".",
"resource",
".",
"post_json",
"_",
",",
"_",
",",
"data",
"=",
"func",
"(",
"body",
"=",
"doc",
",",
"*",
"*",
"options",
")",
"id",
",",
"rev",
"=",
"data",
"[",
"'id'",
"]",
",",
"data",
".",
"get",
"(",
"'rev'",
")",
"doc",
"[",
"'_id'",
"]",
"=",
"id",
"if",
"rev",
"is",
"not",
"None",
":",
"# Not present for batch='ok'",
"doc",
"[",
"'_rev'",
"]",
"=",
"rev",
"return",
"id",
",",
"rev"
] |
https://github.com/djc/couchdb-python/blob/459bb1ef24587eef2577ad414e1c070e8b0eaff5/couchdb/client.py#L498-L534
|
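Usage sketch for the create-then-update flow, where `db` stands for a Database instance. Because save() writes the returned revision back into the dict, the conflict token is threaded through automatically:

doc = {'_id': 'person:john', 'type': 'person', 'name': 'John Doe'}
doc_id, rev1 = db.save(doc)        # create
doc['name'] = 'John Q. Doe'
doc_id, rev2 = db.save(doc)        # update, using the _rev set by the first call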
|
tspurway/hustle
|
e62bf1269b446ea6fae23bc5698f845a2f3247c7
|
hustle/__init__.py
|
python
|
Table.base_tag
|
(cls, name, partition=None)
|
return rval
|
return the *DDFS* tag name for a given hustle table name
:type name: string
:param name: the name of the table
:type partition: string
:param partition: the value of the partition
|
return the *DDFS* tag name for a given hustle table name
|
[
"return",
"the",
"*",
"DDFS",
"*",
"tag",
"name",
"for",
"a",
"given",
"hustle",
"table",
"name"
] |
def base_tag(cls, name, partition=None):
"""
return the *DDFS* tag name for a given hustle table name
:type name: string
:param name: the name of the table
:type partition: string
:param partition: the value of the partition
"""
rval = "hustle:" + name
if partition:
rval += ':' + str(partition)
return rval
|
[
"def",
"base_tag",
"(",
"cls",
",",
"name",
",",
"partition",
"=",
"None",
")",
":",
"rval",
"=",
"\"hustle:\"",
"+",
"name",
"if",
"partition",
":",
"rval",
"+=",
"':'",
"+",
"str",
"(",
"partition",
")",
"return",
"rval"
] |
https://github.com/tspurway/hustle/blob/e62bf1269b446ea6fae23bc5698f845a2f3247c7/hustle/__init__.py#L193-L206
|
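Worked examples of the tag scheme (assuming the method is exposed as a classmethod, as the `cls` parameter suggests):

Table.base_tag('impressions')               # -> 'hustle:impressions'
Table.base_tag('impressions', '2014-02-20') # -> 'hustle:impressions:2014-02-20'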
|
VITA-Group/AutoGAN
|
a5eeef0592ba92ec8987080c44bf025184da7b30
|
utils/fid_score.py
|
python
|
calculate_activation_statistics_from_files
|
(
files, sess, batch_size=50, verbose=False
)
|
return mu, sigma
|
Calculation of the statistics used by the FID.
Params:
-- files : list of paths to image files. Images need to have same dimensions for all files.
-- sess : current session
-- batch_size : the images numpy array is split into batches with batch size
batch_size. A reasonable batch size depends on the available hardware.
-- verbose : If set to True and parameter out_step is given, the number of calculated
batches is reported.
Returns:
-- mu : The mean over samples of the activations of the pool_3 layer of
the inception model.
-- sigma : The covariance matrix of the activations of the pool_3 layer of
the inception model.
|
Calculation of the statistics used by the FID.
Params:
-- files : list of paths to image files. Images need to have same dimensions for all files.
-- sess : current session
-- batch_size : the images numpy array is split into batches with batch size
batch_size. A reasonable batch size depends on the available hardware.
-- verbose : If set to True and parameter out_step is given, the number of calculated
batches is reported.
Returns:
-- mu : The mean over samples of the activations of the pool_3 layer of
the inception model.
-- sigma : The covariance matrix of the activations of the pool_3 layer of
the inception model.
|
[
"Calculation",
"of",
"the",
"statistics",
"used",
"by",
"the",
"FID",
".",
"Params",
":",
"--",
"files",
":",
"list",
"of",
"paths",
"to",
"image",
"files",
".",
"Images",
"need",
"to",
"have",
"same",
"dimensions",
"for",
"all",
"files",
".",
"--",
"sess",
":",
"current",
"session",
"--",
"batch_size",
":",
"the",
"images",
"numpy",
"array",
"is",
"split",
"into",
"batches",
"with",
"batch",
"size",
"batch_size",
".",
"A",
"reasonable",
"batch",
"size",
"depends",
"on",
"the",
"available",
"hardware",
".",
"--",
"verbose",
":",
"If",
"set",
"to",
"True",
"and",
"parameter",
"out_step",
"is",
"given",
"the",
"number",
"of",
"calculated",
"batches",
"is",
"reported",
".",
"Returns",
":",
"--",
"mu",
":",
"The",
"mean",
"over",
"samples",
"of",
"the",
"activations",
"of",
"the",
"pool_3",
"layer",
"of",
"the",
"incption",
"model",
".",
"--",
"sigma",
":",
"The",
"covariance",
"matrix",
"of",
"the",
"activations",
"of",
"the",
"pool_3",
"layer",
"of",
"the",
"incption",
"model",
"."
] |
def calculate_activation_statistics_from_files(
files, sess, batch_size=50, verbose=False
):
"""Calculation of the statistics used by the FID.
Params:
-- files : list of paths to image files. Images need to have same dimensions for all files.
-- sess : current session
-- batch_size : the images numpy array is split into batches with batch size
batch_size. A reasonable batch size depends on the available hardware.
-- verbose : If set to True and parameter out_step is given, the number of calculated
batches is reported.
Returns:
-- mu : The mean over samples of the activations of the pool_3 layer of
the inception model.
-- sigma : The covariance matrix of the activations of the pool_3 layer of
the inception model.
"""
act = get_activations_from_files(files, sess, batch_size, verbose)
mu = np.mean(act, axis=0)
sigma = np.cov(act, rowvar=False)
return mu, sigma
|
[
"def",
"calculate_activation_statistics_from_files",
"(",
"files",
",",
"sess",
",",
"batch_size",
"=",
"50",
",",
"verbose",
"=",
"False",
")",
":",
"act",
"=",
"get_activations_from_files",
"(",
"files",
",",
"sess",
",",
"batch_size",
",",
"verbose",
")",
"mu",
"=",
"np",
".",
"mean",
"(",
"act",
",",
"axis",
"=",
"0",
")",
"sigma",
"=",
"np",
".",
"cov",
"(",
"act",
",",
"rowvar",
"=",
"False",
")",
"return",
"mu",
",",
"sigma"
] |
https://github.com/VITA-Group/AutoGAN/blob/a5eeef0592ba92ec8987080c44bf025184da7b30/utils/fid_score.py#L256-L276
|
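The (mu, sigma) pair returned here parameterizes a Gaussian fit to the pool_3 activations; two such fits (real vs. generated images) are then compared with the Fréchet distance, which for Gaussians has the closed form

d^2\bigl((\mu_1,\Sigma_1),(\mu_2,\Sigma_2)\bigr) = \lVert \mu_1 - \mu_2 \rVert_2^2 + \operatorname{Tr}\!\left(\Sigma_1 + \Sigma_2 - 2\,(\Sigma_1 \Sigma_2)^{1/2}\right)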
|
Azure/azure-devops-cli-extension
|
11334cd55806bef0b99c3bee5a438eed71e44037
|
azure-devops/azext_devops/devops_sdk/v5_0/feed/feed_client.py
|
python
|
FeedClient.get_global_permissions
|
(self)
|
return self._deserialize('[GlobalPermission]', self._unwrap_collection(response))
|
GetGlobalPermissions.
[Preview API] Get all service-wide feed creation permissions.
:rtype: [GlobalPermission]
|
GetGlobalPermissions.
[Preview API] Get all service-wide feed creation permissions.
:rtype: [GlobalPermission]
|
[
"GetGlobalPermissions",
".",
"[",
"Preview",
"API",
"]",
"Get",
"all",
"service",
"-",
"wide",
"feed",
"creation",
"permissions",
".",
":",
"rtype",
":",
"[",
"GlobalPermission",
"]"
] |
def get_global_permissions(self):
"""GetGlobalPermissions.
[Preview API] Get all service-wide feed creation permissions.
:rtype: [GlobalPermission]
"""
response = self._send(http_method='GET',
location_id='a74419ef-b477-43df-8758-3cd1cd5f56c6',
version='5.0-preview.1')
return self._deserialize('[GlobalPermission]', self._unwrap_collection(response))
|
[
"def",
"get_global_permissions",
"(",
"self",
")",
":",
"response",
"=",
"self",
".",
"_send",
"(",
"http_method",
"=",
"'GET'",
",",
"location_id",
"=",
"'a74419ef-b477-43df-8758-3cd1cd5f56c6'",
",",
"version",
"=",
"'5.0-preview.1'",
")",
"return",
"self",
".",
"_deserialize",
"(",
"'[GlobalPermission]'",
",",
"self",
".",
"_unwrap_collection",
"(",
"response",
")",
")"
] |
https://github.com/Azure/azure-devops-cli-extension/blob/11334cd55806bef0b99c3bee5a438eed71e44037/azure-devops/azext_devops/devops_sdk/v5_0/feed/feed_client.py#L164-L172
|
|
ZhixiuYe/HSCRF-pytorch
|
86bb64890317b74517c5ad44b47c66635df327d8
|
model/utils.py
|
python
|
calc_threshold_mean
|
(features)
|
return [lower_average, average, upper_average, max_len]
|
calculate the threshold for bucket by mean
|
calculate the threshold for bucket by mean
|
[
"calculate",
"the",
"threshold",
"for",
"bucket",
"by",
"mean"
] |
def calc_threshold_mean(features):
"""
calculate the threshold for bucket by mean
"""
lines_len = list(map(lambda t: len(t) + 1, features))
average = int(sum(lines_len) / len(lines_len))
lower_line = list(filter(lambda t: t < average, lines_len))
upper_line = list(filter(lambda t: t >= average, lines_len))
lower_average = int(sum(lower_line) / len(lower_line))
upper_average = int(sum(upper_line) / len(upper_line))
max_len = max(lines_len)
return [lower_average, average, upper_average, max_len]
|
[
"def",
"calc_threshold_mean",
"(",
"features",
")",
":",
"lines_len",
"=",
"list",
"(",
"map",
"(",
"lambda",
"t",
":",
"len",
"(",
"t",
")",
"+",
"1",
",",
"features",
")",
")",
"average",
"=",
"int",
"(",
"sum",
"(",
"lines_len",
")",
"/",
"len",
"(",
"lines_len",
")",
")",
"lower_line",
"=",
"list",
"(",
"filter",
"(",
"lambda",
"t",
":",
"t",
"<",
"average",
",",
"lines_len",
")",
")",
"upper_line",
"=",
"list",
"(",
"filter",
"(",
"lambda",
"t",
":",
"t",
">=",
"average",
",",
"lines_len",
")",
")",
"lower_average",
"=",
"int",
"(",
"sum",
"(",
"lower_line",
")",
"/",
"len",
"(",
"lower_line",
")",
")",
"upper_average",
"=",
"int",
"(",
"sum",
"(",
"upper_line",
")",
"/",
"len",
"(",
"upper_line",
")",
")",
"max_len",
"=",
"max",
"(",
"lines_len",
")",
"return",
"[",
"lower_average",
",",
"average",
",",
"upper_average",
",",
"max_len",
"]"
] |
https://github.com/ZhixiuYe/HSCRF-pytorch/blob/86bb64890317b74517c5ad44b47c66635df327d8/model/utils.py#L360-L371
|
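Worked example (note that lines_len adds 1 to each feature length):

features = [['a'], ['a', 'b'], ['a', 'b', 'c', 'd', 'e']]
calc_threshold_mean(features)
# lines_len = [2, 3, 6]; average = int(11 / 3) = 3
# lower bucket [2] -> 2, upper bucket [3, 6] -> int(9 / 2) = 4, max -> 6
# returns [2, 3, 4, 6]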
|
wucng/TensorExpand
|
4ea58f64f5c5082b278229b799c9f679536510b7
|
TensorExpand/Object detection/Mask RCNN/CharlesShang_FastMaskRCNN/libs/boxes/gprof2dot.py
|
python
|
GprofParser.parse
|
(self)
|
return profile
|
[] |
def parse(self):
self.parse_cg()
self.fp.close()
profile = Profile()
profile[TIME] = 0.0
cycles = {}
for index in self.cycles:
cycles[index] = Cycle()
for entry in compat_itervalues(self.functions):
# populate the function
function = Function(entry.index, entry.name)
function[TIME] = entry.self
if entry.called is not None:
function.called = entry.called
if entry.called_self is not None:
call = Call(entry.index)
call[CALLS] = entry.called_self
function.called += entry.called_self
# populate the function calls
for child in entry.children:
call = Call(child.index)
assert child.called is not None
call[CALLS] = child.called
if child.index not in self.functions:
# NOTE: functions that were never called but were discovered by gprof's
# static call graph analysis dont have a call graph entry so we need
# to add them here
missing = Function(child.index, child.name)
function[TIME] = 0.0
function.called = 0
profile.add_function(missing)
function.add_call(call)
profile.add_function(function)
if entry.cycle is not None:
try:
cycle = cycles[entry.cycle]
except KeyError:
sys.stderr.write('warning: <cycle %u as a whole> entry missing\n' % entry.cycle)
cycle = Cycle()
cycles[entry.cycle] = cycle
cycle.add_function(function)
profile[TIME] = profile[TIME] + function[TIME]
for cycle in compat_itervalues(cycles):
profile.add_cycle(cycle)
# Compute derived events
profile.validate()
profile.ratio(TIME_RATIO, TIME)
profile.call_ratios(CALLS)
profile.integrate(TOTAL_TIME, TIME)
profile.ratio(TOTAL_TIME_RATIO, TOTAL_TIME)
return profile
|
[
"def",
"parse",
"(",
"self",
")",
":",
"self",
".",
"parse_cg",
"(",
")",
"self",
".",
"fp",
".",
"close",
"(",
")",
"profile",
"=",
"Profile",
"(",
")",
"profile",
"[",
"TIME",
"]",
"=",
"0.0",
"cycles",
"=",
"{",
"}",
"for",
"index",
"in",
"self",
".",
"cycles",
":",
"cycles",
"[",
"index",
"]",
"=",
"Cycle",
"(",
")",
"for",
"entry",
"in",
"compat_itervalues",
"(",
"self",
".",
"functions",
")",
":",
"# populate the function",
"function",
"=",
"Function",
"(",
"entry",
".",
"index",
",",
"entry",
".",
"name",
")",
"function",
"[",
"TIME",
"]",
"=",
"entry",
".",
"self",
"if",
"entry",
".",
"called",
"is",
"not",
"None",
":",
"function",
".",
"called",
"=",
"entry",
".",
"called",
"if",
"entry",
".",
"called_self",
"is",
"not",
"None",
":",
"call",
"=",
"Call",
"(",
"entry",
".",
"index",
")",
"call",
"[",
"CALLS",
"]",
"=",
"entry",
".",
"called_self",
"function",
".",
"called",
"+=",
"entry",
".",
"called_self",
"# populate the function calls",
"for",
"child",
"in",
"entry",
".",
"children",
":",
"call",
"=",
"Call",
"(",
"child",
".",
"index",
")",
"assert",
"child",
".",
"called",
"is",
"not",
"None",
"call",
"[",
"CALLS",
"]",
"=",
"child",
".",
"called",
"if",
"child",
".",
"index",
"not",
"in",
"self",
".",
"functions",
":",
"# NOTE: functions that were never called but were discovered by gprof's ",
"# static call graph analysis dont have a call graph entry so we need",
"# to add them here",
"missing",
"=",
"Function",
"(",
"child",
".",
"index",
",",
"child",
".",
"name",
")",
"function",
"[",
"TIME",
"]",
"=",
"0.0",
"function",
".",
"called",
"=",
"0",
"profile",
".",
"add_function",
"(",
"missing",
")",
"function",
".",
"add_call",
"(",
"call",
")",
"profile",
".",
"add_function",
"(",
"function",
")",
"if",
"entry",
".",
"cycle",
"is",
"not",
"None",
":",
"try",
":",
"cycle",
"=",
"cycles",
"[",
"entry",
".",
"cycle",
"]",
"except",
"KeyError",
":",
"sys",
".",
"stderr",
".",
"write",
"(",
"'warning: <cycle %u as a whole> entry missing\\n'",
"%",
"entry",
".",
"cycle",
")",
"cycle",
"=",
"Cycle",
"(",
")",
"cycles",
"[",
"entry",
".",
"cycle",
"]",
"=",
"cycle",
"cycle",
".",
"add_function",
"(",
"function",
")",
"profile",
"[",
"TIME",
"]",
"=",
"profile",
"[",
"TIME",
"]",
"+",
"function",
"[",
"TIME",
"]",
"for",
"cycle",
"in",
"compat_itervalues",
"(",
"cycles",
")",
":",
"profile",
".",
"add_cycle",
"(",
"cycle",
")",
"# Compute derived events",
"profile",
".",
"validate",
"(",
")",
"profile",
".",
"ratio",
"(",
"TIME_RATIO",
",",
"TIME",
")",
"profile",
".",
"call_ratios",
"(",
"CALLS",
")",
"profile",
".",
"integrate",
"(",
"TOTAL_TIME",
",",
"TIME",
")",
"profile",
".",
"ratio",
"(",
"TOTAL_TIME_RATIO",
",",
"TOTAL_TIME",
")",
"return",
"profile"
] |
https://github.com/wucng/TensorExpand/blob/4ea58f64f5c5082b278229b799c9f679536510b7/TensorExpand/Object detection/Mask RCNN/CharlesShang_FastMaskRCNN/libs/boxes/gprof2dot.py#L1256-L1319
|
|||
perone/Pyevolve
|
589b6a9b92ed1fd9ef00987bf4bfe807c4a7b7e0
|
pyevolve/Util.py
|
python
|
Graph.__iadd__
|
(self, node)
|
return self
|
Add a node using the += operator
|
Add a node using the += operator
|
[
"Add",
"a",
"node",
"using",
"the",
"+",
"=",
"operator"
] |
def __iadd__(self, node):
""" Add a node using the += operator """
self.addNode(node)
return self
|
[
"def",
"__iadd__",
"(",
"self",
",",
"node",
")",
":",
"self",
".",
"addNode",
"(",
"node",
")",
"return",
"self"
] |
https://github.com/perone/Pyevolve/blob/589b6a9b92ed1fd9ef00987bf4bfe807c4a7b7e0/pyevolve/Util.py#L251-L254
|
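Usage sketch (assuming a default-constructible Graph, which the snippet does not show; node_a and node_b are hypothetical node objects):

g = Graph()
g += node_a      # equivalent to g.addNode(node_a)
g += node_b      # returning self keeps the rebinding g = g.__iadd__(node_b) well-formed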
|
nanoporetech/medaka
|
2b83074fe3b6a6ec971614bfc6804f543fe1e5f0
|
medaka/datastore.py
|
python
|
ModelStoreTF.unpack
|
(self)
|
return self
|
Unpack model files from archive.
|
Unpack model files from archive.
|
[
"Unpack",
"model",
"files",
"from",
"archive",
"."
] |
def unpack(self):
"""Unpack model files from archive."""
if self.tmpdir is None:
# tmpdir is removed by .cleanup()
self.tmpdir = tempfile.TemporaryDirectory()
self._exitstack = contextlib.ExitStack()
self._exitstack.enter_context(self.tmpdir)
with tarfile.open(self.filepath) as tar:
tar.extractall(path=self.tmpdir.name)
meta_file = os.path.join(
self.tmpdir.name, self.top_level_dir, 'meta.pkl')
with open(meta_file, 'rb') as fh:
self.meta = pickle.load(fh)
return self
|
[
"def",
"unpack",
"(",
"self",
")",
":",
"if",
"self",
".",
"tmpdir",
"is",
"None",
":",
"# tmpdir is removed by .cleanup()",
"self",
".",
"tmpdir",
"=",
"tempfile",
".",
"TemporaryDirectory",
"(",
")",
"self",
".",
"_exitstack",
"=",
"contextlib",
".",
"ExitStack",
"(",
")",
"self",
".",
"_exitstack",
".",
"enter_context",
"(",
"self",
".",
"tmpdir",
")",
"with",
"tarfile",
".",
"open",
"(",
"self",
".",
"filepath",
")",
"as",
"tar",
":",
"tar",
".",
"extractall",
"(",
"path",
"=",
"self",
".",
"tmpdir",
".",
"name",
")",
"meta_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"tmpdir",
".",
"name",
",",
"self",
".",
"top_level_dir",
",",
"'meta.pkl'",
")",
"with",
"open",
"(",
"meta_file",
",",
"'rb'",
")",
"as",
"fh",
":",
"self",
".",
"meta",
"=",
"pickle",
".",
"load",
"(",
"fh",
")",
"return",
"self"
] |
https://github.com/nanoporetech/medaka/blob/2b83074fe3b6a6ec971614bfc6804f543fe1e5f0/medaka/datastore.py#L111-L124
|
|
oilshell/oil
|
94388e7d44a9ad879b12615f6203b38596b5a2d3
|
Python-2.7.13/Lib/plat-mac/ic.py
|
python
|
_code_default
|
(data, key)
|
return chr(len(data)) + data
|
[] |
def _code_default(data, key):
return chr(len(data)) + data
|
[
"def",
"_code_default",
"(",
"data",
",",
"key",
")",
":",
"return",
"chr",
"(",
"len",
"(",
"data",
")",
")",
"+",
"data"
] |
https://github.com/oilshell/oil/blob/94388e7d44a9ad879b12615f6203b38596b5a2d3/Python-2.7.13/Lib/plat-mac/ic.py#L84-L85
|
|||
dustin/twitty-twister
|
8524750ee73adb57bbe14ef0cfd8aa08e1e59fb3
|
twittytwister/twitter.py
|
python
|
Twitter.update_profile_image
|
(self, filename, image)
|
return self.__postMultipart('/account/update_profile_image.xml',
files=(('image', filename, image),))
|
Update the profile image of an authenticated user.
The image parameter must be raw data.
Returns no useful data.
|
Update the profile image of an authenticated user.
The image parameter must be raw data.
|
[
"Update",
"the",
"profile",
"image",
"of",
"an",
"authenticated",
"user",
".",
"The",
"image",
"parameter",
"must",
"be",
"raw",
"data",
"."
] |
def update_profile_image(self, filename, image):
"""Update the profile image of an authenticated user.
The image parameter must be raw data.
Returns no useful data."""
return self.__postMultipart('/account/update_profile_image.xml',
files=(('image', filename, image),))
|
[
"def",
"update_profile_image",
"(",
"self",
",",
"filename",
",",
"image",
")",
":",
"return",
"self",
".",
"__postMultipart",
"(",
"'/account/update_profile_image.xml'",
",",
"files",
"=",
"(",
"(",
"'image'",
",",
"filename",
",",
"image",
")",
",",
")",
")"
] |
https://github.com/dustin/twitty-twister/blob/8524750ee73adb57bbe14ef0cfd8aa08e1e59fb3/twittytwister/twitter.py#L527-L534
|
|
demisto/content
|
5c664a65b992ac8ca90ac3f11b1b2cdf11ee9b07
|
Packs/ProofpointThreatResponse/Integrations/ProofpointThreatResponse/ProofpointThreatResponse.py
|
python
|
get_emails_context
|
(event)
|
return emails_context
|
Returns the context of the emails in the event
Args:
event (dict): The event to parse the emails from
Returns:
list. The parsed emails list from the event
|
Returns the context of the emails in the event
|
[
"Returns",
"the",
"context",
"of",
"the",
"emails",
"in",
"the",
"event"
] |
def get_emails_context(event):
"""Returns the context of the emails in the event
Args:
event (dict): The event to parse the emails from
Returns:
list. The parsed emails list from the event
"""
emails_context = []
for email in event.get('emails', []):
emails_context.append(
assign_params(**{
'sender': email.get('sender', {}).get('email'),
'recipient': email.get('recipient', {}).get('email'),
'subject': email.get('subject'),
'message_id': email.get('messageId'),
'message_delivery_time': email.get('messageDeliveryTime', {}).get('millis'),
'body': email.get('body'),
'body_type': email.get('bodyType'),
'headers': email.get('headers'),
'urls': email.get('urls'),
'sender_vap': email.get('sender', {}).get('vap'),
'recipient_vap': email.get('recipient', {}).get('vap'),
'attachments': email.get('attachments'),
}))
return emails_context
|
[
"def",
"get_emails_context",
"(",
"event",
")",
":",
"emails_context",
"=",
"[",
"]",
"for",
"email",
"in",
"event",
".",
"get",
"(",
"'emails'",
",",
"[",
"]",
")",
":",
"emails_context",
".",
"append",
"(",
"assign_params",
"(",
"*",
"*",
"{",
"'sender'",
":",
"email",
".",
"get",
"(",
"'sender'",
",",
"{",
"}",
")",
".",
"get",
"(",
"'email'",
")",
",",
"'recipient'",
":",
"email",
".",
"get",
"(",
"'recipient'",
",",
"{",
"}",
")",
".",
"get",
"(",
"'email'",
")",
",",
"'subject'",
":",
"email",
".",
"get",
"(",
"'subject'",
")",
",",
"'message_id'",
":",
"email",
".",
"get",
"(",
"'messageId'",
")",
",",
"'message_delivery_time'",
":",
"email",
".",
"get",
"(",
"'messageDeliveryTime'",
",",
"{",
"}",
")",
".",
"get",
"(",
"'millis'",
")",
",",
"'body'",
":",
"email",
".",
"get",
"(",
"'body'",
")",
",",
"'body_type'",
":",
"email",
".",
"get",
"(",
"'bodyType'",
")",
",",
"'headers'",
":",
"email",
".",
"get",
"(",
"'headers'",
")",
",",
"'urls'",
":",
"email",
".",
"get",
"(",
"'urls'",
")",
",",
"'sender_vap'",
":",
"email",
".",
"get",
"(",
"'sender'",
",",
"{",
"}",
")",
".",
"get",
"(",
"'vap'",
")",
",",
"'recipient_vap'",
":",
"email",
".",
"get",
"(",
"'recipient'",
",",
"{",
"}",
")",
".",
"get",
"(",
"'vap'",
")",
",",
"'attachments'",
":",
"email",
".",
"get",
"(",
"'attachments'",
")",
",",
"}",
")",
")",
"return",
"emails_context"
] |
https://github.com/demisto/content/blob/5c664a65b992ac8ca90ac3f11b1b2cdf11ee9b07/Packs/ProofpointThreatResponse/Integrations/ProofpointThreatResponse/ProofpointThreatResponse.py#L229-L256
|
|
trakt/Plex-Trakt-Scrobbler
|
aeb0bfbe62fad4b06c164f1b95581da7f35dce0b
|
Trakttv.bundle/Contents/Libraries/Linux/armv7_hf/marvell-pj4/ucs4/cryptography/x509/base.py
|
python
|
CertificateBuilder.serial_number
|
(self, number)
|
return CertificateBuilder(
self._issuer_name, self._subject_name,
self._public_key, number, self._not_valid_before,
self._not_valid_after, self._extensions
)
|
Sets the certificate serial number.
|
Sets the certificate serial number.
|
[
"Sets",
"the",
"certificate",
"serial",
"number",
"."
] |
def serial_number(self, number):
"""
Sets the certificate serial number.
"""
if not isinstance(number, six.integer_types):
raise TypeError('Serial number must be of integral type.')
if self._serial_number is not None:
raise ValueError('The serial number may only be set once.')
if number <= 0:
raise ValueError('The serial number should be positive.')
# ASN.1 integers are always signed, so most significant bit must be
# zero.
if utils.bit_length(number) >= 160: # As defined in RFC 5280
raise ValueError('The serial number should not be more than 159 '
'bits.')
return CertificateBuilder(
self._issuer_name, self._subject_name,
self._public_key, number, self._not_valid_before,
self._not_valid_after, self._extensions
)
|
[
"def",
"serial_number",
"(",
"self",
",",
"number",
")",
":",
"if",
"not",
"isinstance",
"(",
"number",
",",
"six",
".",
"integer_types",
")",
":",
"raise",
"TypeError",
"(",
"'Serial number must be of integral type.'",
")",
"if",
"self",
".",
"_serial_number",
"is",
"not",
"None",
":",
"raise",
"ValueError",
"(",
"'The serial number may only be set once.'",
")",
"if",
"number",
"<=",
"0",
":",
"raise",
"ValueError",
"(",
"'The serial number should be positive.'",
")",
"# ASN.1 integers are always signed, so most significant bit must be",
"# zero.",
"if",
"utils",
".",
"bit_length",
"(",
"number",
")",
">=",
"160",
":",
"# As defined in RFC 5280",
"raise",
"ValueError",
"(",
"'The serial number should not be more than 159 '",
"'bits.'",
")",
"return",
"CertificateBuilder",
"(",
"self",
".",
"_issuer_name",
",",
"self",
".",
"_subject_name",
",",
"self",
".",
"_public_key",
",",
"number",
",",
"self",
".",
"_not_valid_before",
",",
"self",
".",
"_not_valid_after",
",",
"self",
".",
"_extensions",
")"
] |
https://github.com/trakt/Plex-Trakt-Scrobbler/blob/aeb0bfbe62fad4b06c164f1b95581da7f35dce0b/Trakttv.bundle/Contents/Libraries/Linux/armv7_hf/marvell-pj4/ucs4/cryptography/x509/base.py#L434-L454
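The 159-bit cap follows from RFC 5280: serial numbers are at most 20 octets, and ASN.1 integers are signed, so a non-negative value must leave the top bit zero. A minimal sketch with pyca/cryptography, assuming a reasonably recent version where x509.random_serial_number() is available:

from cryptography import x509

serial = x509.random_serial_number()   # 20 random bytes, shifted into 159 bits
assert serial.bit_length() < 160       # sign bit stays zero per RFC 5280
builder = x509.CertificateBuilder().serial_number(serial)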
|
|
staticafi/symbiotic
|
792b3b8112cb837a58d878129e8bc7a919d1a2f7
|
lib/symbioticpy/symbiotic/benchexec/tools/template.py
|
python
|
BaseTool.working_directory
|
(self, executable)
|
return os.curdir
|
OPTIONAL, this method is only necessary for situations
when the tool needs a separate working directory.
@param executable: the path to the executable of the tool (typically the result of executable())
@return a string pointing to a directory
|
OPTIONAL, this method is only necessary for situations
when the tool needs a separate working directory.
|
[
"OPTIONAL",
"this",
"method",
"is",
"only",
"necessary",
"for",
"situations",
"when",
"the",
"tool",
"needs",
"a",
"separate",
"working",
"directory",
"."
] |
def working_directory(self, executable):
"""
OPTIONAL, this method is only necessary for situations
when the tool needs a separate working directory.
@param executable: the path to the executable of the tool (typically the result of executable())
@return a string pointing to a directory
"""
return os.curdir
|
[
"def",
"working_directory",
"(",
"self",
",",
"executable",
")",
":",
"return",
"os",
".",
"curdir"
] |
https://github.com/staticafi/symbiotic/blob/792b3b8112cb837a58d878129e8bc7a919d1a2f7/lib/symbioticpy/symbiotic/benchexec/tools/template.py#L171-L178
|
|
mrkipling/maraschino
|
c6be9286937783ae01df2d6d8cebfc8b2734a7d7
|
lib/rtorrent/torrent.py
|
python
|
Torrent.stop
|
(self)
|
return(self.active)
|
Stop the torrent
|
Stop the torrent
|
[
"Stop",
"the",
"torrent"
] |
def stop(self):
""""Stop the torrent"""
m = rtorrent.rpc.Multicall(self)
self.multicall_add(m, "d.try_stop")
self.multicall_add(m, "d.is_active")
self.active = m.call()[-1]
return(self.active)
|
[
"def",
"stop",
"(",
"self",
")",
":",
"m",
"=",
"rtorrent",
".",
"rpc",
".",
"Multicall",
"(",
"self",
")",
"self",
".",
"multicall_add",
"(",
"m",
",",
"\"d.try_stop\"",
")",
"self",
".",
"multicall_add",
"(",
"m",
",",
"\"d.is_active\"",
")",
"self",
".",
"active",
"=",
"m",
".",
"call",
"(",
")",
"[",
"-",
"1",
"]",
"return",
"(",
"self",
".",
"active",
")"
] |
https://github.com/mrkipling/maraschino/blob/c6be9286937783ae01df2d6d8cebfc8b2734a7d7/lib/rtorrent/torrent.py#L195-L202
|
|
edfungus/Crouton
|
ada98b3930192938a48909072b45cb84b945f875
|
clients/esp8266_clients/venv/lib/python2.7/site-packages/pip/_vendor/requests/utils.py
|
python
|
parse_header_links
|
(value)
|
return links
|
Return a list of parsed link headers.
i.e. Link: <http:/.../front.jpeg>; rel=front; type="image/jpeg",<http://.../back.jpeg>; rel=back;type="image/jpeg"
|
Return a list of parsed link headers.
|
[
"Return",
"a",
"dict",
"of",
"parsed",
"link",
"headers",
"proxies",
"."
] |
def parse_header_links(value):
"""Return a dict of parsed link headers proxies.
i.e. Link: <http:/.../front.jpeg>; rel=front; type="image/jpeg",<http://.../back.jpeg>; rel=back;type="image/jpeg"
"""
links = []
replace_chars = " '\""
for val in re.split(", *<", value):
try:
url, params = val.split(";", 1)
except ValueError:
url, params = val, ''
link = {}
link["url"] = url.strip("<> '\"")
for param in params.split(";"):
try:
key, value = param.split("=")
except ValueError:
break
link[key.strip(replace_chars)] = value.strip(replace_chars)
links.append(link)
return links
|
[
"def",
"parse_header_links",
"(",
"value",
")",
":",
"links",
"=",
"[",
"]",
"replace_chars",
"=",
"\" '\\\"\"",
"for",
"val",
"in",
"re",
".",
"split",
"(",
"\", *<\"",
",",
"value",
")",
":",
"try",
":",
"url",
",",
"params",
"=",
"val",
".",
"split",
"(",
"\";\"",
",",
"1",
")",
"except",
"ValueError",
":",
"url",
",",
"params",
"=",
"val",
",",
"''",
"link",
"=",
"{",
"}",
"link",
"[",
"\"url\"",
"]",
"=",
"url",
".",
"strip",
"(",
"\"<> '\\\"\"",
")",
"for",
"param",
"in",
"params",
".",
"split",
"(",
"\";\"",
")",
":",
"try",
":",
"key",
",",
"value",
"=",
"param",
".",
"split",
"(",
"\"=\"",
")",
"except",
"ValueError",
":",
"break",
"link",
"[",
"key",
".",
"strip",
"(",
"replace_chars",
")",
"]",
"=",
"value",
".",
"strip",
"(",
"replace_chars",
")",
"links",
".",
"append",
"(",
"link",
")",
"return",
"links"
] |
https://github.com/edfungus/Crouton/blob/ada98b3930192938a48909072b45cb84b945f875/clients/esp8266_clients/venv/lib/python2.7/site-packages/pip/_vendor/requests/utils.py#L580-L611
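A usage sketch; parse_header_links is also importable from requests.utils in a regular requests install (this copy is pip's vendored one), which is worth verifying against your version:

from requests.utils import parse_header_links

header = ('<https://api.example.com/items?page=2>; rel="next", '
          '<https://api.example.com/items?page=5>; rel="last"')
for link in parse_header_links(header):
    print(link['rel'], link['url'])
# next https://api.example.com/items?page=2
# last https://api.example.com/items?page=5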
|
|
DataDog/integrations-core
|
934674b29d94b70ccc008f76ea172d0cdae05e1e
|
couch/datadog_checks/couch/couch.py
|
python
|
CouchDb.get_server
|
(self)
|
return server
|
[] |
def get_server(self):
server = self.instance.get('server')
if server is None:
raise ConfigurationError("A server must be specified")
return server
|
[
"def",
"get_server",
"(",
"self",
")",
":",
"server",
"=",
"self",
".",
"instance",
".",
"get",
"(",
"'server'",
")",
"if",
"server",
"is",
"None",
":",
"raise",
"ConfigurationError",
"(",
"\"A server must be specified\"",
")",
"return",
"server"
] |
https://github.com/DataDog/integrations-core/blob/934674b29d94b70ccc008f76ea172d0cdae05e1e/couch/datadog_checks/couch/couch.py#L90-L94
|
|||
packetloop/packetpig
|
6e101090224df219123ff5f6ab4c37524637571f
|
lib/scripts/impacket/impacket/dot11.py
|
python
|
Dot11WEP.set_keyid
|
(self, value)
|
Set the 'WEP KEY ID' field
|
Set the 'WEP KEY ID' field
|
[
"Set",
"the",
"\\",
"WEP",
"KEY",
"ID",
"\\",
"field"
] |
def set_keyid(self, value):
'Set the \'WEP KEY ID\' field'
# clear the bits
mask = (~0xC0) & 0xFF
masked = self.header.get_byte(3) & mask
# set the bits
nb = masked | ((value & 0x03) << 6)
self.header.set_byte(3, nb)
|
[
"def",
"set_keyid",
"(",
"self",
",",
"value",
")",
":",
"# clear the bits",
"mask",
"=",
"(",
"~",
"0xC0",
")",
"&",
"0xFF",
"masked",
"=",
"self",
".",
"header",
".",
"get_byte",
"(",
"3",
")",
"&",
"mask",
"# set the bits",
"nb",
"=",
"masked",
"|",
"(",
"(",
"value",
"&",
"0x03",
")",
"<<",
"6",
")",
"self",
".",
"header",
".",
"set_byte",
"(",
"3",
",",
"nb",
")"
] |
https://github.com/packetloop/packetpig/blob/6e101090224df219123ff5f6ab4c37524637571f/lib/scripts/impacket/impacket/dot11.py#L1057-L1064
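The bit arithmetic: the WEP key ID occupies bits 6-7 of the fourth header byte, so (~0xC0) & 0xFF (i.e. 0x3F) clears those two bits before (value & 0x03) << 6 writes the new ID. A standalone sketch of just the arithmetic:

def set_keyid_bits(byte3, value):
    mask = (~0xC0) & 0xFF                          # 0x3F: clear bits 6-7
    return (byte3 & mask) | ((value & 0x03) << 6)  # write new key id

assert set_keyid_bits(0b11000001, 0) == 0b00000001   # key id cleared
assert set_keyid_bits(0b00000001, 3) == 0b11000001   # key id 3 written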
|
||
benhoyt/pygit
|
aa8d8bb62ae273ae2f4f167e36f24f40a11634b9
|
pygit.py
|
python
|
encode_pack_object
|
(obj)
|
return bytes(header) + zlib.compress(data)
|
Encode a single object for a pack file and return bytes (variable-
length header followed by compressed data bytes).
|
Encode a single object for a pack file and return bytes (variable-
length header followed by compressed data bytes).
|
[
"Encode",
"a",
"single",
"object",
"for",
"a",
"pack",
"file",
"and",
"return",
"bytes",
"(",
"variable",
"-",
"length",
"header",
"followed",
"by",
"compressed",
"data",
"bytes",
")",
"."
] |
def encode_pack_object(obj):
"""Encode a single object for a pack file and return bytes (variable-
length header followed by compressed data bytes).
"""
obj_type, data = read_object(obj)
type_num = ObjectType[obj_type].value
size = len(data)
byte = (type_num << 4) | (size & 0x0f)
size >>= 4
header = []
while size:
header.append(byte | 0x80)
byte = size & 0x7f
size >>= 7
header.append(byte)
return bytes(header) + zlib.compress(data)
|
[
"def",
"encode_pack_object",
"(",
"obj",
")",
":",
"obj_type",
",",
"data",
"=",
"read_object",
"(",
"obj",
")",
"type_num",
"=",
"ObjectType",
"[",
"obj_type",
"]",
".",
"value",
"size",
"=",
"len",
"(",
"data",
")",
"byte",
"=",
"(",
"type_num",
"<<",
"4",
")",
"|",
"(",
"size",
"&",
"0x0f",
")",
"size",
">>=",
"4",
"header",
"=",
"[",
"]",
"while",
"size",
":",
"header",
".",
"append",
"(",
"byte",
"|",
"0x80",
")",
"byte",
"=",
"size",
"&",
"0x7f",
"size",
">>=",
"7",
"header",
".",
"append",
"(",
"byte",
")",
"return",
"bytes",
"(",
"header",
")",
"+",
"zlib",
".",
"compress",
"(",
"data",
")"
] |
https://github.com/benhoyt/pygit/blob/aa8d8bb62ae273ae2f4f167e36f24f40a11634b9/pygit.py#L441-L456
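The variable-length header packs the 3-bit object type and the low nibble of the size into the first byte, then emits the remaining size bits 7 at a time, setting the continuation bit (0x80) on every byte except the last. A standalone sketch of just the header encoding:

def pack_object_header(type_num, size):
    byte = (type_num << 4) | (size & 0x0f)
    size >>= 4
    header = []
    while size:
        header.append(byte | 0x80)   # continuation bit: more bytes follow
        byte = size & 0x7f
        size >>= 7
    header.append(byte)
    return bytes(header)

# a commit (type 1) of 300 bytes: first byte 0x9c (continuation bit + type 1
# + low nibble 0xc), second byte 0x12 (the remaining bits of 300)
print(pack_object_header(1, 300).hex())   # '9c12'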
|
|
freewym/espresso
|
6671c507350295269e38add57dbe601dcb8e6ecf
|
espresso/tools/specaug_interpolate.py
|
python
|
freq_mask
|
(spec, F=30, num_masks=1, pad_value=0.0)
|
return cloned
|
Frequency masking
Args:
spec (torch.Tensor): input tensor of shape `(dim, T)`
F (int): maximum width of each mask
num_masks (int): number of masks
pad_value (float): value for padding
Returns:
freq masked tensor (torch.Tensor): output tensor of shape `(dim, T)`
|
Frequency masking
|
[
"Frequency",
"masking"
] |
def freq_mask(spec, F=30, num_masks=1, pad_value=0.0):
"""Frequency masking
Args:
spec (torch.Tensor): input tensor of shape `(dim, T)`
F (int): maximum width of each mask
num_masks (int): number of masks
pad_value (float): value for padding
Returns:
freq masked tensor (torch.Tensor): output tensor of shape `(dim, T)`
"""
cloned = spec.clone()
num_mel_channels = cloned.size(0)
for i in range(num_masks):
f = np.random.randint(0, F + 1)
f_zero = np.random.randint(0, num_mel_channels - f + 1)
if f == 0:
return cloned
cloned[f_zero : f_zero + f] = pad_value
return cloned
|
[
"def",
"freq_mask",
"(",
"spec",
",",
"F",
"=",
"30",
",",
"num_masks",
"=",
"1",
",",
"pad_value",
"=",
"0.0",
")",
":",
"cloned",
"=",
"spec",
".",
"clone",
"(",
")",
"num_mel_channels",
"=",
"cloned",
".",
"size",
"(",
"0",
")",
"for",
"i",
"in",
"range",
"(",
"num_masks",
")",
":",
"f",
"=",
"np",
".",
"random",
".",
"randint",
"(",
"0",
",",
"F",
"+",
"1",
")",
"f_zero",
"=",
"np",
".",
"random",
".",
"randint",
"(",
"0",
",",
"num_mel_channels",
"-",
"f",
"+",
"1",
")",
"if",
"f",
"==",
"0",
":",
"return",
"cloned",
"cloned",
"[",
"f_zero",
":",
"f_zero",
"+",
"f",
"]",
"=",
"pad_value",
"return",
"cloned"
] |
https://github.com/freewym/espresso/blob/6671c507350295269e38add57dbe601dcb8e6ecf/espresso/tools/specaug_interpolate.py#L97-L119
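A usage sketch with SpecAugment-style parameters, calling freq_mask as defined above. Note the early return: a sampled mask width of 0 returns immediately instead of trying the remaining masks.

import numpy as np
import torch

spec = torch.randn(80, 100)   # (dim, T) log-mel spectrogram
masked = freq_mask(spec, F=27, num_masks=2, pad_value=0.0)
assert masked.shape == spec.shape
assert masked.data_ptr() != spec.data_ptr()   # clone(): input is untouched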
|
|
pyscf/pyscf
|
0adfb464333f5ceee07b664f291d4084801bae64
|
pyscf/pbc/dft/numint.py
|
python
|
KNumInt.nr_vxc
|
(self, cell, grids, xc_code, dms, spin=0, relativity=0, hermi=0,
kpts=None, kpts_band=None, max_memory=2000, verbose=None)
|
Evaluate RKS/UKS XC functional and potential matrix.
See :func:`nr_rks` and :func:`nr_uks` for more details.
|
Evaluate RKS/UKS XC functional and potential matrix.
See :func:`nr_rks` and :func:`nr_uks` for more details.
|
[
"Evaluate",
"RKS",
"/",
"UKS",
"XC",
"functional",
"and",
"potential",
"matrix",
".",
"See",
":",
"func",
":",
"nr_rks",
"and",
":",
"func",
":",
"nr_uks",
"for",
"more",
"details",
"."
] |
def nr_vxc(self, cell, grids, xc_code, dms, spin=0, relativity=0, hermi=0,
kpts=None, kpts_band=None, max_memory=2000, verbose=None):
'''Evaluate RKS/UKS XC functional and potential matrix.
See :func:`nr_rks` and :func:`nr_uks` for more details.
'''
if spin == 0:
return self.nr_rks(cell, grids, xc_code, dms, hermi,
kpts, kpts_band, max_memory, verbose)
else:
return self.nr_uks(cell, grids, xc_code, dms, hermi,
kpts, kpts_band, max_memory, verbose)
|
[
"def",
"nr_vxc",
"(",
"self",
",",
"cell",
",",
"grids",
",",
"xc_code",
",",
"dms",
",",
"spin",
"=",
"0",
",",
"relativity",
"=",
"0",
",",
"hermi",
"=",
"0",
",",
"kpts",
"=",
"None",
",",
"kpts_band",
"=",
"None",
",",
"max_memory",
"=",
"2000",
",",
"verbose",
"=",
"None",
")",
":",
"if",
"spin",
"==",
"0",
":",
"return",
"self",
".",
"nr_rks",
"(",
"cell",
",",
"grids",
",",
"xc_code",
",",
"dms",
",",
"hermi",
",",
"kpts",
",",
"kpts_band",
",",
"max_memory",
",",
"verbose",
")",
"else",
":",
"return",
"self",
".",
"nr_uks",
"(",
"cell",
",",
"grids",
",",
"xc_code",
",",
"dms",
",",
"hermi",
",",
"kpts",
",",
"kpts_band",
",",
"max_memory",
",",
"verbose",
")"
] |
https://github.com/pyscf/pyscf/blob/0adfb464333f5ceee07b664f291d4084801bae64/pyscf/pbc/dft/numint.py#L1110-L1120
|
||
BasioMeusPuga/Lector
|
1b1d87739a8c14e0e22009435350b155fc3e3b77
|
lector/parsers/txt.py
|
python
|
ParseTXT.generate_content
|
(self)
|
return toc, content, False
|
Generate content of the book.
|
Generate content of the book.
|
[
"Generate",
"content",
"of",
"the",
"book",
"."
] |
def generate_content(self):
"""Generate content of the book."""
with open(self.filename, 'rt') as txt:
text = txt.read()
content = [textile.textile(text)]
toc = [(1, 'Text', 1)]
return toc, content, False
|
[
"def",
"generate_content",
"(",
"self",
")",
":",
"with",
"open",
"(",
"self",
".",
"filename",
",",
"'rt'",
")",
"as",
"txt",
":",
"text",
"=",
"txt",
".",
"read",
"(",
")",
"content",
"=",
"[",
"textile",
".",
"textile",
"(",
"text",
")",
"]",
"toc",
"=",
"[",
"(",
"1",
",",
"'Text'",
",",
"1",
")",
"]",
"return",
"toc",
",",
"content",
",",
"False"
] |
https://github.com/BasioMeusPuga/Lector/blob/1b1d87739a8c14e0e22009435350b155fc3e3b77/lector/parsers/txt.py#L47-L55
|
|
dropbox/dropbox-sdk-python
|
015437429be224732990041164a21a0501235db1
|
dropbox/team_log.py
|
python
|
EventType.get_sign_in_as_session_end
|
(self)
|
return self._value
|
(logins) Ended admin sign-in-as session
Only call this if :meth:`is_sign_in_as_session_end` is true.
:rtype: SignInAsSessionEndType
|
(logins) Ended admin sign-in-as session
|
[
"(",
"logins",
")",
"Ended",
"admin",
"sign",
"-",
"in",
"-",
"as",
"session"
] |
def get_sign_in_as_session_end(self):
"""
(logins) Ended admin sign-in-as session
Only call this if :meth:`is_sign_in_as_session_end` is true.
:rtype: SignInAsSessionEndType
"""
if not self.is_sign_in_as_session_end():
raise AttributeError("tag 'sign_in_as_session_end' not set")
return self._value
|
[
"def",
"get_sign_in_as_session_end",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"is_sign_in_as_session_end",
"(",
")",
":",
"raise",
"AttributeError",
"(",
"\"tag 'sign_in_as_session_end' not set\"",
")",
"return",
"self",
".",
"_value"
] |
https://github.com/dropbox/dropbox-sdk-python/blob/015437429be224732990041164a21a0501235db1/dropbox/team_log.py#L33747-L33757
|
|
eddyhkchiu/mahalanobis_3d_multi_object_tracking
|
25c098a1e56744e65b9cbcfde94bff2b296a645b
|
get_nuscenes_stats.py
|
python
|
get_mean
|
(tracks)
|
return mean, std, var
|
Input:
tracks: {scene_token: {t: [TrackingBox]}}
|
Input:
tracks: {scene_token: {t: [TrackingBox]}}
|
[
"Input",
":",
"tracks",
":",
"{",
"scene_token",
":",
"{",
"t",
":",
"[",
"TrackingBox",
"]",
"}}"
] |
def get_mean(tracks):
'''
Input:
tracks: {scene_token: {t: [TrackingBox]}}
'''
print('len(tracks.keys()): ', len(tracks.keys()))
# gt_trajectory_map to compute residual or velocity
# tracking_name: {scene_token -> {tracking_id: {t_idx -> det_data}}
# [h, w, l, x, y, z, yaw] #x_dot, y_dot, z_dot, yaw_dot]
gt_trajectory_map = {tracking_name: {scene_token: {} for scene_token in tracks.keys()} for tracking_name in NUSCENES_TRACKING_NAMES}
# store every detection data to compute mean and variance
gt_box_data = {tracking_name: [] for tracking_name in NUSCENES_TRACKING_NAMES}
for scene_token in tracks.keys():
#print('scene_token: ', scene_token)
#print('tracks[scene_token].keys(): ', tracks[scene_token].keys())
for t_idx in range(len(tracks[scene_token].keys())):
#print('t_idx: ', t_idx)
t = sorted(tracks[scene_token].keys())[t_idx]
for box_id in range(len(tracks[scene_token][t])):
#print('box_id: ', box_id)
box = tracks[scene_token][t][box_id]
#print('box: ', box)
if box.tracking_name not in NUSCENES_TRACKING_NAMES:
continue
# box: {'sample_token': '6a808b09e5f34d33ba1de76cc8dab423', 'translation': [2131.657, 1108.874, 3.453], 'size': [3.078, 6.558, 2.95], 'rotation': [0.8520240186812739, 0.0, 0.0, 0.5235026949216329], 'velocity': array([-0.01800415, 0.0100023 ]), 'ego_dist': 54.20556415873658, 'num_pts': 4, 'tracking_id': 'cbaabbf2a83a4177b2145ab1317e296e', 'tracking_name': 'truck', 'tracking_score': -1.0}
# [h, w, l, x, y, z, ry,
# x_t - x_{t-1}, ..., for [x,y,z,ry]
# (x_t - x_{t-1}) - (x_{t-1} - x_{t-2}), ..., for [x,y,z,ry]
box_data = np.array([
box.size[2], box.size[0], box.size[1],
box.translation[0], box.translation[1], box.translation[2],
rotation_to_positive_z_angle(box.rotation),
0, 0, 0, 0,
0, 0, 0, 0])
if box.tracking_id not in gt_trajectory_map[box.tracking_name][scene_token]:
gt_trajectory_map[box.tracking_name][scene_token][box.tracking_id] = {t_idx: box_data}
else:
gt_trajectory_map[box.tracking_name][scene_token][box.tracking_id][t_idx] = box_data
# if we can find the same object in the previous frame, get the velocity
if box.tracking_id in gt_trajectory_map[box.tracking_name][scene_token] and t_idx-1 in gt_trajectory_map[box.tracking_name][scene_token][box.tracking_id]:
residual_vel = box_data[3:7] - gt_trajectory_map[box.tracking_name][scene_token][box.tracking_id][t_idx-1][3:7]
box_data[7:11] = residual_vel
gt_trajectory_map[box.tracking_name][scene_token][box.tracking_id][t_idx] = box_data
# back fill
if gt_trajectory_map[box.tracking_name][scene_token][box.tracking_id][t_idx-1][7] == 0:
gt_trajectory_map[box.tracking_name][scene_token][box.tracking_id][t_idx-1][7:11] = residual_vel
# if we can find the same object in the previous two frames, get the acceleration
if box.tracking_id in gt_trajectory_map[box.tracking_name][scene_token] and t_idx-2 in gt_trajectory_map[box.tracking_name][scene_token][box.tracking_id]:
residual_a = residual_vel - (gt_trajectory_map[box.tracking_name][scene_token][box.tracking_id][t_idx-1][3:7] - gt_trajectory_map[box.tracking_name][scene_token][box.tracking_id][t_idx-2][3:7])
box_data[11:15] = residual_a
gt_trajectory_map[box.tracking_name][scene_token][box.tracking_id][t_idx] = box_data
# back fill
if gt_trajectory_map[box.tracking_name][scene_token][box.tracking_id][t_idx-1][11] == 0:
gt_trajectory_map[box.tracking_name][scene_token][box.tracking_id][t_idx-1][11:15] = residual_a
if gt_trajectory_map[box.tracking_name][scene_token][box.tracking_id][t_idx-2][11] == 0:
gt_trajectory_map[box.tracking_name][scene_token][box.tracking_id][t_idx-2][11:15] = residual_a
#print(det_data)
gt_box_data[box.tracking_name].append(box_data)
gt_box_data = {tracking_name: np.stack(gt_box_data[tracking_name], axis=0) for tracking_name in NUSCENES_TRACKING_NAMES}
mean = {tracking_name: np.mean(gt_box_data[tracking_name], axis=0) for tracking_name in NUSCENES_TRACKING_NAMES}
std = {tracking_name: np.std(gt_box_data[tracking_name], axis=0) for tracking_name in NUSCENES_TRACKING_NAMES}
var = {tracking_name: np.var(gt_box_data[tracking_name], axis=0) for tracking_name in NUSCENES_TRACKING_NAMES}
return mean, std, var
|
[
"def",
"get_mean",
"(",
"tracks",
")",
":",
"print",
"(",
"'len(tracks.keys()): '",
",",
"len",
"(",
"tracks",
".",
"keys",
"(",
")",
")",
")",
"# gt_trajectory_map to compute residual or velocity",
"# tracking_name: {scene_token -> {tracking_id: {t_idx -> det_data}}",
"# [h, w, l, x, y, z, yaw] #x_dot, y_dot, z_dot, yaw_dot]",
"gt_trajectory_map",
"=",
"{",
"tracking_name",
":",
"{",
"scene_token",
":",
"{",
"}",
"for",
"scene_token",
"in",
"tracks",
".",
"keys",
"(",
")",
"}",
"for",
"tracking_name",
"in",
"NUSCENES_TRACKING_NAMES",
"}",
"# store every detection data to compute mean and variance",
"gt_box_data",
"=",
"{",
"tracking_name",
":",
"[",
"]",
"for",
"tracking_name",
"in",
"NUSCENES_TRACKING_NAMES",
"}",
"for",
"scene_token",
"in",
"tracks",
".",
"keys",
"(",
")",
":",
"#print('scene_token: ', scene_token)",
"#print('tracks[scene_token].keys(): ', tracks[scene_token].keys())",
"for",
"t_idx",
"in",
"range",
"(",
"len",
"(",
"tracks",
"[",
"scene_token",
"]",
".",
"keys",
"(",
")",
")",
")",
":",
"#print('t_idx: ', t_idx)",
"t",
"=",
"sorted",
"(",
"tracks",
"[",
"scene_token",
"]",
".",
"keys",
"(",
")",
")",
"[",
"t_idx",
"]",
"for",
"box_id",
"in",
"range",
"(",
"len",
"(",
"tracks",
"[",
"scene_token",
"]",
"[",
"t",
"]",
")",
")",
":",
"#print('box_id: ', box_id)",
"box",
"=",
"tracks",
"[",
"scene_token",
"]",
"[",
"t",
"]",
"[",
"box_id",
"]",
"#print('box: ', box)",
"if",
"box",
".",
"tracking_name",
"not",
"in",
"NUSCENES_TRACKING_NAMES",
":",
"continue",
"# box: {'sample_token': '6a808b09e5f34d33ba1de76cc8dab423', 'translation': [2131.657, 1108.874, 3.453], 'size': [3.078, 6.558, 2.95], 'rotation': [0.8520240186812739, 0.0, 0.0, 0.5235026949216329], 'velocity': array([-0.01800415, 0.0100023 ]), 'ego_dist': 54.20556415873658, 'num_pts': 4, 'tracking_id': 'cbaabbf2a83a4177b2145ab1317e296e', 'tracking_name': 'truck', 'tracking_score': -1.0}",
"# [h, w, l, x, y, z, ry, ",
"# x_t - x_{t-1}, ..., for [x,y,z,ry]",
"# (x_t - x_{t-1}) - (x_{t-1} - x_{t-2}), ..., for [x,y,z,ry]",
"box_data",
"=",
"np",
".",
"array",
"(",
"[",
"box",
".",
"size",
"[",
"2",
"]",
",",
"box",
".",
"size",
"[",
"0",
"]",
",",
"box",
".",
"size",
"[",
"1",
"]",
",",
"box",
".",
"translation",
"[",
"0",
"]",
",",
"box",
".",
"translation",
"[",
"1",
"]",
",",
"box",
".",
"translation",
"[",
"2",
"]",
",",
"rotation_to_positive_z_angle",
"(",
"box",
".",
"rotation",
")",
",",
"0",
",",
"0",
",",
"0",
",",
"0",
",",
"0",
",",
"0",
",",
"0",
",",
"0",
"]",
")",
"if",
"box",
".",
"tracking_id",
"not",
"in",
"gt_trajectory_map",
"[",
"box",
".",
"tracking_name",
"]",
"[",
"scene_token",
"]",
":",
"gt_trajectory_map",
"[",
"box",
".",
"tracking_name",
"]",
"[",
"scene_token",
"]",
"[",
"box",
".",
"tracking_id",
"]",
"=",
"{",
"t_idx",
":",
"box_data",
"}",
"else",
":",
"gt_trajectory_map",
"[",
"box",
".",
"tracking_name",
"]",
"[",
"scene_token",
"]",
"[",
"box",
".",
"tracking_id",
"]",
"[",
"t_idx",
"]",
"=",
"box_data",
"# if we can find the same object in the previous frame, get the velocity",
"if",
"box",
".",
"tracking_id",
"in",
"gt_trajectory_map",
"[",
"box",
".",
"tracking_name",
"]",
"[",
"scene_token",
"]",
"and",
"t_idx",
"-",
"1",
"in",
"gt_trajectory_map",
"[",
"box",
".",
"tracking_name",
"]",
"[",
"scene_token",
"]",
"[",
"box",
".",
"tracking_id",
"]",
":",
"residual_vel",
"=",
"box_data",
"[",
"3",
":",
"7",
"]",
"-",
"gt_trajectory_map",
"[",
"box",
".",
"tracking_name",
"]",
"[",
"scene_token",
"]",
"[",
"box",
".",
"tracking_id",
"]",
"[",
"t_idx",
"-",
"1",
"]",
"[",
"3",
":",
"7",
"]",
"box_data",
"[",
"7",
":",
"11",
"]",
"=",
"residual_vel",
"gt_trajectory_map",
"[",
"box",
".",
"tracking_name",
"]",
"[",
"scene_token",
"]",
"[",
"box",
".",
"tracking_id",
"]",
"[",
"t_idx",
"]",
"=",
"box_data",
"# back fill",
"if",
"gt_trajectory_map",
"[",
"box",
".",
"tracking_name",
"]",
"[",
"scene_token",
"]",
"[",
"box",
".",
"tracking_id",
"]",
"[",
"t_idx",
"-",
"1",
"]",
"[",
"7",
"]",
"==",
"0",
":",
"gt_trajectory_map",
"[",
"box",
".",
"tracking_name",
"]",
"[",
"scene_token",
"]",
"[",
"box",
".",
"tracking_id",
"]",
"[",
"t_idx",
"-",
"1",
"]",
"[",
"7",
":",
"11",
"]",
"=",
"residual_vel",
"# if we can find the same object in the previous two frames, get the acceleration",
"if",
"box",
".",
"tracking_id",
"in",
"gt_trajectory_map",
"[",
"box",
".",
"tracking_name",
"]",
"[",
"scene_token",
"]",
"and",
"t_idx",
"-",
"2",
"in",
"gt_trajectory_map",
"[",
"box",
".",
"tracking_name",
"]",
"[",
"scene_token",
"]",
"[",
"box",
".",
"tracking_id",
"]",
":",
"residual_a",
"=",
"residual_vel",
"-",
"(",
"gt_trajectory_map",
"[",
"box",
".",
"tracking_name",
"]",
"[",
"scene_token",
"]",
"[",
"box",
".",
"tracking_id",
"]",
"[",
"t_idx",
"-",
"1",
"]",
"[",
"3",
":",
"7",
"]",
"-",
"gt_trajectory_map",
"[",
"box",
".",
"tracking_name",
"]",
"[",
"scene_token",
"]",
"[",
"box",
".",
"tracking_id",
"]",
"[",
"t_idx",
"-",
"2",
"]",
"[",
"3",
":",
"7",
"]",
")",
"box_data",
"[",
"11",
":",
"15",
"]",
"=",
"residual_a",
"gt_trajectory_map",
"[",
"box",
".",
"tracking_name",
"]",
"[",
"scene_token",
"]",
"[",
"box",
".",
"tracking_id",
"]",
"[",
"t_idx",
"]",
"=",
"box_data",
"# back fill",
"if",
"gt_trajectory_map",
"[",
"box",
".",
"tracking_name",
"]",
"[",
"scene_token",
"]",
"[",
"box",
".",
"tracking_id",
"]",
"[",
"t_idx",
"-",
"1",
"]",
"[",
"11",
"]",
"==",
"0",
":",
"gt_trajectory_map",
"[",
"box",
".",
"tracking_name",
"]",
"[",
"scene_token",
"]",
"[",
"box",
".",
"tracking_id",
"]",
"[",
"t_idx",
"-",
"1",
"]",
"[",
"11",
":",
"15",
"]",
"=",
"residual_a",
"if",
"gt_trajectory_map",
"[",
"box",
".",
"tracking_name",
"]",
"[",
"scene_token",
"]",
"[",
"box",
".",
"tracking_id",
"]",
"[",
"t_idx",
"-",
"2",
"]",
"[",
"11",
"]",
"==",
"0",
":",
"gt_trajectory_map",
"[",
"box",
".",
"tracking_name",
"]",
"[",
"scene_token",
"]",
"[",
"box",
".",
"tracking_id",
"]",
"[",
"t_idx",
"-",
"2",
"]",
"[",
"11",
":",
"15",
"]",
"=",
"residual_a",
"#print(det_data)",
"gt_box_data",
"[",
"box",
".",
"tracking_name",
"]",
".",
"append",
"(",
"box_data",
")",
"gt_box_data",
"=",
"{",
"tracking_name",
":",
"np",
".",
"stack",
"(",
"gt_box_data",
"[",
"tracking_name",
"]",
",",
"axis",
"=",
"0",
")",
"for",
"tracking_name",
"in",
"NUSCENES_TRACKING_NAMES",
"}",
"mean",
"=",
"{",
"tracking_name",
":",
"np",
".",
"mean",
"(",
"gt_box_data",
"[",
"tracking_name",
"]",
",",
"axis",
"=",
"0",
")",
"for",
"tracking_name",
"in",
"NUSCENES_TRACKING_NAMES",
"}",
"std",
"=",
"{",
"tracking_name",
":",
"np",
".",
"std",
"(",
"gt_box_data",
"[",
"tracking_name",
"]",
",",
"axis",
"=",
"0",
")",
"for",
"tracking_name",
"in",
"NUSCENES_TRACKING_NAMES",
"}",
"var",
"=",
"{",
"tracking_name",
":",
"np",
".",
"var",
"(",
"gt_box_data",
"[",
"tracking_name",
"]",
",",
"axis",
"=",
"0",
")",
"for",
"tracking_name",
"in",
"NUSCENES_TRACKING_NAMES",
"}",
"return",
"mean",
",",
"std",
",",
"var"
] |
https://github.com/eddyhkchiu/mahalanobis_3d_multi_object_tracking/blob/25c098a1e56744e65b9cbcfde94bff2b296a645b/get_nuscenes_stats.py#L35-L110
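The residual bookkeeping in get_mean, reduced to a toy 1-D trajectory (the real code does the same per [x, y, z, yaw] slice): the velocity residual is x_t - x_{t-1}, the acceleration residual is the difference of consecutive velocity residuals, and the leading frames are back-filled with the first computed residual.

import numpy as np

x = np.array([0.0, 1.0, 3.0, 6.0])   # positions over 4 frames
vel = np.diff(x)                      # [1., 2., 3.]
acc = np.diff(vel)                    # [1., 1.]
# back fill as in get_mean: frame 0 reuses the first velocity,
# frames 0 and 1 reuse the first acceleration
vel_filled = np.concatenate(([vel[0]], vel))          # [1., 1., 2., 3.]
acc_filled = np.concatenate(([acc[0], acc[0]], acc))  # [1., 1., 1., 1.]
print(vel_filled.mean(), acc_filled.var())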
|
|
F8LEFT/DecLLVM
|
d38e45e3d0dd35634adae1d0cf7f96f3bd96e74c
|
python/idaapi.py
|
python
|
cinsn_t.__le__
|
(self, *args)
|
return _idaapi.cinsn_t___le__(self, *args)
|
__le__(self, r) -> bool
|
__le__(self, r) -> bool
|
[
"__le__",
"(",
"self",
"r",
")",
"-",
">",
"bool"
] |
def __le__(self, *args):
"""
__le__(self, r) -> bool
"""
return _idaapi.cinsn_t___le__(self, *args)
|
[
"def",
"__le__",
"(",
"self",
",",
"*",
"args",
")",
":",
"return",
"_idaapi",
".",
"cinsn_t___le__",
"(",
"self",
",",
"*",
"args",
")"
] |
https://github.com/F8LEFT/DecLLVM/blob/d38e45e3d0dd35634adae1d0cf7f96f3bd96e74c/python/idaapi.py#L38292-L38296
|
|
Walleclipse/ChineseAddress_OCR
|
ca7929c72cbac09c71501f06bf16c387f42f00cf
|
densenet/model.py
|
python
|
predict
|
(img)
|
return out
|
img_array = np.array(img.convert('1'))
boundary_array = np.concatenate((img_array[0, :], img_array[:, width - 1], img_array[31, :], img_array[:, 0]), axis=0)
if np.median(boundary_array) == 0: # 将黑底白字转换为白底黑字
img = ImageOps.invert(img)
|
img_array = np.array(img.convert('1'))
boundary_array = np.concatenate((img_array[0, :], img_array[:, width - 1], img_array[31, :], img_array[:, 0]), axis=0)
if np.median(boundary_array) == 0: # 将黑底白字转换为白底黑字
img = ImageOps.invert(img)
|
[
"img_array",
"=",
"np",
".",
"array",
"(",
"img",
".",
"convert",
"(",
"1",
"))",
"boundary_array",
"=",
"np",
".",
"concatenate",
"((",
"img_array",
"[",
"0",
":",
"]",
"img_array",
"[",
":",
"width",
"-",
"1",
"]",
"img_array",
"[",
"31",
":",
"]",
"img_array",
"[",
":",
"0",
"]",
")",
"axis",
"=",
"0",
")",
"if",
"np",
".",
"median",
"(",
"boundary_array",
")",
"==",
"0",
":",
"#",
"将黑底白字转换为白底黑字",
"img",
"=",
"ImageOps",
".",
"invert",
"(",
"img",
")"
] |
def predict(img):
width, height = img.size[0], img.size[1]
scale = height * 1.0 / 32
width = int(width / scale)
img = img.resize([width, 32], Image.ANTIALIAS)
'''
img_array = np.array(img.convert('1'))
boundary_array = np.concatenate((img_array[0, :], img_array[:, width - 1], img_array[31, :], img_array[:, 0]), axis=0)
if np.median(boundary_array) == 0: # 将黑底白字转换为白底黑字
img = ImageOps.invert(img)
'''
img = np.array(img).astype(np.float32) / 255.0 - 0.5
X = img.reshape([1, 32, width, 1])
y_pred = basemodel.predict(X)
y_pred = y_pred[:, :, :]
# out = K.get_value(K.ctc_decode(y_pred, input_length=np.ones(y_pred.shape[0]) * y_pred.shape[1])[0][0])[:, :]
# out = u''.join([characters[x] for x in out[0]])
out = decode(y_pred)
return out
|
[
"def",
"predict",
"(",
"img",
")",
":",
"width",
",",
"height",
"=",
"img",
".",
"size",
"[",
"0",
"]",
",",
"img",
".",
"size",
"[",
"1",
"]",
"scale",
"=",
"height",
"*",
"1.0",
"/",
"32",
"width",
"=",
"int",
"(",
"width",
"/",
"scale",
")",
"img",
"=",
"img",
".",
"resize",
"(",
"[",
"width",
",",
"32",
"]",
",",
"Image",
".",
"ANTIALIAS",
")",
"img",
"=",
"np",
".",
"array",
"(",
"img",
")",
".",
"astype",
"(",
"np",
".",
"float32",
")",
"/",
"255.0",
"-",
"0.5",
"X",
"=",
"img",
".",
"reshape",
"(",
"[",
"1",
",",
"32",
",",
"width",
",",
"1",
"]",
")",
"y_pred",
"=",
"basemodel",
".",
"predict",
"(",
"X",
")",
"y_pred",
"=",
"y_pred",
"[",
":",
",",
":",
",",
":",
"]",
"# out = K.get_value(K.ctc_decode(y_pred, input_length=np.ones(y_pred.shape[0]) * y_pred.shape[1])[0][0])[:, :]",
"# out = u''.join([characters[x] for x in out[0]])",
"out",
"=",
"decode",
"(",
"y_pred",
")",
"return",
"out"
] |
https://github.com/Walleclipse/ChineseAddress_OCR/blob/ca7929c72cbac09c71501f06bf16c387f42f00cf/densenet/model.py#L47-L72
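The resize arithmetic in predict(): height is forced to 32 and width is divided by the same scale factor, preserving aspect ratio before the (1, 32, width, 1) reshape.

# resize arithmetic used by predict(): height -> 32, width scaled to match
width, height = 240, 48
scale = height * 1.0 / 32        # 1.5
new_width = int(width / scale)   # 160
print(new_width, 32)             # a 240x48 crop becomes 160x32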
|
|
jrzaurin/pytorch-widedeep
|
8b4c3a8acbf06b385c821d7111b1139a16b4f480
|
pytorch_widedeep/preprocessing/wide_preprocessor.py
|
python
|
WidePreprocessor.fit
|
(self, df: pd.DataFrame)
|
return self
|
r"""Fits the Preprocessor and creates required attributes
|
r"""Fits the Preprocessor and creates required attributes
|
[
"r",
"Fits",
"the",
"Preprocessor",
"and",
"creates",
"required",
"attributes"
] |
def fit(self, df: pd.DataFrame) -> BasePreprocessor:
r"""Fits the Preprocessor and creates required attributes"""
df_wide = self._prepare_wide(df)
self.wide_crossed_cols = df_wide.columns.tolist()
glob_feature_list = self._make_global_feature_list(
df_wide[self.wide_crossed_cols]
)
# leave 0 for padding/"unseen" categories
self.encoding_dict = {v: i + 1 for i, v in enumerate(glob_feature_list)}
self.wide_dim = len(self.encoding_dict)
self.inverse_encoding_dict = {k: v for v, k in self.encoding_dict.items()}
self.inverse_encoding_dict[0] = "unseen"
return self
|
[
"def",
"fit",
"(",
"self",
",",
"df",
":",
"pd",
".",
"DataFrame",
")",
"->",
"BasePreprocessor",
":",
"df_wide",
"=",
"self",
".",
"_prepare_wide",
"(",
"df",
")",
"self",
".",
"wide_crossed_cols",
"=",
"df_wide",
".",
"columns",
".",
"tolist",
"(",
")",
"glob_feature_list",
"=",
"self",
".",
"_make_global_feature_list",
"(",
"df_wide",
"[",
"self",
".",
"wide_crossed_cols",
"]",
")",
"# leave 0 for padding/\"unseen\" categories",
"self",
".",
"encoding_dict",
"=",
"{",
"v",
":",
"i",
"+",
"1",
"for",
"i",
",",
"v",
"in",
"enumerate",
"(",
"glob_feature_list",
")",
"}",
"self",
".",
"wide_dim",
"=",
"len",
"(",
"self",
".",
"encoding_dict",
")",
"self",
".",
"inverse_encoding_dict",
"=",
"{",
"k",
":",
"v",
"for",
"v",
",",
"k",
"in",
"self",
".",
"encoding_dict",
".",
"items",
"(",
")",
"}",
"self",
".",
"inverse_encoding_dict",
"[",
"0",
"]",
"=",
"\"unseen\"",
"return",
"self"
] |
https://github.com/jrzaurin/pytorch-widedeep/blob/8b4c3a8acbf06b385c821d7111b1139a16b4f480/pytorch_widedeep/preprocessing/wide_preprocessor.py#L69-L81
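A usage sketch, assuming pytorch_widedeep's documented WidePreprocessor(wide_cols=...) constructor and that _make_global_feature_list emits one entry per distinct column-value pair (which the fit() above implies):

import pandas as pd
from pytorch_widedeep.preprocessing import WidePreprocessor

df = pd.DataFrame({'color': ['red', 'blue', 'red'],
                   'size': ['s', 'm', 'l']})
wide = WidePreprocessor(wide_cols=['color', 'size'])
wide.fit(df)
print(wide.wide_dim)                  # 5 distinct feature values
print(wide.inverse_encoding_dict[0])  # 'unseen' -- index 0 is reserved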
|
|
scikit-learn-contrib/category_encoders
|
55636b5ae11dc45075a0c248028f17f9df93bbb9
|
category_encoders/utils.py
|
python
|
get_obj_cols
|
(df)
|
return obj_cols
|
Returns names of 'object' columns in the DataFrame.
|
Returns names of 'object' columns in the DataFrame.
|
[
"Returns",
"names",
"of",
"object",
"columns",
"in",
"the",
"DataFrame",
"."
] |
def get_obj_cols(df):
"""
Returns names of 'object' columns in the DataFrame.
"""
obj_cols = []
for idx, dt in enumerate(df.dtypes):
if dt == 'object' or is_category(dt):
obj_cols.append(df.columns.values[idx])
return obj_cols
|
[
"def",
"get_obj_cols",
"(",
"df",
")",
":",
"obj_cols",
"=",
"[",
"]",
"for",
"idx",
",",
"dt",
"in",
"enumerate",
"(",
"df",
".",
"dtypes",
")",
":",
"if",
"dt",
"==",
"'object'",
"or",
"is_category",
"(",
"dt",
")",
":",
"obj_cols",
".",
"append",
"(",
"df",
".",
"columns",
".",
"values",
"[",
"idx",
"]",
")",
"return",
"obj_cols"
] |
https://github.com/scikit-learn-contrib/category_encoders/blob/55636b5ae11dc45075a0c248028f17f9df93bbb9/category_encoders/utils.py#L27-L36
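A usage sketch calling get_obj_cols as defined above; both object-dtype and pandas Categorical columns are picked up (is_category, from the same module, handles the latter):

import pandas as pd

df = pd.DataFrame({'a': [1, 2],
                   'b': ['x', 'y'],
                   'c': pd.Categorical(['u', 'v'])})
print(get_obj_cols(df))   # ['b', 'c']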
|
|
hubblestack/hubble
|
763142474edcecdec5fd25591dc29c3536e8f969
|
hubblestack/audit/readfile.py
|
python
|
_check_pattern
|
(line, pattern, ignore_pattern)
|
return keep
|
Check a given line against both a pattern and an ignore_pattern and return
True or False based on whether that line should be used.
|
Check a given line against both a pattern and an ignore_pattern and return
True or False based on whether that line should be used.
|
[
"Check",
"a",
"given",
"line",
"against",
"both",
"a",
"pattern",
"and",
"an",
"ignore_pattern",
"and",
"return",
"True",
"or",
"False",
"based",
"on",
"whether",
"that",
"line",
"should",
"be",
"used",
"."
] |
def _check_pattern(line, pattern, ignore_pattern):
"""
Check a given line against both a pattern and an ignore_pattern and return
True or False based on whether that line should be used.
"""
keep = False
if pattern is None:
keep = True
elif re.match(pattern, line):
keep = True
if ignore_pattern is not None and re.match(ignore_pattern, line):
keep = False
return keep
|
[
"def",
"_check_pattern",
"(",
"line",
",",
"pattern",
",",
"ignore_pattern",
")",
":",
"keep",
"=",
"False",
"if",
"pattern",
"is",
"None",
":",
"keep",
"=",
"True",
"elif",
"re",
".",
"match",
"(",
"pattern",
",",
"line",
")",
":",
"keep",
"=",
"True",
"if",
"ignore_pattern",
"is",
"not",
"None",
"and",
"re",
".",
"match",
"(",
"ignore_pattern",
",",
"line",
")",
":",
"keep",
"=",
"False",
"return",
"keep"
] |
https://github.com/hubblestack/hubble/blob/763142474edcecdec5fd25591dc29c3536e8f969/hubblestack/audit/readfile.py#L556-L571
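The precedence here: ignore_pattern wins even when pattern matches, and a None pattern keeps every line not explicitly ignored. A standalone sketch reproducing the same checks:

import re

def check_pattern(line, pattern, ignore_pattern):
    keep = pattern is None or bool(re.match(pattern, line))
    if ignore_pattern is not None and re.match(ignore_pattern, line):
        keep = False
    return keep

assert check_pattern('PermitRootLogin no', r'Permit', None)
assert not check_pattern('# a comment', None, r'#')                   # ignored
assert not check_pattern('PermitRootLogin no', r'Permit', r'Permit')  # ignore wins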
|
|
AI4Finance-Foundation/ElegantRL
|
74103d9cc4ce9c573f83bc42d9129ff15b9ff018
|
elegantrl/agents/net.py
|
python
|
ActorPPO.forward
|
(self, state)
|
return self.net(state).tanh()
|
The forward function.
:param state: [tensor] the input state.
:return: the output tensor.
|
The forward function.
:param state: [tensor] the input state.
:return: the output tensor.
|
[
"The",
"forward",
"function",
".",
":",
"param",
"state",
":",
"[",
"tensor",
"]",
"the",
"input",
"state",
".",
":",
"return",
":",
"the",
"output",
"tensor",
"."
] |
def forward(self, state):
"""
The forward function.
:param state: [tensor] the input state.
:return: the output tensor.
"""
return self.net(state).tanh()
|
[
"def",
"forward",
"(",
"self",
",",
"state",
")",
":",
"return",
"self",
".",
"net",
"(",
"state",
")",
".",
"tanh",
"(",
")"
] |
https://github.com/AI4Finance-Foundation/ElegantRL/blob/74103d9cc4ce9c573f83bc42d9129ff15b9ff018/elegantrl/agents/net.py#L318-L325
|
|
CheckPointSW/Karta
|
b845928487b50a5b41acd532ae0399177a4356aa
|
src/thumbs_up/utils/code_regions.py
|
python
|
CodeRegions.insert
|
(self, region)
|
Insert the given region at its suitable (sorted) place.
Args:
region (CodeRegion): new code region to be inserted
|
Insert the given region at its suitable (sorted) place.
|
[
"Insert",
"the",
"given",
"region",
"at",
"it",
"s",
"suitable",
"(",
"sorted",
")",
"place",
"."
] |
def insert(self, region):
"""Insert the given region at it's suitable (sorted) place.
Args:
region (CodeRegion): new code region to be inserted
"""
# Check if we are the first (the easy case)
if len(self._regions) == 0:
# Insert the element
self._regions.append(region)
return
# Check if we can merge them together
prev_region = self._regions[-1]
if prev_region.end == region.start and prev_region.code_type == region.code_type:
prev_region.end = region.end
# Otherwise, insert and link the region
else:
prev_region.link(region)
self._regions.append(region)
|
[
"def",
"insert",
"(",
"self",
",",
"region",
")",
":",
"# Check if we are the first (the easy case)",
"if",
"len",
"(",
"self",
".",
"_regions",
")",
"==",
"0",
":",
"# Insert the element",
"self",
".",
"_regions",
".",
"append",
"(",
"region",
")",
"return",
"# Check if we can merge them together",
"prev_region",
"=",
"self",
".",
"_regions",
"[",
"-",
"1",
"]",
"if",
"prev_region",
".",
"end",
"==",
"region",
".",
"start",
"and",
"prev_region",
".",
"code_type",
"==",
"region",
".",
"code_type",
":",
"prev_region",
".",
"end",
"=",
"region",
".",
"end",
"# Otherwise, insert and link the region",
"else",
":",
"prev_region",
".",
"link",
"(",
"region",
")",
"self",
".",
"_regions",
".",
"append",
"(",
"region",
")"
] |
https://github.com/CheckPointSW/Karta/blob/b845928487b50a5b41acd532ae0399177a4356aa/src/thumbs_up/utils/code_regions.py#L78-L96
|
||
haiwen/seahub
|
e92fcd44e3e46260597d8faa9347cb8222b8b10d
|
seahub/utils/__init__.py
|
python
|
gen_file_share_link
|
(token)
|
return gen_shared_link(token, 'f')
|
Generate file share link.
|
Generate file share link.
|
[
"Generate",
"file",
"share",
"link",
"."
] |
def gen_file_share_link(token):
"""Generate file share link.
"""
return gen_shared_link(token, 'f')
|
[
"def",
"gen_file_share_link",
"(",
"token",
")",
":",
"return",
"gen_shared_link",
"(",
"token",
",",
"'f'",
")"
] |
https://github.com/haiwen/seahub/blob/e92fcd44e3e46260597d8faa9347cb8222b8b10d/seahub/utils/__init__.py#L961-L964
|
|
inkandswitch/livebook
|
93c8d467734787366ad084fc3566bf5cbe249c51
|
public/pypyjs/modules/email/_parseaddr.py
|
python
|
AddrlistClass.getdomainliteral
|
(self)
|
return '[%s]' % self.getdelimited('[', ']\r', False)
|
Parse an RFC 2822 domain-literal.
|
Parse an RFC 2822 domain-literal.
|
[
"Parse",
"an",
"RFC",
"2822",
"domain",
"-",
"literal",
"."
] |
def getdomainliteral(self):
"""Parse an RFC 2822 domain-literal."""
return '[%s]' % self.getdelimited('[', ']\r', False)
|
[
"def",
"getdomainliteral",
"(",
"self",
")",
":",
"return",
"'[%s]'",
"%",
"self",
".",
"getdelimited",
"(",
"'['",
",",
"']\\r'",
",",
"False",
")"
] |
https://github.com/inkandswitch/livebook/blob/93c8d467734787366ad084fc3566bf5cbe249c51/public/pypyjs/modules/email/_parseaddr.py#L405-L407
|
|
phage-nz/ph0neutria
|
865aae37d8503d3f580f6762aa67f65958355ba7
|
core/malware_utils.py
|
python
|
get_plugin_malware_hosts
|
()
|
return host_list
|
Produce a list of malware hosts.
Returns:
- host_list: (type: MalwareHost list) list of malware hosts.
|
Produce a list of malware hosts.
|
[
"Produce",
"a",
"list",
"of",
"malware",
"hosts",
"."
] |
def get_plugin_malware_hosts():
"""Produce a list of malware hosts.
Returns:
- host_list: (type: MalwareHost list) list of malware hosts.
"""
host_list = []
LOGGING.info('Loading malware host plugins...')
for plugin in load_plugins():
try:
if 'malware-host' in plugin.TYPES and plugin.DISABLED == False:
LOGGING.info('Running plugin: {0}'.format(plugin.NAME))
plugin_list = plugin.get_malwarehost_list()
if len(plugin_list) > 0:
host_list.extend(plugin_list)
except Exception as e:
LOGGING.error('Problem running plugin. Aborting task.')
LOGGING.exception(sys.exc_info())
LOGGING.exception(type(e))
LOGGING.exception(e.args)
LOGGING.exception(e)
return host_list
|
[
"def",
"get_plugin_malware_hosts",
"(",
")",
":",
"host_list",
"=",
"[",
"]",
"LOGGING",
".",
"info",
"(",
"'Loading malware host plugins...'",
")",
"for",
"plugin",
"in",
"load_plugins",
"(",
")",
":",
"try",
":",
"if",
"'malware-host'",
"in",
"plugin",
".",
"TYPES",
"and",
"plugin",
".",
"DISABLED",
"==",
"False",
":",
"LOGGING",
".",
"info",
"(",
"'Running plugin: {0}'",
".",
"format",
"(",
"plugin",
".",
"NAME",
")",
")",
"plugin_list",
"=",
"plugin",
".",
"get_malwarehost_list",
"(",
")",
"if",
"len",
"(",
"plugin_list",
")",
">",
"0",
":",
"host_list",
".",
"extend",
"(",
"plugin_list",
")",
"except",
"Exception",
"as",
"e",
":",
"LOGGING",
".",
"error",
"(",
"'Problem running plugin. Aborting task.'",
")",
"LOGGING",
".",
"exception",
"(",
"sys",
".",
"exc_info",
"(",
")",
")",
"LOGGING",
".",
"exception",
"(",
"type",
"(",
"e",
")",
")",
"LOGGING",
".",
"exception",
"(",
"e",
".",
"args",
")",
"LOGGING",
".",
"exception",
"(",
"e",
")",
"return",
"host_list"
] |
https://github.com/phage-nz/ph0neutria/blob/865aae37d8503d3f580f6762aa67f65958355ba7/core/malware_utils.py#L51-L77
|
|
cloudera/hue
|
23f02102d4547c17c32bd5ea0eb24e9eadd657a4
|
desktop/core/ext-py/Django-1.11.29/django/contrib/sites/models.py
|
python
|
SiteManager.get_current
|
(self, request=None)
|
Returns the current Site based on the SITE_ID in the project's settings.
If SITE_ID isn't defined, it returns the site with domain matching
request.get_host(). The ``Site`` object is cached the first time it's
retrieved from the database.
|
Returns the current Site based on the SITE_ID in the project's settings.
If SITE_ID isn't defined, it returns the site with domain matching
request.get_host(). The ``Site`` object is cached the first time it's
retrieved from the database.
|
[
"Returns",
"the",
"current",
"Site",
"based",
"on",
"the",
"SITE_ID",
"in",
"the",
"project",
"s",
"settings",
".",
"If",
"SITE_ID",
"isn",
"t",
"defined",
"it",
"returns",
"the",
"site",
"with",
"domain",
"matching",
"request",
".",
"get_host",
"()",
".",
"The",
"Site",
"object",
"is",
"cached",
"the",
"first",
"time",
"it",
"s",
"retrieved",
"from",
"the",
"database",
"."
] |
def get_current(self, request=None):
"""
Returns the current Site based on the SITE_ID in the project's settings.
If SITE_ID isn't defined, it returns the site with domain matching
request.get_host(). The ``Site`` object is cached the first time it's
retrieved from the database.
"""
from django.conf import settings
if getattr(settings, 'SITE_ID', ''):
site_id = settings.SITE_ID
return self._get_site_by_id(site_id)
elif request:
return self._get_site_by_request(request)
raise ImproperlyConfigured(
"You're using the Django \"sites framework\" without having "
"set the SITE_ID setting. Create a site in your database and "
"set the SITE_ID setting or pass a request to "
"Site.objects.get_current() to fix this error."
)
|
[
"def",
"get_current",
"(",
"self",
",",
"request",
"=",
"None",
")",
":",
"from",
"django",
".",
"conf",
"import",
"settings",
"if",
"getattr",
"(",
"settings",
",",
"'SITE_ID'",
",",
"''",
")",
":",
"site_id",
"=",
"settings",
".",
"SITE_ID",
"return",
"self",
".",
"_get_site_by_id",
"(",
"site_id",
")",
"elif",
"request",
":",
"return",
"self",
".",
"_get_site_by_request",
"(",
"request",
")",
"raise",
"ImproperlyConfigured",
"(",
"\"You're using the Django \\\"sites framework\\\" without having \"",
"\"set the SITE_ID setting. Create a site in your database and \"",
"\"set the SITE_ID setting or pass a request to \"",
"\"Site.objects.get_current() to fix this error.\"",
")"
] |
https://github.com/cloudera/hue/blob/23f02102d4547c17c32bd5ea0eb24e9eadd657a4/desktop/core/ext-py/Django-1.11.29/django/contrib/sites/models.py#L53-L72
|
||
STIXProject/python-stix
|
5ab382b1a3c19364fb8c3e5219addab9e3b64ee9
|
stix/exploit_target/weakness.py
|
python
|
Weakness.add_description
|
(self, description)
|
Adds a description to the ``descriptions`` collection.
This is the same as calling "foo.descriptions.add(bar)".
|
Adds a description to the ``descriptions`` collection.
|
[
"Adds",
"a",
"description",
"to",
"the",
"descriptions",
"collection",
"."
] |
def add_description(self, description):
"""Adds a description to the ``descriptions`` collection.
This is the same as calling "foo.descriptions.add(bar)".
"""
self.descriptions.add(description)
|
[
"def",
"add_description",
"(",
"self",
",",
"description",
")",
":",
"self",
".",
"descriptions",
".",
"add",
"(",
"description",
")"
] |
https://github.com/STIXProject/python-stix/blob/5ab382b1a3c19364fb8c3e5219addab9e3b64ee9/stix/exploit_target/weakness.py#L48-L53
|
||
sympy/sympy
|
d822fcba181155b85ff2b29fe525adbafb22b448
|
sympy/physics/quantum/operatorordering.py
|
python
|
_normal_ordered_form_factor
|
(product, independent=False, recursive_limit=10,
_recursive_depth=0)
|
Helper function for normal_ordered_form_factor: Write multiplication
expression with bosonic or fermionic operators on normally ordered form,
using the bosonic and fermionic commutation relations. The resulting
operator expression is equivalent to the argument, but will in general be
a sum of operator products instead of a simple product.
|
Helper function for normal_ordered_form_factor: Write multiplication
expression with bosonic or fermionic operators on normally ordered form,
using the bosonic and fermionic commutation relations. The resulting
operator expression is equivalent to the argument, but will in general be
a sum of operator products instead of a simple product.
|
[
"Helper",
"function",
"for",
"normal_ordered_form_factor",
":",
"Write",
"multiplication",
"expression",
"with",
"bosonic",
"or",
"fermionic",
"operators",
"on",
"normally",
"ordered",
"form",
"using",
"the",
"bosonic",
"and",
"fermionic",
"commutation",
"relations",
".",
"The",
"resulting",
"operator",
"expression",
"is",
"equivalent",
"to",
"the",
"argument",
"but",
"will",
"in",
"general",
"be",
"a",
"sum",
"of",
"operator",
"products",
"instead",
"of",
"a",
"simple",
"product",
"."
] |
def _normal_ordered_form_factor(product, independent=False, recursive_limit=10,
_recursive_depth=0):
"""
Helper function for normal_ordered_form_factor: Write multiplication
expression with bosonic or fermionic operators on normally ordered form,
using the bosonic and fermionic commutation relations. The resulting
operator expression is equivalent to the argument, but will in general be
a sum of operator products instead of a simple product.
"""
factors = _expand_powers(product)
new_factors = []
n = 0
while n < len(factors) - 1:
if isinstance(factors[n], BosonOp):
# boson
if not isinstance(factors[n + 1], BosonOp):
new_factors.append(factors[n])
elif factors[n].is_annihilation == factors[n + 1].is_annihilation:
if (independent and
str(factors[n].name) > str(factors[n + 1].name)):
new_factors.append(factors[n + 1])
new_factors.append(factors[n])
n += 1
else:
new_factors.append(factors[n])
elif not factors[n].is_annihilation:
new_factors.append(factors[n])
else:
if factors[n + 1].is_annihilation:
new_factors.append(factors[n])
else:
if factors[n].args[0] != factors[n + 1].args[0]:
if independent:
c = 0
else:
c = Commutator(factors[n], factors[n + 1])
new_factors.append(factors[n + 1] * factors[n] + c)
else:
c = Commutator(factors[n], factors[n + 1])
new_factors.append(
factors[n + 1] * factors[n] + c.doit())
n += 1
elif isinstance(factors[n], FermionOp):
# fermion
if not isinstance(factors[n + 1], FermionOp):
new_factors.append(factors[n])
elif factors[n].is_annihilation == factors[n + 1].is_annihilation:
if (independent and
str(factors[n].name) > str(factors[n + 1].name)):
new_factors.append(factors[n + 1])
new_factors.append(factors[n])
n += 1
else:
new_factors.append(factors[n])
elif not factors[n].is_annihilation:
new_factors.append(factors[n])
else:
if factors[n + 1].is_annihilation:
new_factors.append(factors[n])
else:
if factors[n].args[0] != factors[n + 1].args[0]:
if independent:
c = 0
else:
c = AntiCommutator(factors[n], factors[n + 1])
new_factors.append(-factors[n + 1] * factors[n] + c)
else:
c = AntiCommutator(factors[n], factors[n + 1])
new_factors.append(
-factors[n + 1] * factors[n] + c.doit())
n += 1
elif isinstance(factors[n], Operator):
if isinstance(factors[n + 1], (BosonOp, FermionOp)):
new_factors.append(factors[n + 1])
new_factors.append(factors[n])
n += 1
else:
new_factors.append(factors[n])
else:
new_factors.append(factors[n])
n += 1
if n == len(factors) - 1:
new_factors.append(factors[-1])
if new_factors == factors:
return product
else:
expr = Mul(*new_factors).expand()
return normal_ordered_form(expr,
recursive_limit=recursive_limit,
_recursive_depth=_recursive_depth + 1,
independent=independent)
|
[
"def",
"_normal_ordered_form_factor",
"(",
"product",
",",
"independent",
"=",
"False",
",",
"recursive_limit",
"=",
"10",
",",
"_recursive_depth",
"=",
"0",
")",
":",
"factors",
"=",
"_expand_powers",
"(",
"product",
")",
"new_factors",
"=",
"[",
"]",
"n",
"=",
"0",
"while",
"n",
"<",
"len",
"(",
"factors",
")",
"-",
"1",
":",
"if",
"isinstance",
"(",
"factors",
"[",
"n",
"]",
",",
"BosonOp",
")",
":",
"# boson",
"if",
"not",
"isinstance",
"(",
"factors",
"[",
"n",
"+",
"1",
"]",
",",
"BosonOp",
")",
":",
"new_factors",
".",
"append",
"(",
"factors",
"[",
"n",
"]",
")",
"elif",
"factors",
"[",
"n",
"]",
".",
"is_annihilation",
"==",
"factors",
"[",
"n",
"+",
"1",
"]",
".",
"is_annihilation",
":",
"if",
"(",
"independent",
"and",
"str",
"(",
"factors",
"[",
"n",
"]",
".",
"name",
")",
">",
"str",
"(",
"factors",
"[",
"n",
"+",
"1",
"]",
".",
"name",
")",
")",
":",
"new_factors",
".",
"append",
"(",
"factors",
"[",
"n",
"+",
"1",
"]",
")",
"new_factors",
".",
"append",
"(",
"factors",
"[",
"n",
"]",
")",
"n",
"+=",
"1",
"else",
":",
"new_factors",
".",
"append",
"(",
"factors",
"[",
"n",
"]",
")",
"elif",
"not",
"factors",
"[",
"n",
"]",
".",
"is_annihilation",
":",
"new_factors",
".",
"append",
"(",
"factors",
"[",
"n",
"]",
")",
"else",
":",
"if",
"factors",
"[",
"n",
"+",
"1",
"]",
".",
"is_annihilation",
":",
"new_factors",
".",
"append",
"(",
"factors",
"[",
"n",
"]",
")",
"else",
":",
"if",
"factors",
"[",
"n",
"]",
".",
"args",
"[",
"0",
"]",
"!=",
"factors",
"[",
"n",
"+",
"1",
"]",
".",
"args",
"[",
"0",
"]",
":",
"if",
"independent",
":",
"c",
"=",
"0",
"else",
":",
"c",
"=",
"Commutator",
"(",
"factors",
"[",
"n",
"]",
",",
"factors",
"[",
"n",
"+",
"1",
"]",
")",
"new_factors",
".",
"append",
"(",
"factors",
"[",
"n",
"+",
"1",
"]",
"*",
"factors",
"[",
"n",
"]",
"+",
"c",
")",
"else",
":",
"c",
"=",
"Commutator",
"(",
"factors",
"[",
"n",
"]",
",",
"factors",
"[",
"n",
"+",
"1",
"]",
")",
"new_factors",
".",
"append",
"(",
"factors",
"[",
"n",
"+",
"1",
"]",
"*",
"factors",
"[",
"n",
"]",
"+",
"c",
".",
"doit",
"(",
")",
")",
"n",
"+=",
"1",
"elif",
"isinstance",
"(",
"factors",
"[",
"n",
"]",
",",
"FermionOp",
")",
":",
"# fermion",
"if",
"not",
"isinstance",
"(",
"factors",
"[",
"n",
"+",
"1",
"]",
",",
"FermionOp",
")",
":",
"new_factors",
".",
"append",
"(",
"factors",
"[",
"n",
"]",
")",
"elif",
"factors",
"[",
"n",
"]",
".",
"is_annihilation",
"==",
"factors",
"[",
"n",
"+",
"1",
"]",
".",
"is_annihilation",
":",
"if",
"(",
"independent",
"and",
"str",
"(",
"factors",
"[",
"n",
"]",
".",
"name",
")",
">",
"str",
"(",
"factors",
"[",
"n",
"+",
"1",
"]",
".",
"name",
")",
")",
":",
"new_factors",
".",
"append",
"(",
"factors",
"[",
"n",
"+",
"1",
"]",
")",
"new_factors",
".",
"append",
"(",
"factors",
"[",
"n",
"]",
")",
"n",
"+=",
"1",
"else",
":",
"new_factors",
".",
"append",
"(",
"factors",
"[",
"n",
"]",
")",
"elif",
"not",
"factors",
"[",
"n",
"]",
".",
"is_annihilation",
":",
"new_factors",
".",
"append",
"(",
"factors",
"[",
"n",
"]",
")",
"else",
":",
"if",
"factors",
"[",
"n",
"+",
"1",
"]",
".",
"is_annihilation",
":",
"new_factors",
".",
"append",
"(",
"factors",
"[",
"n",
"]",
")",
"else",
":",
"if",
"factors",
"[",
"n",
"]",
".",
"args",
"[",
"0",
"]",
"!=",
"factors",
"[",
"n",
"+",
"1",
"]",
".",
"args",
"[",
"0",
"]",
":",
"if",
"independent",
":",
"c",
"=",
"0",
"else",
":",
"c",
"=",
"AntiCommutator",
"(",
"factors",
"[",
"n",
"]",
",",
"factors",
"[",
"n",
"+",
"1",
"]",
")",
"new_factors",
".",
"append",
"(",
"-",
"factors",
"[",
"n",
"+",
"1",
"]",
"*",
"factors",
"[",
"n",
"]",
"+",
"c",
")",
"else",
":",
"c",
"=",
"AntiCommutator",
"(",
"factors",
"[",
"n",
"]",
",",
"factors",
"[",
"n",
"+",
"1",
"]",
")",
"new_factors",
".",
"append",
"(",
"-",
"factors",
"[",
"n",
"+",
"1",
"]",
"*",
"factors",
"[",
"n",
"]",
"+",
"c",
".",
"doit",
"(",
")",
")",
"n",
"+=",
"1",
"elif",
"isinstance",
"(",
"factors",
"[",
"n",
"]",
",",
"Operator",
")",
":",
"if",
"isinstance",
"(",
"factors",
"[",
"n",
"+",
"1",
"]",
",",
"(",
"BosonOp",
",",
"FermionOp",
")",
")",
":",
"new_factors",
".",
"append",
"(",
"factors",
"[",
"n",
"+",
"1",
"]",
")",
"new_factors",
".",
"append",
"(",
"factors",
"[",
"n",
"]",
")",
"n",
"+=",
"1",
"else",
":",
"new_factors",
".",
"append",
"(",
"factors",
"[",
"n",
"]",
")",
"else",
":",
"new_factors",
".",
"append",
"(",
"factors",
"[",
"n",
"]",
")",
"n",
"+=",
"1",
"if",
"n",
"==",
"len",
"(",
"factors",
")",
"-",
"1",
":",
"new_factors",
".",
"append",
"(",
"factors",
"[",
"-",
"1",
"]",
")",
"if",
"new_factors",
"==",
"factors",
":",
"return",
"product",
"else",
":",
"expr",
"=",
"Mul",
"(",
"*",
"new_factors",
")",
".",
"expand",
"(",
")",
"return",
"normal_ordered_form",
"(",
"expr",
",",
"recursive_limit",
"=",
"recursive_limit",
",",
"_recursive_depth",
"=",
"_recursive_depth",
"+",
"1",
",",
"independent",
"=",
"independent",
")"
] |
https://github.com/sympy/sympy/blob/d822fcba181155b85ff2b29fe525adbafb22b448/sympy/physics/quantum/operatorordering.py#L39-L145
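The public entry point normal_ordered_form drives this helper. A minimal sympy sketch (import paths per current sympy layout) showing the bosonic commutation relation at work, since a a† = a† a + [a, a†] = a† a + 1:

from sympy.physics.quantum import Dagger
from sympy.physics.quantum.boson import BosonOp
from sympy.physics.quantum.operatorordering import normal_ordered_form

a = BosonOp('a')
print(normal_ordered_form(a * Dagger(a)))   # 1 + Dagger(a)*a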
|
||
TarrySingh/Artificial-Intelligence-Deep-Learning-Machine-Learning-Tutorials
|
5bb97d7e3ffd913abddb4cfa7d78a1b4c868890e
|
tensorflow_dl_models/research/object_detection/core/preprocessor.py
|
python
|
random_pad_image
|
(image,
boxes,
min_image_size=None,
max_image_size=None,
pad_color=None,
seed=None)
|
return new_image, new_boxes
|
Randomly pads the image.
This function randomly pads the image with zeros. The final size of the
padded image will be between min_image_size and max_image_size.
if min_image_size is smaller than the input image size, min_image_size will
be set to the input image size. The same for max_image_size. The input image
will be located at a uniformly random location inside the padded image.
The relative location of the boxes to the original image will remain the same.
Args:
image: rank 3 float32 tensor containing 1 image -> [height, width, channels]
with pixel values varying between [0, 1].
boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4].
Boxes are in normalized form meaning their coordinates vary
between [0, 1].
Each row is in the form of [ymin, xmin, ymax, xmax].
min_image_size: a tensor of size [min_height, min_width], type tf.int32.
If passed as None, will be set to image size
[height, width].
max_image_size: a tensor of size [max_height, max_width], type tf.int32.
If passed as None, will be set to twice the
image [height * 2, width * 2].
pad_color: padding color. A rank 1 tensor of [3] with dtype=tf.float32.
if set as None, it will be set to average color of the input
image.
seed: random seed.
Returns:
image: Image shape will be [new_height, new_width, channels].
boxes: boxes which is the same rank as input boxes. Boxes are in normalized
form.
|
Randomly pads the image.
|
[
"Randomly",
"pads",
"the",
"image",
"."
] |
def random_pad_image(image,
boxes,
min_image_size=None,
max_image_size=None,
pad_color=None,
seed=None):
"""Randomly pads the image.
This function randomly pads the image with zeros. The final size of the
padded image will be between min_image_size and max_image_size.
if min_image_size is smaller than the input image size, min_image_size will
be set to the input image size. The same for max_image_size. The input image
will be located at a uniformly random location inside the padded image.
The relative location of the boxes to the original image will remain the same.
Args:
image: rank 3 float32 tensor containing 1 image -> [height, width, channels]
with pixel values varying between [0, 1].
boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4].
Boxes are in normalized form meaning their coordinates vary
between [0, 1].
Each row is in the form of [ymin, xmin, ymax, xmax].
min_image_size: a tensor of size [min_height, min_width], type tf.int32.
If passed as None, will be set to image size
[height, width].
max_image_size: a tensor of size [max_height, max_width], type tf.int32.
If passed as None, will be set to twice the
image [height * 2, width * 2].
pad_color: padding color. A rank 1 tensor of [3] with dtype=tf.float32.
if set as None, it will be set to average color of the input
image.
seed: random seed.
Returns:
image: Image shape will be [new_height, new_width, channels].
boxes: boxes which is the same rank as input boxes. Boxes are in normalized
form.
"""
if pad_color is None:
pad_color = tf.reduce_mean(image, axis=[0, 1])
image_shape = tf.shape(image)
image_height = image_shape[0]
image_width = image_shape[1]
if max_image_size is None:
max_image_size = tf.stack([image_height * 2, image_width * 2])
max_image_size = tf.maximum(max_image_size,
tf.stack([image_height, image_width]))
if min_image_size is None:
min_image_size = tf.stack([image_height, image_width])
min_image_size = tf.maximum(min_image_size,
tf.stack([image_height, image_width]))
target_height = tf.cond(
max_image_size[0] > min_image_size[0],
lambda: _random_integer(min_image_size[0], max_image_size[0], seed),
lambda: max_image_size[0])
target_width = tf.cond(
max_image_size[1] > min_image_size[1],
lambda: _random_integer(min_image_size[1], max_image_size[1], seed),
lambda: max_image_size[1])
offset_height = tf.cond(
target_height > image_height,
lambda: _random_integer(0, target_height - image_height, seed),
lambda: tf.constant(0, dtype=tf.int32))
offset_width = tf.cond(
target_width > image_width,
lambda: _random_integer(0, target_width - image_width, seed),
lambda: tf.constant(0, dtype=tf.int32))
new_image = tf.image.pad_to_bounding_box(
image,
offset_height=offset_height,
offset_width=offset_width,
target_height=target_height,
target_width=target_width)
# Setting color of the padded pixels
image_ones = tf.ones_like(image)
image_ones_padded = tf.image.pad_to_bounding_box(
image_ones,
offset_height=offset_height,
offset_width=offset_width,
target_height=target_height,
target_width=target_width)
image_color_padded = (1.0 - image_ones_padded) * pad_color
new_image += image_color_padded
# setting boxes
new_window = tf.to_float(
tf.stack([
-offset_height, -offset_width, target_height - offset_height,
target_width - offset_width
]))
new_window /= tf.to_float(
tf.stack([image_height, image_width, image_height, image_width]))
boxlist = box_list.BoxList(boxes)
new_boxlist = box_list_ops.change_coordinate_frame(boxlist, new_window)
new_boxes = new_boxlist.get()
return new_image, new_boxes
|
[
"def",
"random_pad_image",
"(",
"image",
",",
"boxes",
",",
"min_image_size",
"=",
"None",
",",
"max_image_size",
"=",
"None",
",",
"pad_color",
"=",
"None",
",",
"seed",
"=",
"None",
")",
":",
"if",
"pad_color",
"is",
"None",
":",
"pad_color",
"=",
"tf",
".",
"reduce_mean",
"(",
"image",
",",
"axis",
"=",
"[",
"0",
",",
"1",
"]",
")",
"image_shape",
"=",
"tf",
".",
"shape",
"(",
"image",
")",
"image_height",
"=",
"image_shape",
"[",
"0",
"]",
"image_width",
"=",
"image_shape",
"[",
"1",
"]",
"if",
"max_image_size",
"is",
"None",
":",
"max_image_size",
"=",
"tf",
".",
"stack",
"(",
"[",
"image_height",
"*",
"2",
",",
"image_width",
"*",
"2",
"]",
")",
"max_image_size",
"=",
"tf",
".",
"maximum",
"(",
"max_image_size",
",",
"tf",
".",
"stack",
"(",
"[",
"image_height",
",",
"image_width",
"]",
")",
")",
"if",
"min_image_size",
"is",
"None",
":",
"min_image_size",
"=",
"tf",
".",
"stack",
"(",
"[",
"image_height",
",",
"image_width",
"]",
")",
"min_image_size",
"=",
"tf",
".",
"maximum",
"(",
"min_image_size",
",",
"tf",
".",
"stack",
"(",
"[",
"image_height",
",",
"image_width",
"]",
")",
")",
"target_height",
"=",
"tf",
".",
"cond",
"(",
"max_image_size",
"[",
"0",
"]",
">",
"min_image_size",
"[",
"0",
"]",
",",
"lambda",
":",
"_random_integer",
"(",
"min_image_size",
"[",
"0",
"]",
",",
"max_image_size",
"[",
"0",
"]",
",",
"seed",
")",
",",
"lambda",
":",
"max_image_size",
"[",
"0",
"]",
")",
"target_width",
"=",
"tf",
".",
"cond",
"(",
"max_image_size",
"[",
"1",
"]",
">",
"min_image_size",
"[",
"1",
"]",
",",
"lambda",
":",
"_random_integer",
"(",
"min_image_size",
"[",
"1",
"]",
",",
"max_image_size",
"[",
"1",
"]",
",",
"seed",
")",
",",
"lambda",
":",
"max_image_size",
"[",
"1",
"]",
")",
"offset_height",
"=",
"tf",
".",
"cond",
"(",
"target_height",
">",
"image_height",
",",
"lambda",
":",
"_random_integer",
"(",
"0",
",",
"target_height",
"-",
"image_height",
",",
"seed",
")",
",",
"lambda",
":",
"tf",
".",
"constant",
"(",
"0",
",",
"dtype",
"=",
"tf",
".",
"int32",
")",
")",
"offset_width",
"=",
"tf",
".",
"cond",
"(",
"target_width",
">",
"image_width",
",",
"lambda",
":",
"_random_integer",
"(",
"0",
",",
"target_width",
"-",
"image_width",
",",
"seed",
")",
",",
"lambda",
":",
"tf",
".",
"constant",
"(",
"0",
",",
"dtype",
"=",
"tf",
".",
"int32",
")",
")",
"new_image",
"=",
"tf",
".",
"image",
".",
"pad_to_bounding_box",
"(",
"image",
",",
"offset_height",
"=",
"offset_height",
",",
"offset_width",
"=",
"offset_width",
",",
"target_height",
"=",
"target_height",
",",
"target_width",
"=",
"target_width",
")",
"# Setting color of the padded pixels",
"image_ones",
"=",
"tf",
".",
"ones_like",
"(",
"image",
")",
"image_ones_padded",
"=",
"tf",
".",
"image",
".",
"pad_to_bounding_box",
"(",
"image_ones",
",",
"offset_height",
"=",
"offset_height",
",",
"offset_width",
"=",
"offset_width",
",",
"target_height",
"=",
"target_height",
",",
"target_width",
"=",
"target_width",
")",
"image_color_padded",
"=",
"(",
"1.0",
"-",
"image_ones_padded",
")",
"*",
"pad_color",
"new_image",
"+=",
"image_color_padded",
"# setting boxes",
"new_window",
"=",
"tf",
".",
"to_float",
"(",
"tf",
".",
"stack",
"(",
"[",
"-",
"offset_height",
",",
"-",
"offset_width",
",",
"target_height",
"-",
"offset_height",
",",
"target_width",
"-",
"offset_width",
"]",
")",
")",
"new_window",
"/=",
"tf",
".",
"to_float",
"(",
"tf",
".",
"stack",
"(",
"[",
"image_height",
",",
"image_width",
",",
"image_height",
",",
"image_width",
"]",
")",
")",
"boxlist",
"=",
"box_list",
".",
"BoxList",
"(",
"boxes",
")",
"new_boxlist",
"=",
"box_list_ops",
".",
"change_coordinate_frame",
"(",
"boxlist",
",",
"new_window",
")",
"new_boxes",
"=",
"new_boxlist",
".",
"get",
"(",
")",
"return",
"new_image",
",",
"new_boxes"
] |
https://github.com/TarrySingh/Artificial-Intelligence-Deep-Learning-Machine-Learning-Tutorials/blob/5bb97d7e3ffd913abddb4cfa7d78a1b4c868890e/tensorflow_dl_models/research/object_detection/core/preprocessor.py#L1082-L1188
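A hedged usage sketch for random_pad_image above; it assumes the TF1-style graph API the source file itself uses, and the tensor values are illustrative:

import tensorflow as tf  # TF1-era API, matching tf.to_float in the snippet

image = tf.random_uniform([480, 640, 3])     # pixel values in [0, 1]
boxes = tf.constant([[0.1, 0.2, 0.6, 0.8]])  # normalized [ymin, xmin, ymax, xmax]
# the padded image lands between 1x and 2x the input size; boxes are re-normalized
padded_image, padded_boxes = random_pad_image(image, boxes, seed=42)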
|
|
kensho-technologies/graphql-compiler
|
4318443b7b2512a059f3616112bfc40bbf8eec06
|
graphql_compiler/query_formatting/cypher_formatting.py
|
python
|
_safe_cypher_string
|
(argument_value)
|
return escaped_and_quoted
|
Sanitize and represent a string argument in Cypher.
|
Sanitize and represent a string argument in Cypher.
|
[
"Sanitize",
"and",
"represent",
"a",
"string",
"argument",
"in",
"Cypher",
"."
] |
def _safe_cypher_string(argument_value):
"""Sanitize and represent a string argument in Cypher."""
if not isinstance(argument_value, six.string_types):
if isinstance(argument_value, bytes): # likely to only happen in py2
argument_value = argument_value.decode("utf-8")
else:
raise GraphQLInvalidArgumentError(
"Attempting to convert a non-string into a string: {}".format(argument_value)
)
# Using JSON encoding means that all unicode literals and special chars
# (e.g. newlines and backslashes) are replaced by appropriate escape sequences.
# Unlike with Gremlin, unescaped dollar signs $ are not a problem when contained in a
# string literal in Cypher because they do not allow for arbitrary code execution.
escaped_and_quoted = json.dumps(argument_value)
return escaped_and_quoted
|
[
"def",
"_safe_cypher_string",
"(",
"argument_value",
")",
":",
"if",
"not",
"isinstance",
"(",
"argument_value",
",",
"six",
".",
"string_types",
")",
":",
"if",
"isinstance",
"(",
"argument_value",
",",
"bytes",
")",
":",
"# likely to only happen in py2",
"argument_value",
"=",
"argument_value",
".",
"decode",
"(",
"\"utf-8\"",
")",
"else",
":",
"raise",
"GraphQLInvalidArgumentError",
"(",
"\"Attempting to convert a non-string into a string: {}\"",
".",
"format",
"(",
"argument_value",
")",
")",
"# Using JSON encoding means that all unicode literals and special chars",
"# (e.g. newlines and backslashes) are replaced by appropriate escape sequences.",
"# Unlike with Gremlin, unescaped dollar signs $ are not a problem when contained in a",
"# string literal in Cypher because they do not allow for arbitrary code execution.",
"escaped_and_quoted",
"=",
"json",
".",
"dumps",
"(",
"argument_value",
")",
"return",
"escaped_and_quoted"
] |
https://github.com/kensho-technologies/graphql-compiler/blob/4318443b7b2512a059f3616112bfc40bbf8eec06/graphql_compiler/query_formatting/cypher_formatting.py#L17-L32
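A quick worked example of the escaping above; json.dumps doubles as the quoting mechanism, so the result is a double-quoted, escaped Cypher string literal:

print(_safe_cypher_string('she said "hi"\nline two'))
# -> "she said \"hi\"\nline two"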
|
|
Blizzard/heroprotocol
|
3d36eaf44fc4c8ff3331c2ae2f1dc08a94535f1c
|
heroprotocol/versions/protocol37274.py
|
python
|
decode_replay_game_events
|
(contents)
|
Decodes and yields each game event from the contents byte string.
|
Decodes and yields each game event from the contents byte string.
|
[
"Decodes",
"and",
"yields",
"each",
"game",
"event",
"from",
"the",
"contents",
"byte",
"string",
"."
] |
def decode_replay_game_events(contents):
"""Decodes and yields each game event from the contents byte string."""
decoder = BitPackedDecoder(contents, typeinfos)
for event in _decode_event_stream(decoder,
game_eventid_typeid,
game_event_types,
decode_user_id=True):
yield event
|
[
"def",
"decode_replay_game_events",
"(",
"contents",
")",
":",
"decoder",
"=",
"BitPackedDecoder",
"(",
"contents",
",",
"typeinfos",
")",
"for",
"event",
"in",
"_decode_event_stream",
"(",
"decoder",
",",
"game_eventid_typeid",
",",
"game_event_types",
",",
"decode_user_id",
"=",
"True",
")",
":",
"yield",
"event"
] |
https://github.com/Blizzard/heroprotocol/blob/3d36eaf44fc4c8ff3331c2ae2f1dc08a94535f1c/heroprotocol/versions/protocol37274.py#L415-L422
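A hedged driver sketch for decode_replay_game_events; it assumes the replay archive is opened with mpyq, the MPQ reader heroprotocol's own CLI relies on, and the file name is illustrative:

import mpyq
from heroprotocol.versions import protocol37274

archive = mpyq.MPQArchive('example.StormReplay')
contents = archive.read_file('replay.game.events')
for event in protocol37274.decode_replay_game_events(contents):
    print(event['_event'])  # each yielded event is a decoded dict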
|
||
lyft/cartography
|
921a790d686c679ab5d8936b07e167fd424ee8d6
|
cartography/driftdetect/get_states.py
|
python
|
get_query_state
|
(session, query_directory, state_serializer, storage, filename)
|
return state
|
Gets the most recent state of a query.
:type session: neo4j session.
:param session: neo4j session to connect to.
:type query_directory: String.
:param query_directory: Path to query directory.
:type state_serializer: Schema
:param state_serializer: Schema to serialize and deserialize states.
:type storage: Storage Object.
:param storage: Storage object that supports loading, writing, and walking.
:type filename: String.
:param filename: Path to filename.
:return: The created state.
|
Gets the most recent state of a query.
|
[
"Gets",
"the",
"most",
"recent",
"state",
"of",
"a",
"query",
"."
] |
def get_query_state(session, query_directory, state_serializer, storage, filename):
"""
Gets the most recent state of a query.
:type session: neo4j session.
:param session: neo4j session to connect to.
:type query_directory: String.
:param query_directory: Path to query directory.
:type state_serializer: Schema
:param state_serializer: Schema to serialize and deserialize states.
:type storage: Storage Object.
:param storage: Storage object that supports loading, writing, and walking.
:type filename: String.
:param filename: Path to filename.
:return: The created state.
"""
state_data = storage.load(os.path.join(query_directory, "template.json"))
state = state_serializer.load(state_data)
get_state(session, state)
new_state_data = state_serializer.dump(state)
fp = os.path.join(query_directory, filename)
storage.write(new_state_data, fp)
return state
|
[
"def",
"get_query_state",
"(",
"session",
",",
"query_directory",
",",
"state_serializer",
",",
"storage",
",",
"filename",
")",
":",
"state_data",
"=",
"storage",
".",
"load",
"(",
"os",
".",
"path",
".",
"join",
"(",
"query_directory",
",",
"\"template.json\"",
")",
")",
"state",
"=",
"state_serializer",
".",
"load",
"(",
"state_data",
")",
"get_state",
"(",
"session",
",",
"state",
")",
"new_state_data",
"=",
"state_serializer",
".",
"dump",
"(",
"state",
")",
"fp",
"=",
"os",
".",
"path",
".",
"join",
"(",
"query_directory",
",",
"filename",
")",
"storage",
".",
"write",
"(",
"new_state_data",
",",
"fp",
")",
"return",
"state"
] |
https://github.com/lyft/cartography/blob/921a790d686c679ab5d8936b07e167fd424ee8d6/cartography/driftdetect/get_states.py#L93-L115
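A hedged call sketch for get_query_state; the serializer and storage arguments are stand-ins for whatever cartography's drift-detect CLI wires up, and the connection details are illustrative:

from neo4j import GraphDatabase

driver = GraphDatabase.driver('bolt://localhost:7687', auth=('neo4j', 'secret'))
with driver.session() as session:
    # state_serializer and storage are placeholders for the real drift-detect objects
    state = get_query_state(session, 'queries/expired_keys',
                            state_serializer, storage, 'state.json')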
|
|
SintefManufacturing/python-urx
|
2250bda067d79692fb7515e78005d36aad5a0545
|
tools/fakerobot.py
|
python
|
Server.init
|
(self)
|
__init__ should not be overridden
|
__init__ should not be overridden
|
[
"__init__",
"should",
"not",
"be",
"overriden"
] |
def init(self):
"""
__init__ should not be overridden
"""
self.handlers = []
|
[
"def",
"init",
"(",
"self",
")",
":",
"self",
".",
"handlers",
"=",
"[",
"]"
] |
https://github.com/SintefManufacturing/python-urx/blob/2250bda067d79692fb7515e78005d36aad5a0545/tools/fakerobot.py#L31-L35
|
||
jazzband/tablib
|
94ffe67e50eb5bfd99d73a4f010e463478a98928
|
src/tablib/core.py
|
python
|
Dataset.append_separator
|
(self, text='-')
|
Adds a :ref:`separator <separators>` to the :class:`Dataset`.
|
Adds a :ref:`separator <separators>` to the :class:`Dataset`.
|
[
"Adds",
"a",
":",
"ref",
":",
"separator",
"<separators",
">",
"to",
"the",
":",
"class",
":",
"Dataset",
"."
] |
def append_separator(self, text='-'):
"""Adds a :ref:`separator <separators>` to the :class:`Dataset`."""
# change offsets if headers are or aren't defined
if not self.headers:
index = self.height if self.height else 0
else:
index = (self.height + 1) if self.height else 1
self.insert_separator(index, text)
|
[
"def",
"append_separator",
"(",
"self",
",",
"text",
"=",
"'-'",
")",
":",
"# change offsets if headers are or aren't defined",
"if",
"not",
"self",
".",
"headers",
":",
"index",
"=",
"self",
".",
"height",
"if",
"self",
".",
"height",
"else",
"0",
"else",
":",
"index",
"=",
"(",
"self",
".",
"height",
"+",
"1",
")",
"if",
"self",
".",
"height",
"else",
"1",
"self",
".",
"insert_separator",
"(",
"index",
",",
"text",
")"
] |
https://github.com/jazzband/tablib/blob/94ffe67e50eb5bfd99d73a4f010e463478a98928/src/tablib/core.py#L578-L587
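A minimal tablib example of append_separator above; with headers defined, the offset logic places the separator after the rows appended so far:

import tablib

data = tablib.Dataset(headers=['name', 'age'])
data.append(('Ada', 36))
data.append_separator('---')  # index is shifted by one to account for the header row
data.append(('Grace', 45))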
|
||
LagoLunatic/wwrando
|
33164143eb9f51c3015be3e31402a79dfcebacfd
|
wwlib/jpc.py
|
python
|
BSP1.read_color_table
|
(self, color_data_offset, color_data_count)
|
return color_table
|
[] |
def read_color_table(self, color_data_offset, color_data_count):
color_table = []
for i in range(color_data_count):
keyframe_time = read_u16(self.data, color_data_offset+i*6 + 0)
r = read_u8(self.data, color_data_offset+i*6 + 2)
g = read_u8(self.data, color_data_offset+i*6 + 3)
b = read_u8(self.data, color_data_offset+i*6 + 4)
a = read_u8(self.data, color_data_offset+i*6 + 5)
color_table.append(ColorAnimationKeyframe(keyframe_time, (r, g, b, a)))
return color_table
|
[
"def",
"read_color_table",
"(",
"self",
",",
"color_data_offset",
",",
"color_data_count",
")",
":",
"color_table",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"color_data_count",
")",
":",
"keyframe_time",
"=",
"read_u16",
"(",
"self",
".",
"data",
",",
"color_data_offset",
"+",
"i",
"*",
"6",
"+",
"0",
")",
"r",
"=",
"read_u8",
"(",
"self",
".",
"data",
",",
"color_data_offset",
"+",
"i",
"*",
"6",
"+",
"2",
")",
"g",
"=",
"read_u8",
"(",
"self",
".",
"data",
",",
"color_data_offset",
"+",
"i",
"*",
"6",
"+",
"3",
")",
"b",
"=",
"read_u8",
"(",
"self",
".",
"data",
",",
"color_data_offset",
"+",
"i",
"*",
"6",
"+",
"4",
")",
"a",
"=",
"read_u8",
"(",
"self",
".",
"data",
",",
"color_data_offset",
"+",
"i",
"*",
"6",
"+",
"5",
")",
"color_table",
".",
"append",
"(",
"ColorAnimationKeyframe",
"(",
"keyframe_time",
",",
"(",
"r",
",",
"g",
",",
"b",
",",
"a",
")",
")",
")",
"return",
"color_table"
] |
https://github.com/LagoLunatic/wwrando/blob/33164143eb9f51c3015be3e31402a79dfcebacfd/wwlib/jpc.py#L309-L319
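For clarity, each keyframe record read above is 6 bytes: a u16 time followed by four u8 color channels. A struct-based decode of one record, assuming the big-endian layout wwrando's read_* helpers use for GameCube data:

import struct

record = bytes.fromhex('000aff8040ff')  # one sample 6-byte keyframe
# >H = big-endian u16 keyframe time, BBBB = r, g, b, a
keyframe_time, r, g, b, a = struct.unpack_from('>HBBBB', record, 0)
print(keyframe_time, (r, g, b, a))  # 10 (255, 128, 64, 255)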
|
|||
vlachoudis/bCNC
|
67126b4894dabf6579baf47af8d0f9b7de35e6e3
|
bCNC/lib/svg_elements.py
|
python
|
Shape.__iadd__
|
(self, other)
|
return NotImplemented
|
[] |
def __iadd__(self, other):
if isinstance(other, Shape):
return Path(self) + Path(other)
return NotImplemented
|
[
"def",
"__iadd__",
"(",
"self",
",",
"other",
")",
":",
"if",
"isinstance",
"(",
"other",
",",
"Shape",
")",
":",
"return",
"Path",
"(",
"self",
")",
"+",
"Path",
"(",
"other",
")",
"return",
"NotImplemented"
] |
https://github.com/vlachoudis/bCNC/blob/67126b4894dabf6579baf47af8d0f9b7de35e6e3/bCNC/lib/svg_elements.py#L3174-L3177
|
|||
wal-e/wal-e
|
6c43976e13c619ebdddd0d869301c42ed131e983
|
wal_e/log_help.py
|
python
|
WalELogger._fmt_structured
|
(d)
|
return ' '.join([timeEntry, pidEntry] + rest)
|
Formats '{k1:v1, k2:v2}' => 'time=... pid=... k1=v1 k2=v2'
Output is lexically sorted, *except* the time and pid always
come first, to assist with human scanning of the data.
|
Formats '{k1:v1, k2:v2}' => 'time=... pid=... k1=v1 k2=v2'
|
[
"Formats",
"{",
"k1",
":",
"v1",
"k2",
":",
"v2",
"}",
"=",
">",
"time",
"=",
"...",
"pid",
"=",
"...",
"k1",
"=",
"v1",
"k2",
"=",
"v2"
] |
def _fmt_structured(d):
"""Formats '{k1:v1, k2:v2}' => 'time=... pid=... k1=v1 k2=v2'
Output is lexically sorted, *except* the time and pid always
come first, to assist with human scanning of the data.
"""
timeEntry = datetime.datetime.utcnow().strftime(
"time=%Y-%m-%dT%H:%M:%S.%f-00")
pidEntry = "pid=" + str(os.getpid())
rest = sorted('='.join([str(k), str(v)])
for (k, v) in list(d.items()))
return ' '.join([timeEntry, pidEntry] + rest)
|
[
"def",
"_fmt_structured",
"(",
"d",
")",
":",
"timeEntry",
"=",
"datetime",
".",
"datetime",
".",
"utcnow",
"(",
")",
".",
"strftime",
"(",
"\"time=%Y-%m-%dT%H:%M:%S.%f-00\"",
")",
"pidEntry",
"=",
"\"pid=\"",
"+",
"str",
"(",
"os",
".",
"getpid",
"(",
")",
")",
"rest",
"=",
"sorted",
"(",
"'='",
".",
"join",
"(",
"[",
"str",
"(",
"k",
")",
",",
"str",
"(",
"v",
")",
"]",
")",
"for",
"(",
"k",
",",
"v",
")",
"in",
"list",
"(",
"d",
".",
"items",
"(",
")",
")",
")",
"return",
"' '",
".",
"join",
"(",
"[",
"timeEntry",
",",
"pidEntry",
"]",
"+",
"rest",
")"
] |
https://github.com/wal-e/wal-e/blob/6c43976e13c619ebdddd0d869301c42ed131e983/wal_e/log_help.py#L145-L158
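A worked example of the formatter above; the time and pid vary per run, so the values shown are placeholders:

# keys sort lexically after the fixed time/pid prefix
print(_fmt_structured({'state': 'begin', 'action': 'upload'}))
# -> time=2024-01-01T00:00:00.000000-00 pid=1234 action=upload state=begin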
|
|
rucio/rucio
|
6d0d358e04f5431f0b9a98ae40f31af0ddff4833
|
lib/rucio/core/did.py
|
python
|
__add_files_to_dataset
|
(scope, name, files, account, rse_id, ignore_duplicate=False, session=None)
|
Add files to dataset.
:param scope: The scope name.
:param name: The data identifier name.
:param files: The list of files.
:param account: The account owner.
:param rse_id: The RSE id for the replicas.
:param ignore_duplicate: If True, ignore duplicate entries.
:param session: The database session in use.
:returns: List of files attached (excluding the ones that were already attached to the dataset).
|
Add files to dataset.
|
[
"Add",
"files",
"to",
"dataset",
"."
] |
def __add_files_to_dataset(scope, name, files, account, rse_id, ignore_duplicate=False, session=None):
"""
Add files to dataset.
:param scope: The scope name.
:param name: The data identifier name.
:param files: The list of files.
:param account: The account owner.
:param rse_id: The RSE id for the replicas.
:param ignore_duplicate: If True, ignore duplicate entries.
:param session: The database session in use.
:returns: List of files attached (excluding the ones that were already attached to the dataset).
"""
# Get metadata from dataset
try:
dataset_meta = validate_name(scope=scope, name=name, did_type='D')
except Exception:
dataset_meta = None
if rse_id:
rucio.core.replica.add_replicas(rse_id=rse_id, files=files, dataset_meta=dataset_meta,
account=account, session=session)
files = get_files(files=files, session=session)
existing_content = []
if ignore_duplicate:
content_query = session.query(models.DataIdentifierAssociation.scope,
models.DataIdentifierAssociation.name,
models.DataIdentifierAssociation.child_scope,
models.DataIdentifierAssociation.child_name).\
with_hint(models.DataIdentifierAssociation, "INDEX(CONTENTS CONTENTS_PK)", 'oracle')
content_condition = []
for file in files:
content_condition.append(and_(models.DataIdentifierAssociation.scope == scope,
models.DataIdentifierAssociation.name == name,
models.DataIdentifierAssociation.child_scope == file['scope'],
models.DataIdentifierAssociation.child_name == file['name']))
for row in content_query.filter(or_(*content_condition)):
existing_content.append(row)
contents = []
added_archives_condition = []
for file in files:
if not existing_content or (scope, name, file['scope'], file['name']) not in existing_content:
contents.append({'scope': scope, 'name': name, 'child_scope': file['scope'],
'child_name': file['name'], 'bytes': file['bytes'],
'adler32': file.get('adler32'),
'guid': file['guid'], 'events': file['events'],
'md5': file.get('md5'), 'did_type': DIDType.DATASET,
'child_type': DIDType.FILE, 'rule_evaluation': True})
added_archives_condition.append(
and_(models.DataIdentifier.scope == file['scope'],
models.DataIdentifier.name == file['name'],
models.DataIdentifier.is_archive == true()))
# if any of the attached files is an archive, set is_archive = True on the dataset
if session.query(models.DataIdentifier). \
with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle'). \
filter(or_(*added_archives_condition)). \
first() is not None:
session.query(models.DataIdentifier). \
filter(models.DataIdentifier.scope == scope). \
filter(models.DataIdentifier.name == name). \
filter(or_(models.DataIdentifier.is_archive.is_(None),
models.DataIdentifier.is_archive == false())). \
update({'is_archive': True})
try:
contents and session.bulk_insert_mappings(models.DataIdentifierAssociation, contents)
session.flush()
return contents
except IntegrityError as error:
if match('.*IntegrityError.*ORA-02291: integrity constraint .*CONTENTS_CHILD_ID_FK.*violated - parent key not found.*', error.args[0]) \
or match('.*IntegrityError.*1452.*Cannot add or update a child row: a foreign key constraint fails.*', error.args[0]) \
or match('.*IntegrityError.*foreign key constraints? failed.*', error.args[0]) \
or match('.*IntegrityError.*insert or update on table.*violates foreign key constraint.*', error.args[0]):
raise exception.DataIdentifierNotFound("Data identifier not found")
elif match('.*IntegrityError.*ORA-00001: unique constraint .*CONTENTS_PK.*violated.*', error.args[0]) \
or match('.*IntegrityError.*UNIQUE constraint failed: contents.scope, contents.name, contents.child_scope, contents.child_name.*', error.args[0])\
or match('.*IntegrityError.*duplicate key value violates unique constraint.*', error.args[0]) \
or match('.*UniqueViolation.*duplicate key value violates unique constraint.*', error.args[0]) \
or match('.*IntegrityError.*1062.*Duplicate entry .*for key.*PRIMARY.*', error.args[0]) \
or match('.*duplicate entry.*key.*PRIMARY.*', error.args[0]) \
or match('.*IntegrityError.*columns? .*not unique.*', error.args[0]):
raise exception.FileAlreadyExists(error.args)
else:
raise exception.RucioException(error.args)
|
[
"def",
"__add_files_to_dataset",
"(",
"scope",
",",
"name",
",",
"files",
",",
"account",
",",
"rse_id",
",",
"ignore_duplicate",
"=",
"False",
",",
"session",
"=",
"None",
")",
":",
"# Get metadata from dataset",
"try",
":",
"dataset_meta",
"=",
"validate_name",
"(",
"scope",
"=",
"scope",
",",
"name",
"=",
"name",
",",
"did_type",
"=",
"'D'",
")",
"except",
"Exception",
":",
"dataset_meta",
"=",
"None",
"if",
"rse_id",
":",
"rucio",
".",
"core",
".",
"replica",
".",
"add_replicas",
"(",
"rse_id",
"=",
"rse_id",
",",
"files",
"=",
"files",
",",
"dataset_meta",
"=",
"dataset_meta",
",",
"account",
"=",
"account",
",",
"session",
"=",
"session",
")",
"files",
"=",
"get_files",
"(",
"files",
"=",
"files",
",",
"session",
"=",
"session",
")",
"existing_content",
"=",
"[",
"]",
"if",
"ignore_duplicate",
":",
"content_query",
"=",
"session",
".",
"query",
"(",
"models",
".",
"DataIdentifierAssociation",
".",
"scope",
",",
"models",
".",
"DataIdentifierAssociation",
".",
"name",
",",
"models",
".",
"DataIdentifierAssociation",
".",
"child_scope",
",",
"models",
".",
"DataIdentifierAssociation",
".",
"child_name",
")",
".",
"with_hint",
"(",
"models",
".",
"DataIdentifierAssociation",
",",
"\"INDEX(CONTENTS CONTENTS_PK)\"",
",",
"'oracle'",
")",
"content_condition",
"=",
"[",
"]",
"for",
"file",
"in",
"files",
":",
"content_condition",
".",
"append",
"(",
"and_",
"(",
"models",
".",
"DataIdentifierAssociation",
".",
"scope",
"==",
"scope",
",",
"models",
".",
"DataIdentifierAssociation",
".",
"name",
"==",
"name",
",",
"models",
".",
"DataIdentifierAssociation",
".",
"child_scope",
"==",
"file",
"[",
"'scope'",
"]",
",",
"models",
".",
"DataIdentifierAssociation",
".",
"child_name",
"==",
"file",
"[",
"'name'",
"]",
")",
")",
"for",
"row",
"in",
"content_query",
".",
"filter",
"(",
"or_",
"(",
"*",
"content_condition",
")",
")",
":",
"existing_content",
".",
"append",
"(",
"row",
")",
"contents",
"=",
"[",
"]",
"added_archives_condition",
"=",
"[",
"]",
"for",
"file",
"in",
"files",
":",
"if",
"not",
"existing_content",
"or",
"(",
"scope",
",",
"name",
",",
"file",
"[",
"'scope'",
"]",
",",
"file",
"[",
"'name'",
"]",
")",
"not",
"in",
"existing_content",
":",
"contents",
".",
"append",
"(",
"{",
"'scope'",
":",
"scope",
",",
"'name'",
":",
"name",
",",
"'child_scope'",
":",
"file",
"[",
"'scope'",
"]",
",",
"'child_name'",
":",
"file",
"[",
"'name'",
"]",
",",
"'bytes'",
":",
"file",
"[",
"'bytes'",
"]",
",",
"'adler32'",
":",
"file",
".",
"get",
"(",
"'adler32'",
")",
",",
"'guid'",
":",
"file",
"[",
"'guid'",
"]",
",",
"'events'",
":",
"file",
"[",
"'events'",
"]",
",",
"'md5'",
":",
"file",
".",
"get",
"(",
"'md5'",
")",
",",
"'did_type'",
":",
"DIDType",
".",
"DATASET",
",",
"'child_type'",
":",
"DIDType",
".",
"FILE",
",",
"'rule_evaluation'",
":",
"True",
"}",
")",
"added_archives_condition",
".",
"append",
"(",
"and_",
"(",
"models",
".",
"DataIdentifier",
".",
"scope",
"==",
"file",
"[",
"'scope'",
"]",
",",
"models",
".",
"DataIdentifier",
".",
"name",
"==",
"file",
"[",
"'name'",
"]",
",",
"models",
".",
"DataIdentifier",
".",
"is_archive",
"==",
"true",
"(",
")",
")",
")",
"# if any of the attached files is an archive, set is_archive = True on the dataset",
"if",
"session",
".",
"query",
"(",
"models",
".",
"DataIdentifier",
")",
".",
"with_hint",
"(",
"models",
".",
"DataIdentifier",
",",
"\"INDEX(DIDS DIDS_PK)\"",
",",
"'oracle'",
")",
".",
"filter",
"(",
"or_",
"(",
"*",
"added_archives_condition",
")",
")",
".",
"first",
"(",
")",
"is",
"not",
"None",
":",
"session",
".",
"query",
"(",
"models",
".",
"DataIdentifier",
")",
".",
"filter",
"(",
"models",
".",
"DataIdentifier",
".",
"scope",
"==",
"scope",
")",
".",
"filter",
"(",
"models",
".",
"DataIdentifier",
".",
"name",
"==",
"name",
")",
".",
"filter",
"(",
"or_",
"(",
"models",
".",
"DataIdentifier",
".",
"is_archive",
".",
"is_",
"(",
"None",
")",
",",
"models",
".",
"DataIdentifier",
".",
"is_archive",
"==",
"false",
"(",
")",
")",
")",
".",
"update",
"(",
"{",
"'is_archive'",
":",
"True",
"}",
")",
"try",
":",
"contents",
"and",
"session",
".",
"bulk_insert_mappings",
"(",
"models",
".",
"DataIdentifierAssociation",
",",
"contents",
")",
"session",
".",
"flush",
"(",
")",
"return",
"contents",
"except",
"IntegrityError",
"as",
"error",
":",
"if",
"match",
"(",
"'.*IntegrityError.*ORA-02291: integrity constraint .*CONTENTS_CHILD_ID_FK.*violated - parent key not found.*'",
",",
"error",
".",
"args",
"[",
"0",
"]",
")",
"or",
"match",
"(",
"'.*IntegrityError.*1452.*Cannot add or update a child row: a foreign key constraint fails.*'",
",",
"error",
".",
"args",
"[",
"0",
"]",
")",
"or",
"match",
"(",
"'.*IntegrityError.*foreign key constraints? failed.*'",
",",
"error",
".",
"args",
"[",
"0",
"]",
")",
"or",
"match",
"(",
"'.*IntegrityError.*insert or update on table.*violates foreign key constraint.*'",
",",
"error",
".",
"args",
"[",
"0",
"]",
")",
":",
"raise",
"exception",
".",
"DataIdentifierNotFound",
"(",
"\"Data identifier not found\"",
")",
"elif",
"match",
"(",
"'.*IntegrityError.*ORA-00001: unique constraint .*CONTENTS_PK.*violated.*'",
",",
"error",
".",
"args",
"[",
"0",
"]",
")",
"or",
"match",
"(",
"'.*IntegrityError.*UNIQUE constraint failed: contents.scope, contents.name, contents.child_scope, contents.child_name.*'",
",",
"error",
".",
"args",
"[",
"0",
"]",
")",
"or",
"match",
"(",
"'.*IntegrityError.*duplicate key value violates unique constraint.*'",
",",
"error",
".",
"args",
"[",
"0",
"]",
")",
"or",
"match",
"(",
"'.*UniqueViolation.*duplicate key value violates unique constraint.*'",
",",
"error",
".",
"args",
"[",
"0",
"]",
")",
"or",
"match",
"(",
"'.*IntegrityError.*1062.*Duplicate entry .*for key.*PRIMARY.*'",
",",
"error",
".",
"args",
"[",
"0",
"]",
")",
"or",
"match",
"(",
"'.*duplicate entry.*key.*PRIMARY.*'",
",",
"error",
".",
"args",
"[",
"0",
"]",
")",
"or",
"match",
"(",
"'.*IntegrityError.*columns? .*not unique.*'",
",",
"error",
".",
"args",
"[",
"0",
"]",
")",
":",
"raise",
"exception",
".",
"FileAlreadyExists",
"(",
"error",
".",
"args",
")",
"else",
":",
"raise",
"exception",
".",
"RucioException",
"(",
"error",
".",
"args",
")"
] |
https://github.com/rucio/rucio/blob/6d0d358e04f5431f0b9a98ae40f31af0ddff4833/lib/rucio/core/did.py#L359-L446
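A hedged sketch of the payload __add_files_to_dataset expects; the keys mirror the contents mapping built in the function, the values are made up, and session stands in for a live SQLAlchemy session:

files = [{
    'scope': 'user.jdoe', 'name': 'file_001.root',
    'bytes': 1048576, 'adler32': '0cc737eb',
    'md5': None, 'guid': None, 'events': None,
}]
# rse_id=None skips replica creation and only attaches the files
__add_files_to_dataset(scope='user.jdoe', name='dataset_A', files=files,
                       account='jdoe', rse_id=None, session=session)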
|
||
Jenyay/outwiker
|
50530cf7b3f71480bb075b2829bc0669773b835b
|
src/outwiker/gui/controls/ultimatelistctrl.py
|
python
|
UltimateListMainWindow.GetItemVisited
|
(self, item)
|
return item.GetVisited()
|
Returns whether a hypertext item was visited.
:param `item`: an instance of :class:`UltimateListItem`.
|
Returns whether a hypertext item was visited.
|
[
"Returns",
"whether",
"an",
"hypertext",
"item",
"was",
"visited",
"."
] |
def GetItemVisited(self, item):
"""
Returns whether a hypertext item was visited.
:param `item`: an instance of :class:`UltimateListItem`.
"""
item = self.GetItem(item, item._col)
return item.GetVisited()
|
[
"def",
"GetItemVisited",
"(",
"self",
",",
"item",
")",
":",
"item",
"=",
"self",
".",
"GetItem",
"(",
"item",
",",
"item",
".",
"_col",
")",
"return",
"item",
".",
"GetVisited",
"(",
")"
] |
https://github.com/Jenyay/outwiker/blob/50530cf7b3f71480bb075b2829bc0669773b835b/src/outwiker/gui/controls/ultimatelistctrl.py#L9230-L9238
|
|
bitcoin-core/HWI
|
6871946c2176f2f9777b6ac8f0614d96d99bfa0e
|
hwilib/_gui.py
|
python
|
BitBox02NoiseConfig.show_pairing
|
(self, code: str, device_response: Callable[[], bool])
|
return dialog.result() == QDialog.Accepted
|
[] |
def show_pairing(self, code: str, device_response: Callable[[], bool]) -> bool:
dialog = BitBox02PairingDialog(code, device_response)
dialog.show()
# render the window since the next operation is blocking
while True:
QCoreApplication.processEvents()
if dialog.painted:
break
time.sleep(0.1)
if not device_response():
return False
dialog.enable_buttons()
dialog.exec_()
return dialog.result() == QDialog.Accepted
|
[
"def",
"show_pairing",
"(",
"self",
",",
"code",
":",
"str",
",",
"device_response",
":",
"Callable",
"[",
"[",
"]",
",",
"bool",
"]",
")",
"->",
"bool",
":",
"dialog",
"=",
"BitBox02PairingDialog",
"(",
"code",
",",
"device_response",
")",
"dialog",
".",
"show",
"(",
")",
"# render the window since the next operation is blocking",
"while",
"True",
":",
"QCoreApplication",
".",
"processEvents",
"(",
")",
"if",
"dialog",
".",
"painted",
":",
"break",
"time",
".",
"sleep",
"(",
"0.1",
")",
"if",
"not",
"device_response",
"(",
")",
":",
"return",
"False",
"dialog",
".",
"enable_buttons",
"(",
")",
"dialog",
".",
"exec_",
"(",
")",
"return",
"dialog",
".",
"result",
"(",
")",
"==",
"QDialog",
".",
"Accepted"
] |
https://github.com/bitcoin-core/HWI/blob/6871946c2176f2f9777b6ac8f0614d96d99bfa0e/hwilib/_gui.py#L246-L259
|
|||
ubuntu/microk8s
|
9d61706b8cee7476e66093bb60ebea2c81bc3723
|
scripts/wrappers/common/utils.py
|
python
|
xable
|
(action: str, addons: list, xabled_addons: list)
|
Enables or disables the given addons.
Collated into a single function since the logic is identical other than
the script names.
|
Enables or disables the given addons.
|
[
"Enables",
"or",
"disables",
"the",
"given",
"addons",
"."
] |
def xable(action: str, addons: list, xabled_addons: list):
"""Enables or disables the given addons.
Collated into a single function since the logic is identical other than
the script names.
"""
actions = Path(__file__).absolute().parent / "../../../actions"
existing_addons = {sh.with_suffix("").name[7:] for sh in actions.glob("enable.*.sh")}
# Backwards compatibility with enabling multiple addons at once, e.g.
# `microk8s.enable foo bar:"baz"`
if all(a.split(":")[0] in existing_addons for a in addons) and len(addons) > 1:
for addon in addons:
if addon in xabled_addons and addon != "kubeflow":
click.echo("Addon %s is already %sd." % (addon, action))
else:
addon, *args = addon.split(":")
wait_for_ready(timeout=30)
p = subprocess.run([str(actions / ("%s.%s.sh" % (action, addon)))] + args)
if p.returncode:
sys.exit(p.returncode)
wait_for_ready(timeout=30)
# The new way of xabling addons, that allows for unix-style argument passing,
# such as `microk8s.enable foo --bar`.
else:
addon, *args = addons[0].split(":")
if addon in xabled_addons and addon != "kubeflow":
click.echo("Addon %s is already %sd." % (addon, action))
sys.exit(0)
if addon not in existing_addons:
click.echo("Nothing to do for `%s`." % addon, err=True)
sys.exit(1)
if args and addons[1:]:
click.echo(
"Can't pass string arguments and flag arguments simultaneously!\n"
"{0} an addon with only one argument style at a time:\n"
"\n"
" microk8s {1} foo:'bar'\n"
"or\n"
" microk8s {1} foo --bar\n".format(action.title(), action)
)
sys.exit(1)
wait_for_ready(timeout=30)
script = [str(actions / ("%s.%s.sh" % (action, addon)))]
if args:
p = subprocess.run(script + args)
else:
p = subprocess.run(script + list(addons[1:]))
if p.returncode:
sys.exit(p.returncode)
wait_for_ready(timeout=30)
|
[
"def",
"xable",
"(",
"action",
":",
"str",
",",
"addons",
":",
"list",
",",
"xabled_addons",
":",
"list",
")",
":",
"actions",
"=",
"Path",
"(",
"__file__",
")",
".",
"absolute",
"(",
")",
".",
"parent",
"/",
"\"../../../actions\"",
"existing_addons",
"=",
"{",
"sh",
".",
"with_suffix",
"(",
"\"\"",
")",
".",
"name",
"[",
"7",
":",
"]",
"for",
"sh",
"in",
"actions",
".",
"glob",
"(",
"\"enable.*.sh\"",
")",
"}",
"# Backwards compatibility with enabling multiple addons at once, e.g.",
"# `microk8s.enable foo bar:\"baz\"`",
"if",
"all",
"(",
"a",
".",
"split",
"(",
"\":\"",
")",
"[",
"0",
"]",
"in",
"existing_addons",
"for",
"a",
"in",
"addons",
")",
"and",
"len",
"(",
"addons",
")",
">",
"1",
":",
"for",
"addon",
"in",
"addons",
":",
"if",
"addon",
"in",
"xabled_addons",
"and",
"addon",
"!=",
"\"kubeflow\"",
":",
"click",
".",
"echo",
"(",
"\"Addon %s is already %sd.\"",
"%",
"(",
"addon",
",",
"action",
")",
")",
"else",
":",
"addon",
",",
"",
"*",
"args",
"=",
"addon",
".",
"split",
"(",
"\":\"",
")",
"wait_for_ready",
"(",
"timeout",
"=",
"30",
")",
"p",
"=",
"subprocess",
".",
"run",
"(",
"[",
"str",
"(",
"actions",
"/",
"(",
"\"%s.%s.sh\"",
"%",
"(",
"action",
",",
"addon",
")",
")",
")",
"]",
"+",
"args",
")",
"if",
"p",
".",
"returncode",
":",
"sys",
".",
"exit",
"(",
"p",
".",
"returncode",
")",
"wait_for_ready",
"(",
"timeout",
"=",
"30",
")",
"# The new way of xabling addons, that allows for unix-style argument passing,",
"# such as `microk8s.enable foo --bar`.",
"else",
":",
"addon",
",",
"",
"*",
"args",
"=",
"addons",
"[",
"0",
"]",
".",
"split",
"(",
"\":\"",
")",
"if",
"addon",
"in",
"xabled_addons",
"and",
"addon",
"!=",
"\"kubeflow\"",
":",
"click",
".",
"echo",
"(",
"\"Addon %s is already %sd.\"",
"%",
"(",
"addon",
",",
"action",
")",
")",
"sys",
".",
"exit",
"(",
"0",
")",
"if",
"addon",
"not",
"in",
"existing_addons",
":",
"click",
".",
"echo",
"(",
"\"Nothing to do for `%s`.\"",
"%",
"addon",
",",
"err",
"=",
"True",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"if",
"args",
"and",
"addons",
"[",
"1",
":",
"]",
":",
"click",
".",
"echo",
"(",
"\"Can't pass string arguments and flag arguments simultaneously!\\n\"",
"\"{0} an addon with only one argument style at a time:\\n\"",
"\"\\n\"",
"\" microk8s {1} foo:'bar'\\n\"",
"\"or\\n\"",
"\" microk8s {1} foo --bar\\n\"",
".",
"format",
"(",
"action",
".",
"title",
"(",
")",
",",
"action",
")",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"wait_for_ready",
"(",
"timeout",
"=",
"30",
")",
"script",
"=",
"[",
"str",
"(",
"actions",
"/",
"(",
"\"%s.%s.sh\"",
"%",
"(",
"action",
",",
"addon",
")",
")",
")",
"]",
"if",
"args",
":",
"p",
"=",
"subprocess",
".",
"run",
"(",
"script",
"+",
"args",
")",
"else",
":",
"p",
"=",
"subprocess",
".",
"run",
"(",
"script",
"+",
"list",
"(",
"addons",
"[",
"1",
":",
"]",
")",
")",
"if",
"p",
".",
"returncode",
":",
"sys",
".",
"exit",
"(",
"p",
".",
"returncode",
")",
"wait_for_ready",
"(",
"timeout",
"=",
"30",
")"
] |
https://github.com/ubuntu/microk8s/blob/9d61706b8cee7476e66093bb60ebea2c81bc3723/scripts/wrappers/common/utils.py#L243-L300
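A hedged illustration of the two argument styles xable distinguishes; the addon names and values are examples:

# legacy colon style: the value travels inside the addon token itself
xable('enable', ['dns:8.8.8.8'], xabled_addons=[])

# unix style: flags arrive as extra list entries after the addon name
xable('enable', ['ingress', '--default-ssl-certificate=ns/secret'], xabled_addons=[])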
|
||
plone/guillotina
|
57ad54988f797a93630e424fd4b6a75fa26410af
|
guillotina/utils/execute.py
|
python
|
in_queue
|
(func: Callable[..., Coroutine[Any, Any, Any]], *args, **kwargs)
|
return ExecuteContext(util.add, partial(func, *args, **kwargs))
|
Execute view-type object(context, request) in the async queue.
:param func: coroutine function to be queued
:rtype: ExecuteContext
|
Execute view-type object(context, request) in the async queue.
|
[
"Execute",
"view",
"-",
"type",
"object",
"(",
"context",
"request",
")",
"in",
"the",
"async",
"queue",
"."
] |
def in_queue(func: Callable[..., Coroutine[Any, Any, Any]], *args, **kwargs) -> ExecuteContext:
"""
Execute view-type object(context, request) in the async queue.
:param func: coroutine function to be queued
:rtype: ExecuteContext
"""
util = get_utility(IQueueUtility)
return ExecuteContext(util.add, partial(func, *args, **kwargs))
|
[
"def",
"in_queue",
"(",
"func",
":",
"Callable",
"[",
"...",
",",
"Coroutine",
"[",
"Any",
",",
"Any",
",",
"Any",
"]",
"]",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"->",
"ExecuteContext",
":",
"util",
"=",
"get_utility",
"(",
"IQueueUtility",
")",
"return",
"ExecuteContext",
"(",
"util",
".",
"add",
",",
"partial",
"(",
"func",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
")"
] |
https://github.com/plone/guillotina/blob/57ad54988f797a93630e424fd4b6a75fa26410af/guillotina/utils/execute.py#L59-L68
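A hedged usage sketch for in_queue; the coroutine is illustrative, and the after_request hook is assumed from ExecuteContext in the surrounding guillotina module:

async def reindex(obj):
    ...  # illustrative async work

# schedule on the queue utility once the current request completes
in_queue(reindex, some_obj).after_request()  # some_obj is a placeholder object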
|
|
KhronosGroup/NNEF-Tools
|
c913758ca687dab8cb7b49e8f1556819a2d0ca25
|
nnef_tools/optimization/nnef_optimizer.py
|
python
|
Optimizer._merge_linear_mul
|
(linear, mul)
|
[] |
def _merge_linear_mul(linear, mul):
variable = mul.inputs[1] if mul.inputs[0] == linear.output else mul.inputs[0]
if variable.data is None or not Optimizer._is_channelwise_shape(variable.shape):
return False
if len(variable.shape) == 0:
scale = np.expand_dims(variable.data, axis=0)
elif len(variable.shape) >= 2:
scale = Optimizer._squeeze_batch_and_spatial_dims(variable.data)
negate = mul.type == 'div'
weights = linear.inputs[1]
if weights.data is None:
return False
if len(linear.inputs) > 2:
bias = linear.inputs[2]
if bias.data is None:
return False
bias.data = bias.data * scale if not negate else bias.data / scale
bias.shape = bias.data.shape
Optimizer._ensure_variable_producer(bias, label=linear.output.name + '_bias')
rank = len(weights.shape)
shape = (1,) + scale.shape + (1,) * (rank - 2) if linear.type == 'deconv' else scale.shape + (1,) * (rank - 1)
scale = np.reshape(scale, newshape=shape)
weights.data = weights.data * scale if not negate else weights.data / scale
linear.copy_with(inputs=(linear.inputs[0], weights, *linear.inputs[2:]), outputs=mul.output)
|
[
"def",
"_merge_linear_mul",
"(",
"linear",
",",
"mul",
")",
":",
"variable",
"=",
"mul",
".",
"inputs",
"[",
"1",
"]",
"if",
"mul",
".",
"inputs",
"[",
"0",
"]",
"==",
"linear",
".",
"output",
"else",
"mul",
".",
"inputs",
"[",
"0",
"]",
"if",
"variable",
".",
"data",
"is",
"None",
"or",
"not",
"Optimizer",
".",
"_is_channelwise_shape",
"(",
"variable",
".",
"shape",
")",
":",
"return",
"False",
"if",
"len",
"(",
"variable",
".",
"shape",
")",
"==",
"0",
":",
"scale",
"=",
"np",
".",
"expand_dims",
"(",
"variable",
".",
"data",
",",
"axis",
"=",
"0",
")",
"elif",
"len",
"(",
"variable",
".",
"shape",
")",
">=",
"2",
":",
"scale",
"=",
"Optimizer",
".",
"_squeeze_batch_and_spatial_dims",
"(",
"variable",
".",
"data",
")",
"negate",
"=",
"mul",
".",
"type",
"==",
"'div'",
"weights",
"=",
"linear",
".",
"inputs",
"[",
"1",
"]",
"if",
"weights",
".",
"data",
"is",
"None",
":",
"return",
"False",
"if",
"len",
"(",
"linear",
".",
"inputs",
")",
">",
"2",
":",
"bias",
"=",
"linear",
".",
"inputs",
"[",
"2",
"]",
"if",
"bias",
".",
"data",
"is",
"None",
":",
"return",
"False",
"bias",
".",
"data",
"=",
"bias",
".",
"data",
"*",
"scale",
"if",
"not",
"negate",
"else",
"bias",
".",
"data",
"/",
"scale",
"bias",
".",
"shape",
"=",
"bias",
".",
"data",
".",
"shape",
"Optimizer",
".",
"_ensure_variable_producer",
"(",
"bias",
",",
"label",
"=",
"linear",
".",
"output",
".",
"name",
"+",
"'_bias'",
")",
"rank",
"=",
"len",
"(",
"weights",
".",
"shape",
")",
"shape",
"=",
"(",
"1",
",",
")",
"+",
"scale",
".",
"shape",
"+",
"(",
"1",
",",
")",
"*",
"(",
"rank",
"-",
"2",
")",
"if",
"linear",
".",
"type",
"==",
"'deconv'",
"else",
"scale",
".",
"shape",
"+",
"(",
"1",
",",
")",
"*",
"(",
"rank",
"-",
"1",
")",
"scale",
"=",
"np",
".",
"reshape",
"(",
"scale",
",",
"newshape",
"=",
"shape",
")",
"weights",
".",
"data",
"=",
"weights",
".",
"data",
"*",
"scale",
"if",
"not",
"negate",
"else",
"weights",
".",
"data",
"/",
"scale",
"linear",
".",
"copy_with",
"(",
"inputs",
"=",
"(",
"linear",
".",
"inputs",
"[",
"0",
"]",
",",
"weights",
",",
"*",
"linear",
".",
"inputs",
"[",
"2",
":",
"]",
")",
",",
"outputs",
"=",
"mul",
".",
"output",
")"
] |
https://github.com/KhronosGroup/NNEF-Tools/blob/c913758ca687dab8cb7b49e8f1556819a2d0ca25/nnef_tools/optimization/nnef_optimizer.py#L373-L405
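A worked shape example for the reshape step in _merge_linear_mul: for a non-deconv op with rank-4 weights and 8 output channels, the channelwise scale is padded with trailing singleton dims so it broadcasts over the kernel:

import numpy as np

scale = np.arange(8, dtype=np.float32)  # one factor per output channel
rank = 4                                # conv weights: [O, I, H, W]
scale = np.reshape(scale, scale.shape + (1,) * (rank - 1))
print(scale.shape)  # (8, 1, 1, 1) -> broadcasts against the weight tensor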
|
||||
DamnWidget/anaconda
|
a9998fb362320f907d5ccbc6fcf5b62baca677c0
|
anaconda_lib/linting/sublime.py
|
python
|
Linter.is_that_code
|
(self, point)
|
return self.view.match_selector(point, matcher)
|
Determines if the given region is valid Python code
|
Determines if the given region is valid Python code
|
[
"Determines",
"if",
"the",
"given",
"region",
"is",
"valid",
"Python",
"code"
] |
def is_that_code(self, point):
"""Determines if the given region is valid Python code
"""
matcher = 'source.python - string - comment'
return self.view.match_selector(point, matcher)
|
[
"def",
"is_that_code",
"(",
"self",
",",
"point",
")",
":",
"matcher",
"=",
"'source.python - string - comment'",
"return",
"self",
".",
"view",
".",
"match_selector",
"(",
"point",
",",
"matcher",
")"
] |
https://github.com/DamnWidget/anaconda/blob/a9998fb362320f907d5ccbc6fcf5b62baca677c0/anaconda_lib/linting/sublime.py#L113-L118
|