nwo
stringlengths 5
106
| sha
stringlengths 40
40
| path
stringlengths 4
174
| language
stringclasses 1
value | identifier
stringlengths 1
140
| parameters
stringlengths 0
87.7k
| argument_list
stringclasses 1
value | return_statement
stringlengths 0
426k
| docstring
stringlengths 0
64.3k
| docstring_summary
stringlengths 0
26.3k
| docstring_tokens
list | function
stringlengths 18
4.83M
| function_tokens
list | url
stringlengths 83
304
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|
Pagure/pagure
|
512f23f5cd1f965276969747792edeb1215cba68
|
pagure/lib/git.py
|
python
|
rebase_pull_request
|
(session, request, username)
|
return "Pull-request rebased"
|
Rebase the specified pull-request.
Args:
session (sqlalchemy): the session to connect to the database with
request (pagure.lib.model.PullRequest): the database object
corresponding to the pull-request to rebase
username (string): the name of the user asking for the pull-request
to be rebased
Returns: (string or None): Pull-request rebased
Raises: pagure.exceptions.PagureException
|
Rebase the specified pull-request.
|
[
"Rebase",
"the",
"specified",
"pull",
"-",
"request",
"."
] |
def rebase_pull_request(session, request, username):
    """Rebase the specified pull-request.

    The rebase happens in a temporary clone of the target repository: the
    source branch is checked out there, the target repository is added as a
    git remote, ``git pull --rebase`` is run against the target branch and
    the rebased branch is then force-pushed back.

    Args:
        session (sqlalchemy): the session to connect to the database with
        request (pagure.lib.model.PullRequest): the database object
            corresponding to the pull-request to rebase
        username (string): the name of the user asking for the pull-request
            to be rebased

    Returns:
        (string or None): "Pull-request rebased" on success; None when the
        pull-request is neither from a remote git repository nor from an
        existing local one.

    Raises:
        pagure.exceptions.PagureException: if the targeted git repository
            cannot be found, or the rebase or the push fails
        pagure.exceptions.BranchNotFoundException: if the source branch
            cannot be found after the rebase
    """
    _log.info("%s asked to rebase the pull-request: %s", username, request)
    # The user's default email is needed further down to configure git
    # before the rebase creates commits.
    user = pagure.lib.query.get_user(session, username)
    if request.remote:
        # Get the fork
        repopath = pagure.utils.get_remote_repo_path(
            request.remote_git, request.branch_from
        )
    elif request.project_from:
        # Get the fork
        repopath = pagure.utils.get_repo_path(request.project_from)
    else:
        # Nothing to rebase from: bail out quietly (returns None).
        _log.info(
            "PR is neither from a remote git repo or an existing local "
            "repo, bailing"
        )
        return
    if not request.project or not os.path.exists(
        pagure.utils.get_repo_path(request.project)
    ):
        _log.info(
            "Could not find the targeted git repository for %s",
            request.project.fullname,
        )
        raise pagure.exceptions.PagureException(
            "Could not find the targeted git repository for %s"
            % request.project.fullname
        )
    with TemporaryClone(
        project=request.project,
        repotype="main",
        action="rebase_pr",
        path=repopath,
    ) as tempclone:
        new_repo = tempclone.repo
        new_repo.checkout("refs/heads/%s" % request.branch_from)
        # Add the upstream repo as remote
        upstream = "%s_%s" % (request.user.user, request.uid)
        upstream_path = pagure.utils.get_repo_path(request.project)
        _log.info(
            " Adding remote: %s pointing to: %s", upstream, upstream_path
        )
        remote = new_repo.create_remote(upstream, upstream_path)
        # Fetch the commits
        remote.fetch()
        def _run_command(command):
            # Helper: run `command` inside the temporary clone and turn
            # any non-zero exit into a PagureException.
            _log.info("Running command: %s", command)
            try:
                out = subprocess.check_output(
                    command, cwd=tempclone.repopath, stderr=subprocess.STDOUT
                )
                _log.info("  command ran successfully")
                _log.debug("Output: %s" % out)
            except subprocess.CalledProcessError as err:
                _log.debug(
                    "Rebase FAILED: {cmd} returned code {code} with the "
                    "following output: {output}".format(
                        cmd=err.cmd, code=err.returncode, output=err.output
                    )
                )
                raise pagure.exceptions.PagureException(
                    "Did not manage to rebase this pull-request"
                )
        # Configure git for that user
        command = ["git", "config", "user.name", username]
        _run_command(command)
        command = ["git", "config", "user.email", user.default_email]
        _run_command(command)
        # Do the rebase
        command = ["git", "pull", "--rebase", upstream, request.branch]
        _run_command(command)
        # Retrieve the reference of the branch we're working on
        try:
            branch_ref = get_branch_ref(new_repo, request.branch_from)
        except pagure.exceptions.PagureException:
            branch_ref = None
        if not branch_ref:
            _log.debug(" Target branch could not be found")
            raise pagure.exceptions.BranchNotFoundException(
                "Branch %s could not be found in the repo %s"
                % (request.branch, request.project.fullname)
            )
        # Push the changes
        _log.info("Pushing %s to %s", branch_ref.name, request.branch_from)
        try:
            if request.allow_rebase:
                # NOTE(review): internal="yes" presumably relaxes the push
                # permission check when the PR owner allowed rebasing --
                # confirm against TemporaryClone.push.
                tempclone.push(
                    username,
                    branch_ref.name,
                    request.branch_from,
                    pull_request=request,
                    force=True,
                    internal="yes",
                )
            else:
                tempclone.push(
                    username,
                    branch_ref.name,
                    request.branch_from,
                    pull_request=request,
                    force=True,
                )
        except subprocess.CalledProcessError as err:
            _log.debug(
                "Rebase FAILED: {cmd} returned code {code} with the "
                "following output: {output}".format(
                    cmd=err.cmd, code=err.returncode, output=err.output
                )
            )
            raise pagure.exceptions.PagureException(
                "Did not manage to rebase this pull-request"
            )
    return "Pull-request rebased"
|
[
"def",
"rebase_pull_request",
"(",
"session",
",",
"request",
",",
"username",
")",
":",
"_log",
".",
"info",
"(",
"\"%s asked to rebase the pull-request: %s\"",
",",
"username",
",",
"request",
")",
"user",
"=",
"pagure",
".",
"lib",
".",
"query",
".",
"get_user",
"(",
"session",
",",
"username",
")",
"if",
"request",
".",
"remote",
":",
"# Get the fork",
"repopath",
"=",
"pagure",
".",
"utils",
".",
"get_remote_repo_path",
"(",
"request",
".",
"remote_git",
",",
"request",
".",
"branch_from",
")",
"elif",
"request",
".",
"project_from",
":",
"# Get the fork",
"repopath",
"=",
"pagure",
".",
"utils",
".",
"get_repo_path",
"(",
"request",
".",
"project_from",
")",
"else",
":",
"_log",
".",
"info",
"(",
"\"PR is neither from a remote git repo or an existing local \"",
"\"repo, bailing\"",
")",
"return",
"if",
"not",
"request",
".",
"project",
"or",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"pagure",
".",
"utils",
".",
"get_repo_path",
"(",
"request",
".",
"project",
")",
")",
":",
"_log",
".",
"info",
"(",
"\"Could not find the targeted git repository for %s\"",
",",
"request",
".",
"project",
".",
"fullname",
",",
")",
"raise",
"pagure",
".",
"exceptions",
".",
"PagureException",
"(",
"\"Could not find the targeted git repository for %s\"",
"%",
"request",
".",
"project",
".",
"fullname",
")",
"with",
"TemporaryClone",
"(",
"project",
"=",
"request",
".",
"project",
",",
"repotype",
"=",
"\"main\"",
",",
"action",
"=",
"\"rebase_pr\"",
",",
"path",
"=",
"repopath",
",",
")",
"as",
"tempclone",
":",
"new_repo",
"=",
"tempclone",
".",
"repo",
"new_repo",
".",
"checkout",
"(",
"\"refs/heads/%s\"",
"%",
"request",
".",
"branch_from",
")",
"# Add the upstream repo as remote",
"upstream",
"=",
"\"%s_%s\"",
"%",
"(",
"request",
".",
"user",
".",
"user",
",",
"request",
".",
"uid",
")",
"upstream_path",
"=",
"pagure",
".",
"utils",
".",
"get_repo_path",
"(",
"request",
".",
"project",
")",
"_log",
".",
"info",
"(",
"\" Adding remote: %s pointing to: %s\"",
",",
"upstream",
",",
"upstream_path",
")",
"remote",
"=",
"new_repo",
".",
"create_remote",
"(",
"upstream",
",",
"upstream_path",
")",
"# Fetch the commits",
"remote",
".",
"fetch",
"(",
")",
"def",
"_run_command",
"(",
"command",
")",
":",
"_log",
".",
"info",
"(",
"\"Running command: %s\"",
",",
"command",
")",
"try",
":",
"out",
"=",
"subprocess",
".",
"check_output",
"(",
"command",
",",
"cwd",
"=",
"tempclone",
".",
"repopath",
",",
"stderr",
"=",
"subprocess",
".",
"STDOUT",
")",
"_log",
".",
"info",
"(",
"\" command ran successfully\"",
")",
"_log",
".",
"debug",
"(",
"\"Output: %s\"",
"%",
"out",
")",
"except",
"subprocess",
".",
"CalledProcessError",
"as",
"err",
":",
"_log",
".",
"debug",
"(",
"\"Rebase FAILED: {cmd} returned code {code} with the \"",
"\"following output: {output}\"",
".",
"format",
"(",
"cmd",
"=",
"err",
".",
"cmd",
",",
"code",
"=",
"err",
".",
"returncode",
",",
"output",
"=",
"err",
".",
"output",
")",
")",
"raise",
"pagure",
".",
"exceptions",
".",
"PagureException",
"(",
"\"Did not manage to rebase this pull-request\"",
")",
"# Configure git for that user",
"command",
"=",
"[",
"\"git\"",
",",
"\"config\"",
",",
"\"user.name\"",
",",
"username",
"]",
"_run_command",
"(",
"command",
")",
"command",
"=",
"[",
"\"git\"",
",",
"\"config\"",
",",
"\"user.email\"",
",",
"user",
".",
"default_email",
"]",
"_run_command",
"(",
"command",
")",
"# Do the rebase",
"command",
"=",
"[",
"\"git\"",
",",
"\"pull\"",
",",
"\"--rebase\"",
",",
"upstream",
",",
"request",
".",
"branch",
"]",
"_run_command",
"(",
"command",
")",
"# Retrieve the reference of the branch we're working on",
"try",
":",
"branch_ref",
"=",
"get_branch_ref",
"(",
"new_repo",
",",
"request",
".",
"branch_from",
")",
"except",
"pagure",
".",
"exceptions",
".",
"PagureException",
":",
"branch_ref",
"=",
"None",
"if",
"not",
"branch_ref",
":",
"_log",
".",
"debug",
"(",
"\" Target branch could not be found\"",
")",
"raise",
"pagure",
".",
"exceptions",
".",
"BranchNotFoundException",
"(",
"\"Branch %s could not be found in the repo %s\"",
"%",
"(",
"request",
".",
"branch",
",",
"request",
".",
"project",
".",
"fullname",
")",
")",
"# Push the changes",
"_log",
".",
"info",
"(",
"\"Pushing %s to %s\"",
",",
"branch_ref",
".",
"name",
",",
"request",
".",
"branch_from",
")",
"try",
":",
"if",
"request",
".",
"allow_rebase",
":",
"tempclone",
".",
"push",
"(",
"username",
",",
"branch_ref",
".",
"name",
",",
"request",
".",
"branch_from",
",",
"pull_request",
"=",
"request",
",",
"force",
"=",
"True",
",",
"internal",
"=",
"\"yes\"",
",",
")",
"else",
":",
"tempclone",
".",
"push",
"(",
"username",
",",
"branch_ref",
".",
"name",
",",
"request",
".",
"branch_from",
",",
"pull_request",
"=",
"request",
",",
"force",
"=",
"True",
",",
")",
"except",
"subprocess",
".",
"CalledProcessError",
"as",
"err",
":",
"_log",
".",
"debug",
"(",
"\"Rebase FAILED: {cmd} returned code {code} with the \"",
"\"following output: {output}\"",
".",
"format",
"(",
"cmd",
"=",
"err",
".",
"cmd",
",",
"code",
"=",
"err",
".",
"returncode",
",",
"output",
"=",
"err",
".",
"output",
")",
")",
"raise",
"pagure",
".",
"exceptions",
".",
"PagureException",
"(",
"\"Did not manage to rebase this pull-request\"",
")",
"return",
"\"Pull-request rebased\""
] |
https://github.com/Pagure/pagure/blob/512f23f5cd1f965276969747792edeb1215cba68/pagure/lib/git.py#L1950-L2086
|
|
JacquesLucke/animation_nodes
|
b1e3ace8dcb0a771fd882fc3ac4e490b009fa0d1
|
animation_nodes/nodes/action/action_viewer.py
|
python
|
FrameRangeRectangle.__init__
|
(self, x1, y1, x2, y2, startFrame, endFrame)
|
[] |
def __init__(self, x1, y1, x2, y2, startFrame, endFrame):
    """Build a rectangle that additionally carries a frame range."""
    # The range must be ordered; an empty/inverted range is a caller bug.
    assert startFrame <= endFrame
    super().__init__(x1, y1, x2, y2)
    self.endFrame = endFrame
    self.startFrame = startFrame
|
[
"def",
"__init__",
"(",
"self",
",",
"x1",
",",
"y1",
",",
"x2",
",",
"y2",
",",
"startFrame",
",",
"endFrame",
")",
":",
"assert",
"startFrame",
"<=",
"endFrame",
"super",
"(",
")",
".",
"__init__",
"(",
"x1",
",",
"y1",
",",
"x2",
",",
"y2",
")",
"self",
".",
"startFrame",
"=",
"startFrame",
"self",
".",
"endFrame",
"=",
"endFrame"
] |
https://github.com/JacquesLucke/animation_nodes/blob/b1e3ace8dcb0a771fd882fc3ac4e490b009fa0d1/animation_nodes/nodes/action/action_viewer.py#L50-L54
|
||||
KhronosGroup/NNEF-Tools
|
c913758ca687dab8cb7b49e8f1556819a2d0ca25
|
nnef_tools/io/tf/lite/flatbuffers/ExpOptions.py
|
python
|
ExpOptions.GetRootAsExpOptions
|
(cls, buf, offset)
|
return x
|
[] |
def GetRootAsExpOptions(cls, buf, offset):
    """Return an ExpOptions initialized at the root table inside *buf*."""
    # The buffer stores the root table position as a uoffset at `offset`.
    root_pos = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
    instance = ExpOptions()
    instance.Init(buf, root_pos + offset)
    return instance
|
[
"def",
"GetRootAsExpOptions",
"(",
"cls",
",",
"buf",
",",
"offset",
")",
":",
"n",
"=",
"flatbuffers",
".",
"encode",
".",
"Get",
"(",
"flatbuffers",
".",
"packer",
".",
"uoffset",
",",
"buf",
",",
"offset",
")",
"x",
"=",
"ExpOptions",
"(",
")",
"x",
".",
"Init",
"(",
"buf",
",",
"n",
"+",
"offset",
")",
"return",
"x"
] |
https://github.com/KhronosGroup/NNEF-Tools/blob/c913758ca687dab8cb7b49e8f1556819a2d0ca25/nnef_tools/io/tf/lite/flatbuffers/ExpOptions.py#L13-L17
|
|||
TencentCloud/tencentcloud-sdk-python
|
3677fd1cdc8c5fd626ce001c13fd3b59d1f279d2
|
tencentcloud/live/v20180801/live_client.py
|
python
|
LiveClient.DescribeLiveSnapshotTemplate
|
(self, request)
|
获取单个截图模板。
:param request: Request instance for DescribeLiveSnapshotTemplate.
:type request: :class:`tencentcloud.live.v20180801.models.DescribeLiveSnapshotTemplateRequest`
:rtype: :class:`tencentcloud.live.v20180801.models.DescribeLiveSnapshotTemplateResponse`
|
获取单个截图模板。
|
[
"获取单个截图模板。"
] |
def DescribeLiveSnapshotTemplate(self, request):
    """Query a single live-stream snapshot (screenshot) template.

    :param request: Request instance for DescribeLiveSnapshotTemplate.
    :type request: :class:`tencentcloud.live.v20180801.models.DescribeLiveSnapshotTemplateRequest`
    :rtype: :class:`tencentcloud.live.v20180801.models.DescribeLiveSnapshotTemplateResponse`
    :raises TencentCloudSDKException: on an API error response or any
        failure during the call/deserialization.
    """
    try:
        params = request._serialize()
        body = self.call("DescribeLiveSnapshotTemplate", params)
        response = json.loads(body)
        if "Error" not in response["Response"]:
            model = models.DescribeLiveSnapshotTemplateResponse()
            model._deserialize(response["Response"])
            return model
        else:
            code = response["Response"]["Error"]["Code"]
            message = response["Response"]["Error"]["Message"]
            reqid = response["Response"]["RequestId"]
            raise TencentCloudSDKException(code, message, reqid)
    except Exception as e:
        if isinstance(e, TencentCloudSDKException):
            raise
        else:
            # BUG FIX: Python 3 exceptions have no `.message` attribute
            # (removed with PEP 352), so `e.message` itself raised
            # AttributeError and masked the real error. Keep `.message`
            # when present (Python 2) and fall back to str(e) otherwise.
            msg = getattr(e, "message", str(e))
            raise TencentCloudSDKException(msg, msg)
|
[
"def",
"DescribeLiveSnapshotTemplate",
"(",
"self",
",",
"request",
")",
":",
"try",
":",
"params",
"=",
"request",
".",
"_serialize",
"(",
")",
"body",
"=",
"self",
".",
"call",
"(",
"\"DescribeLiveSnapshotTemplate\"",
",",
"params",
")",
"response",
"=",
"json",
".",
"loads",
"(",
"body",
")",
"if",
"\"Error\"",
"not",
"in",
"response",
"[",
"\"Response\"",
"]",
":",
"model",
"=",
"models",
".",
"DescribeLiveSnapshotTemplateResponse",
"(",
")",
"model",
".",
"_deserialize",
"(",
"response",
"[",
"\"Response\"",
"]",
")",
"return",
"model",
"else",
":",
"code",
"=",
"response",
"[",
"\"Response\"",
"]",
"[",
"\"Error\"",
"]",
"[",
"\"Code\"",
"]",
"message",
"=",
"response",
"[",
"\"Response\"",
"]",
"[",
"\"Error\"",
"]",
"[",
"\"Message\"",
"]",
"reqid",
"=",
"response",
"[",
"\"Response\"",
"]",
"[",
"\"RequestId\"",
"]",
"raise",
"TencentCloudSDKException",
"(",
"code",
",",
"message",
",",
"reqid",
")",
"except",
"Exception",
"as",
"e",
":",
"if",
"isinstance",
"(",
"e",
",",
"TencentCloudSDKException",
")",
":",
"raise",
"else",
":",
"raise",
"TencentCloudSDKException",
"(",
"e",
".",
"message",
",",
"e",
".",
"message",
")"
] |
https://github.com/TencentCloud/tencentcloud-sdk-python/blob/3677fd1cdc8c5fd626ce001c13fd3b59d1f279d2/tencentcloud/live/v20180801/live_client.py#L1882-L1907
|
||
pandas-dev/pandas
|
5ba7d714014ae8feaccc0dd4a98890828cf2832d
|
pandas/io/parsers/readers.py
|
python
|
validate_integer
|
(name, val, min_val=0)
|
return val
|
Checks whether the 'name' parameter for parsing is either
an integer OR float that can SAFELY be cast to an integer
without losing accuracy. Raises a ValueError if that is
not the case.
Parameters
----------
name : str
Parameter name (used for error reporting)
val : int or float
The value to check
min_val : int
Minimum allowed value (val < min_val will result in a ValueError)
|
Checks whether the 'name' parameter for parsing is either
an integer OR float that can SAFELY be cast to an integer
without losing accuracy. Raises a ValueError if that is
not the case.
|
[
"Checks",
"whether",
"the",
"name",
"parameter",
"for",
"parsing",
"is",
"either",
"an",
"integer",
"OR",
"float",
"that",
"can",
"SAFELY",
"be",
"cast",
"to",
"an",
"integer",
"without",
"losing",
"accuracy",
".",
"Raises",
"a",
"ValueError",
"if",
"that",
"is",
"not",
"the",
"case",
"."
] |
def validate_integer(name, val, min_val=0):
    """
    Validate that the parsing parameter ``name`` is an integer, or a float
    that can be losslessly converted to one. A ``ValueError`` is raised
    otherwise.

    Parameters
    ----------
    name : str
        Parameter name (used for error reporting)
    val : int or float
        The value to check
    min_val : int
        Minimum allowed value (val < min_val will result in a ValueError)
    """
    msg = f"'{name:s}' must be an integer >={min_val:d}"
    # None is passed through untouched ("not specified").
    if val is None:
        return val
    if is_float(val):
        if int(val) != val:
            raise ValueError(msg)
        return int(val)
    if is_integer(val) and val >= min_val:
        return val
    raise ValueError(msg)
|
[
"def",
"validate_integer",
"(",
"name",
",",
"val",
",",
"min_val",
"=",
"0",
")",
":",
"msg",
"=",
"f\"'{name:s}' must be an integer >={min_val:d}\"",
"if",
"val",
"is",
"not",
"None",
":",
"if",
"is_float",
"(",
"val",
")",
":",
"if",
"int",
"(",
"val",
")",
"!=",
"val",
":",
"raise",
"ValueError",
"(",
"msg",
")",
"val",
"=",
"int",
"(",
"val",
")",
"elif",
"not",
"(",
"is_integer",
"(",
"val",
")",
"and",
"val",
">=",
"min_val",
")",
":",
"raise",
"ValueError",
"(",
"msg",
")",
"return",
"val"
] |
https://github.com/pandas-dev/pandas/blob/5ba7d714014ae8feaccc0dd4a98890828cf2832d/pandas/io/parsers/readers.py#L480-L506
|
|
cloudera/hue
|
23f02102d4547c17c32bd5ea0eb24e9eadd657a4
|
desktop/core/ext-py/boto-2.46.1/boto/mws/connection.py
|
python
|
MWSConnection.list_orders
|
(self, request, response, **kw)
|
return self._post_request(request, kw, response)
|
Returns a list of orders created or updated during a time
frame that you specify.
|
Returns a list of orders created or updated during a time
frame that you specify.
|
[
"Returns",
"a",
"list",
"of",
"orders",
"created",
"or",
"updated",
"during",
"a",
"time",
"frame",
"that",
"you",
"specify",
"."
] |
def list_orders(self, request, response, **kw):
    """Returns a list of orders created or updated during a time
    frame that you specify.
    """
    # These time/status filters conflict with the id-based filters below.
    time_filters = {'FulfillmentChannel.Channel.1',
                    'OrderStatus.Status.1', 'PaymentMethod.1',
                    'LastUpdatedAfter', 'LastUpdatedBefore'}
    exclusive = {
        'BuyerEmail': time_filters | {'SellerOrderId'},
        'SellerOrderId': time_filters | {'BuyerEmail'},
    }
    for key, banned in exclusive.items():
        if key in kw and any(other in banned for other in kw):
            message = "Don't include {0} when specifying " \
                      "{1}".format(' or '.join(banned), key)
            raise AssertionError(message)
    return self._post_request(request, kw, response)
|
[
"def",
"list_orders",
"(",
"self",
",",
"request",
",",
"response",
",",
"*",
"*",
"kw",
")",
":",
"toggle",
"=",
"set",
"(",
"(",
"'FulfillmentChannel.Channel.1'",
",",
"'OrderStatus.Status.1'",
",",
"'PaymentMethod.1'",
",",
"'LastUpdatedAfter'",
",",
"'LastUpdatedBefore'",
")",
")",
"for",
"do",
",",
"dont",
"in",
"{",
"'BuyerEmail'",
":",
"toggle",
".",
"union",
"(",
"[",
"'SellerOrderId'",
"]",
")",
",",
"'SellerOrderId'",
":",
"toggle",
".",
"union",
"(",
"[",
"'BuyerEmail'",
"]",
")",
",",
"}",
".",
"items",
"(",
")",
":",
"if",
"do",
"in",
"kw",
"and",
"any",
"(",
"i",
"in",
"dont",
"for",
"i",
"in",
"kw",
")",
":",
"message",
"=",
"\"Don't include {0} when specifying \"",
"\"{1}\"",
".",
"format",
"(",
"' or '",
".",
"join",
"(",
"dont",
")",
",",
"do",
")",
"raise",
"AssertionError",
"(",
"message",
")",
"return",
"self",
".",
"_post_request",
"(",
"request",
",",
"kw",
",",
"response",
")"
] |
https://github.com/cloudera/hue/blob/23f02102d4547c17c32bd5ea0eb24e9eadd657a4/desktop/core/ext-py/boto-2.46.1/boto/mws/connection.py#L717-L732
|
|
google/timesketch
|
1ce6b60e125d104e6644947c6f1dbe1b82ac76b6
|
api_client/python/timesketch_api_client/graph.py
|
python
|
Graph.layout
|
(self)
|
return self._layout
|
Property that returns back the layout of the graph.
|
Property that returns back the layout of the graph.
|
[
"Property",
"that",
"returns",
"back",
"the",
"layout",
"of",
"the",
"graph",
"."
] |
def layout(self):
    """Property that returns the graph layout, computing it on first use."""
    if not self._layout:
        # Lazily compute and cache the default ("spring") layout.
        spring = self._GRAPH_LAYOUTS.get('spring')
        self._layout = spring(self.graph)
    return self._layout
|
[
"def",
"layout",
"(",
"self",
")",
":",
"if",
"self",
".",
"_layout",
":",
"return",
"self",
".",
"_layout",
"layout",
"=",
"self",
".",
"_GRAPH_LAYOUTS",
".",
"get",
"(",
"'spring'",
")",
"self",
".",
"_layout",
"=",
"layout",
"(",
"self",
".",
"graph",
")",
"return",
"self",
".",
"_layout"
] |
https://github.com/google/timesketch/blob/1ce6b60e125d104e6644947c6f1dbe1b82ac76b6/api_client/python/timesketch_api_client/graph.py#L338-L345
|
|
jdf/processing.py
|
76e48ac855fd34169a7576a5cbc396bda698e781
|
mode/formatter/autopep8.py
|
python
|
ReformattedLines._add_item
|
(self, item, indent_amt)
|
Add an item to the line.
Reflow the line to get the best formatting after the item is
inserted. The bracket depth indicates if the item is being
inserted inside of a container or not.
|
Add an item to the line.
|
[
"Add",
"an",
"item",
"to",
"the",
"line",
"."
] |
def _add_item(self, item, indent_amt):
    """Add an item to the line.

    Reflow the line to get the best formatting after the item is
    inserted. The bracket depth indicates if the item is being
    inserted inside of a container or not.

    Args:
        item: the token-like element to append (has `is_string`, str()-able).
        indent_amt: indentation to use when a line break is emitted.
    """
    if self._prev_item and self._prev_item.is_string and item.is_string:
        # Place consecutive string literals on separate lines.
        self._lines.append(self._LineBreak())
        self._lines.append(self._Indent(indent_amt))
    # NOTE(review): `unicode` implies this file targets Python 2.
    item_text = unicode(item)
    if self._lines and self._bracket_depth:
        # Adding the item into a container.
        self._prevent_default_initializer_splitting(item, indent_amt)
        if item_text in '.,)]}':
            self._split_after_delimiter(item, indent_amt)
    elif self._lines and not self.line_empty():
        # Adding the item outside of a container.
        if self.fits_on_current_line(len(item_text)):
            self._enforce_space(item)
        else:
            # Line break for the new item.
            self._lines.append(self._LineBreak())
            self._lines.append(self._Indent(indent_amt))
    self._lines.append(item)
    # Shift the one-token history window used by the checks above.
    self._prev_item, self._prev_prev_item = item, self._prev_item
    if item_text in '([{':
        self._bracket_depth += 1
    elif item_text in '}])':
        self._bracket_depth -= 1
        # More closers than openers means the caller fed unbalanced tokens.
        assert self._bracket_depth >= 0
|
[
"def",
"_add_item",
"(",
"self",
",",
"item",
",",
"indent_amt",
")",
":",
"if",
"self",
".",
"_prev_item",
"and",
"self",
".",
"_prev_item",
".",
"is_string",
"and",
"item",
".",
"is_string",
":",
"# Place consecutive string literals on separate lines.",
"self",
".",
"_lines",
".",
"append",
"(",
"self",
".",
"_LineBreak",
"(",
")",
")",
"self",
".",
"_lines",
".",
"append",
"(",
"self",
".",
"_Indent",
"(",
"indent_amt",
")",
")",
"item_text",
"=",
"unicode",
"(",
"item",
")",
"if",
"self",
".",
"_lines",
"and",
"self",
".",
"_bracket_depth",
":",
"# Adding the item into a container.",
"self",
".",
"_prevent_default_initializer_splitting",
"(",
"item",
",",
"indent_amt",
")",
"if",
"item_text",
"in",
"'.,)]}'",
":",
"self",
".",
"_split_after_delimiter",
"(",
"item",
",",
"indent_amt",
")",
"elif",
"self",
".",
"_lines",
"and",
"not",
"self",
".",
"line_empty",
"(",
")",
":",
"# Adding the item outside of a container.",
"if",
"self",
".",
"fits_on_current_line",
"(",
"len",
"(",
"item_text",
")",
")",
":",
"self",
".",
"_enforce_space",
"(",
"item",
")",
"else",
":",
"# Line break for the new item.",
"self",
".",
"_lines",
".",
"append",
"(",
"self",
".",
"_LineBreak",
"(",
")",
")",
"self",
".",
"_lines",
".",
"append",
"(",
"self",
".",
"_Indent",
"(",
"indent_amt",
")",
")",
"self",
".",
"_lines",
".",
"append",
"(",
"item",
")",
"self",
".",
"_prev_item",
",",
"self",
".",
"_prev_prev_item",
"=",
"item",
",",
"self",
".",
"_prev_item",
"if",
"item_text",
"in",
"'([{'",
":",
"self",
".",
"_bracket_depth",
"+=",
"1",
"elif",
"item_text",
"in",
"'}])'",
":",
"self",
".",
"_bracket_depth",
"-=",
"1",
"assert",
"self",
".",
"_bracket_depth",
">=",
"0"
] |
https://github.com/jdf/processing.py/blob/76e48ac855fd34169a7576a5cbc396bda698e781/mode/formatter/autopep8.py#L1556-L1595
|
||
Epistimio/orion
|
732e739d99561020dbe620760acf062ade746006
|
src/orion/core/worker/transformer.py
|
python
|
Compose.target_type
|
(self)
|
return type_after if type_after else type_before
|
Infer type of the tranformation target.
|
Infer type of the tranformation target.
|
[
"Infer",
"type",
"of",
"the",
"tranformation",
"target",
"."
] |
def target_type(self):
    """Infer the type of the transformation target.

    The type produced by the applied transformation wins; when it is
    falsy, fall back to the composed transformation's type.
    """
    base_type = self.composition.target_type
    applied_type = self.apply.target_type
    return applied_type or base_type
|
[
"def",
"target_type",
"(",
"self",
")",
":",
"type_before",
"=",
"self",
".",
"composition",
".",
"target_type",
"type_after",
"=",
"self",
".",
"apply",
".",
"target_type",
"return",
"type_after",
"if",
"type_after",
"else",
"type_before"
] |
https://github.com/Epistimio/orion/blob/732e739d99561020dbe620760acf062ade746006/src/orion/core/worker/transformer.py#L316-L320
|
|
WerWolv/EdiZon_CheatsConfigsAndScripts
|
d16d36c7509c01dca770f402babd83ff2e9ae6e7
|
Scripts/lib/python3.5/asyncio/base_events.py
|
python
|
BaseEventLoop._process_events
|
(self, event_list)
|
Process selector events.
|
Process selector events.
|
[
"Process",
"selector",
"events",
"."
] |
def _process_events(self, event_list):
"""Process selector events."""
raise NotImplementedError
|
[
"def",
"_process_events",
"(",
"self",
",",
"event_list",
")",
":",
"raise",
"NotImplementedError"
] |
https://github.com/WerWolv/EdiZon_CheatsConfigsAndScripts/blob/d16d36c7509c01dca770f402babd83ff2e9ae6e7/Scripts/lib/python3.5/asyncio/base_events.py#L351-L353
|
||
AppScale/gts
|
46f909cf5dc5ba81faf9d81dc9af598dcf8a82a9
|
AppServer/lib/django-1.4/django/template/defaultfilters.py
|
python
|
date
|
(value, arg=None)
|
Formats a date according to the given format.
|
Formats a date according to the given format.
|
[
"Formats",
"a",
"date",
"according",
"to",
"the",
"given",
"format",
"."
] |
def date(value, arg=None):
    """Formats a date according to the given format."""
    # Empty/None dates render as an empty string.
    if not value:
        return u''
    fmt = settings.DATE_FORMAT if arg is None else arg
    try:
        return formats.date_format(value, fmt)
    except AttributeError:
        # Not a localizable date: fall back to the raw formatter;
        # give up with an empty string when even that fails.
        try:
            return format(value, fmt)
        except AttributeError:
            return ''
|
[
"def",
"date",
"(",
"value",
",",
"arg",
"=",
"None",
")",
":",
"if",
"not",
"value",
":",
"return",
"u''",
"if",
"arg",
"is",
"None",
":",
"arg",
"=",
"settings",
".",
"DATE_FORMAT",
"try",
":",
"return",
"formats",
".",
"date_format",
"(",
"value",
",",
"arg",
")",
"except",
"AttributeError",
":",
"try",
":",
"return",
"format",
"(",
"value",
",",
"arg",
")",
"except",
"AttributeError",
":",
"return",
"''"
] |
https://github.com/AppScale/gts/blob/46f909cf5dc5ba81faf9d81dc9af598dcf8a82a9/AppServer/lib/django-1.4/django/template/defaultfilters.py#L708-L720
|
||
spack/spack
|
675210bd8bd1c5d32ad1cc83d898fb43b569ed74
|
lib/spack/external/jinja2/nodes.py
|
python
|
Node.iter_child_nodes
|
(self, exclude=None, only=None)
|
Iterates over all direct child nodes of the node. This iterates
over all fields and yields the values of they are nodes. If the value
of a field is a list all the nodes in that list are returned.
|
Iterates over all direct child nodes of the node. This iterates
over all fields and yields the values of they are nodes. If the value
of a field is a list all the nodes in that list are returned.
|
[
"Iterates",
"over",
"all",
"direct",
"child",
"nodes",
"of",
"the",
"node",
".",
"This",
"iterates",
"over",
"all",
"fields",
"and",
"yields",
"the",
"values",
"of",
"they",
"are",
"nodes",
".",
"If",
"the",
"value",
"of",
"a",
"field",
"is",
"a",
"list",
"all",
"the",
"nodes",
"in",
"that",
"list",
"are",
"returned",
"."
] |
def iter_child_nodes(self, exclude=None, only=None):
    """Yield every direct child node of this node.

    Walks all fields (honoring `exclude`/`only`) and yields field values
    that are nodes; for list-valued fields each node element of the list
    is yielded.
    """
    for _, value in self.iter_fields(exclude, only):
        if isinstance(value, list):
            for element in value:
                if isinstance(element, Node):
                    yield element
        elif isinstance(value, Node):
            yield value
|
[
"def",
"iter_child_nodes",
"(",
"self",
",",
"exclude",
"=",
"None",
",",
"only",
"=",
"None",
")",
":",
"for",
"_",
",",
"item",
"in",
"self",
".",
"iter_fields",
"(",
"exclude",
",",
"only",
")",
":",
"if",
"isinstance",
"(",
"item",
",",
"list",
")",
":",
"for",
"n",
"in",
"item",
":",
"if",
"isinstance",
"(",
"n",
",",
"Node",
")",
":",
"yield",
"n",
"elif",
"isinstance",
"(",
"item",
",",
"Node",
")",
":",
"yield",
"item"
] |
https://github.com/spack/spack/blob/675210bd8bd1c5d32ad1cc83d898fb43b569ed74/lib/spack/external/jinja2/nodes.py#L155-L166
|
||
cea-hpc/clustershell
|
c421133ed4baa69e35ff76c476d4097201485344
|
lib/ClusterShell/NodeSet.py
|
python
|
NodeSetBase.__iter__
|
(self)
|
Iterator on single nodes as string.
|
Iterator on single nodes as string.
|
[
"Iterator",
"on",
"single",
"nodes",
"as",
"string",
"."
] |
def __iter__(self):
"""Iterator on single nodes as string."""
# Does not call self._iterbase() + str() for better performance.
for pat, ivec, pads, _ in self._iter():
if ivec is not None:
# For performance reasons, add a special case for 1D RangeSet
if len(ivec) == 1:
yield pat % ("%0*d" % (pads[0] or 0, ivec[0]))
else:
yield pat % tuple(["%0*d" % (pad or 0, i) \
for pad, i in zip(pads, ivec)])
else:
yield pat % ()
|
[
"def",
"__iter__",
"(",
"self",
")",
":",
"# Does not call self._iterbase() + str() for better performance.",
"for",
"pat",
",",
"ivec",
",",
"pads",
",",
"_",
"in",
"self",
".",
"_iter",
"(",
")",
":",
"if",
"ivec",
"is",
"not",
"None",
":",
"# For performance reasons, add a special case for 1D RangeSet",
"if",
"len",
"(",
"ivec",
")",
"==",
"1",
":",
"yield",
"pat",
"%",
"(",
"\"%0*d\"",
"%",
"(",
"pads",
"[",
"0",
"]",
"or",
"0",
",",
"ivec",
"[",
"0",
"]",
")",
")",
"else",
":",
"yield",
"pat",
"%",
"tuple",
"(",
"[",
"\"%0*d\"",
"%",
"(",
"pad",
"or",
"0",
",",
"i",
")",
"for",
"pad",
",",
"i",
"in",
"zip",
"(",
"pads",
",",
"ivec",
")",
"]",
")",
"else",
":",
"yield",
"pat",
"%",
"(",
")"
] |
https://github.com/cea-hpc/clustershell/blob/c421133ed4baa69e35ff76c476d4097201485344/lib/ClusterShell/NodeSet.py#L195-L207
|
||
dropbox/dropbox-sdk-python
|
015437429be224732990041164a21a0501235db1
|
dropbox/team_log.py
|
python
|
EventDetails.is_paper_doc_delete_comment_details
|
(self)
|
return self._tag == 'paper_doc_delete_comment_details'
|
Check if the union tag is ``paper_doc_delete_comment_details``.
:rtype: bool
|
Check if the union tag is ``paper_doc_delete_comment_details``.
|
[
"Check",
"if",
"the",
"union",
"tag",
"is",
"paper_doc_delete_comment_details",
"."
] |
def is_paper_doc_delete_comment_details(self):
    """Return True when the union tag is ``paper_doc_delete_comment_details``.

    :rtype: bool
    """
    return 'paper_doc_delete_comment_details' == self._tag
|
[
"def",
"is_paper_doc_delete_comment_details",
"(",
"self",
")",
":",
"return",
"self",
".",
"_tag",
"==",
"'paper_doc_delete_comment_details'"
] |
https://github.com/dropbox/dropbox-sdk-python/blob/015437429be224732990041164a21a0501235db1/dropbox/team_log.py#L14799-L14805
|
|
micahflee/torbrowser-launcher
|
4b9d49c18315476b1a95878c2d84e8e8299db6c9
|
torbrowser_launcher/settings.py
|
python
|
Settings.install
|
(self)
|
[] |
def install(self):
    """Persist the settings, start the launcher binary and close the dialog."""
    self.save()
    launcher_bin = self.common.paths["tbl_bin"]
    # Fire-and-forget: the launcher keeps running after this dialog closes.
    subprocess.Popen([launcher_bin])
    self.close()
|
[
"def",
"install",
"(",
"self",
")",
":",
"self",
".",
"save",
"(",
")",
"subprocess",
".",
"Popen",
"(",
"[",
"self",
".",
"common",
".",
"paths",
"[",
"\"tbl_bin\"",
"]",
"]",
")",
"self",
".",
"close",
"(",
")"
] |
https://github.com/micahflee/torbrowser-launcher/blob/4b9d49c18315476b1a95878c2d84e8e8299db6c9/torbrowser_launcher/settings.py#L169-L172
|
||||
albertz/music-player
|
d23586f5bf657cbaea8147223be7814d117ae73d
|
src/modules/songdb.py
|
python
|
DB._removeOldDb
|
(self)
|
[] |
def _removeOldDb(self):
# Maybe we really should do some backuping...?
self.disconnectAll()
import shutil, os
shutil.rmtree(self.path, ignore_errors=True)
try: os.remove(self.path)
except OSError: pass
|
[
"def",
"_removeOldDb",
"(",
"self",
")",
":",
"# Maybe we really should do some backuping...?",
"self",
".",
"disconnectAll",
"(",
")",
"import",
"shutil",
",",
"os",
"shutil",
".",
"rmtree",
"(",
"self",
".",
"path",
",",
"ignore_errors",
"=",
"True",
")",
"try",
":",
"os",
".",
"remove",
"(",
"self",
".",
"path",
")",
"except",
"OSError",
":",
"pass"
] |
https://github.com/albertz/music-player/blob/d23586f5bf657cbaea8147223be7814d117ae73d/src/modules/songdb.py#L312-L318
|
||||
bikalims/bika.lims
|
35e4bbdb5a3912cae0b5eb13e51097c8b0486349
|
bika/lims/jsonapi/request.py
|
python
|
get_json
|
()
|
return data or dict()
|
get the request json payload
|
get the request json payload
|
[
"get",
"the",
"request",
"json",
"payload"
] |
def get_json():
""" get the request json payload
"""
data = get_request_data().pop()
return data or dict()
|
[
"def",
"get_json",
"(",
")",
":",
"data",
"=",
"get_request_data",
"(",
")",
".",
"pop",
"(",
")",
"return",
"data",
"or",
"dict",
"(",
")"
] |
https://github.com/bikalims/bika.lims/blob/35e4bbdb5a3912cae0b5eb13e51097c8b0486349/bika/lims/jsonapi/request.py#L206-L210
|
|
JiYou/openstack
|
8607dd488bde0905044b303eb6e52bdea6806923
|
packages/source/cinder/cinder/volume/drivers/netapp/iscsi.py
|
python
|
NetAppISCSIDriver._check_flags
|
(self)
|
Ensure that the flags we care about are set.
|
Ensure that the flags we care about are set.
|
[
"Ensure",
"that",
"the",
"flags",
"we",
"care",
"about",
"are",
"set",
"."
] |
def _check_flags(self):
"""Ensure that the flags we care about are set."""
required_flags = ['netapp_wsdl_url', 'netapp_login', 'netapp_password',
'netapp_server_hostname', 'netapp_server_port']
for flag in required_flags:
if not getattr(self.configuration, flag, None):
raise exception.InvalidInput(reason=_('%s is not set') % flag)
if not (self.configuration.netapp_storage_service or
self.configuration.netapp_storage_service_prefix):
raise exception.InvalidInput(
reason=_('Either '
'netapp_storage_service or '
'netapp_storage_service_prefix must '
'be set'))
|
[
"def",
"_check_flags",
"(",
"self",
")",
":",
"required_flags",
"=",
"[",
"'netapp_wsdl_url'",
",",
"'netapp_login'",
",",
"'netapp_password'",
",",
"'netapp_server_hostname'",
",",
"'netapp_server_port'",
"]",
"for",
"flag",
"in",
"required_flags",
":",
"if",
"not",
"getattr",
"(",
"self",
".",
"configuration",
",",
"flag",
",",
"None",
")",
":",
"raise",
"exception",
".",
"InvalidInput",
"(",
"reason",
"=",
"_",
"(",
"'%s is not set'",
")",
"%",
"flag",
")",
"if",
"not",
"(",
"self",
".",
"configuration",
".",
"netapp_storage_service",
"or",
"self",
".",
"configuration",
".",
"netapp_storage_service_prefix",
")",
":",
"raise",
"exception",
".",
"InvalidInput",
"(",
"reason",
"=",
"_",
"(",
"'Either '",
"'netapp_storage_service or '",
"'netapp_storage_service_prefix must '",
"'be set'",
")",
")"
] |
https://github.com/JiYou/openstack/blob/8607dd488bde0905044b303eb6e52bdea6806923/packages/source/cinder/cinder/volume/drivers/netapp/iscsi.py#L163-L176
|
||
Arelle/Arelle
|
20f3d8a8afd41668e1520799acd333349ce0ba17
|
arelle/TkTableWrapper.py
|
python
|
Table.clear_all
|
(self, first=None, last=None)
|
Perform all of the above clear functions on the specified area.
|
Perform all of the above clear functions on the specified area.
|
[
"Perform",
"all",
"of",
"the",
"above",
"clear",
"functions",
"on",
"the",
"specified",
"area",
"."
] |
def clear_all(self, first=None, last=None):
"""Perform all of the above clear functions on the specified area."""
self.clear('all', first, last)
|
[
"def",
"clear_all",
"(",
"self",
",",
"first",
"=",
"None",
",",
"last",
"=",
"None",
")",
":",
"self",
".",
"clear",
"(",
"'all'",
",",
"first",
",",
"last",
")"
] |
https://github.com/Arelle/Arelle/blob/20f3d8a8afd41668e1520799acd333349ce0ba17/arelle/TkTableWrapper.py#L212-L214
|
||
trailofbits/protofuzz
|
acc6ab67b4af5d569f250b4f13f5e67d04b78ba3
|
protofuzz/gen.py
|
python
|
Permuter.make_dependent
|
(self, source, target, action)
|
Create a dependency between path 'source' and path 'target' via the callable 'action'.
>>> permuter._generators
[IterValueGenerator(one), IterValueGenerator(two)]
>>> permuter.make_dependent('one', 'two', lambda x: x + 1)
Going forward, 'two' will only contain values that are (one+1).
|
Create a dependency between path 'source' and path 'target' via the callable 'action'.
|
[
"Create",
"a",
"dependency",
"between",
"path",
"source",
"and",
"path",
"target",
"via",
"the",
"callable",
"action",
"."
] |
def make_dependent(self, source, target, action):
"""Create a dependency between path 'source' and path 'target' via the callable 'action'.
>>> permuter._generators
[IterValueGenerator(one), IterValueGenerator(two)]
>>> permuter.make_dependent('one', 'two', lambda x: x + 1)
Going forward, 'two' will only contain values that are (one+1).
"""
if not self._generators:
return
src_permuter, src = self._resolve_child(source)
dest = self._resolve_child(target)[1]
# pylint: disable=protected-access
container = src_permuter._generators
idx = container.index(src)
container[idx] = DependentValueGenerator(src.name(), dest, action)
self._update_independent_generators()
|
[
"def",
"make_dependent",
"(",
"self",
",",
"source",
",",
"target",
",",
"action",
")",
":",
"if",
"not",
"self",
".",
"_generators",
":",
"return",
"src_permuter",
",",
"src",
"=",
"self",
".",
"_resolve_child",
"(",
"source",
")",
"dest",
"=",
"self",
".",
"_resolve_child",
"(",
"target",
")",
"[",
"1",
"]",
"# pylint: disable=protected-access",
"container",
"=",
"src_permuter",
".",
"_generators",
"idx",
"=",
"container",
".",
"index",
"(",
"src",
")",
"container",
"[",
"idx",
"]",
"=",
"DependentValueGenerator",
"(",
"src",
".",
"name",
"(",
")",
",",
"dest",
",",
"action",
")",
"self",
".",
"_update_independent_generators",
"(",
")"
] |
https://github.com/trailofbits/protofuzz/blob/acc6ab67b4af5d569f250b4f13f5e67d04b78ba3/protofuzz/gen.py#L126-L147
|
||
spesmilo/electrum
|
bdbd59300fbd35b01605e66145458e5f396108e8
|
electrum/channel_db.py
|
python
|
NodeInfo.parse_addresses_field
|
(addresses_field)
|
return addresses
|
[] |
def parse_addresses_field(addresses_field):
buf = addresses_field
def read(n):
nonlocal buf
data, buf = buf[0:n], buf[n:]
return data
addresses = []
while buf:
atype = ord(read(1))
if atype == 0:
pass
elif atype == 1: # IPv4
ipv4_addr = '.'.join(map(lambda x: '%d' % x, read(4)))
port = int.from_bytes(read(2), 'big')
if is_ip_address(ipv4_addr) and port != 0:
addresses.append((ipv4_addr, port))
elif atype == 2: # IPv6
ipv6_addr = b':'.join([binascii.hexlify(read(2)) for i in range(8)])
ipv6_addr = ipv6_addr.decode('ascii')
port = int.from_bytes(read(2), 'big')
if is_ip_address(ipv6_addr) and port != 0:
addresses.append((ipv6_addr, port))
elif atype == 3: # onion v2
host = base64.b32encode(read(10)) + b'.onion'
host = host.decode('ascii').lower()
port = int.from_bytes(read(2), 'big')
addresses.append((host, port))
elif atype == 4: # onion v3
host = base64.b32encode(read(35)) + b'.onion'
host = host.decode('ascii').lower()
port = int.from_bytes(read(2), 'big')
addresses.append((host, port))
else:
# unknown address type
# we don't know how long it is -> have to escape
# if there are other addresses we could have parsed later, they are lost.
break
return addresses
|
[
"def",
"parse_addresses_field",
"(",
"addresses_field",
")",
":",
"buf",
"=",
"addresses_field",
"def",
"read",
"(",
"n",
")",
":",
"nonlocal",
"buf",
"data",
",",
"buf",
"=",
"buf",
"[",
"0",
":",
"n",
"]",
",",
"buf",
"[",
"n",
":",
"]",
"return",
"data",
"addresses",
"=",
"[",
"]",
"while",
"buf",
":",
"atype",
"=",
"ord",
"(",
"read",
"(",
"1",
")",
")",
"if",
"atype",
"==",
"0",
":",
"pass",
"elif",
"atype",
"==",
"1",
":",
"# IPv4",
"ipv4_addr",
"=",
"'.'",
".",
"join",
"(",
"map",
"(",
"lambda",
"x",
":",
"'%d'",
"%",
"x",
",",
"read",
"(",
"4",
")",
")",
")",
"port",
"=",
"int",
".",
"from_bytes",
"(",
"read",
"(",
"2",
")",
",",
"'big'",
")",
"if",
"is_ip_address",
"(",
"ipv4_addr",
")",
"and",
"port",
"!=",
"0",
":",
"addresses",
".",
"append",
"(",
"(",
"ipv4_addr",
",",
"port",
")",
")",
"elif",
"atype",
"==",
"2",
":",
"# IPv6",
"ipv6_addr",
"=",
"b':'",
".",
"join",
"(",
"[",
"binascii",
".",
"hexlify",
"(",
"read",
"(",
"2",
")",
")",
"for",
"i",
"in",
"range",
"(",
"8",
")",
"]",
")",
"ipv6_addr",
"=",
"ipv6_addr",
".",
"decode",
"(",
"'ascii'",
")",
"port",
"=",
"int",
".",
"from_bytes",
"(",
"read",
"(",
"2",
")",
",",
"'big'",
")",
"if",
"is_ip_address",
"(",
"ipv6_addr",
")",
"and",
"port",
"!=",
"0",
":",
"addresses",
".",
"append",
"(",
"(",
"ipv6_addr",
",",
"port",
")",
")",
"elif",
"atype",
"==",
"3",
":",
"# onion v2",
"host",
"=",
"base64",
".",
"b32encode",
"(",
"read",
"(",
"10",
")",
")",
"+",
"b'.onion'",
"host",
"=",
"host",
".",
"decode",
"(",
"'ascii'",
")",
".",
"lower",
"(",
")",
"port",
"=",
"int",
".",
"from_bytes",
"(",
"read",
"(",
"2",
")",
",",
"'big'",
")",
"addresses",
".",
"append",
"(",
"(",
"host",
",",
"port",
")",
")",
"elif",
"atype",
"==",
"4",
":",
"# onion v3",
"host",
"=",
"base64",
".",
"b32encode",
"(",
"read",
"(",
"35",
")",
")",
"+",
"b'.onion'",
"host",
"=",
"host",
".",
"decode",
"(",
"'ascii'",
")",
".",
"lower",
"(",
")",
"port",
"=",
"int",
".",
"from_bytes",
"(",
"read",
"(",
"2",
")",
",",
"'big'",
")",
"addresses",
".",
"append",
"(",
"(",
"host",
",",
"port",
")",
")",
"else",
":",
"# unknown address type",
"# we don't know how long it is -> have to escape",
"# if there are other addresses we could have parsed later, they are lost.",
"break",
"return",
"addresses"
] |
https://github.com/spesmilo/electrum/blob/bdbd59300fbd35b01605e66145458e5f396108e8/electrum/channel_db.py#L188-L225
|
|||
pymedusa/Medusa
|
1405fbb6eb8ef4d20fcca24c32ddca52b11f0f38
|
medusa/tv/episode.py
|
python
|
Episode.full_path
|
(self)
|
Return episode full path.
:return:
:rtype: str
|
Return episode full path.
|
[
"Return",
"episode",
"full",
"path",
"."
] |
def full_path(self):
"""Return episode full path.
:return:
:rtype: str
"""
if self.location is None or self.location == '':
return None
else:
return os.path.join(self.series.location, self.location)
|
[
"def",
"full_path",
"(",
"self",
")",
":",
"if",
"self",
".",
"location",
"is",
"None",
"or",
"self",
".",
"location",
"==",
"''",
":",
"return",
"None",
"else",
":",
"return",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"series",
".",
"location",
",",
"self",
".",
"location",
")"
] |
https://github.com/pymedusa/Medusa/blob/1405fbb6eb8ef4d20fcca24c32ddca52b11f0f38/medusa/tv/episode.py#L1407-L1416
|
||
shiyanhui/FileHeader
|
f347cc134021fb0b710694b71c57742476f5fd2b
|
jinja2/filters.py
|
python
|
environmentfilter
|
(f)
|
return f
|
Decorator for marking evironment dependent filters. The current
:class:`Environment` is passed to the filter as first argument.
|
Decorator for marking evironment dependent filters. The current
:class:`Environment` is passed to the filter as first argument.
|
[
"Decorator",
"for",
"marking",
"evironment",
"dependent",
"filters",
".",
"The",
"current",
":",
"class",
":",
"Environment",
"is",
"passed",
"to",
"the",
"filter",
"as",
"first",
"argument",
"."
] |
def environmentfilter(f):
"""Decorator for marking evironment dependent filters. The current
:class:`Environment` is passed to the filter as first argument.
"""
f.environmentfilter = True
return f
|
[
"def",
"environmentfilter",
"(",
"f",
")",
":",
"f",
".",
"environmentfilter",
"=",
"True",
"return",
"f"
] |
https://github.com/shiyanhui/FileHeader/blob/f347cc134021fb0b710694b71c57742476f5fd2b/jinja2/filters.py#L46-L51
|
|
zhl2008/awd-platform
|
0416b31abea29743387b10b3914581fbe8e7da5e
|
web_hxb2/lib/python3.5/site-packages/django/contrib/admin/helpers.py
|
python
|
InlineAdminForm.fk_field
|
(self)
|
[] |
def fk_field(self):
fk = getattr(self.formset, "fk", None)
if fk:
return AdminField(self.form, fk.name, False)
else:
return ""
|
[
"def",
"fk_field",
"(",
"self",
")",
":",
"fk",
"=",
"getattr",
"(",
"self",
".",
"formset",
",",
"\"fk\"",
",",
"None",
")",
"if",
"fk",
":",
"return",
"AdminField",
"(",
"self",
".",
"form",
",",
"fk",
".",
"name",
",",
"False",
")",
"else",
":",
"return",
"\"\""
] |
https://github.com/zhl2008/awd-platform/blob/0416b31abea29743387b10b3914581fbe8e7da5e/web_hxb2/lib/python3.5/site-packages/django/contrib/admin/helpers.py#L348-L353
|
||||
mchristopher/PokemonGo-DesktopMap
|
ec37575f2776ee7d64456e2a1f6b6b78830b4fe0
|
app/pywin/Lib/telnetlib.py
|
python
|
Telnet.set_debuglevel
|
(self, debuglevel)
|
Set the debug level.
The higher it is, the more debug output you get (on sys.stdout).
|
Set the debug level.
|
[
"Set",
"the",
"debug",
"level",
"."
] |
def set_debuglevel(self, debuglevel):
"""Set the debug level.
The higher it is, the more debug output you get (on sys.stdout).
"""
self.debuglevel = debuglevel
|
[
"def",
"set_debuglevel",
"(",
"self",
",",
"debuglevel",
")",
":",
"self",
".",
"debuglevel",
"=",
"debuglevel"
] |
https://github.com/mchristopher/PokemonGo-DesktopMap/blob/ec37575f2776ee7d64456e2a1f6b6b78830b4fe0/app/pywin/Lib/telnetlib.py#L247-L253
|
||
cortex-lab/phy
|
9a330b9437a3d0b40a37a201d147224e6e7fb462
|
phy/gui/widgets.py
|
python
|
Table.get_previous_id
|
(self, callback=None)
|
Get the previous non-skipped row id.
|
Get the previous non-skipped row id.
|
[
"Get",
"the",
"previous",
"non",
"-",
"skipped",
"row",
"id",
"."
] |
def get_previous_id(self, callback=None):
"""Get the previous non-skipped row id."""
self.eval_js('table.getSiblingId(undefined, "previous");', callback=callback)
|
[
"def",
"get_previous_id",
"(",
"self",
",",
"callback",
"=",
"None",
")",
":",
"self",
".",
"eval_js",
"(",
"'table.getSiblingId(undefined, \"previous\");'",
",",
"callback",
"=",
"callback",
")"
] |
https://github.com/cortex-lab/phy/blob/9a330b9437a3d0b40a37a201d147224e6e7fb462/phy/gui/widgets.py#L470-L472
|
||
HariSekhon/Nagios-Plugins
|
a436fc63e10ab8a64d623df109777dea2eda5758
|
older/check_sftp.py
|
python
|
which
|
(executable)
|
return None
|
takes an executable name as the only arg and tests if it is in the path.
Returns the full path of the executable if it exists in path, or None if it
does not
|
takes an executable name as the only arg and tests if it is in the path.
Returns the full path of the executable if it exists in path, or None if it
does not
|
[
"takes",
"an",
"executable",
"name",
"as",
"the",
"only",
"arg",
"and",
"tests",
"if",
"it",
"is",
"in",
"the",
"path",
".",
"Returns",
"the",
"full",
"path",
"of",
"the",
"executable",
"if",
"it",
"exists",
"in",
"path",
"or",
"None",
"if",
"it",
"does",
"not"
] |
def which(executable):
"""takes an executable name as the only arg and tests if it is in the path.
Returns the full path of the executable if it exists in path, or None if it
does not"""
for basepath in os.environ['PATH'].split(os.pathsep):
path = os.path.join(basepath, executable)
if os.path.isfile(path):
return path
return None
|
[
"def",
"which",
"(",
"executable",
")",
":",
"for",
"basepath",
"in",
"os",
".",
"environ",
"[",
"'PATH'",
"]",
".",
"split",
"(",
"os",
".",
"pathsep",
")",
":",
"path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"basepath",
",",
"executable",
")",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"path",
")",
":",
"return",
"path",
"return",
"None"
] |
https://github.com/HariSekhon/Nagios-Plugins/blob/a436fc63e10ab8a64d623df109777dea2eda5758/older/check_sftp.py#L49-L58
|
|
cclib/cclib
|
81cd4a81cc4a3bbed7016b3e417ca9bff8ad3a92
|
cclib/parser/turbomoleparser.py
|
python
|
Turbomole.parse_dscf_orbitals
|
(self, inputfile, line)
|
return orbitals, line
|
Extract orbital occupation and energies from a dscf logfile.
Returns
-------
tuple
a two membered tuple where the first element is a list of dictionaries of the the orbitals parsed, while the second is the line on which parsing should continue.
|
Extract orbital occupation and energies from a dscf logfile.
Returns
-------
tuple
a two membered tuple where the first element is a list of dictionaries of the the orbitals parsed, while the second is the line on which parsing should continue.
|
[
"Extract",
"orbital",
"occupation",
"and",
"energies",
"from",
"a",
"dscf",
"logfile",
".",
"Returns",
"-------",
"tuple",
"a",
"two",
"membered",
"tuple",
"where",
"the",
"first",
"element",
"is",
"a",
"list",
"of",
"dictionaries",
"of",
"the",
"the",
"orbitals",
"parsed",
"while",
"the",
"second",
"is",
"the",
"line",
"on",
"which",
"parsing",
"should",
"continue",
"."
] |
def parse_dscf_orbitals(self, inputfile, line):
"""
Extract orbital occupation and energies from a dscf logfile.
Returns
-------
tuple
a two membered tuple where the first element is a list of dictionaries of the the orbitals parsed, while the second is the line on which parsing should continue.
"""
## Orbital occupation info from dscf.
# orbitals $scfmo will be written to file mos
#
# irrep 1a 2a 3a 4a 5a
# eigenvalues H -20.25992 -1.24314 -0.57053 -0.46144 -0.39295
# eV -551.3047 -33.8279 -15.5250 -12.5564 -10.6929
# occupation 2.0000 2.0000 2.0000 2.0000 2.0000
# ...
# irrep 6a 7a
# eigenvalues H 0.55091 0.64409
# eV 14.9910 17.5268
## Or
# orbitals $uhfmo_beta will be written to file beta
#
# orbitals $uhfmo_alpha will be written to file alpha
#
# alpha:
#
# irrep 31a 32a 33a 34a 35a
# eigenvalues H -0.47570 -0.46573 -0.40741 -0.39213 -0.35411
# eV -12.9446 -12.6733 -11.0862 -10.6705 -9.6358
# occupation 1.0000 1.0000 1.0000 1.0000 1.0000
# ...
# irrep 36a 37a 38a 39a 40a
# eigenvalues H -0.18634 -0.10035 -0.09666 -0.02740 0.06072
# eV -5.0705 -2.7306 -2.6303 -0.7455 1.6522
#
# beta:
#
# irrep 30a 31a 32a 33a 34a
# eigenvalues H -0.49118 -0.47348 -0.44470 -0.39020 -0.37919
# eV -13.3658 -12.8842 -12.1009 -10.6181 -10.3184
# occupation 1.0000 1.0000 1.0000 1.0000 1.0000
# ...
# irrep 35a 36a 37a 38a 39a
# eigenvalues H -0.28091 -0.15088 -0.09343 -0.07531 -0.00688
# eV -7.6440 -4.1058 -2.5424 -2.0493 -0.1873
# Skip blank line.
line = next(inputfile)
orbitals = []
while True:
irreps = []
energies_hartree = []
energies_eV = []
occupations = []
# MO index
line = next(inputfile)
# Check we're still in the right section.
if "irrep" not in line:
# All done.
break
else:
# Turbomole lists orbitals of different symmetry separately.
irreps = line.split()[1:]
# Energy in H.
line = next(inputfile)
energies_hartree = [float(energy) for energy in line.split()[2:]]
# Energy in eV.
line = next(inputfile)
energies_eV = [float(energy) for energy in line.split()[1:]]
# Occupation.
# This line will be missing if the orbitals are virtual (unoccupied).
line = next(inputfile)
if "occupation" in line:
occupations = [float(occupation) for occupation in line.split()[1:]]
line = next(inputfile)
# If we have any missing occupations, fill with 0
occupations.extend([0.0] * (len(irreps) - len(occupations)))
# Add to list.
orbitals.extend([
{'irrep': irrep, 'energy_H': energy_H, 'energy_eV': energy_eV, 'occupancy': occupation}
for irrep, energy_H, energy_eV, occupation
in zip(irreps, energies_hartree, energies_eV, occupations)
])
return orbitals, line
|
[
"def",
"parse_dscf_orbitals",
"(",
"self",
",",
"inputfile",
",",
"line",
")",
":",
"## Orbital occupation info from dscf.",
"# orbitals $scfmo will be written to file mos",
"# ",
"# irrep 1a 2a 3a 4a 5a ",
"# eigenvalues H -20.25992 -1.24314 -0.57053 -0.46144 -0.39295",
"# eV -551.3047 -33.8279 -15.5250 -12.5564 -10.6929",
"# occupation 2.0000 2.0000 2.0000 2.0000 2.0000",
"# ...",
"# irrep 6a 7a ",
"# eigenvalues H 0.55091 0.64409",
"# eV 14.9910 17.5268",
"## Or",
"# orbitals $uhfmo_beta will be written to file beta",
"# ",
"# orbitals $uhfmo_alpha will be written to file alpha",
"# ",
"# alpha: ",
"# ",
"# irrep 31a 32a 33a 34a 35a ",
"# eigenvalues H -0.47570 -0.46573 -0.40741 -0.39213 -0.35411",
"# eV -12.9446 -12.6733 -11.0862 -10.6705 -9.6358",
"# occupation 1.0000 1.0000 1.0000 1.0000 1.0000 ",
"# ...",
"# irrep 36a 37a 38a 39a 40a ",
"# eigenvalues H -0.18634 -0.10035 -0.09666 -0.02740 0.06072",
"# eV -5.0705 -2.7306 -2.6303 -0.7455 1.6522",
"# ",
"# beta: ",
"# ",
"# irrep 30a 31a 32a 33a 34a ",
"# eigenvalues H -0.49118 -0.47348 -0.44470 -0.39020 -0.37919",
"# eV -13.3658 -12.8842 -12.1009 -10.6181 -10.3184",
"# occupation 1.0000 1.0000 1.0000 1.0000 1.0000 ",
"# ...",
"# irrep 35a 36a 37a 38a 39a ",
"# eigenvalues H -0.28091 -0.15088 -0.09343 -0.07531 -0.00688",
"# eV -7.6440 -4.1058 -2.5424 -2.0493 -0.1873",
"# Skip blank line.",
"line",
"=",
"next",
"(",
"inputfile",
")",
"orbitals",
"=",
"[",
"]",
"while",
"True",
":",
"irreps",
"=",
"[",
"]",
"energies_hartree",
"=",
"[",
"]",
"energies_eV",
"=",
"[",
"]",
"occupations",
"=",
"[",
"]",
"# MO index",
"line",
"=",
"next",
"(",
"inputfile",
")",
"# Check we're still in the right section.",
"if",
"\"irrep\"",
"not",
"in",
"line",
":",
"# All done.",
"break",
"else",
":",
"# Turbomole lists orbitals of different symmetry separately.",
"irreps",
"=",
"line",
".",
"split",
"(",
")",
"[",
"1",
":",
"]",
"# Energy in H.",
"line",
"=",
"next",
"(",
"inputfile",
")",
"energies_hartree",
"=",
"[",
"float",
"(",
"energy",
")",
"for",
"energy",
"in",
"line",
".",
"split",
"(",
")",
"[",
"2",
":",
"]",
"]",
"# Energy in eV.",
"line",
"=",
"next",
"(",
"inputfile",
")",
"energies_eV",
"=",
"[",
"float",
"(",
"energy",
")",
"for",
"energy",
"in",
"line",
".",
"split",
"(",
")",
"[",
"1",
":",
"]",
"]",
"# Occupation.",
"# This line will be missing if the orbitals are virtual (unoccupied).",
"line",
"=",
"next",
"(",
"inputfile",
")",
"if",
"\"occupation\"",
"in",
"line",
":",
"occupations",
"=",
"[",
"float",
"(",
"occupation",
")",
"for",
"occupation",
"in",
"line",
".",
"split",
"(",
")",
"[",
"1",
":",
"]",
"]",
"line",
"=",
"next",
"(",
"inputfile",
")",
"# If we have any missing occupations, fill with 0",
"occupations",
".",
"extend",
"(",
"[",
"0.0",
"]",
"*",
"(",
"len",
"(",
"irreps",
")",
"-",
"len",
"(",
"occupations",
")",
")",
")",
"# Add to list.",
"orbitals",
".",
"extend",
"(",
"[",
"{",
"'irrep'",
":",
"irrep",
",",
"'energy_H'",
":",
"energy_H",
",",
"'energy_eV'",
":",
"energy_eV",
",",
"'occupancy'",
":",
"occupation",
"}",
"for",
"irrep",
",",
"energy_H",
",",
"energy_eV",
",",
"occupation",
"in",
"zip",
"(",
"irreps",
",",
"energies_hartree",
",",
"energies_eV",
",",
"occupations",
")",
"]",
")",
"return",
"orbitals",
",",
"line"
] |
https://github.com/cclib/cclib/blob/81cd4a81cc4a3bbed7016b3e417ca9bff8ad3a92/cclib/parser/turbomoleparser.py#L1152-L1244
|
|
viblo/pymunk
|
77647ca037d5ceabd728f20f37d2da8a3bfb73a0
|
pymunk/bb.py
|
python
|
BB.contains
|
(self, other: "BB")
|
return bool(lib.cpBBContainsBB(self, other))
|
Returns true if bb completley contains the other bb
|
Returns true if bb completley contains the other bb
|
[
"Returns",
"true",
"if",
"bb",
"completley",
"contains",
"the",
"other",
"bb"
] |
def contains(self, other: "BB") -> bool:
"""Returns true if bb completley contains the other bb"""
return bool(lib.cpBBContainsBB(self, other))
|
[
"def",
"contains",
"(",
"self",
",",
"other",
":",
"\"BB\"",
")",
"->",
"bool",
":",
"return",
"bool",
"(",
"lib",
".",
"cpBBContainsBB",
"(",
"self",
",",
"other",
")",
")"
] |
https://github.com/viblo/pymunk/blob/77647ca037d5ceabd728f20f37d2da8a3bfb73a0/pymunk/bb.py#L53-L55
|
|
prkumar/uplink
|
3472806f68a60a93f7cb555d36365551a5411cc5
|
uplink/helpers.py
|
python
|
RequestBuilder.add_transaction_hook
|
(self, hook)
|
[] |
def add_transaction_hook(self, hook):
self._transaction_hooks.append(hook)
|
[
"def",
"add_transaction_hook",
"(",
"self",
",",
"hook",
")",
":",
"self",
".",
"_transaction_hooks",
".",
"append",
"(",
"hook",
")"
] |
https://github.com/prkumar/uplink/blob/3472806f68a60a93f7cb555d36365551a5411cc5/uplink/helpers.py#L114-L115
|
||||
inguma/bokken
|
6109dd0025093a11631cb88cf48cb5c5ed5e617d
|
lib/web/net.py
|
python
|
htmlunquote
|
(text)
|
return text
|
r"""
Decodes `text` that's HTML quoted.
>>> htmlunquote(u'<'&">')
u'<\'&">'
|
r"""
Decodes `text` that's HTML quoted.
|
[
"r",
"Decodes",
"text",
"that",
"s",
"HTML",
"quoted",
"."
] |
def htmlunquote(text):
r"""
Decodes `text` that's HTML quoted.
>>> htmlunquote(u'<'&">')
u'<\'&">'
"""
text = text.replace(u""", u'"')
text = text.replace(u"'", u"'")
text = text.replace(u">", u">")
text = text.replace(u"<", u"<")
text = text.replace(u"&", u"&") # Must be done last!
return text
|
[
"def",
"htmlunquote",
"(",
"text",
")",
":",
"text",
"=",
"text",
".",
"replace",
"(",
"u\""\"",
",",
"u'\"'",
")",
"text",
"=",
"text",
".",
"replace",
"(",
"u\"'\"",
",",
"u\"'\"",
")",
"text",
"=",
"text",
".",
"replace",
"(",
"u\">\"",
",",
"u\">\"",
")",
"text",
"=",
"text",
".",
"replace",
"(",
"u\"<\"",
",",
"u\"<\"",
")",
"text",
"=",
"text",
".",
"replace",
"(",
"u\"&\"",
",",
"u\"&\"",
")",
"# Must be done last!",
"return",
"text"
] |
https://github.com/inguma/bokken/blob/6109dd0025093a11631cb88cf48cb5c5ed5e617d/lib/web/net.py#L156-L168
|
|
securesystemslab/zippy
|
ff0e84ac99442c2c55fe1d285332cfd4e185e089
|
zippy/benchmarks/src/benchmarks/sympy/sympy/integrals/transforms.py
|
python
|
_hankel_transform
|
(f, r, k, nu, name, simplify=True)
|
return _simplify(F, simplify), cond
|
Compute a general Hankel transform
.. math:: F_\nu(k) = \int_{0}^\infty f(r) J_\nu(k r) r \mathrm{d} r.
|
Compute a general Hankel transform
|
[
"Compute",
"a",
"general",
"Hankel",
"transform"
] |
def _hankel_transform(f, r, k, nu, name, simplify=True):
"""
Compute a general Hankel transform
.. math:: F_\nu(k) = \int_{0}^\infty f(r) J_\nu(k r) r \mathrm{d} r.
"""
from sympy import besselj, oo
F = integrate(f*besselj(nu, k*r)*r, (r, 0, oo))
if not F.has(Integral):
return _simplify(F, simplify), True
if not F.is_Piecewise:
raise IntegralTransformError(name, f, 'could not compute integral')
F, cond = F.args[0]
if F.has(Integral):
raise IntegralTransformError(name, f, 'integral in unexpected form')
return _simplify(F, simplify), cond
|
[
"def",
"_hankel_transform",
"(",
"f",
",",
"r",
",",
"k",
",",
"nu",
",",
"name",
",",
"simplify",
"=",
"True",
")",
":",
"from",
"sympy",
"import",
"besselj",
",",
"oo",
"F",
"=",
"integrate",
"(",
"f",
"*",
"besselj",
"(",
"nu",
",",
"k",
"*",
"r",
")",
"*",
"r",
",",
"(",
"r",
",",
"0",
",",
"oo",
")",
")",
"if",
"not",
"F",
".",
"has",
"(",
"Integral",
")",
":",
"return",
"_simplify",
"(",
"F",
",",
"simplify",
")",
",",
"True",
"if",
"not",
"F",
".",
"is_Piecewise",
":",
"raise",
"IntegralTransformError",
"(",
"name",
",",
"f",
",",
"'could not compute integral'",
")",
"F",
",",
"cond",
"=",
"F",
".",
"args",
"[",
"0",
"]",
"if",
"F",
".",
"has",
"(",
"Integral",
")",
":",
"raise",
"IntegralTransformError",
"(",
"name",
",",
"f",
",",
"'integral in unexpected form'",
")",
"return",
"_simplify",
"(",
"F",
",",
"simplify",
")",
",",
"cond"
] |
https://github.com/securesystemslab/zippy/blob/ff0e84ac99442c2c55fe1d285332cfd4e185e089/zippy/benchmarks/src/benchmarks/sympy/sympy/integrals/transforms.py#L1667-L1686
|
|
openshift/openshift-tools
|
1188778e728a6e4781acf728123e5b356380fe6f
|
openshift/installer/vendored/openshift-ansible-3.9.14-1/roles/lib_vendored_deps/library/oc_adm_registry.py
|
python
|
OpenShiftCLI._run
|
(self, cmds, input_data)
|
return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8')
|
Actually executes the command. This makes mocking easier.
|
Actually executes the command. This makes mocking easier.
|
[
"Actually",
"executes",
"the",
"command",
".",
"This",
"makes",
"mocking",
"easier",
"."
] |
def _run(self, cmds, input_data):
''' Actually executes the command. This makes mocking easier. '''
curr_env = os.environ.copy()
curr_env.update({'KUBECONFIG': self.kubeconfig})
proc = subprocess.Popen(cmds,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=curr_env)
stdout, stderr = proc.communicate(input_data)
return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8')
|
[
"def",
"_run",
"(",
"self",
",",
"cmds",
",",
"input_data",
")",
":",
"curr_env",
"=",
"os",
".",
"environ",
".",
"copy",
"(",
")",
"curr_env",
".",
"update",
"(",
"{",
"'KUBECONFIG'",
":",
"self",
".",
"kubeconfig",
"}",
")",
"proc",
"=",
"subprocess",
".",
"Popen",
"(",
"cmds",
",",
"stdin",
"=",
"subprocess",
".",
"PIPE",
",",
"stdout",
"=",
"subprocess",
".",
"PIPE",
",",
"stderr",
"=",
"subprocess",
".",
"PIPE",
",",
"env",
"=",
"curr_env",
")",
"stdout",
",",
"stderr",
"=",
"proc",
".",
"communicate",
"(",
"input_data",
")",
"return",
"proc",
".",
"returncode",
",",
"stdout",
".",
"decode",
"(",
"'utf-8'",
")",
",",
"stderr",
".",
"decode",
"(",
"'utf-8'",
")"
] |
https://github.com/openshift/openshift-tools/blob/1188778e728a6e4781acf728123e5b356380fe6f/openshift/installer/vendored/openshift-ansible-3.9.14-1/roles/lib_vendored_deps/library/oc_adm_registry.py#L1214-L1226
|
|
zhaoweicai/Detectron-Cascade-RCNN
|
5a297fcc16eab6c26b7b1a9fe2767c626730f03b
|
detectron/utils/segms.py
|
python
|
rle_mask_voting
|
(
top_masks, all_masks, all_dets, iou_thresh, binarize_thresh, method='AVG'
)
|
return top_segms_out
|
Returns new masks (in correspondence with `top_masks`) by combining
multiple overlapping masks coming from the pool of `all_masks`. Two methods
for combining masks are supported: 'AVG' uses a weighted average of
overlapping mask pixels; 'UNION' takes the union of all mask pixels.
|
Returns new masks (in correspondence with `top_masks`) by combining
multiple overlapping masks coming from the pool of `all_masks`. Two methods
for combining masks are supported: 'AVG' uses a weighted average of
overlapping mask pixels; 'UNION' takes the union of all mask pixels.
|
[
"Returns",
"new",
"masks",
"(",
"in",
"correspondence",
"with",
"top_masks",
")",
"by",
"combining",
"multiple",
"overlapping",
"masks",
"coming",
"from",
"the",
"pool",
"of",
"all_masks",
".",
"Two",
"methods",
"for",
"combining",
"masks",
"are",
"supported",
":",
"AVG",
"uses",
"a",
"weighted",
"average",
"of",
"overlapping",
"mask",
"pixels",
";",
"UNION",
"takes",
"the",
"union",
"of",
"all",
"mask",
"pixels",
"."
] |
def rle_mask_voting(
top_masks, all_masks, all_dets, iou_thresh, binarize_thresh, method='AVG'
):
"""Returns new masks (in correspondence with `top_masks`) by combining
multiple overlapping masks coming from the pool of `all_masks`. Two methods
for combining masks are supported: 'AVG' uses a weighted average of
overlapping mask pixels; 'UNION' takes the union of all mask pixels.
"""
if len(top_masks) == 0:
return
all_not_crowd = [False] * len(all_masks)
top_to_all_overlaps = mask_util.iou(top_masks, all_masks, all_not_crowd)
decoded_all_masks = [
np.array(mask_util.decode(rle), dtype=np.float32) for rle in all_masks
]
decoded_top_masks = [
np.array(mask_util.decode(rle), dtype=np.float32) for rle in top_masks
]
all_boxes = all_dets[:, :4].astype(np.int32)
all_scores = all_dets[:, 4]
# Fill box support with weights
mask_shape = decoded_all_masks[0].shape
mask_weights = np.zeros((len(all_masks), mask_shape[0], mask_shape[1]))
for k in range(len(all_masks)):
ref_box = all_boxes[k]
x_0 = max(ref_box[0], 0)
x_1 = min(ref_box[2] + 1, mask_shape[1])
y_0 = max(ref_box[1], 0)
y_1 = min(ref_box[3] + 1, mask_shape[0])
mask_weights[k, y_0:y_1, x_0:x_1] = all_scores[k]
mask_weights = np.maximum(mask_weights, 1e-5)
top_segms_out = []
for k in range(len(top_masks)):
# Corner case of empty mask
if decoded_top_masks[k].sum() == 0:
top_segms_out.append(top_masks[k])
continue
inds_to_vote = np.where(top_to_all_overlaps[k] >= iou_thresh)[0]
# Only matches itself
if len(inds_to_vote) == 1:
top_segms_out.append(top_masks[k])
continue
masks_to_vote = [decoded_all_masks[i] for i in inds_to_vote]
if method == 'AVG':
ws = mask_weights[inds_to_vote]
soft_mask = np.average(masks_to_vote, axis=0, weights=ws)
mask = np.array(soft_mask > binarize_thresh, dtype=np.uint8)
elif method == 'UNION':
# Any pixel that's on joins the mask
soft_mask = np.sum(masks_to_vote, axis=0)
mask = np.array(soft_mask > 1e-5, dtype=np.uint8)
else:
raise NotImplementedError('Method {} is unknown'.format(method))
rle = mask_util.encode(np.array(mask[:, :, np.newaxis], order='F'))[0]
top_segms_out.append(rle)
return top_segms_out
|
[
"def",
"rle_mask_voting",
"(",
"top_masks",
",",
"all_masks",
",",
"all_dets",
",",
"iou_thresh",
",",
"binarize_thresh",
",",
"method",
"=",
"'AVG'",
")",
":",
"if",
"len",
"(",
"top_masks",
")",
"==",
"0",
":",
"return",
"all_not_crowd",
"=",
"[",
"False",
"]",
"*",
"len",
"(",
"all_masks",
")",
"top_to_all_overlaps",
"=",
"mask_util",
".",
"iou",
"(",
"top_masks",
",",
"all_masks",
",",
"all_not_crowd",
")",
"decoded_all_masks",
"=",
"[",
"np",
".",
"array",
"(",
"mask_util",
".",
"decode",
"(",
"rle",
")",
",",
"dtype",
"=",
"np",
".",
"float32",
")",
"for",
"rle",
"in",
"all_masks",
"]",
"decoded_top_masks",
"=",
"[",
"np",
".",
"array",
"(",
"mask_util",
".",
"decode",
"(",
"rle",
")",
",",
"dtype",
"=",
"np",
".",
"float32",
")",
"for",
"rle",
"in",
"top_masks",
"]",
"all_boxes",
"=",
"all_dets",
"[",
":",
",",
":",
"4",
"]",
".",
"astype",
"(",
"np",
".",
"int32",
")",
"all_scores",
"=",
"all_dets",
"[",
":",
",",
"4",
"]",
"# Fill box support with weights",
"mask_shape",
"=",
"decoded_all_masks",
"[",
"0",
"]",
".",
"shape",
"mask_weights",
"=",
"np",
".",
"zeros",
"(",
"(",
"len",
"(",
"all_masks",
")",
",",
"mask_shape",
"[",
"0",
"]",
",",
"mask_shape",
"[",
"1",
"]",
")",
")",
"for",
"k",
"in",
"range",
"(",
"len",
"(",
"all_masks",
")",
")",
":",
"ref_box",
"=",
"all_boxes",
"[",
"k",
"]",
"x_0",
"=",
"max",
"(",
"ref_box",
"[",
"0",
"]",
",",
"0",
")",
"x_1",
"=",
"min",
"(",
"ref_box",
"[",
"2",
"]",
"+",
"1",
",",
"mask_shape",
"[",
"1",
"]",
")",
"y_0",
"=",
"max",
"(",
"ref_box",
"[",
"1",
"]",
",",
"0",
")",
"y_1",
"=",
"min",
"(",
"ref_box",
"[",
"3",
"]",
"+",
"1",
",",
"mask_shape",
"[",
"0",
"]",
")",
"mask_weights",
"[",
"k",
",",
"y_0",
":",
"y_1",
",",
"x_0",
":",
"x_1",
"]",
"=",
"all_scores",
"[",
"k",
"]",
"mask_weights",
"=",
"np",
".",
"maximum",
"(",
"mask_weights",
",",
"1e-5",
")",
"top_segms_out",
"=",
"[",
"]",
"for",
"k",
"in",
"range",
"(",
"len",
"(",
"top_masks",
")",
")",
":",
"# Corner case of empty mask",
"if",
"decoded_top_masks",
"[",
"k",
"]",
".",
"sum",
"(",
")",
"==",
"0",
":",
"top_segms_out",
".",
"append",
"(",
"top_masks",
"[",
"k",
"]",
")",
"continue",
"inds_to_vote",
"=",
"np",
".",
"where",
"(",
"top_to_all_overlaps",
"[",
"k",
"]",
">=",
"iou_thresh",
")",
"[",
"0",
"]",
"# Only matches itself",
"if",
"len",
"(",
"inds_to_vote",
")",
"==",
"1",
":",
"top_segms_out",
".",
"append",
"(",
"top_masks",
"[",
"k",
"]",
")",
"continue",
"masks_to_vote",
"=",
"[",
"decoded_all_masks",
"[",
"i",
"]",
"for",
"i",
"in",
"inds_to_vote",
"]",
"if",
"method",
"==",
"'AVG'",
":",
"ws",
"=",
"mask_weights",
"[",
"inds_to_vote",
"]",
"soft_mask",
"=",
"np",
".",
"average",
"(",
"masks_to_vote",
",",
"axis",
"=",
"0",
",",
"weights",
"=",
"ws",
")",
"mask",
"=",
"np",
".",
"array",
"(",
"soft_mask",
">",
"binarize_thresh",
",",
"dtype",
"=",
"np",
".",
"uint8",
")",
"elif",
"method",
"==",
"'UNION'",
":",
"# Any pixel that's on joins the mask",
"soft_mask",
"=",
"np",
".",
"sum",
"(",
"masks_to_vote",
",",
"axis",
"=",
"0",
")",
"mask",
"=",
"np",
".",
"array",
"(",
"soft_mask",
">",
"1e-5",
",",
"dtype",
"=",
"np",
".",
"uint8",
")",
"else",
":",
"raise",
"NotImplementedError",
"(",
"'Method {} is unknown'",
".",
"format",
"(",
"method",
")",
")",
"rle",
"=",
"mask_util",
".",
"encode",
"(",
"np",
".",
"array",
"(",
"mask",
"[",
":",
",",
":",
",",
"np",
".",
"newaxis",
"]",
",",
"order",
"=",
"'F'",
")",
")",
"[",
"0",
"]",
"top_segms_out",
".",
"append",
"(",
"rle",
")",
"return",
"top_segms_out"
] |
https://github.com/zhaoweicai/Detectron-Cascade-RCNN/blob/5a297fcc16eab6c26b7b1a9fe2767c626730f03b/detectron/utils/segms.py#L145-L206
|
|
Qiskit/qiskit-terra
|
b66030e3b9192efdd3eb95cf25c6545fe0a13da4
|
qiskit/qasm/node/reset.py
|
python
|
Reset.qasm
|
(self)
|
return "reset " + self.children[0].qasm() + ";"
|
Return the corresponding OPENQASM string.
|
Return the corresponding OPENQASM string.
|
[
"Return",
"the",
"corresponding",
"OPENQASM",
"string",
"."
] |
def qasm(self):
"""Return the corresponding OPENQASM string."""
return "reset " + self.children[0].qasm() + ";"
|
[
"def",
"qasm",
"(",
"self",
")",
":",
"return",
"\"reset \"",
"+",
"self",
".",
"children",
"[",
"0",
"]",
".",
"qasm",
"(",
")",
"+",
"\";\""
] |
https://github.com/Qiskit/qiskit-terra/blob/b66030e3b9192efdd3eb95cf25c6545fe0a13da4/qiskit/qasm/node/reset.py#L27-L29
|
|
Bartzi/stn-ocr
|
7fd90d845197367eb4e850edcbf95bb815116c99
|
mxnet/utils/create_gif.py
|
python
|
intToBin
|
(i)
|
return chr(i1) + chr(i2)
|
Integer to two bytes
|
Integer to two bytes
|
[
"Integer",
"to",
"two",
"bytes"
] |
def intToBin(i):
""" Integer to two bytes """
# devide in two parts (bytes)
i1 = i % 256
i2 = int(i/256)
# make string (little endian)
return chr(i1) + chr(i2)
|
[
"def",
"intToBin",
"(",
"i",
")",
":",
"# devide in two parts (bytes)",
"i1",
"=",
"i",
"%",
"256",
"i2",
"=",
"int",
"(",
"i",
"/",
"256",
")",
"# make string (little endian)",
"return",
"chr",
"(",
"i1",
")",
"+",
"chr",
"(",
"i2",
")"
] |
https://github.com/Bartzi/stn-ocr/blob/7fd90d845197367eb4e850edcbf95bb815116c99/mxnet/utils/create_gif.py#L15-L21
|
|
aws-samples/aws-kube-codesuite
|
ab4e5ce45416b83bffb947ab8d234df5437f4fca
|
src/networkx/classes/graph.py
|
python
|
Graph.clear
|
(self)
|
Remove all nodes and edges from the graph.
This also removes the name, and all graph, node, and edge attributes.
Examples
--------
>>> G = nx.path_graph(4) # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G.clear()
>>> list(G.nodes)
[]
>>> list(G.edges)
[]
|
Remove all nodes and edges from the graph.
|
[
"Remove",
"all",
"nodes",
"and",
"edges",
"from",
"the",
"graph",
"."
] |
def clear(self):
"""Remove all nodes and edges from the graph.
This also removes the name, and all graph, node, and edge attributes.
Examples
--------
>>> G = nx.path_graph(4) # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G.clear()
>>> list(G.nodes)
[]
>>> list(G.edges)
[]
"""
self.name = ''
self._adj.clear()
self._node.clear()
self.graph.clear()
|
[
"def",
"clear",
"(",
"self",
")",
":",
"self",
".",
"name",
"=",
"''",
"self",
".",
"_adj",
".",
"clear",
"(",
")",
"self",
".",
"_node",
".",
"clear",
"(",
")",
"self",
".",
"graph",
".",
"clear",
"(",
")"
] |
https://github.com/aws-samples/aws-kube-codesuite/blob/ab4e5ce45416b83bffb947ab8d234df5437f4fca/src/networkx/classes/graph.py#L1295-L1313
|
||
tomplus/kubernetes_asyncio
|
f028cc793e3a2c519be6a52a49fb77ff0b014c9b
|
kubernetes_asyncio/client/models/v1beta1_json_schema_props.py
|
python
|
V1beta1JSONSchemaProps.multiple_of
|
(self, multiple_of)
|
Sets the multiple_of of this V1beta1JSONSchemaProps.
:param multiple_of: The multiple_of of this V1beta1JSONSchemaProps. # noqa: E501
:type: float
|
Sets the multiple_of of this V1beta1JSONSchemaProps.
|
[
"Sets",
"the",
"multiple_of",
"of",
"this",
"V1beta1JSONSchemaProps",
"."
] |
def multiple_of(self, multiple_of):
"""Sets the multiple_of of this V1beta1JSONSchemaProps.
:param multiple_of: The multiple_of of this V1beta1JSONSchemaProps. # noqa: E501
:type: float
"""
self._multiple_of = multiple_of
|
[
"def",
"multiple_of",
"(",
"self",
",",
"multiple_of",
")",
":",
"self",
".",
"_multiple_of",
"=",
"multiple_of"
] |
https://github.com/tomplus/kubernetes_asyncio/blob/f028cc793e3a2c519be6a52a49fb77ff0b014c9b/kubernetes_asyncio/client/models/v1beta1_json_schema_props.py#L834-L842
|
||
gramps-project/gramps
|
04d4651a43eb210192f40a9f8c2bad8ee8fa3753
|
gramps/gui/widgets/styledtexteditor.py
|
python
|
StyledTextEditor.__init__
|
(self)
|
Setup initial instance variable values.
|
Setup initial instance variable values.
|
[
"Setup",
"initial",
"instance",
"variable",
"values",
"."
] |
def __init__(self):
"""Setup initial instance variable values."""
self.textbuffer = UndoableStyledBuffer()
self.undo_disabled = self.textbuffer.undo_disabled # see bug 7097
self.textbuffer.connect('style-changed', self._on_buffer_style_changed)
self.textbuffer.connect('changed', self._on_buffer_changed)
self.undo_action = self.redo_action = None
Gtk.TextView.__init__(self)
self.set_buffer(self.textbuffer)
st_cont = self.get_style_context()
self.linkcolor = get_link_color(st_cont)
self.textbuffer.linkcolor = self.linkcolor
self.match = None
self.last_match = None
self._init_url_match()
self.url_match = None
self.spellcheck = Spell(self)
self._internal_style_change = False
self.uimanager = None
self._connect_signals()
# variable to not copy to clipboard on double/triple click
self.selclick = False
|
[
"def",
"__init__",
"(",
"self",
")",
":",
"self",
".",
"textbuffer",
"=",
"UndoableStyledBuffer",
"(",
")",
"self",
".",
"undo_disabled",
"=",
"self",
".",
"textbuffer",
".",
"undo_disabled",
"# see bug 7097",
"self",
".",
"textbuffer",
".",
"connect",
"(",
"'style-changed'",
",",
"self",
".",
"_on_buffer_style_changed",
")",
"self",
".",
"textbuffer",
".",
"connect",
"(",
"'changed'",
",",
"self",
".",
"_on_buffer_changed",
")",
"self",
".",
"undo_action",
"=",
"self",
".",
"redo_action",
"=",
"None",
"Gtk",
".",
"TextView",
".",
"__init__",
"(",
"self",
")",
"self",
".",
"set_buffer",
"(",
"self",
".",
"textbuffer",
")",
"st_cont",
"=",
"self",
".",
"get_style_context",
"(",
")",
"self",
".",
"linkcolor",
"=",
"get_link_color",
"(",
"st_cont",
")",
"self",
".",
"textbuffer",
".",
"linkcolor",
"=",
"self",
".",
"linkcolor",
"self",
".",
"match",
"=",
"None",
"self",
".",
"last_match",
"=",
"None",
"self",
".",
"_init_url_match",
"(",
")",
"self",
".",
"url_match",
"=",
"None",
"self",
".",
"spellcheck",
"=",
"Spell",
"(",
"self",
")",
"self",
".",
"_internal_style_change",
"=",
"False",
"self",
".",
"uimanager",
"=",
"None",
"self",
".",
"_connect_signals",
"(",
")",
"# variable to not copy to clipboard on double/triple click",
"self",
".",
"selclick",
"=",
"False"
] |
https://github.com/gramps-project/gramps/blob/04d4651a43eb210192f40a9f8c2bad8ee8fa3753/gramps/gui/widgets/styledtexteditor.py#L286-L312
|
||
PySimpleGUI/PySimpleGUI
|
6c0d1fb54f493d45e90180b322fbbe70f7a5af3c
|
PySimpleGUIWeb/PySimpleGUIWeb.py
|
python
|
Window.SetAlpha
|
(self, alpha)
|
Change the window's transparency
:param alpha: From 0 to 1 with 0 being completely transparent
:return:
|
Change the window's transparency
:param alpha: From 0 to 1 with 0 being completely transparent
:return:
|
[
"Change",
"the",
"window",
"s",
"transparency",
":",
"param",
"alpha",
":",
"From",
"0",
"to",
"1",
"with",
"0",
"being",
"completely",
"transparent",
":",
"return",
":"
] |
def SetAlpha(self, alpha):
'''
Change the window's transparency
:param alpha: From 0 to 1 with 0 being completely transparent
:return:
'''
self._AlphaChannel = alpha * 255
if self._AlphaChannel is not None:
self.MasterFrame.SetTransparent(self._AlphaChannel)
|
[
"def",
"SetAlpha",
"(",
"self",
",",
"alpha",
")",
":",
"self",
".",
"_AlphaChannel",
"=",
"alpha",
"*",
"255",
"if",
"self",
".",
"_AlphaChannel",
"is",
"not",
"None",
":",
"self",
".",
"MasterFrame",
".",
"SetTransparent",
"(",
"self",
".",
"_AlphaChannel",
")"
] |
https://github.com/PySimpleGUI/PySimpleGUI/blob/6c0d1fb54f493d45e90180b322fbbe70f7a5af3c/PySimpleGUIWeb/PySimpleGUIWeb.py#L3205-L3213
|
||
glitchdotcom/WebPutty
|
4f5da5eb2b4668cbf3c15cf002feacd1d95d2ef7
|
libs/babel/support.py
|
python
|
Translations.load
|
(cls, dirname=None, locales=None, domain=DEFAULT_DOMAIN)
|
return cls(fileobj=open(filename, 'rb'), domain=domain)
|
Load translations from the given directory.
:param dirname: the directory containing the ``MO`` files
:param locales: the list of locales in order of preference (items in
this list can be either `Locale` objects or locale
strings)
:param domain: the message domain
:return: the loaded catalog, or a ``NullTranslations`` instance if no
matching translations were found
:rtype: `Translations`
|
Load translations from the given directory.
|
[
"Load",
"translations",
"from",
"the",
"given",
"directory",
"."
] |
def load(cls, dirname=None, locales=None, domain=DEFAULT_DOMAIN):
"""Load translations from the given directory.
:param dirname: the directory containing the ``MO`` files
:param locales: the list of locales in order of preference (items in
this list can be either `Locale` objects or locale
strings)
:param domain: the message domain
:return: the loaded catalog, or a ``NullTranslations`` instance if no
matching translations were found
:rtype: `Translations`
"""
if locales is not None:
if not isinstance(locales, (list, tuple)):
locales = [locales]
locales = [str(locale) for locale in locales]
if not domain:
domain = cls.DEFAULT_DOMAIN
filename = gettext.find(domain, dirname, locales)
if not filename:
return gettext.NullTranslations()
return cls(fileobj=open(filename, 'rb'), domain=domain)
|
[
"def",
"load",
"(",
"cls",
",",
"dirname",
"=",
"None",
",",
"locales",
"=",
"None",
",",
"domain",
"=",
"DEFAULT_DOMAIN",
")",
":",
"if",
"locales",
"is",
"not",
"None",
":",
"if",
"not",
"isinstance",
"(",
"locales",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"locales",
"=",
"[",
"locales",
"]",
"locales",
"=",
"[",
"str",
"(",
"locale",
")",
"for",
"locale",
"in",
"locales",
"]",
"if",
"not",
"domain",
":",
"domain",
"=",
"cls",
".",
"DEFAULT_DOMAIN",
"filename",
"=",
"gettext",
".",
"find",
"(",
"domain",
",",
"dirname",
",",
"locales",
")",
"if",
"not",
"filename",
":",
"return",
"gettext",
".",
"NullTranslations",
"(",
")",
"return",
"cls",
"(",
"fileobj",
"=",
"open",
"(",
"filename",
",",
"'rb'",
")",
",",
"domain",
"=",
"domain",
")"
] |
https://github.com/glitchdotcom/WebPutty/blob/4f5da5eb2b4668cbf3c15cf002feacd1d95d2ef7/libs/babel/support.py#L283-L304
|
|
openlabs/magento
|
903c02db6ea2404d1e2013a7f0951a621c80fd80
|
magento/catalog.py
|
python
|
Product.create
|
(self, product_type, attribute_set_id, sku, data)
|
return int(self.call(
'catalog_product.create',
[product_type, attribute_set_id, sku, data]
)
)
|
Create Product and return ID
:param product_type: String type of product
:param attribute_set_id: ID of attribute set
:param sku: SKU of the product
:param data: Dictionary of data
:return: INT id of product created
|
Create Product and return ID
|
[
"Create",
"Product",
"and",
"return",
"ID"
] |
def create(self, product_type, attribute_set_id, sku, data):
"""
Create Product and return ID
:param product_type: String type of product
:param attribute_set_id: ID of attribute set
:param sku: SKU of the product
:param data: Dictionary of data
:return: INT id of product created
"""
return int(self.call(
'catalog_product.create',
[product_type, attribute_set_id, sku, data]
)
)
|
[
"def",
"create",
"(",
"self",
",",
"product_type",
",",
"attribute_set_id",
",",
"sku",
",",
"data",
")",
":",
"return",
"int",
"(",
"self",
".",
"call",
"(",
"'catalog_product.create'",
",",
"[",
"product_type",
",",
"attribute_set_id",
",",
"sku",
",",
"data",
"]",
")",
")"
] |
https://github.com/openlabs/magento/blob/903c02db6ea2404d1e2013a7f0951a621c80fd80/magento/catalog.py#L268-L282
|
|
realpython/book2-exercises
|
cde325eac8e6d8cff2316601c2e5b36bb46af7d0
|
web2py-rest/gluon/compileapp.py
|
python
|
test
|
()
|
return
|
Example::
>>> import traceback, types
>>> environment={'x':1}
>>> open('a.py', 'w').write('print 1/x')
>>> save_pyc('a.py')
>>> os.unlink('a.py')
>>> if type(read_pyc('a.pyc'))==types.CodeType: print 'code'
code
>>> exec read_pyc('a.pyc') in environment
1
|
Example::
|
[
"Example",
"::"
] |
def test():
"""
Example::
>>> import traceback, types
>>> environment={'x':1}
>>> open('a.py', 'w').write('print 1/x')
>>> save_pyc('a.py')
>>> os.unlink('a.py')
>>> if type(read_pyc('a.pyc'))==types.CodeType: print 'code'
code
>>> exec read_pyc('a.pyc') in environment
1
"""
return
|
[
"def",
"test",
"(",
")",
":",
"return"
] |
https://github.com/realpython/book2-exercises/blob/cde325eac8e6d8cff2316601c2e5b36bb46af7d0/web2py-rest/gluon/compileapp.py#L743-L758
|
|
uccser/cs-unplugged
|
f83593f872792e71a9fab3f2d77a0f489205926b
|
csunplugged/resources/utils/get_options_html.py
|
python
|
get_options_html
|
(options, local_options, request_parameters=None)
|
return html_string
|
Return HTML string of form elements for given options.
Args:
options (list): List of ResourceParameters options.
local_options (list): List of ResourceParameters local options.
request_parameters (QueryDict): Request QueryDict for resource form.
Returns:
HTML string
|
Return HTML string of form elements for given options.
|
[
"Return",
"HTML",
"string",
"of",
"form",
"elements",
"for",
"given",
"options",
"."
] |
def get_options_html(options, local_options, request_parameters=None):
"""Return HTML string of form elements for given options.
Args:
options (list): List of ResourceParameters options.
local_options (list): List of ResourceParameters local options.
request_parameters (QueryDict): Request QueryDict for resource form.
Returns:
HTML string
"""
html_elements = []
for parameter in options.values():
html_elements.append(parameter.html_element(request_parameters))
if settings.DEBUG:
html_elements.append(etree.Element("hr"))
h3 = etree.Element("h3")
h3.text = "Local Generation Only"
html_elements.append(h3)
for parameter in local_options.values():
html_elements.append(parameter.html_element(request_parameters))
html_string = ""
for html_elem in html_elements:
html_string += etree.tostring(html_elem, pretty_print=True, encoding='utf-8').decode('utf-8')
return html_string
|
[
"def",
"get_options_html",
"(",
"options",
",",
"local_options",
",",
"request_parameters",
"=",
"None",
")",
":",
"html_elements",
"=",
"[",
"]",
"for",
"parameter",
"in",
"options",
".",
"values",
"(",
")",
":",
"html_elements",
".",
"append",
"(",
"parameter",
".",
"html_element",
"(",
"request_parameters",
")",
")",
"if",
"settings",
".",
"DEBUG",
":",
"html_elements",
".",
"append",
"(",
"etree",
".",
"Element",
"(",
"\"hr\"",
")",
")",
"h3",
"=",
"etree",
".",
"Element",
"(",
"\"h3\"",
")",
"h3",
".",
"text",
"=",
"\"Local Generation Only\"",
"html_elements",
".",
"append",
"(",
"h3",
")",
"for",
"parameter",
"in",
"local_options",
".",
"values",
"(",
")",
":",
"html_elements",
".",
"append",
"(",
"parameter",
".",
"html_element",
"(",
"request_parameters",
")",
")",
"html_string",
"=",
"\"\"",
"for",
"html_elem",
"in",
"html_elements",
":",
"html_string",
"+=",
"etree",
".",
"tostring",
"(",
"html_elem",
",",
"pretty_print",
"=",
"True",
",",
"encoding",
"=",
"'utf-8'",
")",
".",
"decode",
"(",
"'utf-8'",
")",
"return",
"html_string"
] |
https://github.com/uccser/cs-unplugged/blob/f83593f872792e71a9fab3f2d77a0f489205926b/csunplugged/resources/utils/get_options_html.py#L7-L32
|
|
CedricGuillemet/Imogen
|
ee417b42747ed5b46cb11b02ef0c3630000085b3
|
bin/Lib/locale.py
|
python
|
_print_locale
|
()
|
Test function.
|
Test function.
|
[
"Test",
"function",
"."
] |
def _print_locale():
""" Test function.
"""
categories = {}
def _init_categories(categories=categories):
for k,v in globals().items():
if k[:3] == 'LC_':
categories[k] = v
_init_categories()
del categories['LC_ALL']
print('Locale defaults as determined by getdefaultlocale():')
print('-'*72)
lang, enc = getdefaultlocale()
print('Language: ', lang or '(undefined)')
print('Encoding: ', enc or '(undefined)')
print()
print('Locale settings on startup:')
print('-'*72)
for name,category in categories.items():
print(name, '...')
lang, enc = getlocale(category)
print(' Language: ', lang or '(undefined)')
print(' Encoding: ', enc or '(undefined)')
print()
print()
print('Locale settings after calling resetlocale():')
print('-'*72)
resetlocale()
for name,category in categories.items():
print(name, '...')
lang, enc = getlocale(category)
print(' Language: ', lang or '(undefined)')
print(' Encoding: ', enc or '(undefined)')
print()
try:
setlocale(LC_ALL, "")
except:
print('NOTE:')
print('setlocale(LC_ALL, "") does not support the default locale')
print('given in the OS environment variables.')
else:
print()
print('Locale settings after calling setlocale(LC_ALL, ""):')
print('-'*72)
for name,category in categories.items():
print(name, '...')
lang, enc = getlocale(category)
print(' Language: ', lang or '(undefined)')
print(' Encoding: ', enc or '(undefined)')
print()
|
[
"def",
"_print_locale",
"(",
")",
":",
"categories",
"=",
"{",
"}",
"def",
"_init_categories",
"(",
"categories",
"=",
"categories",
")",
":",
"for",
"k",
",",
"v",
"in",
"globals",
"(",
")",
".",
"items",
"(",
")",
":",
"if",
"k",
"[",
":",
"3",
"]",
"==",
"'LC_'",
":",
"categories",
"[",
"k",
"]",
"=",
"v",
"_init_categories",
"(",
")",
"del",
"categories",
"[",
"'LC_ALL'",
"]",
"print",
"(",
"'Locale defaults as determined by getdefaultlocale():'",
")",
"print",
"(",
"'-'",
"*",
"72",
")",
"lang",
",",
"enc",
"=",
"getdefaultlocale",
"(",
")",
"print",
"(",
"'Language: '",
",",
"lang",
"or",
"'(undefined)'",
")",
"print",
"(",
"'Encoding: '",
",",
"enc",
"or",
"'(undefined)'",
")",
"print",
"(",
")",
"print",
"(",
"'Locale settings on startup:'",
")",
"print",
"(",
"'-'",
"*",
"72",
")",
"for",
"name",
",",
"category",
"in",
"categories",
".",
"items",
"(",
")",
":",
"print",
"(",
"name",
",",
"'...'",
")",
"lang",
",",
"enc",
"=",
"getlocale",
"(",
"category",
")",
"print",
"(",
"' Language: '",
",",
"lang",
"or",
"'(undefined)'",
")",
"print",
"(",
"' Encoding: '",
",",
"enc",
"or",
"'(undefined)'",
")",
"print",
"(",
")",
"print",
"(",
")",
"print",
"(",
"'Locale settings after calling resetlocale():'",
")",
"print",
"(",
"'-'",
"*",
"72",
")",
"resetlocale",
"(",
")",
"for",
"name",
",",
"category",
"in",
"categories",
".",
"items",
"(",
")",
":",
"print",
"(",
"name",
",",
"'...'",
")",
"lang",
",",
"enc",
"=",
"getlocale",
"(",
"category",
")",
"print",
"(",
"' Language: '",
",",
"lang",
"or",
"'(undefined)'",
")",
"print",
"(",
"' Encoding: '",
",",
"enc",
"or",
"'(undefined)'",
")",
"print",
"(",
")",
"try",
":",
"setlocale",
"(",
"LC_ALL",
",",
"\"\"",
")",
"except",
":",
"print",
"(",
"'NOTE:'",
")",
"print",
"(",
"'setlocale(LC_ALL, \"\") does not support the default locale'",
")",
"print",
"(",
"'given in the OS environment variables.'",
")",
"else",
":",
"print",
"(",
")",
"print",
"(",
"'Locale settings after calling setlocale(LC_ALL, \"\"):'",
")",
"print",
"(",
"'-'",
"*",
"72",
")",
"for",
"name",
",",
"category",
"in",
"categories",
".",
"items",
"(",
")",
":",
"print",
"(",
"name",
",",
"'...'",
")",
"lang",
",",
"enc",
"=",
"getlocale",
"(",
"category",
")",
"print",
"(",
"' Language: '",
",",
"lang",
"or",
"'(undefined)'",
")",
"print",
"(",
"' Encoding: '",
",",
"enc",
"or",
"'(undefined)'",
")",
"print",
"(",
")"
] |
https://github.com/CedricGuillemet/Imogen/blob/ee417b42747ed5b46cb11b02ef0c3630000085b3/bin/Lib/locale.py#L1677-L1731
|
||
openhatch/oh-mainline
|
ce29352a034e1223141dcc2f317030bbc3359a51
|
vendor/packages/webob/webob/byterange.py
|
python
|
Range.parse
|
(cls, header)
|
return cls(start, end)
|
Parse the header; may return None if header is invalid
|
Parse the header; may return None if header is invalid
|
[
"Parse",
"the",
"header",
";",
"may",
"return",
"None",
"if",
"header",
"is",
"invalid"
] |
def parse(cls, header):
"""
Parse the header; may return None if header is invalid
"""
m = _rx_range.match(header or '')
if not m:
return None
start, end = m.groups()
if not start:
return cls(-int(end), None)
start = int(start)
if not end:
return cls(start, None)
end = int(end) + 1 # return val is non-inclusive
if start >= end:
return None
return cls(start, end)
|
[
"def",
"parse",
"(",
"cls",
",",
"header",
")",
":",
"m",
"=",
"_rx_range",
".",
"match",
"(",
"header",
"or",
"''",
")",
"if",
"not",
"m",
":",
"return",
"None",
"start",
",",
"end",
"=",
"m",
".",
"groups",
"(",
")",
"if",
"not",
"start",
":",
"return",
"cls",
"(",
"-",
"int",
"(",
"end",
")",
",",
"None",
")",
"start",
"=",
"int",
"(",
"start",
")",
"if",
"not",
"end",
":",
"return",
"cls",
"(",
"start",
",",
"None",
")",
"end",
"=",
"int",
"(",
"end",
")",
"+",
"1",
"# return val is non-inclusive",
"if",
"start",
">=",
"end",
":",
"return",
"None",
"return",
"cls",
"(",
"start",
",",
"end",
")"
] |
https://github.com/openhatch/oh-mainline/blob/ce29352a034e1223141dcc2f317030bbc3359a51/vendor/packages/webob/webob/byterange.py#L70-L86
|
|
ChunyuanLI/Optimus
|
f63f4a7ca10aea022978500a37d72dd53a37a576
|
code/examples/big_ae/run_data_filtering.py
|
python
|
train
|
(args, train_dataloader, model_vae, encoder_tokenizer, decoder_tokenizer, table_name)
|
return num_collected, num_dropped
|
Train the model
|
Train the model
|
[
"Train",
"the",
"model"
] |
def train(args, train_dataloader, model_vae, encoder_tokenizer, decoder_tokenizer, table_name):
""" Train the model """
if args.local_rank in [-1, 0]:
tb_writer = SummaryWriter()
args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
# train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)
# train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size)
if args.max_steps > 0:
t_total = args.max_steps
args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
else:
t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs
# Prepare optimizer and schedule (linear warmup and decay)
# model_encoder, model_decoder, model_connector = model_vae.encoder, model_vae.decoder, model_vae.linear
no_decay = ['bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in model_vae.named_parameters() if not any(nd in n for nd in no_decay)], 'weight_decay': args.weight_decay},
{'params': [p for n, p in model_vae.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
scheduler = WarmupLinearSchedule(optimizer, warmup_steps=args.warmup_steps, t_total=t_total)
if args.fp16:
try:
from apex import amp
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
model_vae, optimizer = amp.initialize(model_vae, optimizer, opt_level=args.fp16_opt_level)
# multi-gpu training (should be after apex fp16 initialization)
if args.n_gpu > 1:
model_vae = torch.nn.DataParallel(model_vae, device_ids=range(args.n_gpu)).to(args.device)
# Distributed training (should be after apex fp16 initialization)
if args.local_rank != -1:
model_vae = torch.nn.parallel.DistributedDataParallel(model_vae, device_ids=[args.local_rank],
output_device=args.local_rank,
find_unused_parameters=True)
files = Path(args.input_file_path)
num_files = len(list(files.glob('*seq64*.json')))
# create output file folder
if not os.path.exists(args.output_file_path) and args.local_rank in [-1, 0]:
os.makedirs(args.output_file_path)
# Train!
logger.info("***** Running training *****")
logger.info(" Num files = %d", num_files)
logger.info(" Num examples of first file = %d", train_dataloader.num_examples)
logger.info(" Num Epochs = %d", args.num_train_epochs)
logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size)
logger.info(" Total train batch size (w. parallel, distributed & accumulation) = %d",
args.train_batch_size * args.gradient_accumulation_steps * (torch.distributed.get_world_size() if args.local_rank != -1 else 1))
logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
logger.info(" Total optimization steps = %d", t_total)
num_collected, num_dropped = 0, 0
model_vae.zero_grad()
num_train_epochs_iterator = trange(int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0])
n_iter = int(args.num_train_epochs) * len(train_dataloader)
tmp_list = []
dict_token_length = defaultdict(int)
if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]:
os.makedirs(args.output_dir)
dict_file = os.path.join(args.output_dir, args.dataset.lower()+f'.length_freq.json' )
set_seed(args) # Added here for reproducibility (even between python 2 and 3)
for epoch in num_train_epochs_iterator:
for idx_file in range(num_files):
examples = []
cached_features_file = os.path.join(args.output_file_path, args.dataset.lower()+f'.segmented.nltk.split.seq64.{train_dataloader.file_idx}.json' )
logger.info(f"Epoch {epoch}, File idx {train_dataloader.file_idx}")
epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])
# if idx_file > 11:
# break
for step, batch in enumerate(epoch_iterator):
inst, token_lengths = batch
dict_token_length[ token_lengths[0,0].item() ] += 1
if ( token_lengths> 256 ).sum().item()>0:
over_length_tensor = ( token_lengths> 256 ).sum(-1)
inst_ = [inst[i] for i in range(len(inst)) if over_length_tensor[i]==0 ]
examples += inst_
num_collected += len(inst_)
num_dropped += len(inst) - len(inst_)
logger.info(f"{num_dropped} files filtered.")
else:
examples += inst
num_collected += len(inst)
# Good practice: save your data multiple times on Philly
if args.use_philly:
save_solid = False
while not save_solid:
try:
with open(cached_features_file, 'w') as fp:
json.dump(examples, fp)
save_solid = True
except:
pass
else:
with open(cached_features_file, 'w') as fp:
json.dump(examples, fp)
logger.info(f"Saving features in the cached file at {cached_features_file}")
train_dataloader.reset()
if args.local_rank in [-1, 0]:
tb_writer.close()
logger.info(dict_token_length)
# Good practice: save your dict multiple times on Philly
if args.use_philly:
save_solid = False
while not save_solid:
try:
with open(dict_file, 'w') as fp:
json.dump(dict_token_length, fp)
save_solid = True
except:
pass
else:
with open(dict_file, 'w') as fp:
json.dump(dict_token_length, fp)
return num_collected, num_dropped
|
[
"def",
"train",
"(",
"args",
",",
"train_dataloader",
",",
"model_vae",
",",
"encoder_tokenizer",
",",
"decoder_tokenizer",
",",
"table_name",
")",
":",
"if",
"args",
".",
"local_rank",
"in",
"[",
"-",
"1",
",",
"0",
"]",
":",
"tb_writer",
"=",
"SummaryWriter",
"(",
")",
"args",
".",
"train_batch_size",
"=",
"args",
".",
"per_gpu_train_batch_size",
"*",
"max",
"(",
"1",
",",
"args",
".",
"n_gpu",
")",
"# train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)",
"# train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size)",
"if",
"args",
".",
"max_steps",
">",
"0",
":",
"t_total",
"=",
"args",
".",
"max_steps",
"args",
".",
"num_train_epochs",
"=",
"args",
".",
"max_steps",
"//",
"(",
"len",
"(",
"train_dataloader",
")",
"//",
"args",
".",
"gradient_accumulation_steps",
")",
"+",
"1",
"else",
":",
"t_total",
"=",
"len",
"(",
"train_dataloader",
")",
"//",
"args",
".",
"gradient_accumulation_steps",
"*",
"args",
".",
"num_train_epochs",
"# Prepare optimizer and schedule (linear warmup and decay)",
"# model_encoder, model_decoder, model_connector = model_vae.encoder, model_vae.decoder, model_vae.linear",
"no_decay",
"=",
"[",
"'bias'",
",",
"'LayerNorm.weight'",
"]",
"optimizer_grouped_parameters",
"=",
"[",
"{",
"'params'",
":",
"[",
"p",
"for",
"n",
",",
"p",
"in",
"model_vae",
".",
"named_parameters",
"(",
")",
"if",
"not",
"any",
"(",
"nd",
"in",
"n",
"for",
"nd",
"in",
"no_decay",
")",
"]",
",",
"'weight_decay'",
":",
"args",
".",
"weight_decay",
"}",
",",
"{",
"'params'",
":",
"[",
"p",
"for",
"n",
",",
"p",
"in",
"model_vae",
".",
"named_parameters",
"(",
")",
"if",
"any",
"(",
"nd",
"in",
"n",
"for",
"nd",
"in",
"no_decay",
")",
"]",
",",
"'weight_decay'",
":",
"0.0",
"}",
"]",
"optimizer",
"=",
"AdamW",
"(",
"optimizer_grouped_parameters",
",",
"lr",
"=",
"args",
".",
"learning_rate",
",",
"eps",
"=",
"args",
".",
"adam_epsilon",
")",
"scheduler",
"=",
"WarmupLinearSchedule",
"(",
"optimizer",
",",
"warmup_steps",
"=",
"args",
".",
"warmup_steps",
",",
"t_total",
"=",
"t_total",
")",
"if",
"args",
".",
"fp16",
":",
"try",
":",
"from",
"apex",
"import",
"amp",
"except",
"ImportError",
":",
"raise",
"ImportError",
"(",
"\"Please install apex from https://www.github.com/nvidia/apex to use fp16 training.\"",
")",
"model_vae",
",",
"optimizer",
"=",
"amp",
".",
"initialize",
"(",
"model_vae",
",",
"optimizer",
",",
"opt_level",
"=",
"args",
".",
"fp16_opt_level",
")",
"# multi-gpu training (should be after apex fp16 initialization)",
"if",
"args",
".",
"n_gpu",
">",
"1",
":",
"model_vae",
"=",
"torch",
".",
"nn",
".",
"DataParallel",
"(",
"model_vae",
",",
"device_ids",
"=",
"range",
"(",
"args",
".",
"n_gpu",
")",
")",
".",
"to",
"(",
"args",
".",
"device",
")",
"# Distributed training (should be after apex fp16 initialization)",
"if",
"args",
".",
"local_rank",
"!=",
"-",
"1",
":",
"model_vae",
"=",
"torch",
".",
"nn",
".",
"parallel",
".",
"DistributedDataParallel",
"(",
"model_vae",
",",
"device_ids",
"=",
"[",
"args",
".",
"local_rank",
"]",
",",
"output_device",
"=",
"args",
".",
"local_rank",
",",
"find_unused_parameters",
"=",
"True",
")",
"files",
"=",
"Path",
"(",
"args",
".",
"input_file_path",
")",
"num_files",
"=",
"len",
"(",
"list",
"(",
"files",
".",
"glob",
"(",
"'*seq64*.json'",
")",
")",
")",
"# create output file folder",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"args",
".",
"output_file_path",
")",
"and",
"args",
".",
"local_rank",
"in",
"[",
"-",
"1",
",",
"0",
"]",
":",
"os",
".",
"makedirs",
"(",
"args",
".",
"output_file_path",
")",
"# Train!",
"logger",
".",
"info",
"(",
"\"***** Running training *****\"",
")",
"logger",
".",
"info",
"(",
"\" Num files = %d\"",
",",
"num_files",
")",
"logger",
".",
"info",
"(",
"\" Num examples of first file = %d\"",
",",
"train_dataloader",
".",
"num_examples",
")",
"logger",
".",
"info",
"(",
"\" Num Epochs = %d\"",
",",
"args",
".",
"num_train_epochs",
")",
"logger",
".",
"info",
"(",
"\" Instantaneous batch size per GPU = %d\"",
",",
"args",
".",
"per_gpu_train_batch_size",
")",
"logger",
".",
"info",
"(",
"\" Total train batch size (w. parallel, distributed & accumulation) = %d\"",
",",
"args",
".",
"train_batch_size",
"*",
"args",
".",
"gradient_accumulation_steps",
"*",
"(",
"torch",
".",
"distributed",
".",
"get_world_size",
"(",
")",
"if",
"args",
".",
"local_rank",
"!=",
"-",
"1",
"else",
"1",
")",
")",
"logger",
".",
"info",
"(",
"\" Gradient Accumulation steps = %d\"",
",",
"args",
".",
"gradient_accumulation_steps",
")",
"logger",
".",
"info",
"(",
"\" Total optimization steps = %d\"",
",",
"t_total",
")",
"num_collected",
",",
"num_dropped",
"=",
"0",
",",
"0",
"model_vae",
".",
"zero_grad",
"(",
")",
"num_train_epochs_iterator",
"=",
"trange",
"(",
"int",
"(",
"args",
".",
"num_train_epochs",
")",
",",
"desc",
"=",
"\"Epoch\"",
",",
"disable",
"=",
"args",
".",
"local_rank",
"not",
"in",
"[",
"-",
"1",
",",
"0",
"]",
")",
"n_iter",
"=",
"int",
"(",
"args",
".",
"num_train_epochs",
")",
"*",
"len",
"(",
"train_dataloader",
")",
"tmp_list",
"=",
"[",
"]",
"dict_token_length",
"=",
"defaultdict",
"(",
"int",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"args",
".",
"output_dir",
")",
"and",
"args",
".",
"local_rank",
"in",
"[",
"-",
"1",
",",
"0",
"]",
":",
"os",
".",
"makedirs",
"(",
"args",
".",
"output_dir",
")",
"dict_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"args",
".",
"output_dir",
",",
"args",
".",
"dataset",
".",
"lower",
"(",
")",
"+",
"f'.length_freq.json'",
")",
"set_seed",
"(",
"args",
")",
"# Added here for reproducibility (even between python 2 and 3)",
"for",
"epoch",
"in",
"num_train_epochs_iterator",
":",
"for",
"idx_file",
"in",
"range",
"(",
"num_files",
")",
":",
"examples",
"=",
"[",
"]",
"cached_features_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"args",
".",
"output_file_path",
",",
"args",
".",
"dataset",
".",
"lower",
"(",
")",
"+",
"f'.segmented.nltk.split.seq64.{train_dataloader.file_idx}.json'",
")",
"logger",
".",
"info",
"(",
"f\"Epoch {epoch}, File idx {train_dataloader.file_idx}\"",
")",
"epoch_iterator",
"=",
"tqdm",
"(",
"train_dataloader",
",",
"desc",
"=",
"\"Iteration\"",
",",
"disable",
"=",
"args",
".",
"local_rank",
"not",
"in",
"[",
"-",
"1",
",",
"0",
"]",
")",
"# if idx_file > 11:",
"# break",
"for",
"step",
",",
"batch",
"in",
"enumerate",
"(",
"epoch_iterator",
")",
":",
"inst",
",",
"token_lengths",
"=",
"batch",
"dict_token_length",
"[",
"token_lengths",
"[",
"0",
",",
"0",
"]",
".",
"item",
"(",
")",
"]",
"+=",
"1",
"if",
"(",
"token_lengths",
">",
"256",
")",
".",
"sum",
"(",
")",
".",
"item",
"(",
")",
">",
"0",
":",
"over_length_tensor",
"=",
"(",
"token_lengths",
">",
"256",
")",
".",
"sum",
"(",
"-",
"1",
")",
"inst_",
"=",
"[",
"inst",
"[",
"i",
"]",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"inst",
")",
")",
"if",
"over_length_tensor",
"[",
"i",
"]",
"==",
"0",
"]",
"examples",
"+=",
"inst_",
"num_collected",
"+=",
"len",
"(",
"inst_",
")",
"num_dropped",
"+=",
"len",
"(",
"inst",
")",
"-",
"len",
"(",
"inst_",
")",
"logger",
".",
"info",
"(",
"f\"{num_dropped} files filtered.\"",
")",
"else",
":",
"examples",
"+=",
"inst",
"num_collected",
"+=",
"len",
"(",
"inst",
")",
"# Good practice: save your data multiple times on Philly",
"if",
"args",
".",
"use_philly",
":",
"save_solid",
"=",
"False",
"while",
"not",
"save_solid",
":",
"try",
":",
"with",
"open",
"(",
"cached_features_file",
",",
"'w'",
")",
"as",
"fp",
":",
"json",
".",
"dump",
"(",
"examples",
",",
"fp",
")",
"save_solid",
"=",
"True",
"except",
":",
"pass",
"else",
":",
"with",
"open",
"(",
"cached_features_file",
",",
"'w'",
")",
"as",
"fp",
":",
"json",
".",
"dump",
"(",
"examples",
",",
"fp",
")",
"logger",
".",
"info",
"(",
"f\"Saving features in the cached file at {cached_features_file}\"",
")",
"train_dataloader",
".",
"reset",
"(",
")",
"if",
"args",
".",
"local_rank",
"in",
"[",
"-",
"1",
",",
"0",
"]",
":",
"tb_writer",
".",
"close",
"(",
")",
"logger",
".",
"info",
"(",
"dict_token_length",
")",
"# Good practice: save your dict multiple times on Philly",
"if",
"args",
".",
"use_philly",
":",
"save_solid",
"=",
"False",
"while",
"not",
"save_solid",
":",
"try",
":",
"with",
"open",
"(",
"dict_file",
",",
"'w'",
")",
"as",
"fp",
":",
"json",
".",
"dump",
"(",
"dict_token_length",
",",
"fp",
")",
"save_solid",
"=",
"True",
"except",
":",
"pass",
"else",
":",
"with",
"open",
"(",
"dict_file",
",",
"'w'",
")",
"as",
"fp",
":",
"json",
".",
"dump",
"(",
"dict_token_length",
",",
"fp",
")",
"return",
"num_collected",
",",
"num_dropped"
] |
https://github.com/ChunyuanLI/Optimus/blob/f63f4a7ca10aea022978500a37d72dd53a37a576/code/examples/big_ae/run_data_filtering.py#L120-L269
|
|
1012598167/flask_mongodb_game
|
60c7e0351586656ec38f851592886338e50b4110
|
python_flask/venv/Lib/site-packages/pymongo/aggregation.py
|
python
|
_AggregationCommand._cursor_collection
|
(self, cursor_doc)
|
The Collection used for the aggregate command cursor.
|
The Collection used for the aggregate command cursor.
|
[
"The",
"Collection",
"used",
"for",
"the",
"aggregate",
"command",
"cursor",
"."
] |
def _cursor_collection(self, cursor_doc):
"""The Collection used for the aggregate command cursor."""
raise NotImplementedError
|
[
"def",
"_cursor_collection",
"(",
"self",
",",
"cursor_doc",
")",
":",
"raise",
"NotImplementedError"
] |
https://github.com/1012598167/flask_mongodb_game/blob/60c7e0351586656ec38f851592886338e50b4110/python_flask/venv/Lib/site-packages/pymongo/aggregation.py#L82-L84
|
||
FederatedAI/FATE
|
32540492623568ecd1afcb367360133616e02fa3
|
python/fate_arch/abc/_computing.py
|
python
|
CTableABC.save
|
(self, address: AddressABC, partitions: int, schema: dict, **kwargs)
|
save table
Parameters
----------
address: AddressABC
address to save table to
partitions: int
number of partitions to save as
schema: dict
table schema
|
save table
|
[
"save",
"table"
] |
def save(self, address: AddressABC, partitions: int, schema: dict, **kwargs):
"""
save table
Parameters
----------
address: AddressABC
address to save table to
partitions: int
number of partitions to save as
schema: dict
table schema
"""
...
|
[
"def",
"save",
"(",
"self",
",",
"address",
":",
"AddressABC",
",",
"partitions",
":",
"int",
",",
"schema",
":",
"dict",
",",
"*",
"*",
"kwargs",
")",
":",
"..."
] |
https://github.com/FederatedAI/FATE/blob/32540492623568ecd1afcb367360133616e02fa3/python/fate_arch/abc/_computing.py#L65-L78
|
||
Project-MONAI/MONAI
|
83f8b06372a3803ebe9281300cb794a1f3395018
|
monai/transforms/spatial/array.py
|
python
|
RandGridDistortion.__init__
|
(
self,
num_cells: Union[Tuple[int], int] = 5,
prob: float = 0.1,
distort_limit: Union[Tuple[float, float], float] = (-0.03, 0.03),
mode: Union[GridSampleMode, str] = GridSampleMode.BILINEAR,
padding_mode: Union[GridSamplePadMode, str] = GridSamplePadMode.BORDER,
device: Optional[torch.device] = None,
)
|
Random grid distortion transform. Refer to:
https://github.com/albumentations-team/albumentations/blob/master/albumentations/augmentations/transforms.py
Args:
num_cells: number of grid cells on each dimension.
prob: probability of returning a randomized grid distortion transform. Defaults to 0.1.
distort_limit: range to randomly distort.
If single number, distort_limit is picked from (-distort_limit, distort_limit).
Defaults to (-0.03, 0.03).
mode: {``"bilinear"``, ``"nearest"``}
Interpolation mode to calculate output values. Defaults to ``"bilinear"``.
See also: https://pytorch.org/docs/stable/nn.functional.html#grid-sample
padding_mode: {``"zeros"``, ``"border"``, ``"reflection"``}
Padding mode for outside grid values. Defaults to ``"border"``.
See also: https://pytorch.org/docs/stable/nn.functional.html#grid-sample
device: device on which the tensor will be allocated.
|
Random grid distortion transform. Refer to:
https://github.com/albumentations-team/albumentations/blob/master/albumentations/augmentations/transforms.py
|
[
"Random",
"grid",
"distortion",
"transform",
".",
"Refer",
"to",
":",
"https",
":",
"//",
"github",
".",
"com",
"/",
"albumentations",
"-",
"team",
"/",
"albumentations",
"/",
"blob",
"/",
"master",
"/",
"albumentations",
"/",
"augmentations",
"/",
"transforms",
".",
"py"
] |
def __init__(
self,
num_cells: Union[Tuple[int], int] = 5,
prob: float = 0.1,
distort_limit: Union[Tuple[float, float], float] = (-0.03, 0.03),
mode: Union[GridSampleMode, str] = GridSampleMode.BILINEAR,
padding_mode: Union[GridSamplePadMode, str] = GridSamplePadMode.BORDER,
device: Optional[torch.device] = None,
) -> None:
"""
Random grid distortion transform. Refer to:
https://github.com/albumentations-team/albumentations/blob/master/albumentations/augmentations/transforms.py
Args:
num_cells: number of grid cells on each dimension.
prob: probability of returning a randomized grid distortion transform. Defaults to 0.1.
distort_limit: range to randomly distort.
If single number, distort_limit is picked from (-distort_limit, distort_limit).
Defaults to (-0.03, 0.03).
mode: {``"bilinear"``, ``"nearest"``}
Interpolation mode to calculate output values. Defaults to ``"bilinear"``.
See also: https://pytorch.org/docs/stable/nn.functional.html#grid-sample
padding_mode: {``"zeros"``, ``"border"``, ``"reflection"``}
Padding mode for outside grid values. Defaults to ``"border"``.
See also: https://pytorch.org/docs/stable/nn.functional.html#grid-sample
device: device on which the tensor will be allocated.
"""
RandomizableTransform.__init__(self, prob)
self.num_cells = num_cells
if isinstance(distort_limit, (int, float)):
self.distort_limit = (min(-distort_limit, distort_limit), max(-distort_limit, distort_limit))
else:
self.distort_limit = (min(distort_limit), max(distort_limit))
self.distort_steps: Sequence[Sequence[float]] = ((1.0,),)
self.grid_distortion = GridDistortion(
num_cells=num_cells, distort_steps=self.distort_steps, mode=mode, padding_mode=padding_mode, device=device
)
|
[
"def",
"__init__",
"(",
"self",
",",
"num_cells",
":",
"Union",
"[",
"Tuple",
"[",
"int",
"]",
",",
"int",
"]",
"=",
"5",
",",
"prob",
":",
"float",
"=",
"0.1",
",",
"distort_limit",
":",
"Union",
"[",
"Tuple",
"[",
"float",
",",
"float",
"]",
",",
"float",
"]",
"=",
"(",
"-",
"0.03",
",",
"0.03",
")",
",",
"mode",
":",
"Union",
"[",
"GridSampleMode",
",",
"str",
"]",
"=",
"GridSampleMode",
".",
"BILINEAR",
",",
"padding_mode",
":",
"Union",
"[",
"GridSamplePadMode",
",",
"str",
"]",
"=",
"GridSamplePadMode",
".",
"BORDER",
",",
"device",
":",
"Optional",
"[",
"torch",
".",
"device",
"]",
"=",
"None",
",",
")",
"->",
"None",
":",
"RandomizableTransform",
".",
"__init__",
"(",
"self",
",",
"prob",
")",
"self",
".",
"num_cells",
"=",
"num_cells",
"if",
"isinstance",
"(",
"distort_limit",
",",
"(",
"int",
",",
"float",
")",
")",
":",
"self",
".",
"distort_limit",
"=",
"(",
"min",
"(",
"-",
"distort_limit",
",",
"distort_limit",
")",
",",
"max",
"(",
"-",
"distort_limit",
",",
"distort_limit",
")",
")",
"else",
":",
"self",
".",
"distort_limit",
"=",
"(",
"min",
"(",
"distort_limit",
")",
",",
"max",
"(",
"distort_limit",
")",
")",
"self",
".",
"distort_steps",
":",
"Sequence",
"[",
"Sequence",
"[",
"float",
"]",
"]",
"=",
"(",
"(",
"1.0",
",",
")",
",",
")",
"self",
".",
"grid_distortion",
"=",
"GridDistortion",
"(",
"num_cells",
"=",
"num_cells",
",",
"distort_steps",
"=",
"self",
".",
"distort_steps",
",",
"mode",
"=",
"mode",
",",
"padding_mode",
"=",
"padding_mode",
",",
"device",
"=",
"device",
")"
] |
https://github.com/Project-MONAI/MONAI/blob/83f8b06372a3803ebe9281300cb794a1f3395018/monai/transforms/spatial/array.py#L2116-L2153
|
||
microsoft/azure-devops-python-api
|
451cade4c475482792cbe9e522c1fee32393139e
|
azure-devops/azure/devops/v6_0/git/git_client_base.py
|
python
|
GitClientBase.get_pull_request_reviewer
|
(self, repository_id, pull_request_id, reviewer_id, project=None)
|
return self._deserialize('IdentityRefWithVote', response)
|
GetPullRequestReviewer.
[Preview API] Retrieve information about a particular reviewer on a pull request
:param str repository_id: The repository ID of the pull request's target branch.
:param int pull_request_id: ID of the pull request.
:param str reviewer_id: ID of the reviewer.
:param str project: Project ID or project name
:rtype: :class:`<IdentityRefWithVote> <azure.devops.v6_0.git.models.IdentityRefWithVote>`
|
GetPullRequestReviewer.
[Preview API] Retrieve information about a particular reviewer on a pull request
:param str repository_id: The repository ID of the pull request's target branch.
:param int pull_request_id: ID of the pull request.
:param str reviewer_id: ID of the reviewer.
:param str project: Project ID or project name
:rtype: :class:`<IdentityRefWithVote> <azure.devops.v6_0.git.models.IdentityRefWithVote>`
|
[
"GetPullRequestReviewer",
".",
"[",
"Preview",
"API",
"]",
"Retrieve",
"information",
"about",
"a",
"particular",
"reviewer",
"on",
"a",
"pull",
"request",
":",
"param",
"str",
"repository_id",
":",
"The",
"repository",
"ID",
"of",
"the",
"pull",
"request",
"s",
"target",
"branch",
".",
":",
"param",
"int",
"pull_request_id",
":",
"ID",
"of",
"the",
"pull",
"request",
".",
":",
"param",
"str",
"reviewer_id",
":",
"ID",
"of",
"the",
"reviewer",
".",
":",
"param",
"str",
"project",
":",
"Project",
"ID",
"or",
"project",
"name",
":",
"rtype",
":",
":",
"class",
":",
"<IdentityRefWithVote",
">",
"<azure",
".",
"devops",
".",
"v6_0",
".",
"git",
".",
"models",
".",
"IdentityRefWithVote",
">"
] |
def get_pull_request_reviewer(self, repository_id, pull_request_id, reviewer_id, project=None):
"""GetPullRequestReviewer.
[Preview API] Retrieve information about a particular reviewer on a pull request
:param str repository_id: The repository ID of the pull request's target branch.
:param int pull_request_id: ID of the pull request.
:param str reviewer_id: ID of the reviewer.
:param str project: Project ID or project name
:rtype: :class:`<IdentityRefWithVote> <azure.devops.v6_0.git.models.IdentityRefWithVote>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if repository_id is not None:
route_values['repositoryId'] = self._serialize.url('repository_id', repository_id, 'str')
if pull_request_id is not None:
route_values['pullRequestId'] = self._serialize.url('pull_request_id', pull_request_id, 'int')
if reviewer_id is not None:
route_values['reviewerId'] = self._serialize.url('reviewer_id', reviewer_id, 'str')
response = self._send(http_method='GET',
location_id='4b6702c7-aa35-4b89-9c96-b9abf6d3e540',
version='6.0-preview.1',
route_values=route_values)
return self._deserialize('IdentityRefWithVote', response)
|
[
"def",
"get_pull_request_reviewer",
"(",
"self",
",",
"repository_id",
",",
"pull_request_id",
",",
"reviewer_id",
",",
"project",
"=",
"None",
")",
":",
"route_values",
"=",
"{",
"}",
"if",
"project",
"is",
"not",
"None",
":",
"route_values",
"[",
"'project'",
"]",
"=",
"self",
".",
"_serialize",
".",
"url",
"(",
"'project'",
",",
"project",
",",
"'str'",
")",
"if",
"repository_id",
"is",
"not",
"None",
":",
"route_values",
"[",
"'repositoryId'",
"]",
"=",
"self",
".",
"_serialize",
".",
"url",
"(",
"'repository_id'",
",",
"repository_id",
",",
"'str'",
")",
"if",
"pull_request_id",
"is",
"not",
"None",
":",
"route_values",
"[",
"'pullRequestId'",
"]",
"=",
"self",
".",
"_serialize",
".",
"url",
"(",
"'pull_request_id'",
",",
"pull_request_id",
",",
"'int'",
")",
"if",
"reviewer_id",
"is",
"not",
"None",
":",
"route_values",
"[",
"'reviewerId'",
"]",
"=",
"self",
".",
"_serialize",
".",
"url",
"(",
"'reviewer_id'",
",",
"reviewer_id",
",",
"'str'",
")",
"response",
"=",
"self",
".",
"_send",
"(",
"http_method",
"=",
"'GET'",
",",
"location_id",
"=",
"'4b6702c7-aa35-4b89-9c96-b9abf6d3e540'",
",",
"version",
"=",
"'6.0-preview.1'",
",",
"route_values",
"=",
"route_values",
")",
"return",
"self",
".",
"_deserialize",
"(",
"'IdentityRefWithVote'",
",",
"response",
")"
] |
https://github.com/microsoft/azure-devops-python-api/blob/451cade4c475482792cbe9e522c1fee32393139e/azure-devops/azure/devops/v6_0/git/git_client_base.py#L1960-L1982
|
|
chen3feng/blade-build
|
360b4c9ddb9087fb811af3aef2830301cf48805e
|
src/blade/target.py
|
python
|
Target._target_dir
|
(self)
|
return self.target_dir
|
Return the full path of target dir.
|
Return the full path of target dir.
|
[
"Return",
"the",
"full",
"path",
"of",
"target",
"dir",
"."
] |
def _target_dir(self):
"""Return the full path of target dir."""
return self.target_dir
|
[
"def",
"_target_dir",
"(",
"self",
")",
":",
"return",
"self",
".",
"target_dir"
] |
https://github.com/chen3feng/blade-build/blob/360b4c9ddb9087fb811af3aef2830301cf48805e/src/blade/target.py#L586-L588
|
|
plotly/plotly.py
|
cfad7862594b35965c0e000813bd7805e8494a5b
|
packages/python/plotly/plotly/graph_objs/scatterpolar/selected/_marker.py
|
python
|
Marker.color
|
(self)
|
return self["color"]
|
Sets the marker color of selected points.
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
|
Sets the marker color of selected points.
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
|
[
"Sets",
"the",
"marker",
"color",
"of",
"selected",
"points",
".",
"The",
"color",
"property",
"is",
"a",
"color",
"and",
"may",
"be",
"specified",
"as",
":",
"-",
"A",
"hex",
"string",
"(",
"e",
".",
"g",
".",
"#ff0000",
")",
"-",
"An",
"rgb",
"/",
"rgba",
"string",
"(",
"e",
".",
"g",
".",
"rgb",
"(",
"255",
"0",
"0",
")",
")",
"-",
"An",
"hsl",
"/",
"hsla",
"string",
"(",
"e",
".",
"g",
".",
"hsl",
"(",
"0",
"100%",
"50%",
")",
")",
"-",
"An",
"hsv",
"/",
"hsva",
"string",
"(",
"e",
".",
"g",
".",
"hsv",
"(",
"0",
"100%",
"100%",
")",
")",
"-",
"A",
"named",
"CSS",
"color",
":",
"aliceblue",
"antiquewhite",
"aqua",
"aquamarine",
"azure",
"beige",
"bisque",
"black",
"blanchedalmond",
"blue",
"blueviolet",
"brown",
"burlywood",
"cadetblue",
"chartreuse",
"chocolate",
"coral",
"cornflowerblue",
"cornsilk",
"crimson",
"cyan",
"darkblue",
"darkcyan",
"darkgoldenrod",
"darkgray",
"darkgrey",
"darkgreen",
"darkkhaki",
"darkmagenta",
"darkolivegreen",
"darkorange",
"darkorchid",
"darkred",
"darksalmon",
"darkseagreen",
"darkslateblue",
"darkslategray",
"darkslategrey",
"darkturquoise",
"darkviolet",
"deeppink",
"deepskyblue",
"dimgray",
"dimgrey",
"dodgerblue",
"firebrick",
"floralwhite",
"forestgreen",
"fuchsia",
"gainsboro",
"ghostwhite",
"gold",
"goldenrod",
"gray",
"grey",
"green",
"greenyellow",
"honeydew",
"hotpink",
"indianred",
"indigo",
"ivory",
"khaki",
"lavender",
"lavenderblush",
"lawngreen",
"lemonchiffon",
"lightblue",
"lightcoral",
"lightcyan",
"lightgoldenrodyellow",
"lightgray",
"lightgrey",
"lightgreen",
"lightpink",
"lightsalmon",
"lightseagreen",
"lightskyblue",
"lightslategray",
"lightslategrey",
"lightsteelblue",
"lightyellow",
"lime",
"limegreen",
"linen",
"magenta",
"maroon",
"mediumaquamarine",
"mediumblue",
"mediumorchid",
"mediumpurple",
"mediumseagreen",
"mediumslateblue",
"mediumspringgreen",
"mediumturquoise",
"mediumvioletred",
"midnightblue",
"mintcream",
"mistyrose",
"moccasin",
"navajowhite",
"navy",
"oldlace",
"olive",
"olivedrab",
"orange",
"orangered",
"orchid",
"palegoldenrod",
"palegreen",
"paleturquoise",
"palevioletred",
"papayawhip",
"peachpuff",
"peru",
"pink",
"plum",
"powderblue",
"purple",
"red",
"rosybrown",
"royalblue",
"rebeccapurple",
"saddlebrown",
"salmon",
"sandybrown",
"seagreen",
"seashell",
"sienna",
"silver",
"skyblue",
"slateblue",
"slategray",
"slategrey",
"snow",
"springgreen",
"steelblue",
"tan",
"teal",
"thistle",
"tomato",
"turquoise",
"violet",
"wheat",
"white",
"whitesmoke",
"yellow",
"yellowgreen"
] |
def color(self):
"""
Sets the marker color of selected points.
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["color"]
|
[
"def",
"color",
"(",
"self",
")",
":",
"return",
"self",
"[",
"\"color\"",
"]"
] |
https://github.com/plotly/plotly.py/blob/cfad7862594b35965c0e000813bd7805e8494a5b/packages/python/plotly/plotly/graph_objs/scatterpolar/selected/_marker.py#L16-L66
|
|
golismero/golismero
|
7d605b937e241f51c1ca4f47b20f755eeefb9d76
|
thirdparty_libs/nltk/inference/mace.py
|
python
|
test_transform_output
|
(argument_pair)
|
Transform the model into various Mace4 ``interpformat`` formats.
|
Transform the model into various Mace4 ``interpformat`` formats.
|
[
"Transform",
"the",
"model",
"into",
"various",
"Mace4",
"interpformat",
"formats",
"."
] |
def test_transform_output(argument_pair):
"""
Transform the model into various Mace4 ``interpformat`` formats.
"""
lp = LogicParser()
g = lp.parse(argument_pair[0])
alist = [lp.parse(a) for a in argument_pair[1]]
m = MaceCommand(g, assumptions=alist)
m.build_model()
for a in alist:
print ' %s' % a
print '|- %s: %s\n' % (g, m.build_model())
for format in ['standard', 'portable', 'xml', 'cooked']:
spacer()
print "Using '%s' format" % format
spacer()
print m.model(format=format)
|
[
"def",
"test_transform_output",
"(",
"argument_pair",
")",
":",
"lp",
"=",
"LogicParser",
"(",
")",
"g",
"=",
"lp",
".",
"parse",
"(",
"argument_pair",
"[",
"0",
"]",
")",
"alist",
"=",
"[",
"lp",
".",
"parse",
"(",
"a",
")",
"for",
"a",
"in",
"argument_pair",
"[",
"1",
"]",
"]",
"m",
"=",
"MaceCommand",
"(",
"g",
",",
"assumptions",
"=",
"alist",
")",
"m",
".",
"build_model",
"(",
")",
"for",
"a",
"in",
"alist",
":",
"print",
"' %s'",
"%",
"a",
"print",
"'|- %s: %s\\n'",
"%",
"(",
"g",
",",
"m",
".",
"build_model",
"(",
")",
")",
"for",
"format",
"in",
"[",
"'standard'",
",",
"'portable'",
",",
"'xml'",
",",
"'cooked'",
"]",
":",
"spacer",
"(",
")",
"print",
"\"Using '%s' format\"",
"%",
"format",
"spacer",
"(",
")",
"print",
"m",
".",
"model",
"(",
"format",
"=",
"format",
")"
] |
https://github.com/golismero/golismero/blob/7d605b937e241f51c1ca4f47b20f755eeefb9d76/thirdparty_libs/nltk/inference/mace.py#L282-L298
|
||
scottslowe/learning-tools
|
5a2abe30e269055d89f6ff4210f0f9f52d632680
|
traefik/tf-ans-swarm/ec2.py
|
python
|
Ec2Inventory.get_elasticache_replication_groups_by_region
|
(self, region)
|
Makes an AWS API call to the list of ElastiCache replication groups
in a particular region.
|
Makes an AWS API call to the list of ElastiCache replication groups
in a particular region.
|
[
"Makes",
"an",
"AWS",
"API",
"call",
"to",
"the",
"list",
"of",
"ElastiCache",
"replication",
"groups",
"in",
"a",
"particular",
"region",
"."
] |
def get_elasticache_replication_groups_by_region(self, region):
''' Makes an AWS API call to the list of ElastiCache replication groups
in a particular region.'''
# ElastiCache boto module doesn't provide a get_all_intances method,
# that's why we need to call describe directly (it would be called by
# the shorthand method anyway...)
try:
conn = self.connect_to_aws(elasticache, region)
if conn:
response = conn.describe_replication_groups()
except boto.exception.BotoServerError as e:
error = e.reason
if e.error_code == 'AuthFailure':
error = self.get_auth_error_message()
if not e.reason == "Forbidden":
error = "Looks like AWS ElastiCache [Replication Groups] is down:\n%s" % e.message
self.fail_with_error(error, 'getting ElastiCache clusters')
try:
# Boto also doesn't provide wrapper classes to ReplicationGroups
# Because of that we can't make use of the get_list method in the
# AWSQueryConnection. Let's do the work manually
replication_groups = response['DescribeReplicationGroupsResponse']['DescribeReplicationGroupsResult']['ReplicationGroups']
except KeyError as e:
error = "ElastiCache [Replication Groups] query to AWS failed (unexpected format)."
self.fail_with_error(error, 'getting ElastiCache clusters')
for replication_group in replication_groups:
self.add_elasticache_replication_group(replication_group, region)
|
[
"def",
"get_elasticache_replication_groups_by_region",
"(",
"self",
",",
"region",
")",
":",
"# ElastiCache boto module doesn't provide a get_all_intances method,",
"# that's why we need to call describe directly (it would be called by",
"# the shorthand method anyway...)",
"try",
":",
"conn",
"=",
"self",
".",
"connect_to_aws",
"(",
"elasticache",
",",
"region",
")",
"if",
"conn",
":",
"response",
"=",
"conn",
".",
"describe_replication_groups",
"(",
")",
"except",
"boto",
".",
"exception",
".",
"BotoServerError",
"as",
"e",
":",
"error",
"=",
"e",
".",
"reason",
"if",
"e",
".",
"error_code",
"==",
"'AuthFailure'",
":",
"error",
"=",
"self",
".",
"get_auth_error_message",
"(",
")",
"if",
"not",
"e",
".",
"reason",
"==",
"\"Forbidden\"",
":",
"error",
"=",
"\"Looks like AWS ElastiCache [Replication Groups] is down:\\n%s\"",
"%",
"e",
".",
"message",
"self",
".",
"fail_with_error",
"(",
"error",
",",
"'getting ElastiCache clusters'",
")",
"try",
":",
"# Boto also doesn't provide wrapper classes to ReplicationGroups",
"# Because of that we can't make use of the get_list method in the",
"# AWSQueryConnection. Let's do the work manually",
"replication_groups",
"=",
"response",
"[",
"'DescribeReplicationGroupsResponse'",
"]",
"[",
"'DescribeReplicationGroupsResult'",
"]",
"[",
"'ReplicationGroups'",
"]",
"except",
"KeyError",
"as",
"e",
":",
"error",
"=",
"\"ElastiCache [Replication Groups] query to AWS failed (unexpected format).\"",
"self",
".",
"fail_with_error",
"(",
"error",
",",
"'getting ElastiCache clusters'",
")",
"for",
"replication_group",
"in",
"replication_groups",
":",
"self",
".",
"add_elasticache_replication_group",
"(",
"replication_group",
",",
"region",
")"
] |
https://github.com/scottslowe/learning-tools/blob/5a2abe30e269055d89f6ff4210f0f9f52d632680/traefik/tf-ans-swarm/ec2.py#L741-L773
|
||
golismero/golismero
|
7d605b937e241f51c1ca4f47b20f755eeefb9d76
|
tools/sqlmap/thirdparty/xdot/xdot.py
|
python
|
Pen.copy
|
(self)
|
return pen
|
Create a copy of this pen.
|
Create a copy of this pen.
|
[
"Create",
"a",
"copy",
"of",
"this",
"pen",
"."
] |
def copy(self):
"""Create a copy of this pen."""
pen = Pen()
pen.__dict__ = self.__dict__.copy()
return pen
|
[
"def",
"copy",
"(",
"self",
")",
":",
"pen",
"=",
"Pen",
"(",
")",
"pen",
".",
"__dict__",
"=",
"self",
".",
"__dict__",
".",
"copy",
"(",
")",
"return",
"pen"
] |
https://github.com/golismero/golismero/blob/7d605b937e241f51c1ca4f47b20f755eeefb9d76/tools/sqlmap/thirdparty/xdot/xdot.py#L62-L66
|
|
nltk/nltk
|
3f74ac55681667d7ef78b664557487145f51eb02
|
nltk/translate/stack_decoder.py
|
python
|
StackDecoder.__init__
|
(self, phrase_table, language_model)
|
:param phrase_table: Table of translations for source language
phrases and the log probabilities for those translations.
:type phrase_table: PhraseTable
:param language_model: Target language model. Must define a
``probability_change`` method that calculates the change in
log probability of a sentence, if a given string is appended
to it.
This interface is experimental and will likely be replaced
with nltk.model once it is implemented.
:type language_model: object
|
:param phrase_table: Table of translations for source language
phrases and the log probabilities for those translations.
:type phrase_table: PhraseTable
|
[
":",
"param",
"phrase_table",
":",
"Table",
"of",
"translations",
"for",
"source",
"language",
"phrases",
"and",
"the",
"log",
"probabilities",
"for",
"those",
"translations",
".",
":",
"type",
"phrase_table",
":",
"PhraseTable"
] |
def __init__(self, phrase_table, language_model):
"""
:param phrase_table: Table of translations for source language
phrases and the log probabilities for those translations.
:type phrase_table: PhraseTable
:param language_model: Target language model. Must define a
``probability_change`` method that calculates the change in
log probability of a sentence, if a given string is appended
to it.
This interface is experimental and will likely be replaced
with nltk.model once it is implemented.
:type language_model: object
"""
self.phrase_table = phrase_table
self.language_model = language_model
self.word_penalty = 0.0
"""
float: Influences the translation length exponentially.
If positive, shorter translations are preferred.
If negative, longer translations are preferred.
If zero, no penalty is applied.
"""
self.beam_threshold = 0.0
"""
float: Hypotheses that score below this factor of the best
hypothesis in a stack are dropped from consideration.
Value between 0.0 and 1.0.
"""
self.stack_size = 100
"""
int: Maximum number of hypotheses to consider in a stack.
Higher values increase the likelihood of a good translation,
but increases processing time.
"""
self.__distortion_factor = 0.5
self.__compute_log_distortion()
|
[
"def",
"__init__",
"(",
"self",
",",
"phrase_table",
",",
"language_model",
")",
":",
"self",
".",
"phrase_table",
"=",
"phrase_table",
"self",
".",
"language_model",
"=",
"language_model",
"self",
".",
"word_penalty",
"=",
"0.0",
"\"\"\"\n float: Influences the translation length exponentially.\n If positive, shorter translations are preferred.\n If negative, longer translations are preferred.\n If zero, no penalty is applied.\n \"\"\"",
"self",
".",
"beam_threshold",
"=",
"0.0",
"\"\"\"\n float: Hypotheses that score below this factor of the best\n hypothesis in a stack are dropped from consideration.\n Value between 0.0 and 1.0.\n \"\"\"",
"self",
".",
"stack_size",
"=",
"100",
"\"\"\"\n int: Maximum number of hypotheses to consider in a stack.\n Higher values increase the likelihood of a good translation,\n but increases processing time.\n \"\"\"",
"self",
".",
"__distortion_factor",
"=",
"0.5",
"self",
".",
"__compute_log_distortion",
"(",
")"
] |
https://github.com/nltk/nltk/blob/3f74ac55681667d7ef78b664557487145f51eb02/nltk/translate/stack_decoder.py#L79-L119
|
||
oracle/graalpython
|
577e02da9755d916056184ec441c26e00b70145c
|
graalpython/lib-python/3/asyncio/streams.py
|
python
|
StreamReader.readline
|
(self)
|
return line
|
Read chunk of data from the stream until newline (b'\n') is found.
On success, return chunk that ends with newline. If only partial
line can be read due to EOF, return incomplete line without
terminating newline. When EOF was reached while no bytes read, empty
bytes object is returned.
If limit is reached, ValueError will be raised. In that case, if
newline was found, complete line including newline will be removed
from internal buffer. Else, internal buffer will be cleared. Limit is
compared against part of the line without newline.
If stream was paused, this function will automatically resume it if
needed.
|
Read chunk of data from the stream until newline (b'\n') is found.
|
[
"Read",
"chunk",
"of",
"data",
"from",
"the",
"stream",
"until",
"newline",
"(",
"b",
"\\",
"n",
")",
"is",
"found",
"."
] |
async def readline(self):
"""Read chunk of data from the stream until newline (b'\n') is found.
On success, return chunk that ends with newline. If only partial
line can be read due to EOF, return incomplete line without
terminating newline. When EOF was reached while no bytes read, empty
bytes object is returned.
If limit is reached, ValueError will be raised. In that case, if
newline was found, complete line including newline will be removed
from internal buffer. Else, internal buffer will be cleared. Limit is
compared against part of the line without newline.
If stream was paused, this function will automatically resume it if
needed.
"""
sep = b'\n'
seplen = len(sep)
try:
line = await self.readuntil(sep)
except exceptions.IncompleteReadError as e:
return e.partial
except exceptions.LimitOverrunError as e:
if self._buffer.startswith(sep, e.consumed):
del self._buffer[:e.consumed + seplen]
else:
self._buffer.clear()
self._maybe_resume_transport()
raise ValueError(e.args[0])
return line
|
[
"async",
"def",
"readline",
"(",
"self",
")",
":",
"sep",
"=",
"b'\\n'",
"seplen",
"=",
"len",
"(",
"sep",
")",
"try",
":",
"line",
"=",
"await",
"self",
".",
"readuntil",
"(",
"sep",
")",
"except",
"exceptions",
".",
"IncompleteReadError",
"as",
"e",
":",
"return",
"e",
".",
"partial",
"except",
"exceptions",
".",
"LimitOverrunError",
"as",
"e",
":",
"if",
"self",
".",
"_buffer",
".",
"startswith",
"(",
"sep",
",",
"e",
".",
"consumed",
")",
":",
"del",
"self",
".",
"_buffer",
"[",
":",
"e",
".",
"consumed",
"+",
"seplen",
"]",
"else",
":",
"self",
".",
"_buffer",
".",
"clear",
"(",
")",
"self",
".",
"_maybe_resume_transport",
"(",
")",
"raise",
"ValueError",
"(",
"e",
".",
"args",
"[",
"0",
"]",
")",
"return",
"line"
] |
https://github.com/oracle/graalpython/blob/577e02da9755d916056184ec441c26e00b70145c/graalpython/lib-python/3/asyncio/streams.py#L521-L550
|
|
LiDan456/MAD-GANs
|
3139a73a4112d3f3f18182c9a6cdc2c671e7cfe8
|
plotting.py
|
python
|
visualise_latent
|
(Z, identifier)
|
return True
|
visualise a SINGLE point in the latent space
|
visualise a SINGLE point in the latent space
|
[
"visualise",
"a",
"SINGLE",
"point",
"in",
"the",
"latent",
"space"
] |
def visualise_latent(Z, identifier):
"""
visualise a SINGLE point in the latent space
"""
seq_length = Z.shape[0]
latent_dim = Z.shape[1]
if latent_dim > 2:
print('WARNING: Only visualising first two dimensions of latent space.')
h = np.random.random()
colours = np.array([hsv_to_rgb((h, i/seq_length, 0.96)) for i in range(seq_length)])
# plt.plot(Z[:, 0], Z[:, 1], c='grey', alpha=0.5)
for i in range(seq_length):
plt.scatter(Z[i, 0], Z[i, 1], marker='o', c=colours[i])
plt.savefig('./experiments/plots/' + identifier + '_Z.png')
plt.clf()
plt.close()
return True
|
[
"def",
"visualise_latent",
"(",
"Z",
",",
"identifier",
")",
":",
"seq_length",
"=",
"Z",
".",
"shape",
"[",
"0",
"]",
"latent_dim",
"=",
"Z",
".",
"shape",
"[",
"1",
"]",
"if",
"latent_dim",
">",
"2",
":",
"print",
"(",
"'WARNING: Only visualising first two dimensions of latent space.'",
")",
"h",
"=",
"np",
".",
"random",
".",
"random",
"(",
")",
"colours",
"=",
"np",
".",
"array",
"(",
"[",
"hsv_to_rgb",
"(",
"(",
"h",
",",
"i",
"/",
"seq_length",
",",
"0.96",
")",
")",
"for",
"i",
"in",
"range",
"(",
"seq_length",
")",
"]",
")",
"# plt.plot(Z[:, 0], Z[:, 1], c='grey', alpha=0.5)",
"for",
"i",
"in",
"range",
"(",
"seq_length",
")",
":",
"plt",
".",
"scatter",
"(",
"Z",
"[",
"i",
",",
"0",
"]",
",",
"Z",
"[",
"i",
",",
"1",
"]",
",",
"marker",
"=",
"'o'",
",",
"c",
"=",
"colours",
"[",
"i",
"]",
")",
"plt",
".",
"savefig",
"(",
"'./experiments/plots/'",
"+",
"identifier",
"+",
"'_Z.png'",
")",
"plt",
".",
"clf",
"(",
")",
"plt",
".",
"close",
"(",
")",
"return",
"True"
] |
https://github.com/LiDan456/MAD-GANs/blob/3139a73a4112d3f3f18182c9a6cdc2c671e7cfe8/plotting.py#L436-L452
|
|
inspurer/WorkAttendanceSystem
|
1221e2d67bdf5bb15fe99517cc3ded58ccb066df
|
V1.0/venv/Lib/site-packages/pip-9.0.1-py3.5.egg/pip/_vendor/distlib/_backport/shutil.py
|
python
|
copy
|
(src, dst)
|
Copy data and mode bits ("cp src dst").
The destination may be a directory.
|
Copy data and mode bits ("cp src dst").
|
[
"Copy",
"data",
"and",
"mode",
"bits",
"(",
"cp",
"src",
"dst",
")",
"."
] |
def copy(src, dst):
"""Copy data and mode bits ("cp src dst").
The destination may be a directory.
"""
if os.path.isdir(dst):
dst = os.path.join(dst, os.path.basename(src))
copyfile(src, dst)
copymode(src, dst)
|
[
"def",
"copy",
"(",
"src",
",",
"dst",
")",
":",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"dst",
")",
":",
"dst",
"=",
"os",
".",
"path",
".",
"join",
"(",
"dst",
",",
"os",
".",
"path",
".",
"basename",
"(",
"src",
")",
")",
"copyfile",
"(",
"src",
",",
"dst",
")",
"copymode",
"(",
"src",
",",
"dst",
")"
] |
https://github.com/inspurer/WorkAttendanceSystem/blob/1221e2d67bdf5bb15fe99517cc3ded58ccb066df/V1.0/venv/Lib/site-packages/pip-9.0.1-py3.5.egg/pip/_vendor/distlib/_backport/shutil.py#L130-L139
|
||
GeneralMills/pytrends
|
bac0caea18817630e98b503a38e9b445e7b5add1
|
pytrends/dailydata.py
|
python
|
convert_dates_to_timeframe
|
(start: date, stop: date)
|
return f"{start.strftime('%Y-%m-%d')} {stop.strftime('%Y-%m-%d')}"
|
Given two dates, returns a stringified version of the interval between
the two dates which is used to retrieve data for a specific time frame
from Google Trends.
|
Given two dates, returns a stringified version of the interval between
the two dates which is used to retrieve data for a specific time frame
from Google Trends.
|
[
"Given",
"two",
"dates",
"returns",
"a",
"stringified",
"version",
"of",
"the",
"interval",
"between",
"the",
"two",
"dates",
"which",
"is",
"used",
"to",
"retrieve",
"data",
"for",
"a",
"specific",
"time",
"frame",
"from",
"Google",
"Trends",
"."
] |
def convert_dates_to_timeframe(start: date, stop: date) -> str:
"""Given two dates, returns a stringified version of the interval between
the two dates which is used to retrieve data for a specific time frame
from Google Trends.
"""
return f"{start.strftime('%Y-%m-%d')} {stop.strftime('%Y-%m-%d')}"
|
[
"def",
"convert_dates_to_timeframe",
"(",
"start",
":",
"date",
",",
"stop",
":",
"date",
")",
"->",
"str",
":",
"return",
"f\"{start.strftime('%Y-%m-%d')} {stop.strftime('%Y-%m-%d')}\""
] |
https://github.com/GeneralMills/pytrends/blob/bac0caea18817630e98b503a38e9b445e7b5add1/pytrends/dailydata.py#L21-L26
|
|
thunlp/OpenNRE
|
dbc58f5da049cc97e6e9a9a750839d595ea38471
|
opennre/encoder/pcnn_encoder.py
|
python
|
PCNNEncoder.forward
|
(self, token, pos1, pos2, mask)
|
return x
|
Args:
token: (B, L), index of tokens
pos1: (B, L), relative position to head entity
pos2: (B, L), relative position to tail entity
Return:
(B, EMBED), representations for sentences
|
Args:
token: (B, L), index of tokens
pos1: (B, L), relative position to head entity
pos2: (B, L), relative position to tail entity
Return:
(B, EMBED), representations for sentences
|
[
"Args",
":",
"token",
":",
"(",
"B",
"L",
")",
"index",
"of",
"tokens",
"pos1",
":",
"(",
"B",
"L",
")",
"relative",
"position",
"to",
"head",
"entity",
"pos2",
":",
"(",
"B",
"L",
")",
"relative",
"position",
"to",
"tail",
"entity",
"Return",
":",
"(",
"B",
"EMBED",
")",
"representations",
"for",
"sentences"
] |
def forward(self, token, pos1, pos2, mask):
"""
Args:
token: (B, L), index of tokens
pos1: (B, L), relative position to head entity
pos2: (B, L), relative position to tail entity
Return:
(B, EMBED), representations for sentences
"""
# Check size of tensors
if len(token.size()) != 2 or token.size() != pos1.size() or token.size() != pos2.size():
raise Exception("Size of token, pos1 ans pos2 should be (B, L)")
x = torch.cat([self.word_embedding(token),
self.pos1_embedding(pos1),
self.pos2_embedding(pos2)], 2) # (B, L, EMBED)
x = x.transpose(1, 2) # (B, EMBED, L)
x = self.conv(x) # (B, H, L)
mask = 1 - self.mask_embedding(mask).transpose(1, 2) # (B, L) -> (B, L, 3) -> (B, 3, L)
pool1 = self.pool(self.act(x + self._minus * mask[:, 0:1, :])) # (B, H, 1)
pool2 = self.pool(self.act(x + self._minus * mask[:, 1:2, :]))
pool3 = self.pool(self.act(x + self._minus * mask[:, 2:3, :]))
x = torch.cat([pool1, pool2, pool3], 1) # (B, 3H, 1)
x = x.squeeze(2) # (B, 3H)
x = self.drop(x)
return x
|
[
"def",
"forward",
"(",
"self",
",",
"token",
",",
"pos1",
",",
"pos2",
",",
"mask",
")",
":",
"# Check size of tensors",
"if",
"len",
"(",
"token",
".",
"size",
"(",
")",
")",
"!=",
"2",
"or",
"token",
".",
"size",
"(",
")",
"!=",
"pos1",
".",
"size",
"(",
")",
"or",
"token",
".",
"size",
"(",
")",
"!=",
"pos2",
".",
"size",
"(",
")",
":",
"raise",
"Exception",
"(",
"\"Size of token, pos1 ans pos2 should be (B, L)\"",
")",
"x",
"=",
"torch",
".",
"cat",
"(",
"[",
"self",
".",
"word_embedding",
"(",
"token",
")",
",",
"self",
".",
"pos1_embedding",
"(",
"pos1",
")",
",",
"self",
".",
"pos2_embedding",
"(",
"pos2",
")",
"]",
",",
"2",
")",
"# (B, L, EMBED)",
"x",
"=",
"x",
".",
"transpose",
"(",
"1",
",",
"2",
")",
"# (B, EMBED, L)",
"x",
"=",
"self",
".",
"conv",
"(",
"x",
")",
"# (B, H, L)",
"mask",
"=",
"1",
"-",
"self",
".",
"mask_embedding",
"(",
"mask",
")",
".",
"transpose",
"(",
"1",
",",
"2",
")",
"# (B, L) -> (B, L, 3) -> (B, 3, L)",
"pool1",
"=",
"self",
".",
"pool",
"(",
"self",
".",
"act",
"(",
"x",
"+",
"self",
".",
"_minus",
"*",
"mask",
"[",
":",
",",
"0",
":",
"1",
",",
":",
"]",
")",
")",
"# (B, H, 1)",
"pool2",
"=",
"self",
".",
"pool",
"(",
"self",
".",
"act",
"(",
"x",
"+",
"self",
".",
"_minus",
"*",
"mask",
"[",
":",
",",
"1",
":",
"2",
",",
":",
"]",
")",
")",
"pool3",
"=",
"self",
".",
"pool",
"(",
"self",
".",
"act",
"(",
"x",
"+",
"self",
".",
"_minus",
"*",
"mask",
"[",
":",
",",
"2",
":",
"3",
",",
":",
"]",
")",
")",
"x",
"=",
"torch",
".",
"cat",
"(",
"[",
"pool1",
",",
"pool2",
",",
"pool3",
"]",
",",
"1",
")",
"# (B, 3H, 1)",
"x",
"=",
"x",
".",
"squeeze",
"(",
"2",
")",
"# (B, 3H)",
"x",
"=",
"self",
".",
"drop",
"(",
"x",
")",
"return",
"x"
] |
https://github.com/thunlp/OpenNRE/blob/dbc58f5da049cc97e6e9a9a750839d595ea38471/opennre/encoder/pcnn_encoder.py#L54-L80
|
|
zlai0/MAST
|
a57b043ca597b9b7ef6842b1fa965c9f1ee71526
|
models/submodule.py
|
python
|
conv3x3
|
(in_planes, out_planes, stride=1, groups=1, dilation=1)
|
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=dilation, groups=groups, bias=False, dilation=dilation)
|
3x3 convolution with padding
|
3x3 convolution with padding
|
[
"3x3",
"convolution",
"with",
"padding"
] |
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=dilation, groups=groups, bias=False, dilation=dilation)
|
[
"def",
"conv3x3",
"(",
"in_planes",
",",
"out_planes",
",",
"stride",
"=",
"1",
",",
"groups",
"=",
"1",
",",
"dilation",
"=",
"1",
")",
":",
"return",
"nn",
".",
"Conv2d",
"(",
"in_planes",
",",
"out_planes",
",",
"kernel_size",
"=",
"3",
",",
"stride",
"=",
"stride",
",",
"padding",
"=",
"dilation",
",",
"groups",
"=",
"groups",
",",
"bias",
"=",
"False",
",",
"dilation",
"=",
"dilation",
")"
] |
https://github.com/zlai0/MAST/blob/a57b043ca597b9b7ef6842b1fa965c9f1ee71526/models/submodule.py#L353-L356
|
|
OpenEndedGroup/Field
|
4f7c8edfb01bb0ccc927b78d3c500f018a4ae37c
|
Contents/lib/python/javapath.py
|
python
|
isdir
|
(path)
|
return File(sys.getPath(path)).isDirectory()
|
Test whether a path is a directory
|
Test whether a path is a directory
|
[
"Test",
"whether",
"a",
"path",
"is",
"a",
"directory"
] |
def isdir(path):
"""Test whether a path is a directory"""
path = _tostr(path, "isdir")
return File(sys.getPath(path)).isDirectory()
|
[
"def",
"isdir",
"(",
"path",
")",
":",
"path",
"=",
"_tostr",
"(",
"path",
",",
"\"isdir\"",
")",
"return",
"File",
"(",
"sys",
".",
"getPath",
"(",
"path",
")",
")",
".",
"isDirectory",
"(",
")"
] |
https://github.com/OpenEndedGroup/Field/blob/4f7c8edfb01bb0ccc927b78d3c500f018a4ae37c/Contents/lib/python/javapath.py#L115-L118
|
|
AppScale/gts
|
46f909cf5dc5ba81faf9d81dc9af598dcf8a82a9
|
AppServer/lib/cherrypy/cherrypy/wsgiserver/wsgiserver3.py
|
python
|
HTTPRequest.send_headers
|
(self)
|
Assert, process, and send the HTTP response message-headers.
You must set self.status, and self.outheaders before calling this.
|
Assert, process, and send the HTTP response message-headers.
You must set self.status, and self.outheaders before calling this.
|
[
"Assert",
"process",
"and",
"send",
"the",
"HTTP",
"response",
"message",
"-",
"headers",
".",
"You",
"must",
"set",
"self",
".",
"status",
"and",
"self",
".",
"outheaders",
"before",
"calling",
"this",
"."
] |
def send_headers(self):
"""Assert, process, and send the HTTP response message-headers.
You must set self.status, and self.outheaders before calling this.
"""
hkeys = [key.lower() for key, value in self.outheaders]
status = int(self.status[:3])
if status == 413:
# Request Entity Too Large. Close conn to avoid garbage.
self.close_connection = True
elif b"content-length" not in hkeys:
# "All 1xx (informational), 204 (no content),
# and 304 (not modified) responses MUST NOT
# include a message-body." So no point chunking.
if status < 200 or status in (204, 205, 304):
pass
else:
if (self.response_protocol == 'HTTP/1.1'
and self.method != b'HEAD'):
# Use the chunked transfer-coding
self.chunked_write = True
self.outheaders.append((b"Transfer-Encoding", b"chunked"))
else:
# Closing the conn is the only way to determine len.
self.close_connection = True
if b"connection" not in hkeys:
if self.response_protocol == 'HTTP/1.1':
# Both server and client are HTTP/1.1 or better
if self.close_connection:
self.outheaders.append((b"Connection", b"close"))
else:
# Server and/or client are HTTP/1.0
if not self.close_connection:
self.outheaders.append((b"Connection", b"Keep-Alive"))
if (not self.close_connection) and (not self.chunked_read):
# Read any remaining request body data on the socket.
# "If an origin server receives a request that does not include an
# Expect request-header field with the "100-continue" expectation,
# the request includes a request body, and the server responds
# with a final status code before reading the entire request body
# from the transport connection, then the server SHOULD NOT close
# the transport connection until it has read the entire request,
# or until the client closes the connection. Otherwise, the client
# might not reliably receive the response message. However, this
# requirement is not be construed as preventing a server from
# defending itself against denial-of-service attacks, or from
# badly broken client implementations."
remaining = getattr(self.rfile, 'remaining', 0)
if remaining > 0:
self.rfile.read(remaining)
if b"date" not in hkeys:
self.outheaders.append(
(b"Date", email.utils.formatdate(usegmt=True).encode('ISO-8859-1')))
if b"server" not in hkeys:
self.outheaders.append(
(b"Server", self.server.server_name.encode('ISO-8859-1')))
buf = [self.server.protocol.encode('ascii') + SPACE + self.status + CRLF]
for k, v in self.outheaders:
buf.append(k + COLON + SPACE + v + CRLF)
buf.append(CRLF)
self.conn.wfile.write(EMPTY.join(buf))
|
[
"def",
"send_headers",
"(",
"self",
")",
":",
"hkeys",
"=",
"[",
"key",
".",
"lower",
"(",
")",
"for",
"key",
",",
"value",
"in",
"self",
".",
"outheaders",
"]",
"status",
"=",
"int",
"(",
"self",
".",
"status",
"[",
":",
"3",
"]",
")",
"if",
"status",
"==",
"413",
":",
"# Request Entity Too Large. Close conn to avoid garbage.",
"self",
".",
"close_connection",
"=",
"True",
"elif",
"b\"content-length\"",
"not",
"in",
"hkeys",
":",
"# \"All 1xx (informational), 204 (no content),",
"# and 304 (not modified) responses MUST NOT",
"# include a message-body.\" So no point chunking.",
"if",
"status",
"<",
"200",
"or",
"status",
"in",
"(",
"204",
",",
"205",
",",
"304",
")",
":",
"pass",
"else",
":",
"if",
"(",
"self",
".",
"response_protocol",
"==",
"'HTTP/1.1'",
"and",
"self",
".",
"method",
"!=",
"b'HEAD'",
")",
":",
"# Use the chunked transfer-coding",
"self",
".",
"chunked_write",
"=",
"True",
"self",
".",
"outheaders",
".",
"append",
"(",
"(",
"b\"Transfer-Encoding\"",
",",
"b\"chunked\"",
")",
")",
"else",
":",
"# Closing the conn is the only way to determine len.",
"self",
".",
"close_connection",
"=",
"True",
"if",
"b\"connection\"",
"not",
"in",
"hkeys",
":",
"if",
"self",
".",
"response_protocol",
"==",
"'HTTP/1.1'",
":",
"# Both server and client are HTTP/1.1 or better",
"if",
"self",
".",
"close_connection",
":",
"self",
".",
"outheaders",
".",
"append",
"(",
"(",
"b\"Connection\"",
",",
"b\"close\"",
")",
")",
"else",
":",
"# Server and/or client are HTTP/1.0",
"if",
"not",
"self",
".",
"close_connection",
":",
"self",
".",
"outheaders",
".",
"append",
"(",
"(",
"b\"Connection\"",
",",
"b\"Keep-Alive\"",
")",
")",
"if",
"(",
"not",
"self",
".",
"close_connection",
")",
"and",
"(",
"not",
"self",
".",
"chunked_read",
")",
":",
"# Read any remaining request body data on the socket.",
"# \"If an origin server receives a request that does not include an",
"# Expect request-header field with the \"100-continue\" expectation,",
"# the request includes a request body, and the server responds",
"# with a final status code before reading the entire request body",
"# from the transport connection, then the server SHOULD NOT close",
"# the transport connection until it has read the entire request,",
"# or until the client closes the connection. Otherwise, the client",
"# might not reliably receive the response message. However, this",
"# requirement is not be construed as preventing a server from",
"# defending itself against denial-of-service attacks, or from",
"# badly broken client implementations.\"",
"remaining",
"=",
"getattr",
"(",
"self",
".",
"rfile",
",",
"'remaining'",
",",
"0",
")",
"if",
"remaining",
">",
"0",
":",
"self",
".",
"rfile",
".",
"read",
"(",
"remaining",
")",
"if",
"b\"date\"",
"not",
"in",
"hkeys",
":",
"self",
".",
"outheaders",
".",
"append",
"(",
"(",
"b\"Date\"",
",",
"email",
".",
"utils",
".",
"formatdate",
"(",
"usegmt",
"=",
"True",
")",
".",
"encode",
"(",
"'ISO-8859-1'",
")",
")",
")",
"if",
"b\"server\"",
"not",
"in",
"hkeys",
":",
"self",
".",
"outheaders",
".",
"append",
"(",
"(",
"b\"Server\"",
",",
"self",
".",
"server",
".",
"server_name",
".",
"encode",
"(",
"'ISO-8859-1'",
")",
")",
")",
"buf",
"=",
"[",
"self",
".",
"server",
".",
"protocol",
".",
"encode",
"(",
"'ascii'",
")",
"+",
"SPACE",
"+",
"self",
".",
"status",
"+",
"CRLF",
"]",
"for",
"k",
",",
"v",
"in",
"self",
".",
"outheaders",
":",
"buf",
".",
"append",
"(",
"k",
"+",
"COLON",
"+",
"SPACE",
"+",
"v",
"+",
"CRLF",
")",
"buf",
".",
"append",
"(",
"CRLF",
")",
"self",
".",
"conn",
".",
"wfile",
".",
"write",
"(",
"EMPTY",
".",
"join",
"(",
"buf",
")",
")"
] |
https://github.com/AppScale/gts/blob/46f909cf5dc5ba81faf9d81dc9af598dcf8a82a9/AppServer/lib/cherrypy/cherrypy/wsgiserver/wsgiserver3.py#L882-L948
|
||
nortikin/sverchok
|
7b460f01317c15f2681bfa3e337c5e7346f3711b
|
core/sockets.py
|
python
|
SvSocketCommon.hide_safe
|
(self)
|
return self.hide
|
It will hide even linked sockets
|
It will hide even linked sockets
|
[
"It",
"will",
"hide",
"even",
"linked",
"sockets"
] |
def hide_safe(self):
"""It will hide even linked sockets"""
return self.hide
|
[
"def",
"hide_safe",
"(",
"self",
")",
":",
"return",
"self",
".",
"hide"
] |
https://github.com/nortikin/sverchok/blob/7b460f01317c15f2681bfa3e337c5e7346f3711b/core/sockets.py#L372-L374
|
|
ShivamSarodia/ShivyC
|
e7d72eff237e1ef49ec70333497348baf86be425
|
shivyc/il_gen.py
|
python
|
SymbolTable.add_typedef
|
(self, identifier, ctype)
|
Add a type definition to the symbol table.
|
Add a type definition to the symbol table.
|
[
"Add",
"a",
"type",
"definition",
"to",
"the",
"symbol",
"table",
"."
] |
def add_typedef(self, identifier, ctype):
"""Add a type definition to the symbol table."""
name = identifier.content
if name in self.tables[-1].vars:
old_ctype = self.tables[-1].vars[name]
if isinstance(old_ctype, ILValue):
err = f"'{name}' redeclared as type definition in same scope"
raise CompilerError(err, identifier.r)
elif not old_ctype.compatible(ctype):
err = f"'{name}' redeclared as incompatible type in same scope"
raise CompilerError(err, identifier.r)
else:
return
self.tables[-1].vars[name] = ctype
|
[
"def",
"add_typedef",
"(",
"self",
",",
"identifier",
",",
"ctype",
")",
":",
"name",
"=",
"identifier",
".",
"content",
"if",
"name",
"in",
"self",
".",
"tables",
"[",
"-",
"1",
"]",
".",
"vars",
":",
"old_ctype",
"=",
"self",
".",
"tables",
"[",
"-",
"1",
"]",
".",
"vars",
"[",
"name",
"]",
"if",
"isinstance",
"(",
"old_ctype",
",",
"ILValue",
")",
":",
"err",
"=",
"f\"'{name}' redeclared as type definition in same scope\"",
"raise",
"CompilerError",
"(",
"err",
",",
"identifier",
".",
"r",
")",
"elif",
"not",
"old_ctype",
".",
"compatible",
"(",
"ctype",
")",
":",
"err",
"=",
"f\"'{name}' redeclared as incompatible type in same scope\"",
"raise",
"CompilerError",
"(",
"err",
",",
"identifier",
".",
"r",
")",
"else",
":",
"return",
"self",
".",
"tables",
"[",
"-",
"1",
"]",
".",
"vars",
"[",
"name",
"]",
"=",
"ctype"
] |
https://github.com/ShivamSarodia/ShivyC/blob/e7d72eff237e1ef49ec70333497348baf86be425/shivyc/il_gen.py#L316-L331
|
||
ladybug-tools/butterfly
|
c8fc0bbe317bb41bfe5f28305782a82347b8c776
|
butterfly/solution.py
|
python
|
SolutionParameter.isSolutionParameter
|
(self)
|
return True
|
Return True.
|
Return True.
|
[
"Return",
"True",
"."
] |
def isSolutionParameter(self):
"""Return True."""
return True
|
[
"def",
"isSolutionParameter",
"(",
"self",
")",
":",
"return",
"True"
] |
https://github.com/ladybug-tools/butterfly/blob/c8fc0bbe317bb41bfe5f28305782a82347b8c776/butterfly/solution.py#L397-L399
|
|
GNS3/gns3-gui
|
da8adbaa18ab60e053af2a619efd468f4c8950f3
|
gns3/main_window.py
|
python
|
MainWindow.openProjectActionSlot
|
(self)
|
Slot called to open a project.
|
Slot called to open a project.
|
[
"Slot",
"called",
"to",
"open",
"a",
"project",
"."
] |
def openProjectActionSlot(self):
"""
Slot called to open a project.
"""
if Controller.instance().isRemote():
# If the server is remote we use the new project windows with the project library
self._newProjectActionSlot()
else:
directory = self._project_dir
if self._project_dir is None or not os.path.exists(self._project_dir):
directory = Topology.instance().projectsDirPath()
path, _ = QtWidgets.QFileDialog.getOpenFileName(self, "Open project", directory,
"All files (*.*);;GNS3 Project (*.gns3);;GNS3 Portable Project (*.gns3project *.gns3p);;NET files (*.net)",
"GNS3 Project (*.gns3)")
if path:
self.loadPath(path)
self._project_dir = os.path.dirname(path)
|
[
"def",
"openProjectActionSlot",
"(",
"self",
")",
":",
"if",
"Controller",
".",
"instance",
"(",
")",
".",
"isRemote",
"(",
")",
":",
"# If the server is remote we use the new project windows with the project library",
"self",
".",
"_newProjectActionSlot",
"(",
")",
"else",
":",
"directory",
"=",
"self",
".",
"_project_dir",
"if",
"self",
".",
"_project_dir",
"is",
"None",
"or",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"self",
".",
"_project_dir",
")",
":",
"directory",
"=",
"Topology",
".",
"instance",
"(",
")",
".",
"projectsDirPath",
"(",
")",
"path",
",",
"_",
"=",
"QtWidgets",
".",
"QFileDialog",
".",
"getOpenFileName",
"(",
"self",
",",
"\"Open project\"",
",",
"directory",
",",
"\"All files (*.*);;GNS3 Project (*.gns3);;GNS3 Portable Project (*.gns3project *.gns3p);;NET files (*.net)\"",
",",
"\"GNS3 Project (*.gns3)\"",
")",
"if",
"path",
":",
"self",
".",
"loadPath",
"(",
"path",
")",
"self",
".",
"_project_dir",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"path",
")"
] |
https://github.com/GNS3/gns3-gui/blob/da8adbaa18ab60e053af2a619efd468f4c8950f3/gns3/main_window.py#L419-L436
|
||
MegEngine/Models
|
4c55d28bad03652a4e352bf5e736a75df041d84a
|
official/nlp/bert/model.py
|
python
|
transpose
|
(inp, a, b)
|
return inp.transpose(cur_shape)
|
[] |
def transpose(inp, a, b):
cur_shape = list(range(0, inp.ndim))
cur_shape[a], cur_shape[b] = cur_shape[b], cur_shape[a]
return inp.transpose(cur_shape)
|
[
"def",
"transpose",
"(",
"inp",
",",
"a",
",",
"b",
")",
":",
"cur_shape",
"=",
"list",
"(",
"range",
"(",
"0",
",",
"inp",
".",
"ndim",
")",
")",
"cur_shape",
"[",
"a",
"]",
",",
"cur_shape",
"[",
"b",
"]",
"=",
"cur_shape",
"[",
"b",
"]",
",",
"cur_shape",
"[",
"a",
"]",
"return",
"inp",
".",
"transpose",
"(",
"cur_shape",
")"
] |
https://github.com/MegEngine/Models/blob/4c55d28bad03652a4e352bf5e736a75df041d84a/official/nlp/bert/model.py#L37-L40
|
|||
visionml/pytracking
|
3e6a8980db7a2275252abcc398ed0c2494f0ceab
|
ltr/models/loss/kl_regression.py
|
python
|
KLRegressionGrid.forward
|
(self, scores, gt_density, grid_dim=-1, grid_scale=1.0)
|
return L.mean()
|
Args:
scores: predicted score values
gt_density: probability density of the ground truth distribution
grid_dim: dimension(s) of the grid
grid_scale: area of one grid cell
|
Args:
scores: predicted score values
gt_density: probability density of the ground truth distribution
grid_dim: dimension(s) of the grid
grid_scale: area of one grid cell
|
[
"Args",
":",
"scores",
":",
"predicted",
"score",
"values",
"gt_density",
":",
"probability",
"density",
"of",
"the",
"ground",
"truth",
"distribution",
"grid_dim",
":",
"dimension",
"(",
"s",
")",
"of",
"the",
"grid",
"grid_scale",
":",
"area",
"of",
"one",
"grid",
"cell"
] |
def forward(self, scores, gt_density, grid_dim=-1, grid_scale=1.0):
"""Args:
scores: predicted score values
gt_density: probability density of the ground truth distribution
grid_dim: dimension(s) of the grid
grid_scale: area of one grid cell"""
score_corr = grid_scale * torch.sum(scores * gt_density, dim=grid_dim)
L = torch.logsumexp(scores, dim=grid_dim) + math.log(grid_scale) - score_corr
return L.mean()
|
[
"def",
"forward",
"(",
"self",
",",
"scores",
",",
"gt_density",
",",
"grid_dim",
"=",
"-",
"1",
",",
"grid_scale",
"=",
"1.0",
")",
":",
"score_corr",
"=",
"grid_scale",
"*",
"torch",
".",
"sum",
"(",
"scores",
"*",
"gt_density",
",",
"dim",
"=",
"grid_dim",
")",
"L",
"=",
"torch",
".",
"logsumexp",
"(",
"scores",
",",
"dim",
"=",
"grid_dim",
")",
"+",
"math",
".",
"log",
"(",
"grid_scale",
")",
"-",
"score_corr",
"return",
"L",
".",
"mean",
"(",
")"
] |
https://github.com/visionml/pytracking/blob/3e6a8980db7a2275252abcc398ed0c2494f0ceab/ltr/models/loss/kl_regression.py#L59-L70
|
|
dagwieers/mrepo
|
a55cbc737d8bade92070d38e4dbb9a24be4b477f
|
rhn/transports.py
|
python
|
BaseOutput.__init__
|
(self, transfer=0, encoding=0, connection=None, method="POST")
|
[] |
def __init__(self, transfer=0, encoding=0, connection=None, method="POST"):
# Assumes connection is an instance of HTTPConnection
if connection:
if not isinstance(connection, connections.HTTPConnection):
raise Exception("Expected an HTTPConnection type object")
self.method = method
# Store the connection
self._connection = connection
self.data = None
self.headers = UserDictCase()
self.encoding = 0
self.transfer = 0
self.transport_flags = {}
# for authenticated proxies
self.username = None
self.password = None
# Fields to keep the information about the server
self._host = None
self._handler = None
self._http_type = None
self._protocol = None
# Initialize self.transfer and self.encoding
self.set_transport_flags(transfer=transfer, encoding=encoding)
# internal flags
self.__processed = 0
|
[
"def",
"__init__",
"(",
"self",
",",
"transfer",
"=",
"0",
",",
"encoding",
"=",
"0",
",",
"connection",
"=",
"None",
",",
"method",
"=",
"\"POST\"",
")",
":",
"# Assumes connection is an instance of HTTPConnection",
"if",
"connection",
":",
"if",
"not",
"isinstance",
"(",
"connection",
",",
"connections",
".",
"HTTPConnection",
")",
":",
"raise",
"Exception",
"(",
"\"Expected an HTTPConnection type object\"",
")",
"self",
".",
"method",
"=",
"method",
"# Store the connection",
"self",
".",
"_connection",
"=",
"connection",
"self",
".",
"data",
"=",
"None",
"self",
".",
"headers",
"=",
"UserDictCase",
"(",
")",
"self",
".",
"encoding",
"=",
"0",
"self",
".",
"transfer",
"=",
"0",
"self",
".",
"transport_flags",
"=",
"{",
"}",
"# for authenticated proxies",
"self",
".",
"username",
"=",
"None",
"self",
".",
"password",
"=",
"None",
"# Fields to keep the information about the server",
"self",
".",
"_host",
"=",
"None",
"self",
".",
"_handler",
"=",
"None",
"self",
".",
"_http_type",
"=",
"None",
"self",
".",
"_protocol",
"=",
"None",
"# Initialize self.transfer and self.encoding",
"self",
".",
"set_transport_flags",
"(",
"transfer",
"=",
"transfer",
",",
"encoding",
"=",
"encoding",
")",
"# internal flags",
"self",
".",
"__processed",
"=",
"0"
] |
https://github.com/dagwieers/mrepo/blob/a55cbc737d8bade92070d38e4dbb9a24be4b477f/rhn/transports.py#L589-L617
|
||||
tosher/Mediawiker
|
81bf97cace59bedcb1668e7830b85c36e014428e
|
lib/Crypto.lin.x64/Crypto/Hash/SHA3_384.py
|
python
|
SHA3_384_Hash.hexdigest
|
(self)
|
return "".join(["%02x" % bord(x) for x in self.digest()])
|
Return the **printable** digest of the message that has been hashed so far.
:return: The hash digest, computed over the data processed so far.
Hexadecimal encoded.
:rtype: string
|
Return the **printable** digest of the message that has been hashed so far.
|
[
"Return",
"the",
"**",
"printable",
"**",
"digest",
"of",
"the",
"message",
"that",
"has",
"been",
"hashed",
"so",
"far",
"."
] |
def hexdigest(self):
"""Return the **printable** digest of the message that has been hashed so far.
:return: The hash digest, computed over the data processed so far.
Hexadecimal encoded.
:rtype: string
"""
return "".join(["%02x" % bord(x) for x in self.digest()])
|
[
"def",
"hexdigest",
"(",
"self",
")",
":",
"return",
"\"\"",
".",
"join",
"(",
"[",
"\"%02x\"",
"%",
"bord",
"(",
"x",
")",
"for",
"x",
"in",
"self",
".",
"digest",
"(",
")",
"]",
")"
] |
https://github.com/tosher/Mediawiker/blob/81bf97cace59bedcb1668e7830b85c36e014428e/lib/Crypto.lin.x64/Crypto/Hash/SHA3_384.py#L104-L112
|
|
apache/libcloud
|
90971e17bfd7b6bb97b2489986472c531cc8e140
|
libcloud/loadbalancer/drivers/nttcis.py
|
python
|
NttCisLBDriver.list_protocols
|
(self)
|
return ["http", "https", "tcp", "udp", "ftp", "smtp"]
|
Return a list of supported protocols.
Since all protocols are support by NTTC-CIS, this is a list
of common protocols.
:rtype: ``list`` of ``str``
|
Return a list of supported protocols.
|
[
"Return",
"a",
"list",
"of",
"supported",
"protocols",
"."
] |
def list_protocols(self):
"""
Return a list of supported protocols.
Since all protocols are support by NTTC-CIS, this is a list
of common protocols.
:rtype: ``list`` of ``str``
"""
return ["http", "https", "tcp", "udp", "ftp", "smtp"]
|
[
"def",
"list_protocols",
"(",
"self",
")",
":",
"return",
"[",
"\"http\"",
",",
"\"https\"",
",",
"\"tcp\"",
",",
"\"udp\"",
",",
"\"ftp\"",
",",
"\"smtp\"",
"]"
] |
https://github.com/apache/libcloud/blob/90971e17bfd7b6bb97b2489986472c531cc8e140/libcloud/loadbalancer/drivers/nttcis.py#L286-L295
|
|
rootpy/rootpy
|
3926935e1f2100d8ba68070c2ab44055d4800f73
|
rootpy/stats/histfactory/utils.py
|
python
|
split_norm_shape
|
(histosys, nominal_hist)
|
return norm, shape
|
Split a HistoSys into normalization (OverallSys) and shape (HistoSys)
components.
It is recommended to use OverallSys as much as possible, which tries to
enforce continuity up to the second derivative during
interpolation/extrapolation. So, if there is indeed a shape variation, then
factorize it into shape and normalization components.
|
Split a HistoSys into normalization (OverallSys) and shape (HistoSys)
components.
|
[
"Split",
"a",
"HistoSys",
"into",
"normalization",
"(",
"OverallSys",
")",
"and",
"shape",
"(",
"HistoSys",
")",
"components",
"."
] |
def split_norm_shape(histosys, nominal_hist):
"""
Split a HistoSys into normalization (OverallSys) and shape (HistoSys)
components.
It is recommended to use OverallSys as much as possible, which tries to
enforce continuity up to the second derivative during
interpolation/extrapolation. So, if there is indeed a shape variation, then
factorize it into shape and normalization components.
"""
up = histosys.GetHistoHigh()
dn = histosys.GetHistoLow()
up = up.Clone(name=up.name + '_shape')
dn = dn.Clone(name=dn.name + '_shape')
n_nominal = nominal_hist.integral(overflow=True)
n_up = up.integral(overflow=True)
n_dn = dn.integral(overflow=True)
if n_up != 0:
up.Scale(n_nominal / n_up)
if n_dn != 0:
dn.Scale(n_nominal / n_dn)
shape = HistoSys(histosys.GetName(), low=dn, high=up)
norm = OverallSys(histosys.GetName(),
low=n_dn / n_nominal if n_nominal != 0 else 1.,
high=n_up / n_nominal if n_nominal != 0 else 1.)
return norm, shape
|
[
"def",
"split_norm_shape",
"(",
"histosys",
",",
"nominal_hist",
")",
":",
"up",
"=",
"histosys",
".",
"GetHistoHigh",
"(",
")",
"dn",
"=",
"histosys",
".",
"GetHistoLow",
"(",
")",
"up",
"=",
"up",
".",
"Clone",
"(",
"name",
"=",
"up",
".",
"name",
"+",
"'_shape'",
")",
"dn",
"=",
"dn",
".",
"Clone",
"(",
"name",
"=",
"dn",
".",
"name",
"+",
"'_shape'",
")",
"n_nominal",
"=",
"nominal_hist",
".",
"integral",
"(",
"overflow",
"=",
"True",
")",
"n_up",
"=",
"up",
".",
"integral",
"(",
"overflow",
"=",
"True",
")",
"n_dn",
"=",
"dn",
".",
"integral",
"(",
"overflow",
"=",
"True",
")",
"if",
"n_up",
"!=",
"0",
":",
"up",
".",
"Scale",
"(",
"n_nominal",
"/",
"n_up",
")",
"if",
"n_dn",
"!=",
"0",
":",
"dn",
".",
"Scale",
"(",
"n_nominal",
"/",
"n_dn",
")",
"shape",
"=",
"HistoSys",
"(",
"histosys",
".",
"GetName",
"(",
")",
",",
"low",
"=",
"dn",
",",
"high",
"=",
"up",
")",
"norm",
"=",
"OverallSys",
"(",
"histosys",
".",
"GetName",
"(",
")",
",",
"low",
"=",
"n_dn",
"/",
"n_nominal",
"if",
"n_nominal",
"!=",
"0",
"else",
"1.",
",",
"high",
"=",
"n_up",
"/",
"n_nominal",
"if",
"n_nominal",
"!=",
"0",
"else",
"1.",
")",
"return",
"norm",
",",
"shape"
] |
https://github.com/rootpy/rootpy/blob/3926935e1f2100d8ba68070c2ab44055d4800f73/rootpy/stats/histfactory/utils.py#L357-L382
|
|
nipy/nipy
|
d16d268938dcd5c15748ca051532c21f57cf8a22
|
nipy/core/reference/coordinate_map.py
|
python
|
AffineTransform.renamed_range
|
(self, newnames, name='')
|
return renamed_range(self, newnames)
|
New AffineTransform with renamed function_domain
Parameters
----------
newnames : dict
A dictionary whose keys are integers or are in
mapping.function_range.coord_names and whose values are the
new names.
Returns
-------
newmapping : AffineTransform
A new AffineTransform with renamed function_range.
Examples
--------
>>> affine_domain = CoordinateSystem('ijk')
>>> affine_range = CoordinateSystem('xyz')
>>> affine_matrix = np.identity(4)
>>> affine_mapping = AffineTransform(affine_domain, affine_range, affine_matrix)
>>> new_affine_mapping = affine_mapping.renamed_range({'x':'u'})
>>> new_affine_mapping.function_range
CoordinateSystem(coord_names=('u', 'y', 'z'), name='', coord_dtype=float64)
>>> new_affine_mapping = affine_mapping.renamed_range({'w':'u'})
Traceback (most recent call last):
...
ValueError: no range coordinate named w
|
New AffineTransform with renamed function_domain
|
[
"New",
"AffineTransform",
"with",
"renamed",
"function_domain"
] |
def renamed_range(self, newnames, name=''):
""" New AffineTransform with renamed function_domain
Parameters
----------
newnames : dict
A dictionary whose keys are integers or are in
mapping.function_range.coord_names and whose values are the
new names.
Returns
-------
newmapping : AffineTransform
A new AffineTransform with renamed function_range.
Examples
--------
>>> affine_domain = CoordinateSystem('ijk')
>>> affine_range = CoordinateSystem('xyz')
>>> affine_matrix = np.identity(4)
>>> affine_mapping = AffineTransform(affine_domain, affine_range, affine_matrix)
>>> new_affine_mapping = affine_mapping.renamed_range({'x':'u'})
>>> new_affine_mapping.function_range
CoordinateSystem(coord_names=('u', 'y', 'z'), name='', coord_dtype=float64)
>>> new_affine_mapping = affine_mapping.renamed_range({'w':'u'})
Traceback (most recent call last):
...
ValueError: no range coordinate named w
"""
return renamed_range(self, newnames)
|
[
"def",
"renamed_range",
"(",
"self",
",",
"newnames",
",",
"name",
"=",
"''",
")",
":",
"return",
"renamed_range",
"(",
"self",
",",
"newnames",
")"
] |
https://github.com/nipy/nipy/blob/d16d268938dcd5c15748ca051532c21f57cf8a22/nipy/core/reference/coordinate_map.py#L897-L928
|
|
IdentityPython/pysaml2
|
6badb32d212257bd83ffcc816f9b625f68281b47
|
src/saml2/xmldsig/__init__.py
|
python
|
key_value_from_string
|
(xml_string)
|
return saml2.create_class_from_xml_string(KeyValue, xml_string)
|
[] |
def key_value_from_string(xml_string):
return saml2.create_class_from_xml_string(KeyValue, xml_string)
|
[
"def",
"key_value_from_string",
"(",
"xml_string",
")",
":",
"return",
"saml2",
".",
"create_class_from_xml_string",
"(",
"KeyValue",
",",
"xml_string",
")"
] |
https://github.com/IdentityPython/pysaml2/blob/6badb32d212257bd83ffcc816f9b625f68281b47/src/saml2/xmldsig/__init__.py#L1270-L1271
|
|||
sydney0zq/PTSNet
|
1a9be3eb12216be354a77294cde75f330d278796
|
coupled_otn_opn/tracking/maskrcnn/lib/model/utils/net_utils.py
|
python
|
_crop_pool_layer
|
(bottom, rois, max_pool=True)
|
return crops, grid
|
[ x2-x1 x1 + x2 - W + 1 ]
[ ----- 0 --------------- ]
[ W - 1 W - 1 ]
[ ]
[ y2-y1 y1 + y2 - H + 1 ]
[ 0 ----- --------------- ]
[ H - 1 H - 1 ]
|
[ x2-x1 x1 + x2 - W + 1 ]
[ ----- 0 --------------- ]
[ W - 1 W - 1 ]
[ ]
[ y2-y1 y1 + y2 - H + 1 ]
[ 0 ----- --------------- ]
[ H - 1 H - 1 ]
|
[
"[",
"x2",
"-",
"x1",
"x1",
"+",
"x2",
"-",
"W",
"+",
"1",
"]",
"[",
"-----",
"0",
"---------------",
"]",
"[",
"W",
"-",
"1",
"W",
"-",
"1",
"]",
"[",
"]",
"[",
"y2",
"-",
"y1",
"y1",
"+",
"y2",
"-",
"H",
"+",
"1",
"]",
"[",
"0",
"-----",
"---------------",
"]",
"[",
"H",
"-",
"1",
"H",
"-",
"1",
"]"
] |
def _crop_pool_layer(bottom, rois, max_pool=True):
# code modified from
# https://github.com/ruotianluo/pytorch-faster-rcnn
# implement it using stn
# box to affine
# input (x1,y1,x2,y2)
"""
[ x2-x1 x1 + x2 - W + 1 ]
[ ----- 0 --------------- ]
[ W - 1 W - 1 ]
[ ]
[ y2-y1 y1 + y2 - H + 1 ]
[ 0 ----- --------------- ]
[ H - 1 H - 1 ]
"""
rois = rois.detach()
batch_size = bottom.size(0)
D = bottom.size(1)
H = bottom.size(2)
W = bottom.size(3)
roi_per_batch = rois.size(0) / batch_size
x1 = rois[:, 1::4] / 16.0
y1 = rois[:, 2::4] / 16.0
x2 = rois[:, 3::4] / 16.0
y2 = rois[:, 4::4] / 16.0
height = bottom.size(2)
width = bottom.size(3)
# affine theta
zero = Variable(rois.data.new(rois.size(0), 1).zero_())
theta = torch.cat([\
(x2 - x1) / (width - 1),
zero,
(x1 + x2 - width + 1) / (width - 1),
zero,
(y2 - y1) / (height - 1),
(y1 + y2 - height + 1) / (height - 1)], 1).view(-1, 2, 3)
if max_pool:
pre_pool_size = cfg.POOLING_SIZE * 2
grid = F.affine_grid(theta, torch.Size((rois.size(0), 1, pre_pool_size, pre_pool_size)))
bottom = bottom.view(1, batch_size, D, H, W).contiguous().expand(roi_per_batch, batch_size, D, H, W)\
.contiguous().view(-1, D, H, W)
crops = F.grid_sample(bottom, grid)
crops = F.max_pool2d(crops, 2, 2)
else:
grid = F.affine_grid(theta, torch.Size((rois.size(0), 1, cfg.POOLING_SIZE, cfg.POOLING_SIZE)))
bottom = bottom.view(1, batch_size, D, H, W).contiguous().expand(roi_per_batch, batch_size, D, H, W)\
.contiguous().view(-1, D, H, W)
crops = F.grid_sample(bottom, grid)
return crops, grid
|
[
"def",
"_crop_pool_layer",
"(",
"bottom",
",",
"rois",
",",
"max_pool",
"=",
"True",
")",
":",
"# code modified from ",
"# https://github.com/ruotianluo/pytorch-faster-rcnn",
"# implement it using stn",
"# box to affine",
"# input (x1,y1,x2,y2)",
"rois",
"=",
"rois",
".",
"detach",
"(",
")",
"batch_size",
"=",
"bottom",
".",
"size",
"(",
"0",
")",
"D",
"=",
"bottom",
".",
"size",
"(",
"1",
")",
"H",
"=",
"bottom",
".",
"size",
"(",
"2",
")",
"W",
"=",
"bottom",
".",
"size",
"(",
"3",
")",
"roi_per_batch",
"=",
"rois",
".",
"size",
"(",
"0",
")",
"/",
"batch_size",
"x1",
"=",
"rois",
"[",
":",
",",
"1",
":",
":",
"4",
"]",
"/",
"16.0",
"y1",
"=",
"rois",
"[",
":",
",",
"2",
":",
":",
"4",
"]",
"/",
"16.0",
"x2",
"=",
"rois",
"[",
":",
",",
"3",
":",
":",
"4",
"]",
"/",
"16.0",
"y2",
"=",
"rois",
"[",
":",
",",
"4",
":",
":",
"4",
"]",
"/",
"16.0",
"height",
"=",
"bottom",
".",
"size",
"(",
"2",
")",
"width",
"=",
"bottom",
".",
"size",
"(",
"3",
")",
"# affine theta",
"zero",
"=",
"Variable",
"(",
"rois",
".",
"data",
".",
"new",
"(",
"rois",
".",
"size",
"(",
"0",
")",
",",
"1",
")",
".",
"zero_",
"(",
")",
")",
"theta",
"=",
"torch",
".",
"cat",
"(",
"[",
"(",
"x2",
"-",
"x1",
")",
"/",
"(",
"width",
"-",
"1",
")",
",",
"zero",
",",
"(",
"x1",
"+",
"x2",
"-",
"width",
"+",
"1",
")",
"/",
"(",
"width",
"-",
"1",
")",
",",
"zero",
",",
"(",
"y2",
"-",
"y1",
")",
"/",
"(",
"height",
"-",
"1",
")",
",",
"(",
"y1",
"+",
"y2",
"-",
"height",
"+",
"1",
")",
"/",
"(",
"height",
"-",
"1",
")",
"]",
",",
"1",
")",
".",
"view",
"(",
"-",
"1",
",",
"2",
",",
"3",
")",
"if",
"max_pool",
":",
"pre_pool_size",
"=",
"cfg",
".",
"POOLING_SIZE",
"*",
"2",
"grid",
"=",
"F",
".",
"affine_grid",
"(",
"theta",
",",
"torch",
".",
"Size",
"(",
"(",
"rois",
".",
"size",
"(",
"0",
")",
",",
"1",
",",
"pre_pool_size",
",",
"pre_pool_size",
")",
")",
")",
"bottom",
"=",
"bottom",
".",
"view",
"(",
"1",
",",
"batch_size",
",",
"D",
",",
"H",
",",
"W",
")",
".",
"contiguous",
"(",
")",
".",
"expand",
"(",
"roi_per_batch",
",",
"batch_size",
",",
"D",
",",
"H",
",",
"W",
")",
".",
"contiguous",
"(",
")",
".",
"view",
"(",
"-",
"1",
",",
"D",
",",
"H",
",",
"W",
")",
"crops",
"=",
"F",
".",
"grid_sample",
"(",
"bottom",
",",
"grid",
")",
"crops",
"=",
"F",
".",
"max_pool2d",
"(",
"crops",
",",
"2",
",",
"2",
")",
"else",
":",
"grid",
"=",
"F",
".",
"affine_grid",
"(",
"theta",
",",
"torch",
".",
"Size",
"(",
"(",
"rois",
".",
"size",
"(",
"0",
")",
",",
"1",
",",
"cfg",
".",
"POOLING_SIZE",
",",
"cfg",
".",
"POOLING_SIZE",
")",
")",
")",
"bottom",
"=",
"bottom",
".",
"view",
"(",
"1",
",",
"batch_size",
",",
"D",
",",
"H",
",",
"W",
")",
".",
"contiguous",
"(",
")",
".",
"expand",
"(",
"roi_per_batch",
",",
"batch_size",
",",
"D",
",",
"H",
",",
"W",
")",
".",
"contiguous",
"(",
")",
".",
"view",
"(",
"-",
"1",
",",
"D",
",",
"H",
",",
"W",
")",
"crops",
"=",
"F",
".",
"grid_sample",
"(",
"bottom",
",",
"grid",
")",
"return",
"crops",
",",
"grid"
] |
https://github.com/sydney0zq/PTSNet/blob/1a9be3eb12216be354a77294cde75f330d278796/coupled_otn_opn/tracking/maskrcnn/lib/model/utils/net_utils.py#L38-L90
|
|
art-programmer/PlaneNet
|
ccc4423d278388d01cb3300be992b951b90acc7a
|
code/html.py
|
python
|
TestCase.test_iadd_tag
|
(self)
|
test iadd'ing a tag
|
test iadd'ing a tag
|
[
"test",
"iadd",
"ing",
"a",
"tag"
] |
def test_iadd_tag(self):
"test iadd'ing a tag"
h = XML('xml')
h += XML('some-tag', 'spam', newlines=False)
h += XML('text', 'spam', newlines=False)
self.assertEquals(str(h),
'<xml>\n<some-tag>spam</some-tag>\n<text>spam</text>\n</xml>')
|
[
"def",
"test_iadd_tag",
"(",
"self",
")",
":",
"h",
"=",
"XML",
"(",
"'xml'",
")",
"h",
"+=",
"XML",
"(",
"'some-tag'",
",",
"'spam'",
",",
"newlines",
"=",
"False",
")",
"h",
"+=",
"XML",
"(",
"'text'",
",",
"'spam'",
",",
"newlines",
"=",
"False",
")",
"self",
".",
"assertEquals",
"(",
"str",
"(",
"h",
")",
",",
"'<xml>\\n<some-tag>spam</some-tag>\\n<text>spam</text>\\n</xml>'",
")"
] |
https://github.com/art-programmer/PlaneNet/blob/ccc4423d278388d01cb3300be992b951b90acc7a/code/html.py#L435-L441
|
||
Tautulli/Tautulli
|
2410eb33805aaac4bd1c5dad0f71e4f15afaf742
|
lib/cheroot/makefile.py
|
python
|
BufferedWriter.write
|
(self, b)
|
Write bytes to buffer.
|
Write bytes to buffer.
|
[
"Write",
"bytes",
"to",
"buffer",
"."
] |
def write(self, b):
"""Write bytes to buffer."""
self._checkClosed()
if isinstance(b, str):
raise TypeError("can't write str to binary stream")
with self._write_lock:
self._write_buf.extend(b)
self._flush_unlocked()
return len(b)
|
[
"def",
"write",
"(",
"self",
",",
"b",
")",
":",
"self",
".",
"_checkClosed",
"(",
")",
"if",
"isinstance",
"(",
"b",
",",
"str",
")",
":",
"raise",
"TypeError",
"(",
"\"can't write str to binary stream\"",
")",
"with",
"self",
".",
"_write_lock",
":",
"self",
".",
"_write_buf",
".",
"extend",
"(",
"b",
")",
"self",
".",
"_flush_unlocked",
"(",
")",
"return",
"len",
"(",
"b",
")"
] |
https://github.com/Tautulli/Tautulli/blob/2410eb33805aaac4bd1c5dad0f71e4f15afaf742/lib/cheroot/makefile.py#L28-L37
|
||
iclavera/learning_to_adapt
|
bd7d99ba402521c96631e7d09714128f549db0f1
|
learning_to_adapt/envs/mujoco_env.py
|
python
|
MujocoEnv.start_viewer
|
(self)
|
[] |
def start_viewer(self):
viewer = self.get_viewer()
if not viewer.running:
viewer.start()
|
[
"def",
"start_viewer",
"(",
"self",
")",
":",
"viewer",
"=",
"self",
".",
"get_viewer",
"(",
")",
"if",
"not",
"viewer",
".",
"running",
":",
"viewer",
".",
"start",
"(",
")"
] |
https://github.com/iclavera/learning_to_adapt/blob/bd7d99ba402521c96631e7d09714128f549db0f1/learning_to_adapt/envs/mujoco_env.py#L193-L196
|
||||
aio-libs/aioredis-py
|
56d6b325ee246a3eb0fc8bb6803247c86bb2f494
|
aioredis/client.py
|
python
|
PubSub.execute_command
|
(self, *args: EncodableT)
|
Execute a publish/subscribe command
|
Execute a publish/subscribe command
|
[
"Execute",
"a",
"publish",
"/",
"subscribe",
"command"
] |
async def execute_command(self, *args: EncodableT):
"""Execute a publish/subscribe command"""
# NOTE: don't parse the response in this function -- it could pull a
# legitimate message off the stack if the connection is already
# subscribed to one or more channels
if self.connection is None:
self.connection = await self.connection_pool.get_connection(
"pubsub", self.shard_hint
)
# register a callback that re-subscribes to any channels we
# were listening to when we were disconnected
self.connection.register_connect_callback(self.on_connect)
connection = self.connection
kwargs = {"check_health": not self.subscribed}
await self._execute(connection, connection.send_command, *args, **kwargs)
|
[
"async",
"def",
"execute_command",
"(",
"self",
",",
"*",
"args",
":",
"EncodableT",
")",
":",
"# NOTE: don't parse the response in this function -- it could pull a",
"# legitimate message off the stack if the connection is already",
"# subscribed to one or more channels",
"if",
"self",
".",
"connection",
"is",
"None",
":",
"self",
".",
"connection",
"=",
"await",
"self",
".",
"connection_pool",
".",
"get_connection",
"(",
"\"pubsub\"",
",",
"self",
".",
"shard_hint",
")",
"# register a callback that re-subscribes to any channels we",
"# were listening to when we were disconnected",
"self",
".",
"connection",
".",
"register_connect_callback",
"(",
"self",
".",
"on_connect",
")",
"connection",
"=",
"self",
".",
"connection",
"kwargs",
"=",
"{",
"\"check_health\"",
":",
"not",
"self",
".",
"subscribed",
"}",
"await",
"self",
".",
"_execute",
"(",
"connection",
",",
"connection",
".",
"send_command",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] |
https://github.com/aio-libs/aioredis-py/blob/56d6b325ee246a3eb0fc8bb6803247c86bb2f494/aioredis/client.py#L4007-L4023
|
||
facebookresearch/ParlAI
|
e4d59c30eef44f1f67105961b82a83fd28d7d78b
|
parlai/tasks/multiwoz_v22/agents.py
|
python
|
MultiwozV22Parser._get_find_api_response
|
(self, intent, raw_slots, sys_dialog_act)
|
return results
|
Get an API response out of the lookup databases.
|
Get an API response out of the lookup databases.
|
[
"Get",
"an",
"API",
"response",
"out",
"of",
"the",
"lookup",
"databases",
"."
] |
def _get_find_api_response(self, intent, raw_slots, sys_dialog_act):
"""
Get an API response out of the lookup databases.
"""
domain = ""
for cand in DOMAINS:
if cand in intent:
domain = cand
if domain == "taxi": # handle separately cause funky
for action in sys_dialog_act:
if action == "Taxi-Inform":
return {x[0]: x[1] for x in sys_dialog_act[action]}
return {domain: domain} # too much work to do this right...
if domain == "hospital": # handle separately cause funky
res = self.hospital_address
if "hospital-department" in raw_slots:
for blob in self.hospital_department_details:
if blob["department"] in raw_slots["hospital-department"]:
res[blob["department"]] = blob
return res
slots = {}
for raw_key in raw_slots:
key = raw_key[len(domain + "-") :]
slots[key] = raw_slots[raw_key]
for action in sys_dialog_act:
if "Recommend" in action:
add_slots = {}
for x in sys_dialog_act[action]:
name = x[0]
val = x[1]
if self._slot_in_schema(name, intent):
if name not in add_slots:
add_slots[name] = []
add_slots[name].append(val)
for key in add_slots:
slots[key] = add_slots[key]
find = self.dbs[domain]
for slot, values in slots.items():
if slot == "arriveby":
condition = find[slot] < values[0]
elif slot == "leaveat":
condition = find[slot] > values[0]
else:
condition = find[slot].isin(values)
find = find[condition]
filtered = self.dbs[domain].iloc[find.index]
count = len(filtered.index)
if count == 0:
return {}
blob = filtered.head(1).to_dict('records')
results = {}
results["COUNT"] = count
results["OPTIONS"] = json.dumps(blob)
return results
|
[
"def",
"_get_find_api_response",
"(",
"self",
",",
"intent",
",",
"raw_slots",
",",
"sys_dialog_act",
")",
":",
"domain",
"=",
"\"\"",
"for",
"cand",
"in",
"DOMAINS",
":",
"if",
"cand",
"in",
"intent",
":",
"domain",
"=",
"cand",
"if",
"domain",
"==",
"\"taxi\"",
":",
"# handle separately cause funky",
"for",
"action",
"in",
"sys_dialog_act",
":",
"if",
"action",
"==",
"\"Taxi-Inform\"",
":",
"return",
"{",
"x",
"[",
"0",
"]",
":",
"x",
"[",
"1",
"]",
"for",
"x",
"in",
"sys_dialog_act",
"[",
"action",
"]",
"}",
"return",
"{",
"domain",
":",
"domain",
"}",
"# too much work to do this right...",
"if",
"domain",
"==",
"\"hospital\"",
":",
"# handle separately cause funky",
"res",
"=",
"self",
".",
"hospital_address",
"if",
"\"hospital-department\"",
"in",
"raw_slots",
":",
"for",
"blob",
"in",
"self",
".",
"hospital_department_details",
":",
"if",
"blob",
"[",
"\"department\"",
"]",
"in",
"raw_slots",
"[",
"\"hospital-department\"",
"]",
":",
"res",
"[",
"blob",
"[",
"\"department\"",
"]",
"]",
"=",
"blob",
"return",
"res",
"slots",
"=",
"{",
"}",
"for",
"raw_key",
"in",
"raw_slots",
":",
"key",
"=",
"raw_key",
"[",
"len",
"(",
"domain",
"+",
"\"-\"",
")",
":",
"]",
"slots",
"[",
"key",
"]",
"=",
"raw_slots",
"[",
"raw_key",
"]",
"for",
"action",
"in",
"sys_dialog_act",
":",
"if",
"\"Recommend\"",
"in",
"action",
":",
"add_slots",
"=",
"{",
"}",
"for",
"x",
"in",
"sys_dialog_act",
"[",
"action",
"]",
":",
"name",
"=",
"x",
"[",
"0",
"]",
"val",
"=",
"x",
"[",
"1",
"]",
"if",
"self",
".",
"_slot_in_schema",
"(",
"name",
",",
"intent",
")",
":",
"if",
"name",
"not",
"in",
"add_slots",
":",
"add_slots",
"[",
"name",
"]",
"=",
"[",
"]",
"add_slots",
"[",
"name",
"]",
".",
"append",
"(",
"val",
")",
"for",
"key",
"in",
"add_slots",
":",
"slots",
"[",
"key",
"]",
"=",
"add_slots",
"[",
"key",
"]",
"find",
"=",
"self",
".",
"dbs",
"[",
"domain",
"]",
"for",
"slot",
",",
"values",
"in",
"slots",
".",
"items",
"(",
")",
":",
"if",
"slot",
"==",
"\"arriveby\"",
":",
"condition",
"=",
"find",
"[",
"slot",
"]",
"<",
"values",
"[",
"0",
"]",
"elif",
"slot",
"==",
"\"leaveat\"",
":",
"condition",
"=",
"find",
"[",
"slot",
"]",
">",
"values",
"[",
"0",
"]",
"else",
":",
"condition",
"=",
"find",
"[",
"slot",
"]",
".",
"isin",
"(",
"values",
")",
"find",
"=",
"find",
"[",
"condition",
"]",
"filtered",
"=",
"self",
".",
"dbs",
"[",
"domain",
"]",
".",
"iloc",
"[",
"find",
".",
"index",
"]",
"count",
"=",
"len",
"(",
"filtered",
".",
"index",
")",
"if",
"count",
"==",
"0",
":",
"return",
"{",
"}",
"blob",
"=",
"filtered",
".",
"head",
"(",
"1",
")",
".",
"to_dict",
"(",
"'records'",
")",
"results",
"=",
"{",
"}",
"results",
"[",
"\"COUNT\"",
"]",
"=",
"count",
"results",
"[",
"\"OPTIONS\"",
"]",
"=",
"json",
".",
"dumps",
"(",
"blob",
")",
"return",
"results"
] |
https://github.com/facebookresearch/ParlAI/blob/e4d59c30eef44f1f67105961b82a83fd28d7d78b/parlai/tasks/multiwoz_v22/agents.py#L159-L216
|
|
geometalab/Vector-Tiles-Reader-QGIS-Plugin
|
a31ae86959c8f3b7d6f332f84191cd7ca4683e1d
|
ext-libs/shapely/geometry/polygon.py
|
python
|
geos_polygon_from_py
|
(shell, holes=None)
|
[] |
def geos_polygon_from_py(shell, holes=None):
if shell is None:
return None
if isinstance(shell, Polygon):
return geos_geom_from_py(shell)
if shell is not None:
ret = geos_linearring_from_py(shell)
if ret is None:
return None
geos_shell, ndim = ret
if holes is not None and len(holes) > 0:
ob = holes
L = len(ob)
exemplar = ob[0]
try:
N = len(exemplar[0])
except TypeError:
N = exemplar._ndim
if not L >= 1:
raise ValueError("number of holes must be non zero")
if not N in (2, 3):
raise ValueError("insufficiant coordinate dimension")
# Array of pointers to ring geometries
geos_holes = (c_void_p * L)()
# add to coordinate sequence
for l in range(L):
geom, ndim = geos_linearring_from_py(ob[l])
geos_holes[l] = cast(geom, c_void_p)
else:
geos_holes = POINTER(c_void_p)()
L = 0
return (
lgeos.GEOSGeom_createPolygon(
c_void_p(geos_shell), geos_holes, L), ndim)
|
[
"def",
"geos_polygon_from_py",
"(",
"shell",
",",
"holes",
"=",
"None",
")",
":",
"if",
"shell",
"is",
"None",
":",
"return",
"None",
"if",
"isinstance",
"(",
"shell",
",",
"Polygon",
")",
":",
"return",
"geos_geom_from_py",
"(",
"shell",
")",
"if",
"shell",
"is",
"not",
"None",
":",
"ret",
"=",
"geos_linearring_from_py",
"(",
"shell",
")",
"if",
"ret",
"is",
"None",
":",
"return",
"None",
"geos_shell",
",",
"ndim",
"=",
"ret",
"if",
"holes",
"is",
"not",
"None",
"and",
"len",
"(",
"holes",
")",
">",
"0",
":",
"ob",
"=",
"holes",
"L",
"=",
"len",
"(",
"ob",
")",
"exemplar",
"=",
"ob",
"[",
"0",
"]",
"try",
":",
"N",
"=",
"len",
"(",
"exemplar",
"[",
"0",
"]",
")",
"except",
"TypeError",
":",
"N",
"=",
"exemplar",
".",
"_ndim",
"if",
"not",
"L",
">=",
"1",
":",
"raise",
"ValueError",
"(",
"\"number of holes must be non zero\"",
")",
"if",
"not",
"N",
"in",
"(",
"2",
",",
"3",
")",
":",
"raise",
"ValueError",
"(",
"\"insufficiant coordinate dimension\"",
")",
"# Array of pointers to ring geometries",
"geos_holes",
"=",
"(",
"c_void_p",
"*",
"L",
")",
"(",
")",
"# add to coordinate sequence",
"for",
"l",
"in",
"range",
"(",
"L",
")",
":",
"geom",
",",
"ndim",
"=",
"geos_linearring_from_py",
"(",
"ob",
"[",
"l",
"]",
")",
"geos_holes",
"[",
"l",
"]",
"=",
"cast",
"(",
"geom",
",",
"c_void_p",
")",
"else",
":",
"geos_holes",
"=",
"POINTER",
"(",
"c_void_p",
")",
"(",
")",
"L",
"=",
"0",
"return",
"(",
"lgeos",
".",
"GEOSGeom_createPolygon",
"(",
"c_void_p",
"(",
"geos_shell",
")",
",",
"geos_holes",
",",
"L",
")",
",",
"ndim",
")"
] |
https://github.com/geometalab/Vector-Tiles-Reader-QGIS-Plugin/blob/a31ae86959c8f3b7d6f332f84191cd7ca4683e1d/ext-libs/shapely/geometry/polygon.py#L485-L525
|
||||
caiiiac/Machine-Learning-with-Python
|
1a26c4467da41ca4ebc3d5bd789ea942ef79422f
|
MachineLearning/venv/lib/python3.5/site-packages/scipy/io/matlab/miobase.py
|
python
|
MatFileReader.__init__
|
(self, mat_stream,
byte_order=None,
mat_dtype=False,
squeeze_me=False,
chars_as_strings=True,
matlab_compatible=False,
struct_as_record=True,
verify_compressed_data_integrity=True
)
|
Initializer for mat file reader
mat_stream : file-like
object with file API, open for reading
%(load_args)s
|
Initializer for mat file reader
|
[
"Initializer",
"for",
"mat",
"file",
"reader"
] |
def __init__(self, mat_stream,
byte_order=None,
mat_dtype=False,
squeeze_me=False,
chars_as_strings=True,
matlab_compatible=False,
struct_as_record=True,
verify_compressed_data_integrity=True
):
'''
Initializer for mat file reader
mat_stream : file-like
object with file API, open for reading
%(load_args)s
'''
# Initialize stream
self.mat_stream = mat_stream
self.dtypes = {}
if not byte_order:
byte_order = self.guess_byte_order()
else:
byte_order = boc.to_numpy_code(byte_order)
self.byte_order = byte_order
self.struct_as_record = struct_as_record
if matlab_compatible:
self.set_matlab_compatible()
else:
self.squeeze_me = squeeze_me
self.chars_as_strings = chars_as_strings
self.mat_dtype = mat_dtype
self.verify_compressed_data_integrity = verify_compressed_data_integrity
|
[
"def",
"__init__",
"(",
"self",
",",
"mat_stream",
",",
"byte_order",
"=",
"None",
",",
"mat_dtype",
"=",
"False",
",",
"squeeze_me",
"=",
"False",
",",
"chars_as_strings",
"=",
"True",
",",
"matlab_compatible",
"=",
"False",
",",
"struct_as_record",
"=",
"True",
",",
"verify_compressed_data_integrity",
"=",
"True",
")",
":",
"# Initialize stream",
"self",
".",
"mat_stream",
"=",
"mat_stream",
"self",
".",
"dtypes",
"=",
"{",
"}",
"if",
"not",
"byte_order",
":",
"byte_order",
"=",
"self",
".",
"guess_byte_order",
"(",
")",
"else",
":",
"byte_order",
"=",
"boc",
".",
"to_numpy_code",
"(",
"byte_order",
")",
"self",
".",
"byte_order",
"=",
"byte_order",
"self",
".",
"struct_as_record",
"=",
"struct_as_record",
"if",
"matlab_compatible",
":",
"self",
".",
"set_matlab_compatible",
"(",
")",
"else",
":",
"self",
".",
"squeeze_me",
"=",
"squeeze_me",
"self",
".",
"chars_as_strings",
"=",
"chars_as_strings",
"self",
".",
"mat_dtype",
"=",
"mat_dtype",
"self",
".",
"verify_compressed_data_integrity",
"=",
"verify_compressed_data_integrity"
] |
https://github.com/caiiiac/Machine-Learning-with-Python/blob/1a26c4467da41ca4ebc3d5bd789ea942ef79422f/MachineLearning/venv/lib/python3.5/site-packages/scipy/io/matlab/miobase.py#L346-L377
|
||
google/clusterfuzz
|
f358af24f414daa17a3649b143e71ea71871ef59
|
src/clusterfuzz/_internal/bot/tokenizer/antlr_tokenizer.py
|
python
|
AntlrTokenizer.fill
|
(self, stream)
|
return i
|
Helper function. antlr4.CommonTokenStream.fill should work, but
it does not fetch all of the tokens. This is a replacement that works.
|
Helper function. antlr4.CommonTokenStream.fill should work, but
it does not fetch all of the tokens. This is a replacement that works.
|
[
"Helper",
"function",
".",
"antlr4",
".",
"CommonTokenStream",
".",
"fill",
"should",
"work",
"but",
"it",
"does",
"not",
"fetch",
"all",
"of",
"the",
"tokens",
".",
"This",
"is",
"a",
"replacement",
"that",
"works",
"."
] |
def fill(self, stream):
"""Helper function. antlr4.CommonTokenStream.fill should work, but
it does not fetch all of the tokens. This is a replacement that works."""
i = 0
while stream.fetch(1):
i += 1
return i
|
[
"def",
"fill",
"(",
"self",
",",
"stream",
")",
":",
"i",
"=",
"0",
"while",
"stream",
".",
"fetch",
"(",
"1",
")",
":",
"i",
"+=",
"1",
"return",
"i"
] |
https://github.com/google/clusterfuzz/blob/f358af24f414daa17a3649b143e71ea71871ef59/src/clusterfuzz/_internal/bot/tokenizer/antlr_tokenizer.py#L30-L36
|
|
SpockBotMC/SpockBot
|
f89911551f18357720034fbaa52837a0d09f66ea
|
spockbot/mcp/mcpacket.py
|
python
|
Packet.new_ident
|
(self, ident)
|
[] |
def new_ident(self, ident):
self.__init__(ident, self.data)
|
[
"def",
"new_ident",
"(",
"self",
",",
"ident",
")",
":",
"self",
".",
"__init__",
"(",
"ident",
",",
"self",
".",
"data",
")"
] |
https://github.com/SpockBotMC/SpockBot/blob/f89911551f18357720034fbaa52837a0d09f66ea/spockbot/mcp/mcpacket.py#L45-L46
|
||||
tp4a/teleport
|
1fafd34f1f775d2cf80ea4af6e44468d8e0b24ad
|
server/www/packages/packages-linux/x64/tornado/template.py
|
python
|
_IncludeBlock.find_named_blocks
|
(
self, loader: Optional[BaseLoader], named_blocks: Dict[str, _NamedBlock]
)
|
[] |
def find_named_blocks(
self, loader: Optional[BaseLoader], named_blocks: Dict[str, _NamedBlock]
) -> None:
assert loader is not None
included = loader.load(self.name, self.template_name)
included.file.find_named_blocks(loader, named_blocks)
|
[
"def",
"find_named_blocks",
"(",
"self",
",",
"loader",
":",
"Optional",
"[",
"BaseLoader",
"]",
",",
"named_blocks",
":",
"Dict",
"[",
"str",
",",
"_NamedBlock",
"]",
")",
"->",
"None",
":",
"assert",
"loader",
"is",
"not",
"None",
"included",
"=",
"loader",
".",
"load",
"(",
"self",
".",
"name",
",",
"self",
".",
"template_name",
")",
"included",
".",
"file",
".",
"find_named_blocks",
"(",
"loader",
",",
"named_blocks",
")"
] |
https://github.com/tp4a/teleport/blob/1fafd34f1f775d2cf80ea4af6e44468d8e0b24ad/server/www/packages/packages-linux/x64/tornado/template.py#L580-L585
|
||||
Jenyay/outwiker
|
50530cf7b3f71480bb075b2829bc0669773b835b
|
src/outwiker/core/spellchecker/spelldict.py
|
python
|
create_new_dic_file
|
(dic_file: str)
|
Create .dic file if it is not exists
|
Create .dic file if it is not exists
|
[
"Create",
".",
"dic",
"file",
"if",
"it",
"is",
"not",
"exists"
] |
def create_new_dic_file(dic_file: str):
'''
Create .dic file if it is not exists
'''
if not os.path.exists(dic_file):
logger.debug('Create .dic file: {}'.format(dic_file))
with open(dic_file, 'w', encoding='utf8') as fp:
fp.write('1\ntest')
|
[
"def",
"create_new_dic_file",
"(",
"dic_file",
":",
"str",
")",
":",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"dic_file",
")",
":",
"logger",
".",
"debug",
"(",
"'Create .dic file: {}'",
".",
"format",
"(",
"dic_file",
")",
")",
"with",
"open",
"(",
"dic_file",
",",
"'w'",
",",
"encoding",
"=",
"'utf8'",
")",
"as",
"fp",
":",
"fp",
".",
"write",
"(",
"'1\\ntest'",
")"
] |
https://github.com/Jenyay/outwiker/blob/50530cf7b3f71480bb075b2829bc0669773b835b/src/outwiker/core/spellchecker/spelldict.py#L40-L47
|
||
pytorch/fairseq
|
1575f30dd0a9f7b3c499db0b4767aa4e9f79056c
|
fairseq/search.py
|
python
|
Sampling._sample_topp
|
(self, lprobs)
|
return trimed_probs, truncated_indices
|
Sample among the smallest set of elements whose cumulative probability mass exceeds p.
See `"The Curious Case of Neural Text Degeneration"
(Holtzman et al., 2019) <https://arxiv.org/abs/1904.09751>`_.
Args:
lprobs: (bsz x input_beam_size x vocab_size)
the model's log-probabilities over the vocabulary at the current step
Return: A tuple of (trimed_probs, truncated_indices) where:
trimed_probs: (bsz x input_beam_size x ?)
the model's probabilities over the elements selected to sample from. The
width of the third dimension is determined by top-P.
truncated_indices: (bsz x input_beam_size x ?)
the indices of the chosen elements.
|
Sample among the smallest set of elements whose cumulative probability mass exceeds p.
|
[
"Sample",
"among",
"the",
"smallest",
"set",
"of",
"elements",
"whose",
"cumulative",
"probability",
"mass",
"exceeds",
"p",
"."
] |
def _sample_topp(self, lprobs):
"""Sample among the smallest set of elements whose cumulative probability mass exceeds p.
See `"The Curious Case of Neural Text Degeneration"
(Holtzman et al., 2019) <https://arxiv.org/abs/1904.09751>`_.
Args:
lprobs: (bsz x input_beam_size x vocab_size)
the model's log-probabilities over the vocabulary at the current step
Return: A tuple of (trimed_probs, truncated_indices) where:
trimed_probs: (bsz x input_beam_size x ?)
the model's probabilities over the elements selected to sample from. The
width of the third dimension is determined by top-P.
truncated_indices: (bsz x input_beam_size x ?)
the indices of the chosen elements.
"""
probs = lprobs.exp_()
# sort the last dimension (vocab dimension) in descending order
sorted_probs, sorted_indices = probs.sort(descending=True)
# compute a mask to indicate the words to be included in the top-P set.
cumsum_probs = sorted_probs.cumsum(dim=2)
mask = cumsum_probs.lt(self.sampling_topp)
# note that mask was computed by 'lt'. One more word needs to be included
# so that the cumulative probability mass can exceed p.
cumsum_mask = mask.cumsum(dim=2)
last_included = cumsum_mask[:, :, -1:]
last_included.clamp_(0, mask.size()[2] - 1)
mask = mask.scatter_(2, last_included, 1)
# truncate unnecessary dims.
max_dim = last_included.max()
truncated_mask = mask[:, :, : max_dim + 1]
truncated_probs = sorted_probs[:, :, : max_dim + 1]
truncated_indices = sorted_indices[:, :, : max_dim + 1]
# trim the words that are not in top-P by setting their probabilities
# to 0, so that they would not be sampled later.
trim_mask = ~truncated_mask
trimed_probs = truncated_probs.masked_fill_(trim_mask, 0)
return trimed_probs, truncated_indices
|
[
"def",
"_sample_topp",
"(",
"self",
",",
"lprobs",
")",
":",
"probs",
"=",
"lprobs",
".",
"exp_",
"(",
")",
"# sort the last dimension (vocab dimension) in descending order",
"sorted_probs",
",",
"sorted_indices",
"=",
"probs",
".",
"sort",
"(",
"descending",
"=",
"True",
")",
"# compute a mask to indicate the words to be included in the top-P set.",
"cumsum_probs",
"=",
"sorted_probs",
".",
"cumsum",
"(",
"dim",
"=",
"2",
")",
"mask",
"=",
"cumsum_probs",
".",
"lt",
"(",
"self",
".",
"sampling_topp",
")",
"# note that mask was computed by 'lt'. One more word needs to be included",
"# so that the cumulative probability mass can exceed p.",
"cumsum_mask",
"=",
"mask",
".",
"cumsum",
"(",
"dim",
"=",
"2",
")",
"last_included",
"=",
"cumsum_mask",
"[",
":",
",",
":",
",",
"-",
"1",
":",
"]",
"last_included",
".",
"clamp_",
"(",
"0",
",",
"mask",
".",
"size",
"(",
")",
"[",
"2",
"]",
"-",
"1",
")",
"mask",
"=",
"mask",
".",
"scatter_",
"(",
"2",
",",
"last_included",
",",
"1",
")",
"# truncate unnecessary dims.",
"max_dim",
"=",
"last_included",
".",
"max",
"(",
")",
"truncated_mask",
"=",
"mask",
"[",
":",
",",
":",
",",
":",
"max_dim",
"+",
"1",
"]",
"truncated_probs",
"=",
"sorted_probs",
"[",
":",
",",
":",
",",
":",
"max_dim",
"+",
"1",
"]",
"truncated_indices",
"=",
"sorted_indices",
"[",
":",
",",
":",
",",
":",
"max_dim",
"+",
"1",
"]",
"# trim the words that are not in top-P by setting their probabilities",
"# to 0, so that they would not be sampled later.",
"trim_mask",
"=",
"~",
"truncated_mask",
"trimed_probs",
"=",
"truncated_probs",
".",
"masked_fill_",
"(",
"trim_mask",
",",
"0",
")",
"return",
"trimed_probs",
",",
"truncated_indices"
] |
https://github.com/pytorch/fairseq/blob/1575f30dd0a9f7b3c499db0b4767aa4e9f79056c/fairseq/search.py#L630-L673
|
|
sagemath/sage
|
f9b2db94f675ff16963ccdefba4f1a3393b3fe0d
|
src/sage/combinat/dyck_word.py
|
python
|
replace_symbols
|
(x)
|
r"""
A map sending ``open_symbol`` to ``'('`` and ``close_symbol`` to ``')'``,
and raising an error on any input other than ``open_symbol`` and
``close_symbol``. The values of the constants ``open_symbol``
and ``close_symbol`` are subject to change.
This is the inverse map of :func:`replace_parens`.
INPUT:
- ``x`` -- either ``open_symbol`` or ``close_symbol``.
OUTPUT:
- If ``x`` is ``open_symbol``, replace ``x`` with ``'('``.
- If ``x`` is ``close_symbol``, replace ``x`` with ``')'``.
- If ``x`` is neither ``open_symbol`` nor ``close_symbol``, a
``ValueError`` is raised.
.. SEEALSO:: :func:`replace_parens`
EXAMPLES::
sage: from sage.combinat.dyck_word import replace_symbols
sage: replace_symbols(1)
'('
sage: replace_symbols(0)
')'
sage: replace_symbols(3)
Traceback (most recent call last):
...
ValueError
|
r"""
A map sending ``open_symbol`` to ``'('`` and ``close_symbol`` to ``')'``,
and raising an error on any input other than ``open_symbol`` and
``close_symbol``. The values of the constants ``open_symbol``
and ``close_symbol`` are subject to change.
|
[
"r",
"A",
"map",
"sending",
"open_symbol",
"to",
"(",
"and",
"close_symbol",
"to",
")",
"and",
"raising",
"an",
"error",
"on",
"any",
"input",
"other",
"than",
"open_symbol",
"and",
"close_symbol",
".",
"The",
"values",
"of",
"the",
"constants",
"open_symbol",
"and",
"close_symbol",
"are",
"subject",
"to",
"change",
"."
] |
def replace_symbols(x):
r"""
A map sending ``open_symbol`` to ``'('`` and ``close_symbol`` to ``')'``,
and raising an error on any input other than ``open_symbol`` and
``close_symbol``. The values of the constants ``open_symbol``
and ``close_symbol`` are subject to change.
This is the inverse map of :func:`replace_parens`.
INPUT:
- ``x`` -- either ``open_symbol`` or ``close_symbol``.
OUTPUT:
- If ``x`` is ``open_symbol``, replace ``x`` with ``'('``.
- If ``x`` is ``close_symbol``, replace ``x`` with ``')'``.
- If ``x`` is neither ``open_symbol`` nor ``close_symbol``, a
``ValueError`` is raised.
.. SEEALSO:: :func:`replace_parens`
EXAMPLES::
sage: from sage.combinat.dyck_word import replace_symbols
sage: replace_symbols(1)
'('
sage: replace_symbols(0)
')'
sage: replace_symbols(3)
Traceback (most recent call last):
...
ValueError
"""
if x == open_symbol:
return '('
if x == close_symbol:
return ')'
raise ValueError
|
[
"def",
"replace_symbols",
"(",
"x",
")",
":",
"if",
"x",
"==",
"open_symbol",
":",
"return",
"'('",
"if",
"x",
"==",
"close_symbol",
":",
"return",
"')'",
"raise",
"ValueError"
] |
https://github.com/sagemath/sage/blob/f9b2db94f675ff16963ccdefba4f1a3393b3fe0d/src/sage/combinat/dyck_word.py#L150-L190
|
||
pypa/setuptools
|
9f37366aab9cd8f6baa23e6a77cfdb8daf97757e
|
pkg_resources/__init__.py
|
python
|
NullProvider._get
|
(self, path)
|
[] |
def _get(self, path):
if hasattr(self.loader, 'get_data'):
return self.loader.get_data(path)
raise NotImplementedError(
"Can't perform this operation for loaders without 'get_data()'"
)
|
[
"def",
"_get",
"(",
"self",
",",
"path",
")",
":",
"if",
"hasattr",
"(",
"self",
".",
"loader",
",",
"'get_data'",
")",
":",
"return",
"self",
".",
"loader",
".",
"get_data",
"(",
"path",
")",
"raise",
"NotImplementedError",
"(",
"\"Can't perform this operation for loaders without 'get_data()'\"",
")"
] |
https://github.com/pypa/setuptools/blob/9f37366aab9cd8f6baa23e6a77cfdb8daf97757e/pkg_resources/__init__.py#L1558-L1563
|
||||
pyamg/pyamg
|
e3fb6feaad2358e681f2f4affae3205bfe9a2350
|
pyamg/aggregation/rootnode.py
|
python
|
rootnode_solver
|
(A, B=None, BH=None,
symmetry='hermitian', strength='symmetric',
aggregate='standard', smooth='energy',
presmoother=('block_gauss_seidel',
{'sweep': 'symmetric'}),
postsmoother=('block_gauss_seidel',
{'sweep': 'symmetric'}),
improve_candidates=('block_gauss_seidel',
{'sweep': 'symmetric',
'iterations': 4}),
max_levels=10, max_coarse=10,
diagonal_dominance=False, keep=False, **kwargs)
|
return ml
|
Create a multilevel solver using root-node based Smoothed Aggregation (SA).
See the notes below, for the major differences with the classical-style
smoothed aggregation solver in aggregation.smoothed_aggregation_solver.
Parameters
----------
A : csr_matrix, bsr_matrix
Sparse NxN matrix in CSR or BSR format
B : None, array_like
Right near-nullspace candidates stored in the columns of an NxK array.
K must be >= the blocksize of A (see reference [2011OlScTu]_). The default value
B=None is equivalent to choosing the constant over each block-variable,
B=np.kron(np.ones((A.shape[0]/get_blocksize(A), 1)), np.eye(get_blocksize(A)))
BH : None, array_like
Left near-nullspace candidates stored in the columns of an NxK array.
BH is only used if symmetry='nonsymmetric'. K must be >= the
blocksize of A (see reference [2011OlScTu]_). The default value B=None is
equivalent to choosing the constant over each block-variable,
B=np.kron(np.ones((A.shape[0]/get_blocksize(A), 1)), np.eye(get_blocksize(A)))
symmetry : string
'symmetric' refers to both real and complex symmetric
'hermitian' refers to both complex Hermitian and real Hermitian
'nonsymmetric' i.e. nonsymmetric in a hermitian sense
Note that for the strictly real case, symmetric and hermitian are
the same
Note that this flag does not denote definiteness of the operator.
strength : list
Method used to determine the strength of connection between unknowns of
the linear system. Method-specific parameters may be passed in using a
tuple, e.g. strength=('symmetric',{'theta' : 0.25 }). If strength=None,
all nonzero entries of the matrix are considered strong.
aggregate : list
Method used to aggregate nodes.
smooth : list
Method used to smooth the tentative prolongator. Method-specific
parameters may be passed in using a tuple, e.g. smooth=
('energy',{'krylov' : 'gmres'}). Only 'energy' and None are valid
prolongation smoothing options.
presmoother : tuple, string, list
Defines the presmoother for the multilevel cycling. The default block
Gauss-Seidel option defaults to point-wise Gauss-Seidel, if the matrix
is CSR or is a BSR matrix with blocksize of 1. See notes below for
varying this parameter on a per level basis.
postsmoother : tuple, string, list
Same as presmoother, except defines the postsmoother.
improve_candidates : tuple, string, list
The ith entry defines the method used to improve the candidates B on
level i. If the list is shorter than max_levels, then the last entry
will define the method for all levels lower. If tuple or string, then
this single relaxation descriptor defines improve_candidates on all
levels.
The list elements are relaxation descriptors of the form used for
presmoother and postsmoother. A value of None implies no action on B.
max_levels : integer
Maximum number of levels to be used in the multilevel solver.
max_coarse : integer
Maximum number of variables permitted on the coarse grid.
diagonal_dominance : bool, tuple
If True (or the first tuple entry is True), then avoid coarsening
diagonally dominant rows. The second tuple entry requires a
dictionary, where the key value 'theta' is used to tune the diagonal
dominance threshold.
keep : bool
Flag to indicate keeping extra operators in the hierarchy for
diagnostics. For example, if True, then strength of connection (C),
tentative prolongation (T), aggregation (AggOp), and arrays
storing the C-points (Cpts) and F-points (Fpts) are kept at
each level.
Other Parameters
----------------
cycle_type : ['V','W','F']
Structrure of multigrid cycle
coarse_solver : ['splu', 'lu', 'cholesky, 'pinv', 'gauss_seidel', ... ]
Solver used at the coarsest level of the MG hierarchy.
Optionally, may be a tuple (fn, args), where fn is a string such as
['splu', 'lu', ...] or a callable function, and args is a dictionary of
arguments to be passed to fn.
Returns
-------
ml : MultilevelSolver
Multigrid hierarchy of matrices and prolongation operators
See Also
--------
MultilevelSolver, aggregation.smoothed_aggregation_solver,
classical.ruge_stuben_solver
Notes
-----
- Root-node style SA differs from classical SA primarily by preserving
and identity block in the interpolation operator, P. Each aggregate
has a 'root-node' or 'center-node' associated with it, and this
root-node is injected from the coarse grid to the fine grid. The
injection corresponds to the identity block.
- Only smooth={'energy', None} is supported for prolongation
smoothing. See reference [2011OlScTu]_ below for more details on why the
'energy' prolongation smoother is the natural counterpart to
root-node style SA.
- The additional parameters are passed through as arguments to
MultilevelSolver. Refer to pyamg.MultilevelSolver for additional
documentation.
- At each level, four steps are executed in order to define the coarser
level operator.
1. Matrix A is given and used to derive a strength matrix, C.
2. Based on the strength matrix, indices are grouped or aggregated.
3. The aggregates define coarse nodes and a tentative prolongation
operator T is defined by injection
4. The tentative prolongation operator is smoothed by a relaxation
scheme to improve the quality and extent of interpolation from the
aggregates to fine nodes.
- The parameters smooth, strength, aggregate, presmoother, postsmoother
can be varied on a per level basis. For different methods on
different levels, use a list as input so that the i-th entry defines
the method at the i-th level. If there are more levels in the
hierarchy than list entries, the last entry will define the method
for all levels lower.
Examples are:
smooth=[('jacobi', {'omega':1.0}), None, 'jacobi']
presmoother=[('block_gauss_seidel', {'sweep':symmetric}), 'sor']
aggregate=['standard', 'naive']
strength=[('symmetric', {'theta':0.25}), ('symmetric', {'theta':0.08})]
- Predefined strength of connection and aggregation schemes can be
specified. These options are best used together, but aggregation can
be predefined while strength of connection is not.
For predefined strength of connection, use a list consisting of
tuples of the form ('predefined', {'C' : C0}), where C0 is a
csr_matrix and each degree-of-freedom in C0 represents a supernode.
For instance to predefine a three-level hierarchy, use
[('predefined', {'C' : C0}), ('predefined', {'C' : C1}) ].
Similarly for predefined aggregation, use a list of tuples. For
instance to predefine a three-level hierarchy, use [('predefined',
{'AggOp' : Agg0}), ('predefined', {'AggOp' : Agg1}) ], where the
dimensions of A, Agg0 and Agg1 are compatible, i.e. Agg0.shape[1] ==
A.shape[0] and Agg1.shape[1] == Agg0.shape[0]. Each AggOp is a
csr_matrix.
Because this is a root-nodes solver, if a member of the predefined
aggregation list is predefined, it must be of the form
('predefined', {'AggOp' : Agg, 'Cnodes' : Cnodes}).
Examples
--------
>>> from pyamg import rootnode_solver
>>> from pyamg.gallery import poisson
>>> from scipy.sparse.linalg import cg
>>> import numpy as np
>>> A = poisson((100, 100), format='csr') # matrix
>>> b = np.ones((A.shape[0])) # RHS
>>> ml = rootnode_solver(A) # AMG solver
>>> M = ml.aspreconditioner(cycle='V') # preconditioner
>>> x, info = cg(A, b, tol=1e-8, maxiter=30, M=M) # solve with CG
References
----------
.. [1996VaMa] Vanek, P. and Mandel, J. and Brezina, M.,
"Algebraic Multigrid by Smoothed Aggregation for
Second and Fourth Order Elliptic Problems",
Computing, vol. 56, no. 3, pp. 179--196, 1996.
http://citeseer.ist.psu.edu/vanek96algebraic.html
.. [2011OlScTu] Olson, L. and Schroder, J. and Tuminaro, R.,
"A general interpolation strategy for algebraic
multigrid using energy minimization", SIAM Journal
on Scientific Computing (SISC), vol. 33, pp.
966--991, 2011.
|
Create a multilevel solver using root-node based Smoothed Aggregation (SA).
|
[
"Create",
"a",
"multilevel",
"solver",
"using",
"root",
"-",
"node",
"based",
"Smoothed",
"Aggregation",
"(",
"SA",
")",
"."
] |
def rootnode_solver(A, B=None, BH=None,
symmetry='hermitian', strength='symmetric',
aggregate='standard', smooth='energy',
presmoother=('block_gauss_seidel',
{'sweep': 'symmetric'}),
postsmoother=('block_gauss_seidel',
{'sweep': 'symmetric'}),
improve_candidates=('block_gauss_seidel',
{'sweep': 'symmetric',
'iterations': 4}),
max_levels=10, max_coarse=10,
diagonal_dominance=False, keep=False, **kwargs):
"""Create a multilevel solver using root-node based Smoothed Aggregation (SA).
See the notes below, for the major differences with the classical-style
smoothed aggregation solver in aggregation.smoothed_aggregation_solver.
Parameters
----------
A : csr_matrix, bsr_matrix
Sparse NxN matrix in CSR or BSR format
B : None, array_like
Right near-nullspace candidates stored in the columns of an NxK array.
K must be >= the blocksize of A (see reference [2011OlScTu]_). The default value
B=None is equivalent to choosing the constant over each block-variable,
B=np.kron(np.ones((A.shape[0]/get_blocksize(A), 1)), np.eye(get_blocksize(A)))
BH : None, array_like
Left near-nullspace candidates stored in the columns of an NxK array.
BH is only used if symmetry='nonsymmetric'. K must be >= the
blocksize of A (see reference [2011OlScTu]_). The default value B=None is
equivalent to choosing the constant over each block-variable,
B=np.kron(np.ones((A.shape[0]/get_blocksize(A), 1)), np.eye(get_blocksize(A)))
symmetry : string
'symmetric' refers to both real and complex symmetric
'hermitian' refers to both complex Hermitian and real Hermitian
'nonsymmetric' i.e. nonsymmetric in a hermitian sense
Note that for the strictly real case, symmetric and hermitian are
the same
Note that this flag does not denote definiteness of the operator.
strength : list
Method used to determine the strength of connection between unknowns of
the linear system. Method-specific parameters may be passed in using a
tuple, e.g. strength=('symmetric',{'theta' : 0.25 }). If strength=None,
all nonzero entries of the matrix are considered strong.
aggregate : list
Method used to aggregate nodes.
smooth : list
Method used to smooth the tentative prolongator. Method-specific
parameters may be passed in using a tuple, e.g. smooth=
('energy',{'krylov' : 'gmres'}). Only 'energy' and None are valid
prolongation smoothing options.
presmoother : tuple, string, list
Defines the presmoother for the multilevel cycling. The default block
Gauss-Seidel option defaults to point-wise Gauss-Seidel, if the matrix
is CSR or is a BSR matrix with blocksize of 1. See notes below for
varying this parameter on a per level basis.
postsmoother : tuple, string, list
Same as presmoother, except defines the postsmoother.
improve_candidates : tuple, string, list
The ith entry defines the method used to improve the candidates B on
level i. If the list is shorter than max_levels, then the last entry
will define the method for all levels lower. If tuple or string, then
this single relaxation descriptor defines improve_candidates on all
levels.
The list elements are relaxation descriptors of the form used for
presmoother and postsmoother. A value of None implies no action on B.
max_levels : integer
Maximum number of levels to be used in the multilevel solver.
max_coarse : integer
Maximum number of variables permitted on the coarse grid.
diagonal_dominance : bool, tuple
If True (or the first tuple entry is True), then avoid coarsening
diagonally dominant rows. The second tuple entry requires a
dictionary, where the key value 'theta' is used to tune the diagonal
dominance threshold.
keep : bool
Flag to indicate keeping extra operators in the hierarchy for
diagnostics. For example, if True, then strength of connection (C),
tentative prolongation (T), aggregation (AggOp), and arrays
storing the C-points (Cpts) and F-points (Fpts) are kept at
each level.
Other Parameters
----------------
cycle_type : ['V','W','F']
Structrure of multigrid cycle
coarse_solver : ['splu', 'lu', 'cholesky, 'pinv', 'gauss_seidel', ... ]
Solver used at the coarsest level of the MG hierarchy.
Optionally, may be a tuple (fn, args), where fn is a string such as
['splu', 'lu', ...] or a callable function, and args is a dictionary of
arguments to be passed to fn.
Returns
-------
ml : MultilevelSolver
Multigrid hierarchy of matrices and prolongation operators
See Also
--------
MultilevelSolver, aggregation.smoothed_aggregation_solver,
classical.ruge_stuben_solver
Notes
-----
- Root-node style SA differs from classical SA primarily by preserving
and identity block in the interpolation operator, P. Each aggregate
has a 'root-node' or 'center-node' associated with it, and this
root-node is injected from the coarse grid to the fine grid. The
injection corresponds to the identity block.
- Only smooth={'energy', None} is supported for prolongation
smoothing. See reference [2011OlScTu]_ below for more details on why the
'energy' prolongation smoother is the natural counterpart to
root-node style SA.
- The additional parameters are passed through as arguments to
MultilevelSolver. Refer to pyamg.MultilevelSolver for additional
documentation.
- At each level, four steps are executed in order to define the coarser
level operator.
1. Matrix A is given and used to derive a strength matrix, C.
2. Based on the strength matrix, indices are grouped or aggregated.
3. The aggregates define coarse nodes and a tentative prolongation
operator T is defined by injection
4. The tentative prolongation operator is smoothed by a relaxation
scheme to improve the quality and extent of interpolation from the
aggregates to fine nodes.
- The parameters smooth, strength, aggregate, presmoother, postsmoother
can be varied on a per level basis. For different methods on
different levels, use a list as input so that the i-th entry defines
the method at the i-th level. If there are more levels in the
hierarchy than list entries, the last entry will define the method
for all levels lower.
Examples are:
smooth=[('jacobi', {'omega':1.0}), None, 'jacobi']
presmoother=[('block_gauss_seidel', {'sweep':symmetric}), 'sor']
aggregate=['standard', 'naive']
strength=[('symmetric', {'theta':0.25}), ('symmetric', {'theta':0.08})]
- Predefined strength of connection and aggregation schemes can be
specified. These options are best used together, but aggregation can
be predefined while strength of connection is not.
For predefined strength of connection, use a list consisting of
tuples of the form ('predefined', {'C' : C0}), where C0 is a
csr_matrix and each degree-of-freedom in C0 represents a supernode.
For instance to predefine a three-level hierarchy, use
[('predefined', {'C' : C0}), ('predefined', {'C' : C1}) ].
Similarly for predefined aggregation, use a list of tuples. For
instance to predefine a three-level hierarchy, use [('predefined',
{'AggOp' : Agg0}), ('predefined', {'AggOp' : Agg1}) ], where the
dimensions of A, Agg0 and Agg1 are compatible, i.e. Agg0.shape[1] ==
A.shape[0] and Agg1.shape[1] == Agg0.shape[0]. Each AggOp is a
csr_matrix.
Because this is a root-nodes solver, if a member of the predefined
aggregation list is predefined, it must be of the form
('predefined', {'AggOp' : Agg, 'Cnodes' : Cnodes}).
Examples
--------
>>> from pyamg import rootnode_solver
>>> from pyamg.gallery import poisson
>>> from scipy.sparse.linalg import cg
>>> import numpy as np
>>> A = poisson((100, 100), format='csr') # matrix
>>> b = np.ones((A.shape[0])) # RHS
>>> ml = rootnode_solver(A) # AMG solver
>>> M = ml.aspreconditioner(cycle='V') # preconditioner
>>> x, info = cg(A, b, tol=1e-8, maxiter=30, M=M) # solve with CG
References
----------
.. [1996VaMa] Vanek, P. and Mandel, J. and Brezina, M.,
"Algebraic Multigrid by Smoothed Aggregation for
Second and Fourth Order Elliptic Problems",
Computing, vol. 56, no. 3, pp. 179--196, 1996.
http://citeseer.ist.psu.edu/vanek96algebraic.html
.. [2011OlScTu] Olson, L. and Schroder, J. and Tuminaro, R.,
"A general interpolation strategy for algebraic
multigrid using energy minimization", SIAM Journal
on Scientific Computing (SISC), vol. 33, pp.
966--991, 2011.
"""
if not (isspmatrix_csr(A) or isspmatrix_bsr(A)):
try:
A = csr_matrix(A)
warn('Implicit conversion of A to CSR',
SparseEfficiencyWarning)
except BaseException as e:
raise TypeError('Argument A must have type csr_matrix, '
'bsr_matrix, or be convertible to csr_matrix') from e
A = A.asfptype()
if symmetry not in ('symmetric', 'hermitian', 'nonsymmetric'):
raise ValueError('Expected "symmetric", "nonsymmetric" '
'or "hermitian" for the symmetry parameter.')
A.symmetry = symmetry
if A.shape[0] != A.shape[1]:
raise ValueError('expected square matrix')
# Right near nullspace candidates use constant for each variable as default
if B is None:
B = np.kron(np.ones((int(A.shape[0]/get_blocksize(A)), 1), dtype=A.dtype),
np.eye(get_blocksize(A)))
else:
B = np.asarray(B, dtype=A.dtype)
if len(B.shape) == 1:
B = B.reshape(-1, 1)
if B.shape[0] != A.shape[0]:
raise ValueError('The near null-space modes B have incorrect \
dimensions for matrix A')
if B.shape[1] < get_blocksize(A):
raise ValueError('B.shape[1] must be >= the blocksize of A')
# Left near nullspace candidates
if A.symmetry == 'nonsymmetric':
if BH is None:
BH = B.copy()
else:
BH = np.asarray(BH, dtype=A.dtype)
if len(BH.shape) == 1:
BH = BH.reshape(-1, 1)
if BH.shape[1] != B.shape[1]:
raise ValueError('The number of left and right near \
null-space modes B and BH, must be equal')
if BH.shape[0] != A.shape[0]:
raise ValueError('The near null-space modes BH have \
incorrect dimensions for matrix A')
# Levelize the user parameters, so that they become lists describing the
# desired user option on each level.
max_levels, max_coarse, strength =\
levelize_strength_or_aggregation(strength, max_levels, max_coarse)
max_levels, max_coarse, aggregate =\
levelize_strength_or_aggregation(aggregate, max_levels, max_coarse)
improve_candidates =\
levelize_smooth_or_improve_candidates(improve_candidates, max_levels)
smooth = levelize_smooth_or_improve_candidates(smooth, max_levels)
# Construct multilevel structure
levels = []
levels.append(MultilevelSolver.Level())
levels[-1].A = A # matrix
# Append near nullspace candidates
levels[-1].B = B # right candidates
if A.symmetry == 'nonsymmetric':
levels[-1].BH = BH # left candidates
while len(levels) < max_levels and \
int(levels[-1].A.shape[0]/get_blocksize(levels[-1].A)) > max_coarse:
_extend_hierarchy(levels, strength, aggregate, smooth,
improve_candidates, diagonal_dominance, keep)
ml = MultilevelSolver(levels, **kwargs)
change_smoothers(ml, presmoother, postsmoother)
return ml
|
[
"def",
"rootnode_solver",
"(",
"A",
",",
"B",
"=",
"None",
",",
"BH",
"=",
"None",
",",
"symmetry",
"=",
"'hermitian'",
",",
"strength",
"=",
"'symmetric'",
",",
"aggregate",
"=",
"'standard'",
",",
"smooth",
"=",
"'energy'",
",",
"presmoother",
"=",
"(",
"'block_gauss_seidel'",
",",
"{",
"'sweep'",
":",
"'symmetric'",
"}",
")",
",",
"postsmoother",
"=",
"(",
"'block_gauss_seidel'",
",",
"{",
"'sweep'",
":",
"'symmetric'",
"}",
")",
",",
"improve_candidates",
"=",
"(",
"'block_gauss_seidel'",
",",
"{",
"'sweep'",
":",
"'symmetric'",
",",
"'iterations'",
":",
"4",
"}",
")",
",",
"max_levels",
"=",
"10",
",",
"max_coarse",
"=",
"10",
",",
"diagonal_dominance",
"=",
"False",
",",
"keep",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"not",
"(",
"isspmatrix_csr",
"(",
"A",
")",
"or",
"isspmatrix_bsr",
"(",
"A",
")",
")",
":",
"try",
":",
"A",
"=",
"csr_matrix",
"(",
"A",
")",
"warn",
"(",
"'Implicit conversion of A to CSR'",
",",
"SparseEfficiencyWarning",
")",
"except",
"BaseException",
"as",
"e",
":",
"raise",
"TypeError",
"(",
"'Argument A must have type csr_matrix, '",
"'bsr_matrix, or be convertible to csr_matrix'",
")",
"from",
"e",
"A",
"=",
"A",
".",
"asfptype",
"(",
")",
"if",
"symmetry",
"not",
"in",
"(",
"'symmetric'",
",",
"'hermitian'",
",",
"'nonsymmetric'",
")",
":",
"raise",
"ValueError",
"(",
"'Expected \"symmetric\", \"nonsymmetric\" '",
"'or \"hermitian\" for the symmetry parameter.'",
")",
"A",
".",
"symmetry",
"=",
"symmetry",
"if",
"A",
".",
"shape",
"[",
"0",
"]",
"!=",
"A",
".",
"shape",
"[",
"1",
"]",
":",
"raise",
"ValueError",
"(",
"'expected square matrix'",
")",
"# Right near nullspace candidates use constant for each variable as default",
"if",
"B",
"is",
"None",
":",
"B",
"=",
"np",
".",
"kron",
"(",
"np",
".",
"ones",
"(",
"(",
"int",
"(",
"A",
".",
"shape",
"[",
"0",
"]",
"/",
"get_blocksize",
"(",
"A",
")",
")",
",",
"1",
")",
",",
"dtype",
"=",
"A",
".",
"dtype",
")",
",",
"np",
".",
"eye",
"(",
"get_blocksize",
"(",
"A",
")",
")",
")",
"else",
":",
"B",
"=",
"np",
".",
"asarray",
"(",
"B",
",",
"dtype",
"=",
"A",
".",
"dtype",
")",
"if",
"len",
"(",
"B",
".",
"shape",
")",
"==",
"1",
":",
"B",
"=",
"B",
".",
"reshape",
"(",
"-",
"1",
",",
"1",
")",
"if",
"B",
".",
"shape",
"[",
"0",
"]",
"!=",
"A",
".",
"shape",
"[",
"0",
"]",
":",
"raise",
"ValueError",
"(",
"'The near null-space modes B have incorrect \\\n dimensions for matrix A'",
")",
"if",
"B",
".",
"shape",
"[",
"1",
"]",
"<",
"get_blocksize",
"(",
"A",
")",
":",
"raise",
"ValueError",
"(",
"'B.shape[1] must be >= the blocksize of A'",
")",
"# Left near nullspace candidates",
"if",
"A",
".",
"symmetry",
"==",
"'nonsymmetric'",
":",
"if",
"BH",
"is",
"None",
":",
"BH",
"=",
"B",
".",
"copy",
"(",
")",
"else",
":",
"BH",
"=",
"np",
".",
"asarray",
"(",
"BH",
",",
"dtype",
"=",
"A",
".",
"dtype",
")",
"if",
"len",
"(",
"BH",
".",
"shape",
")",
"==",
"1",
":",
"BH",
"=",
"BH",
".",
"reshape",
"(",
"-",
"1",
",",
"1",
")",
"if",
"BH",
".",
"shape",
"[",
"1",
"]",
"!=",
"B",
".",
"shape",
"[",
"1",
"]",
":",
"raise",
"ValueError",
"(",
"'The number of left and right near \\\n null-space modes B and BH, must be equal'",
")",
"if",
"BH",
".",
"shape",
"[",
"0",
"]",
"!=",
"A",
".",
"shape",
"[",
"0",
"]",
":",
"raise",
"ValueError",
"(",
"'The near null-space modes BH have \\\n incorrect dimensions for matrix A'",
")",
"# Levelize the user parameters, so that they become lists describing the",
"# desired user option on each level.",
"max_levels",
",",
"max_coarse",
",",
"strength",
"=",
"levelize_strength_or_aggregation",
"(",
"strength",
",",
"max_levels",
",",
"max_coarse",
")",
"max_levels",
",",
"max_coarse",
",",
"aggregate",
"=",
"levelize_strength_or_aggregation",
"(",
"aggregate",
",",
"max_levels",
",",
"max_coarse",
")",
"improve_candidates",
"=",
"levelize_smooth_or_improve_candidates",
"(",
"improve_candidates",
",",
"max_levels",
")",
"smooth",
"=",
"levelize_smooth_or_improve_candidates",
"(",
"smooth",
",",
"max_levels",
")",
"# Construct multilevel structure",
"levels",
"=",
"[",
"]",
"levels",
".",
"append",
"(",
"MultilevelSolver",
".",
"Level",
"(",
")",
")",
"levels",
"[",
"-",
"1",
"]",
".",
"A",
"=",
"A",
"# matrix",
"# Append near nullspace candidates",
"levels",
"[",
"-",
"1",
"]",
".",
"B",
"=",
"B",
"# right candidates",
"if",
"A",
".",
"symmetry",
"==",
"'nonsymmetric'",
":",
"levels",
"[",
"-",
"1",
"]",
".",
"BH",
"=",
"BH",
"# left candidates",
"while",
"len",
"(",
"levels",
")",
"<",
"max_levels",
"and",
"int",
"(",
"levels",
"[",
"-",
"1",
"]",
".",
"A",
".",
"shape",
"[",
"0",
"]",
"/",
"get_blocksize",
"(",
"levels",
"[",
"-",
"1",
"]",
".",
"A",
")",
")",
">",
"max_coarse",
":",
"_extend_hierarchy",
"(",
"levels",
",",
"strength",
",",
"aggregate",
",",
"smooth",
",",
"improve_candidates",
",",
"diagonal_dominance",
",",
"keep",
")",
"ml",
"=",
"MultilevelSolver",
"(",
"levels",
",",
"*",
"*",
"kwargs",
")",
"change_smoothers",
"(",
"ml",
",",
"presmoother",
",",
"postsmoother",
")",
"return",
"ml"
] |
https://github.com/pyamg/pyamg/blob/e3fb6feaad2358e681f2f4affae3205bfe9a2350/pyamg/aggregation/rootnode.py#L26-L306
|
|
schutzwerk/CANalyzat0r
|
6bc251e69f73d9f8554bcc6134354e18ab8ca426
|
src/Database.py
|
python
|
Database.checkDB
|
(self)
|
Checks if all the table count of the SQLite database matches the needed table count.
If the check does pass the user will be notified to create a project if no project is exisiting yet.
If the check does not pass the user will be prompted for an action:
- Truncate the database and create an empty one
- Keep the database and exit
:return: A boolean value indicating the database integrity status (True = good)
|
Checks if all the table count of the SQLite database matches the needed table count.
If the check does pass the user will be notified to create a project if no project is exisiting yet.
If the check does not pass the user will be prompted for an action:
- Truncate the database and create an empty one
- Keep the database and exit
|
[
"Checks",
"if",
"all",
"the",
"table",
"count",
"of",
"the",
"SQLite",
"database",
"matches",
"the",
"needed",
"table",
"count",
".",
"If",
"the",
"check",
"does",
"pass",
"the",
"user",
"will",
"be",
"notified",
"to",
"create",
"a",
"project",
"if",
"no",
"project",
"is",
"exisiting",
"yet",
".",
"If",
"the",
"check",
"does",
"not",
"pass",
"the",
"user",
"will",
"be",
"prompted",
"for",
"an",
"action",
":",
"-",
"Truncate",
"the",
"database",
"and",
"create",
"an",
"empty",
"one",
"-",
"Keep",
"the",
"database",
"and",
"exit"
] |
def checkDB(self):
"""
Checks if all the table count of the SQLite database matches the needed table count.
If the check does pass the user will be notified to create a project if no project is exisiting yet.
If the check does not pass the user will be prompted for an action:
- Truncate the database and create an empty one
- Keep the database and exit
:return: A boolean value indicating the database integrity status (True = good)
"""
cursor = self.connection.cursor()
cursor.execute(DatabaseStatements.checkTablesPresentStatement)
data = cursor.fetchall()
# All tables present
if len(data) == DatabaseStatements.tableCount:
# Check if theres at least one project
if self.getOverallTableCount(
DatabaseStatements.projectTableName) > 0:
return True
# Tell the user to setup a project
else:
QMessageBox.information(
Globals.ui.tabWidgetMain,
Strings.databaseFirstRunMessageBoxTitle,
Strings.databaseFirstRunMessageBoxText, QMessageBox.Ok)
return True
# Empty DB
elif len(data) == 0:
return False
# Table missing -- corrupt DB
elif len(data) > 0 and len(data) < DatabaseStatements.tableCount:
# Ask user for action
answer = QMessageBox.question(
Globals.ui.tabWidgetMain,
Strings.databaseCorruptMessageBoxTitle,
Strings.databaseCorruptMessageBoxText,
QMessageBox.Yes | QMessageBox.No)
if (answer == QMessageBox.Yes):
self.logger.info(Strings.databaseCorruptAction)
# Delete sqlite file and create a fresh db in the next step
os.remove(Settings.DB_PATH)
# Update the connection object
self.connection = self.connect()
return False
else:
self.logger.info(Strings.databaseCorruptNoAction)
exit(1)
|
[
"def",
"checkDB",
"(",
"self",
")",
":",
"cursor",
"=",
"self",
".",
"connection",
".",
"cursor",
"(",
")",
"cursor",
".",
"execute",
"(",
"DatabaseStatements",
".",
"checkTablesPresentStatement",
")",
"data",
"=",
"cursor",
".",
"fetchall",
"(",
")",
"# All tables present",
"if",
"len",
"(",
"data",
")",
"==",
"DatabaseStatements",
".",
"tableCount",
":",
"# Check if theres at least one project",
"if",
"self",
".",
"getOverallTableCount",
"(",
"DatabaseStatements",
".",
"projectTableName",
")",
">",
"0",
":",
"return",
"True",
"# Tell the user to setup a project",
"else",
":",
"QMessageBox",
".",
"information",
"(",
"Globals",
".",
"ui",
".",
"tabWidgetMain",
",",
"Strings",
".",
"databaseFirstRunMessageBoxTitle",
",",
"Strings",
".",
"databaseFirstRunMessageBoxText",
",",
"QMessageBox",
".",
"Ok",
")",
"return",
"True",
"# Empty DB",
"elif",
"len",
"(",
"data",
")",
"==",
"0",
":",
"return",
"False",
"# Table missing -- corrupt DB",
"elif",
"len",
"(",
"data",
")",
">",
"0",
"and",
"len",
"(",
"data",
")",
"<",
"DatabaseStatements",
".",
"tableCount",
":",
"# Ask user for action",
"answer",
"=",
"QMessageBox",
".",
"question",
"(",
"Globals",
".",
"ui",
".",
"tabWidgetMain",
",",
"Strings",
".",
"databaseCorruptMessageBoxTitle",
",",
"Strings",
".",
"databaseCorruptMessageBoxText",
",",
"QMessageBox",
".",
"Yes",
"|",
"QMessageBox",
".",
"No",
")",
"if",
"(",
"answer",
"==",
"QMessageBox",
".",
"Yes",
")",
":",
"self",
".",
"logger",
".",
"info",
"(",
"Strings",
".",
"databaseCorruptAction",
")",
"# Delete sqlite file and create a fresh db in the next step",
"os",
".",
"remove",
"(",
"Settings",
".",
"DB_PATH",
")",
"# Update the connection object",
"self",
".",
"connection",
"=",
"self",
".",
"connect",
"(",
")",
"return",
"False",
"else",
":",
"self",
".",
"logger",
".",
"info",
"(",
"Strings",
".",
"databaseCorruptNoAction",
")",
"exit",
"(",
"1",
")"
] |
https://github.com/schutzwerk/CANalyzat0r/blob/6bc251e69f73d9f8554bcc6134354e18ab8ca426/src/Database.py#L369-L421
|
||
kcunning/Katie-s-Rougish-PyGame
|
1b299ebc27e5f68a25b2e0462845f0b4423ebbbe
|
roguey/classes/gamescreen.py
|
python
|
GameScreen.draw_background
|
(self)
|
Draws my glorious background.
|
Draws my glorious background.
|
[
"Draws",
"my",
"glorious",
"background",
"."
] |
def draw_background(self):
''' Draws my glorious background.
'''
self.screen.blit(self.bg, (0,0))
|
[
"def",
"draw_background",
"(",
"self",
")",
":",
"self",
".",
"screen",
".",
"blit",
"(",
"self",
".",
"bg",
",",
"(",
"0",
",",
"0",
")",
")"
] |
https://github.com/kcunning/Katie-s-Rougish-PyGame/blob/1b299ebc27e5f68a25b2e0462845f0b4423ebbbe/roguey/classes/gamescreen.py#L151-L154
|
||
captainhammy/Houdini-Toolbox
|
a4e61c3c0296b3a3a153a8dd42297c316be1b0f3
|
houdini/pyfilter/ht-pyfilter.py
|
python
|
filterQuit
|
()
|
Perform actions just before Mantra quits.
|
Perform actions just before Mantra quits.
|
[
"Perform",
"actions",
"just",
"before",
"Mantra",
"quits",
"."
] |
def filterQuit():
"""Perform actions just before Mantra quits."""
_logger.debug("filterQuit")
_PYFILTER_MANAGER.run_operations_for_stage("filter_quit")
|
[
"def",
"filterQuit",
"(",
")",
":",
"_logger",
".",
"debug",
"(",
"\"filterQuit\"",
")",
"_PYFILTER_MANAGER",
".",
"run_operations_for_stage",
"(",
"\"filter_quit\"",
")"
] |
https://github.com/captainhammy/Houdini-Toolbox/blob/a4e61c3c0296b3a3a153a8dd42297c316be1b0f3/houdini/pyfilter/ht-pyfilter.py#L180-L184
|
||
Scifabric/pybossa
|
fd87953c067a94ae211cd8771d4eead130ef3c64
|
pybossa/view/account.py
|
python
|
delete
|
(name)
|
Delete user account.
|
Delete user account.
|
[
"Delete",
"user",
"account",
"."
] |
def delete(name):
"""
Delete user account.
"""
user = user_repo.get_by_name(name)
if not user:
return abort(404)
if current_user.name != name:
return abort(403)
super_queue.enqueue(delete_account, user.id)
if (request.headers.get('Content-Type') == 'application/json' or
request.args.get('response_format') == 'json'):
response = dict(job='enqueued', template='account/delete.html')
return handle_content_type(response)
else:
return redirect(url_for('account.signout'))
|
[
"def",
"delete",
"(",
"name",
")",
":",
"user",
"=",
"user_repo",
".",
"get_by_name",
"(",
"name",
")",
"if",
"not",
"user",
":",
"return",
"abort",
"(",
"404",
")",
"if",
"current_user",
".",
"name",
"!=",
"name",
":",
"return",
"abort",
"(",
"403",
")",
"super_queue",
".",
"enqueue",
"(",
"delete_account",
",",
"user",
".",
"id",
")",
"if",
"(",
"request",
".",
"headers",
".",
"get",
"(",
"'Content-Type'",
")",
"==",
"'application/json'",
"or",
"request",
".",
"args",
".",
"get",
"(",
"'response_format'",
")",
"==",
"'json'",
")",
":",
"response",
"=",
"dict",
"(",
"job",
"=",
"'enqueued'",
",",
"template",
"=",
"'account/delete.html'",
")",
"return",
"handle_content_type",
"(",
"response",
")",
"else",
":",
"return",
"redirect",
"(",
"url_for",
"(",
"'account.signout'",
")",
")"
] |
https://github.com/Scifabric/pybossa/blob/fd87953c067a94ae211cd8771d4eead130ef3c64/pybossa/view/account.py#L872-L890
|
||
saturday06/VRM_Addon_for_Blender
|
0fc59703bb203dca760501221d34ecc4a566e64f
|
io_scene_vrm/editor/mesh_from_bone_envelopes.py
|
python
|
ICYP_OT_make_mesh_from_bone_envelopes.poll
|
(cls, _context: bpy.types.Context)
|
return True
|
[] |
def poll(cls, _context: bpy.types.Context) -> bool:
return True
|
[
"def",
"poll",
"(",
"cls",
",",
"_context",
":",
"bpy",
".",
"types",
".",
"Context",
")",
"->",
"bool",
":",
"return",
"True"
] |
https://github.com/saturday06/VRM_Addon_for_Blender/blob/0fc59703bb203dca760501221d34ecc4a566e64f/io_scene_vrm/editor/mesh_from_bone_envelopes.py#L17-L18
|
|||
angr/angr
|
4b04d56ace135018083d36d9083805be8146688b
|
angr/engines/vex/claripy/ccall.py
|
python
|
x86g_calculate_daa_das_aaa_aas
|
(state, flags_and_AX, opcode)
|
return result
|
[] |
def x86g_calculate_daa_das_aaa_aas(state, flags_and_AX, opcode):
assert len(flags_and_AX) == 32
assert opcode.op == 'BVV'
opcode = opcode.args[0]
r_O = flags_and_AX[data['X86']['CondBitOffsets']['G_CC_SHIFT_O'] + 16].zero_extend(31)
r_S = flags_and_AX[data['X86']['CondBitOffsets']['G_CC_SHIFT_S'] + 16].zero_extend(31)
r_Z = flags_and_AX[data['X86']['CondBitOffsets']['G_CC_SHIFT_Z'] + 16].zero_extend(31)
r_A = flags_and_AX[data['X86']['CondBitOffsets']['G_CC_SHIFT_A'] + 16].zero_extend(31)
r_C = flags_and_AX[data['X86']['CondBitOffsets']['G_CC_SHIFT_C'] + 16].zero_extend(31)
r_P = flags_and_AX[data['X86']['CondBitOffsets']['G_CC_SHIFT_P'] + 16].zero_extend(31)
r_AL = (flags_and_AX >> 0) & 0xFF
r_AH = (flags_and_AX >> 8) & 0xFF
zero = claripy.BVV(0, 32)
one = claripy.BVV(1, 32)
if opcode == 0x27: # DAA
old_AL = r_AL
old_C = r_C
condition = claripy.Or((r_AL & 0xF) > 9, r_A == 1)
r_AL = claripy.If(condition, r_AL + 6, old_AL)
r_C = claripy.If(condition, claripy.If(r_AL >= 0x100, one, old_C), zero)
r_A = claripy.If(condition, one, zero)
condition = claripy.Or(old_AL > 0x99, old_C == 1)
r_AL = claripy.If(condition, r_AL + 0x60, r_AL)
r_C = claripy.If(condition, one, zero)
r_AL = r_AL&0xFF
r_O = zero
r_S = claripy.If((r_AL & 0x80) != 0, one, zero)
r_Z = claripy.If(r_AL == 0, one, zero)
r_P = calc_paritybit(r_AL).zero_extend(31)
elif opcode == 0x2F: # DAS
old_AL = r_AL
old_C = r_C
condition = claripy.Or((r_AL & 0xF) > 9, r_A == 1)
r_AL = claripy.If(condition, r_AL - 6, old_AL)
r_C = claripy.If(condition, claripy.If(r_AL < 6, one, zero), zero)
r_A = claripy.If(condition, one, zero)
condition = claripy.Or(old_AL > 0x99, old_C == 1)
r_AL = claripy.If(condition, r_AL - 0x60, r_AL)
r_C = claripy.If(condition, one, zero)
r_AL &= 0xFF
r_O = zero
r_S = claripy.If((r_AL & 0x80) != 0, one, zero)
r_Z = claripy.If(r_AL == 0, one, zero)
r_P = calc_paritybit(r_AL).zero_extend(31)
elif opcode == 0x37: # AAA
nudge = r_AL > 0xF9
condition = claripy.Or((r_AL & 0xF) > 9, r_A == 1)
r_AL = claripy.If(condition, (r_AL + 6) & 0xF, r_AL & 0xF)
r_AH = claripy.If(condition, claripy.If(nudge, r_AH + 2, r_AH + 1), r_AH)
r_A = claripy.If(condition, one, zero)
r_C = claripy.If(condition, one, zero)
r_O = r_S = r_Z = r_P = 0
elif opcode == 0x3F: # AAS
nudge = r_AL < 0x06
condition = claripy.Or((r_AL & 0xF) > 9, r_A == 1)
r_AL = claripy.If(condition, (r_AL - 6) & 0xF, r_AL & 0xF)
r_AH = claripy.If(condition, claripy.If(nudge, r_AH - 2, r_AH - 1), r_AH)
r_A = claripy.If(condition, one, zero)
r_C = claripy.If(condition, one, zero)
r_O = r_S = r_Z = r_P = 0
result = ( (r_O & 1) << (16 + data['X86']['CondBitOffsets']['G_CC_SHIFT_O']) ) \
| ( (r_S & 1) << (16 + data['X86']['CondBitOffsets']['G_CC_SHIFT_S']) ) \
| ( (r_Z & 1) << (16 + data['X86']['CondBitOffsets']['G_CC_SHIFT_Z']) ) \
| ( (r_A & 1) << (16 + data['X86']['CondBitOffsets']['G_CC_SHIFT_A']) ) \
| ( (r_C & 1) << (16 + data['X86']['CondBitOffsets']['G_CC_SHIFT_C']) ) \
| ( (r_P & 1) << (16 + data['X86']['CondBitOffsets']['G_CC_SHIFT_P']) ) \
| ( (r_AH & 0xFF) << 8 ) \
| ( (r_AL & 0xFF) << 0 )
return result
|
[
"def",
"x86g_calculate_daa_das_aaa_aas",
"(",
"state",
",",
"flags_and_AX",
",",
"opcode",
")",
":",
"assert",
"len",
"(",
"flags_and_AX",
")",
"==",
"32",
"assert",
"opcode",
".",
"op",
"==",
"'BVV'",
"opcode",
"=",
"opcode",
".",
"args",
"[",
"0",
"]",
"r_O",
"=",
"flags_and_AX",
"[",
"data",
"[",
"'X86'",
"]",
"[",
"'CondBitOffsets'",
"]",
"[",
"'G_CC_SHIFT_O'",
"]",
"+",
"16",
"]",
".",
"zero_extend",
"(",
"31",
")",
"r_S",
"=",
"flags_and_AX",
"[",
"data",
"[",
"'X86'",
"]",
"[",
"'CondBitOffsets'",
"]",
"[",
"'G_CC_SHIFT_S'",
"]",
"+",
"16",
"]",
".",
"zero_extend",
"(",
"31",
")",
"r_Z",
"=",
"flags_and_AX",
"[",
"data",
"[",
"'X86'",
"]",
"[",
"'CondBitOffsets'",
"]",
"[",
"'G_CC_SHIFT_Z'",
"]",
"+",
"16",
"]",
".",
"zero_extend",
"(",
"31",
")",
"r_A",
"=",
"flags_and_AX",
"[",
"data",
"[",
"'X86'",
"]",
"[",
"'CondBitOffsets'",
"]",
"[",
"'G_CC_SHIFT_A'",
"]",
"+",
"16",
"]",
".",
"zero_extend",
"(",
"31",
")",
"r_C",
"=",
"flags_and_AX",
"[",
"data",
"[",
"'X86'",
"]",
"[",
"'CondBitOffsets'",
"]",
"[",
"'G_CC_SHIFT_C'",
"]",
"+",
"16",
"]",
".",
"zero_extend",
"(",
"31",
")",
"r_P",
"=",
"flags_and_AX",
"[",
"data",
"[",
"'X86'",
"]",
"[",
"'CondBitOffsets'",
"]",
"[",
"'G_CC_SHIFT_P'",
"]",
"+",
"16",
"]",
".",
"zero_extend",
"(",
"31",
")",
"r_AL",
"=",
"(",
"flags_and_AX",
">>",
"0",
")",
"&",
"0xFF",
"r_AH",
"=",
"(",
"flags_and_AX",
">>",
"8",
")",
"&",
"0xFF",
"zero",
"=",
"claripy",
".",
"BVV",
"(",
"0",
",",
"32",
")",
"one",
"=",
"claripy",
".",
"BVV",
"(",
"1",
",",
"32",
")",
"if",
"opcode",
"==",
"0x27",
":",
"# DAA",
"old_AL",
"=",
"r_AL",
"old_C",
"=",
"r_C",
"condition",
"=",
"claripy",
".",
"Or",
"(",
"(",
"r_AL",
"&",
"0xF",
")",
">",
"9",
",",
"r_A",
"==",
"1",
")",
"r_AL",
"=",
"claripy",
".",
"If",
"(",
"condition",
",",
"r_AL",
"+",
"6",
",",
"old_AL",
")",
"r_C",
"=",
"claripy",
".",
"If",
"(",
"condition",
",",
"claripy",
".",
"If",
"(",
"r_AL",
">=",
"0x100",
",",
"one",
",",
"old_C",
")",
",",
"zero",
")",
"r_A",
"=",
"claripy",
".",
"If",
"(",
"condition",
",",
"one",
",",
"zero",
")",
"condition",
"=",
"claripy",
".",
"Or",
"(",
"old_AL",
">",
"0x99",
",",
"old_C",
"==",
"1",
")",
"r_AL",
"=",
"claripy",
".",
"If",
"(",
"condition",
",",
"r_AL",
"+",
"0x60",
",",
"r_AL",
")",
"r_C",
"=",
"claripy",
".",
"If",
"(",
"condition",
",",
"one",
",",
"zero",
")",
"r_AL",
"=",
"r_AL",
"&",
"0xFF",
"r_O",
"=",
"zero",
"r_S",
"=",
"claripy",
".",
"If",
"(",
"(",
"r_AL",
"&",
"0x80",
")",
"!=",
"0",
",",
"one",
",",
"zero",
")",
"r_Z",
"=",
"claripy",
".",
"If",
"(",
"r_AL",
"==",
"0",
",",
"one",
",",
"zero",
")",
"r_P",
"=",
"calc_paritybit",
"(",
"r_AL",
")",
".",
"zero_extend",
"(",
"31",
")",
"elif",
"opcode",
"==",
"0x2F",
":",
"# DAS",
"old_AL",
"=",
"r_AL",
"old_C",
"=",
"r_C",
"condition",
"=",
"claripy",
".",
"Or",
"(",
"(",
"r_AL",
"&",
"0xF",
")",
">",
"9",
",",
"r_A",
"==",
"1",
")",
"r_AL",
"=",
"claripy",
".",
"If",
"(",
"condition",
",",
"r_AL",
"-",
"6",
",",
"old_AL",
")",
"r_C",
"=",
"claripy",
".",
"If",
"(",
"condition",
",",
"claripy",
".",
"If",
"(",
"r_AL",
"<",
"6",
",",
"one",
",",
"zero",
")",
",",
"zero",
")",
"r_A",
"=",
"claripy",
".",
"If",
"(",
"condition",
",",
"one",
",",
"zero",
")",
"condition",
"=",
"claripy",
".",
"Or",
"(",
"old_AL",
">",
"0x99",
",",
"old_C",
"==",
"1",
")",
"r_AL",
"=",
"claripy",
".",
"If",
"(",
"condition",
",",
"r_AL",
"-",
"0x60",
",",
"r_AL",
")",
"r_C",
"=",
"claripy",
".",
"If",
"(",
"condition",
",",
"one",
",",
"zero",
")",
"r_AL",
"&=",
"0xFF",
"r_O",
"=",
"zero",
"r_S",
"=",
"claripy",
".",
"If",
"(",
"(",
"r_AL",
"&",
"0x80",
")",
"!=",
"0",
",",
"one",
",",
"zero",
")",
"r_Z",
"=",
"claripy",
".",
"If",
"(",
"r_AL",
"==",
"0",
",",
"one",
",",
"zero",
")",
"r_P",
"=",
"calc_paritybit",
"(",
"r_AL",
")",
".",
"zero_extend",
"(",
"31",
")",
"elif",
"opcode",
"==",
"0x37",
":",
"# AAA",
"nudge",
"=",
"r_AL",
">",
"0xF9",
"condition",
"=",
"claripy",
".",
"Or",
"(",
"(",
"r_AL",
"&",
"0xF",
")",
">",
"9",
",",
"r_A",
"==",
"1",
")",
"r_AL",
"=",
"claripy",
".",
"If",
"(",
"condition",
",",
"(",
"r_AL",
"+",
"6",
")",
"&",
"0xF",
",",
"r_AL",
"&",
"0xF",
")",
"r_AH",
"=",
"claripy",
".",
"If",
"(",
"condition",
",",
"claripy",
".",
"If",
"(",
"nudge",
",",
"r_AH",
"+",
"2",
",",
"r_AH",
"+",
"1",
")",
",",
"r_AH",
")",
"r_A",
"=",
"claripy",
".",
"If",
"(",
"condition",
",",
"one",
",",
"zero",
")",
"r_C",
"=",
"claripy",
".",
"If",
"(",
"condition",
",",
"one",
",",
"zero",
")",
"r_O",
"=",
"r_S",
"=",
"r_Z",
"=",
"r_P",
"=",
"0",
"elif",
"opcode",
"==",
"0x3F",
":",
"# AAS",
"nudge",
"=",
"r_AL",
"<",
"0x06",
"condition",
"=",
"claripy",
".",
"Or",
"(",
"(",
"r_AL",
"&",
"0xF",
")",
">",
"9",
",",
"r_A",
"==",
"1",
")",
"r_AL",
"=",
"claripy",
".",
"If",
"(",
"condition",
",",
"(",
"r_AL",
"-",
"6",
")",
"&",
"0xF",
",",
"r_AL",
"&",
"0xF",
")",
"r_AH",
"=",
"claripy",
".",
"If",
"(",
"condition",
",",
"claripy",
".",
"If",
"(",
"nudge",
",",
"r_AH",
"-",
"2",
",",
"r_AH",
"-",
"1",
")",
",",
"r_AH",
")",
"r_A",
"=",
"claripy",
".",
"If",
"(",
"condition",
",",
"one",
",",
"zero",
")",
"r_C",
"=",
"claripy",
".",
"If",
"(",
"condition",
",",
"one",
",",
"zero",
")",
"r_O",
"=",
"r_S",
"=",
"r_Z",
"=",
"r_P",
"=",
"0",
"result",
"=",
"(",
"(",
"r_O",
"&",
"1",
")",
"<<",
"(",
"16",
"+",
"data",
"[",
"'X86'",
"]",
"[",
"'CondBitOffsets'",
"]",
"[",
"'G_CC_SHIFT_O'",
"]",
")",
")",
"|",
"(",
"(",
"r_S",
"&",
"1",
")",
"<<",
"(",
"16",
"+",
"data",
"[",
"'X86'",
"]",
"[",
"'CondBitOffsets'",
"]",
"[",
"'G_CC_SHIFT_S'",
"]",
")",
")",
"|",
"(",
"(",
"r_Z",
"&",
"1",
")",
"<<",
"(",
"16",
"+",
"data",
"[",
"'X86'",
"]",
"[",
"'CondBitOffsets'",
"]",
"[",
"'G_CC_SHIFT_Z'",
"]",
")",
")",
"|",
"(",
"(",
"r_A",
"&",
"1",
")",
"<<",
"(",
"16",
"+",
"data",
"[",
"'X86'",
"]",
"[",
"'CondBitOffsets'",
"]",
"[",
"'G_CC_SHIFT_A'",
"]",
")",
")",
"|",
"(",
"(",
"r_C",
"&",
"1",
")",
"<<",
"(",
"16",
"+",
"data",
"[",
"'X86'",
"]",
"[",
"'CondBitOffsets'",
"]",
"[",
"'G_CC_SHIFT_C'",
"]",
")",
")",
"|",
"(",
"(",
"r_P",
"&",
"1",
")",
"<<",
"(",
"16",
"+",
"data",
"[",
"'X86'",
"]",
"[",
"'CondBitOffsets'",
"]",
"[",
"'G_CC_SHIFT_P'",
"]",
")",
")",
"|",
"(",
"(",
"r_AH",
"&",
"0xFF",
")",
"<<",
"8",
")",
"|",
"(",
"(",
"r_AL",
"&",
"0xFF",
")",
"<<",
"0",
")",
"return",
"result"
] |
https://github.com/angr/angr/blob/4b04d56ace135018083d36d9083805be8146688b/angr/engines/vex/claripy/ccall.py#L1109-L1190
|
|||
zhaoolee/StarsAndClown
|
b2d4039cad2f9232b691e5976f787b49a0a2c113
|
node_modules/npm/node_modules/node-gyp/gyp/pylib/gyp/generator/msvs.py
|
python
|
_EscapeEnvironmentVariableExpansion
|
(s)
|
return s
|
Escapes % characters.
Escapes any % characters so that Windows-style environment variable
expansions will leave them alone.
See http://connect.microsoft.com/VisualStudio/feedback/details/106127/cl-d-name-text-containing-percentage-characters-doesnt-compile
to understand why we have to do this.
Args:
s: The string to be escaped.
Returns:
The escaped string.
|
Escapes % characters.
|
[
"Escapes",
"%",
"characters",
"."
] |
def _EscapeEnvironmentVariableExpansion(s):
"""Escapes % characters.
Escapes any % characters so that Windows-style environment variable
expansions will leave them alone.
See http://connect.microsoft.com/VisualStudio/feedback/details/106127/cl-d-name-text-containing-percentage-characters-doesnt-compile
to understand why we have to do this.
Args:
s: The string to be escaped.
Returns:
The escaped string.
"""
s = s.replace('%', '%%')
return s
|
[
"def",
"_EscapeEnvironmentVariableExpansion",
"(",
"s",
")",
":",
"s",
"=",
"s",
".",
"replace",
"(",
"'%'",
",",
"'%%'",
")",
"return",
"s"
] |
https://github.com/zhaoolee/StarsAndClown/blob/b2d4039cad2f9232b691e5976f787b49a0a2c113/node_modules/npm/node_modules/node-gyp/gyp/pylib/gyp/generator/msvs.py#L664-L679
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.