Columns: nwo (string, 5-106 chars) | sha (string, 40 chars) | path (string, 4-174 chars) | language (1 class: python) | identifier (string, 1-140 chars) | parameters (string, 0-87.7k chars) | argument_list (1 class) | return_statement (string, 0-426k chars) | docstring (string, 0-64.3k chars) | docstring_summary (string, 0-26.3k chars) | docstring_tokens (list) | function (string, 18-4.83M chars) | function_tokens (list) | url (string, 83-304 chars)

nwo: MediaBrowser/plugin.video.emby | sha: 71162fc7704656833d8b228dc9014f88742215b1
path: resources/lib/sync.py | language: python
identifier: Sync.process_library | parameters: (self, library_id)

def process_library(self, library_id):
        ''' Add a library by its id. Create a node and a playlist whenever appropriate.
'''
media = {
'movies': self.movies,
'musicvideos': self.musicvideos,
'tvshows': self.tvshows,
'music': self.music
}
try:
if library_id.startswith('Boxsets:'):
if library_id.endswith('Refresh'):
self.refresh_boxsets()
else:
self.boxsets(library_id.split('Boxsets:')[1] if len(library_id) > len('Boxsets:') else None)
return
library = self.server['api'].get_item(library_id.replace('Mixed:', ""))
if library_id.startswith('Mixed:'):
for mixed in ('movies', 'tvshows'):
media[mixed](library)
self.sync['RestorePoint'] = {}
else:
if library['CollectionType']:
settings('enableMusic.bool', True)
media[library['CollectionType']](library)
except LibraryException as error:
if error.status in ('StopCalled', 'StopWriteCalled'):
save_sync(self.sync)
raise
except Exception as error:
            if 'Failed to validate path' not in str(error):
dialog("ok", heading="{emby}", line1=_(33119))
LOG.error("full sync exited unexpectedly")
save_sync(self.sync)
raise
https://github.com/MediaBrowser/plugin.video.emby/blob/71162fc7704656833d8b228dc9014f88742215b1/resources/lib/sync.py#L230-L277

nwo: ironport/shrapnel | sha: 9496a64c46271b0c5cef0feb8f2cdf33cb752bb6
path: old/coro_process.py | language: python
identifier: capture | parameters: (command, tie_out_err=True, cwd=None, env=None, timeout=0, pgrp=0)
return_statement: return status, ''.join(result)

def capture(command, tie_out_err=True, cwd=None, env=None, timeout=0, pgrp=0):
"""Run a program in the background and capture its output.
:Parameters:
- `command`: The command to execute. If it is a string, it will be
parsed for command-line arguments. Otherwise it assumes it is a
sequence of arguments, with the first element being the command to
execute.
If the command does not contain a slash (/) it will search the PATH
environment for the executable.
- `tie_out_err`: If true, it will also capture output to stderr. If
False, stderr output will go to ``/dev/null``.
- `cwd`: Change the working directory to this path if specified before
executing the program.
- `env`: The environment to use. If None, the environment is not
changed. May be a dictionary or a list of 'NAME=VALUE' strings.
- `timeout`: If specified, will use a coro timeout to ensure that the
process returns within the specified length of time. If it does not,
it is forcefully killed (with SIGKILL) and `ProcessTimeout` is
raised.
- `pgrp`: Set to -1 to keep process group unchanged, 0 to create a new
job (default) and >0 to set process group to pgrp
:Return:
Returns a tuple ``(status, output)``. Status is a
`process.ExitStatus` instance. Output is a string.
:Exceptions:
- `OSError`: Generic system error.
- `ValueError`: The command value is invalid.
- `ProcessTimeout`: The process did not return within `timeout`
seconds.
"""
if tie_out_err:
stderr = STDOUT
else:
stderr = DEV_NULL
p = spawn_job_bg(command, stdin=DEV_NULL, stdout=PIPE, stderr=stderr, cwd=cwd, env=env, pgrp=pgrp)
status = None
result = []
def do_read():
while True:
block = p.stdout.read(1024)
if block:
result.append(block)
else:
break
return p.wait()
try:
if timeout:
status = coro.with_timeout(timeout, do_read)
else:
status = do_read()
except BaseException as e:
try:
p.killpg(signal.SIGKILL)
except OSError as kill_exc:
if kill_exc.errno != errno.ESRCH:
raise
# Make sure we clean up the zombie.
coro.spawn(p.wait)
if isinstance(e, coro.TimeoutError):
raise ProcessTimeout(''.join(result), None)
else:
raise
return status, ''.join(result)
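
A minimal usage sketch, assuming shrapnel's coro event loop is available and this module is importable as coro_process (the command string is illustrative):

import coro
import coro_process

def main():
    # Capture stdout and stderr of a short command, killing it after 5 seconds.
    status, output = coro_process.capture('/bin/ls /tmp', timeout=5)
    print(status, output)
    coro.set_exit()

coro.spawn(main)
coro.event_loop()
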
https://github.com/ironport/shrapnel/blob/9496a64c46271b0c5cef0feb8f2cdf33cb752bb6/old/coro_process.py#L55-L124

nwo: IJDykeman/wangTiles | sha: 7c1ee2095ebdf7f72bce07d94c6484915d5cae8b
path: experimental_code/tiles_3d/venv_mac_py3/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/shutil.py | language: python
identifier: make_archive | parameters: (base_name, format, root_dir=None, base_dir=None, verbose=0, dry_run=0, owner=None, group=None, logger=None)
return_statement: return filename

def make_archive(base_name, format, root_dir=None, base_dir=None, verbose=0,
dry_run=0, owner=None, group=None, logger=None):
"""Create an archive file (eg. zip or tar).
'base_name' is the name of the file to create, minus any format-specific
extension; 'format' is the archive format: one of "zip", "tar", "bztar"
or "gztar".
'root_dir' is a directory that will be the root directory of the
archive; ie. we typically chdir into 'root_dir' before creating the
archive. 'base_dir' is the directory where we start archiving from;
ie. 'base_dir' will be the common prefix of all files and
directories in the archive. 'root_dir' and 'base_dir' both default
to the current directory. Returns the name of the archive file.
'owner' and 'group' are used when creating a tar archive. By default,
uses the current owner and group.
"""
save_cwd = os.getcwd()
if root_dir is not None:
if logger is not None:
logger.debug("changing into '%s'", root_dir)
base_name = os.path.abspath(base_name)
if not dry_run:
os.chdir(root_dir)
if base_dir is None:
base_dir = os.curdir
kwargs = {'dry_run': dry_run, 'logger': logger}
try:
format_info = _ARCHIVE_FORMATS[format]
except KeyError:
raise ValueError("unknown archive format '%s'" % format)
func = format_info[0]
for arg, val in format_info[1]:
kwargs[arg] = val
if format != 'zip':
kwargs['owner'] = owner
kwargs['group'] = group
try:
filename = func(base_name, base_dir, **kwargs)
finally:
if root_dir is not None:
if logger is not None:
logger.debug("changing back to '%s'", save_cwd)
os.chdir(save_cwd)
return filename
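
For reference, a usage sketch of this backported helper (it mirrors the stdlib shutil.make_archive; paths are illustrative):

# Pack the contents of /var/www into /tmp/backup.tar.gz, archived
# relative to that directory.
archive = make_archive('/tmp/backup', 'gztar', root_dir='/var/www')
print(archive)  # -> /tmp/backup.tar.gz
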
https://github.com/IJDykeman/wangTiles/blob/7c1ee2095ebdf7f72bce07d94c6484915d5cae8b/experimental_code/tiles_3d/venv_mac_py3/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/shutil.py#L544-L596

nwo: graalvm/mx | sha: 29c0debab406352df3af246be2f8973be5db69ae
path: mx.py | language: python
identifier: JavaProject.eclipse_settings_sources | parameters: (self)
return_statement: return esdict

def eclipse_settings_sources(self):
"""
Gets a dictionary from the name of an Eclipse settings file to
the list of files providing its generated content, in overriding order
(i.e., settings from files later in the list override settings from
files earlier in the list).
A new dictionary is created each time this method is called so it's
safe for the caller to modify it.
"""
esdict = self.suite.eclipse_settings_sources()
# check for project overrides
projectSettingsDir = join(self.dir, 'eclipse-settings')
if exists(projectSettingsDir):
for name in os.listdir(projectSettingsDir):
esdict.setdefault(name, []).append(os.path.abspath(join(projectSettingsDir, name)))
if not self.annotation_processors():
esdict.pop("org.eclipse.jdt.apt.core.prefs", None)
return esdict
https://github.com/graalvm/mx/blob/29c0debab406352df3af246be2f8973be5db69ae/mx.py#L6903-L6923

nwo: jython/jython3 | sha: def4f8ec47cb7a9c799ea4c745f12badf92c5769
path: Lib/xml/sax/saxlib.py | language: python
identifier: DeclHandler.elementDecl | parameters: (self, elem_name, content_model)

def elementDecl(self, elem_name, content_model):
"""Report an element type declaration.
Only the first declaration will be reported.
content_model is the string 'EMPTY', the string 'ANY' or the content
model structure represented as tuple (separator, tokens, modifier)
where separator is the separator in the token list (that is, '|' or
','), tokens is the list of tokens (element type names or tuples
representing parentheses) and modifier is the quantity modifier
('*', '?' or '+')."""
https://github.com/jython/jython3/blob/def4f8ec47cb7a9c799ea4c745f12badf92c5769/Lib/xml/sax/saxlib.py#L176-L186

nwo: wwqgtxx/wwqLyParse | sha: 33136508e52821babd9294fdecffbdf02d73a6fc
path: wwqLyParse/lib/python-3.7.2-embed-amd64/Crypto/Cipher/Salsa20.py | language: python
identifier: Salsa20Cipher.decrypt | parameters: (self, ciphertext)

def decrypt(self, ciphertext):
"""Decrypt a piece of data.
:param ciphertext: The data to decrypt, of any size.
:type ciphertext: bytes/bytearray/memoryview
:returns: the decrypted byte string, of equal length as the
ciphertext.
"""
try:
return self.encrypt(ciphertext)
except ValueError as e:
raise ValueError(str(e).replace("enc", "dec"))
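
A round-trip sketch against the PyCryptodome API this module provides (the key below is illustrative; 16- or 32-byte keys are accepted):

from Crypto.Cipher import Salsa20

key = b'0' * 32                       # 256-bit key
enc = Salsa20.new(key=key)            # a fresh 8-byte nonce is generated
ct = enc.encrypt(b'attack at dawn')

dec = Salsa20.new(key=key, nonce=enc.nonce)
assert dec.decrypt(ct) == b'attack at dawn'
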
https://github.com/wwqgtxx/wwqLyParse/blob/33136508e52821babd9294fdecffbdf02d73a6fc/wwqLyParse/lib/python-3.7.2-embed-amd64/Crypto/Cipher/Salsa20.py#L102-L114

nwo: thinkle/gourmet | sha: 8af29c8ded24528030e5ae2ea3461f61c1e5a575
path: gourmet/plugins/import_export/pdf_plugin/print_plugin.py | language: python
identifier: record_args | parameters: (func)
return_statement: return _

def record_args (func):
    # Record the wrapped method's name and arguments on self.export_commands
    # instead of executing it.
    def _ (self, *args, **kwargs):
self.export_commands.append(
(func.__name__,args,kwargs)
)
return _
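
A sketch of how the decorator behaves; the exporter class here is hypothetical, not part of the plugin:

class FakeExporter:
    def __init__(self):
        self.export_commands = []

    @record_args
    def write_header(self, text, style='H1'):
        pass  # body never runs; the call is only recorded

exp = FakeExporter()
exp.write_header('Recipes', style='H2')
print(exp.export_commands)  # [('write_header', ('Recipes',), {'style': 'H2'})]
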
https://github.com/thinkle/gourmet/blob/8af29c8ded24528030e5ae2ea3461f61c1e5a575/gourmet/plugins/import_export/pdf_plugin/print_plugin.py#L127-L133

nwo: vulscanteam/vulscan | sha: 787397e267c4e6469522ee0abe55b3e98f968d4a
path: pocsuite/thirdparty/requests/models.py | language: python
identifier: RequestHooksMixin.register_hook | parameters: (self, event, hook)

def register_hook(self, event, hook):
"""Properly register a hook."""
if event not in self.hooks:
raise ValueError('Unsupported event specified, with event name "%s"' % (event))
if isinstance(hook, collections.Callable):
self.hooks[event].append(hook)
elif hasattr(hook, '__iter__'):
self.hooks[event].extend(h for h in hook if isinstance(h, collections.Callable))
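
In the public requests API that this mixin backs, hooks are normally attached per request; a minimal sketch ('response' is the only event current requests dispatches):

import requests

def print_url(response, *args, **kwargs):
    print(response.url)

requests.get('https://example.com', hooks={'response': print_url})
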
https://github.com/vulscanteam/vulscan/blob/787397e267c4e6469522ee0abe55b3e98f968d4a/pocsuite/thirdparty/requests/models.py#L163-L172

nwo: xmengli/H-DenseUNet | sha: 06cc436a43196310fe933d114a353839907cc176
path: Keras-2.0.8/keras/engine/topology.py | language: python
identifier: load_weights_from_hdf5_group | parameters: (f, layers)

def load_weights_from_hdf5_group(f, layers):
"""Implements topological (order-based) weight loading.
# Arguments
f: A pointer to a HDF5 group.
layers: a list of target layers.
# Raises
ValueError: in case of mismatch between provided layers
and weights file.
"""
if 'keras_version' in f.attrs:
original_keras_version = f.attrs['keras_version'].decode('utf8')
else:
original_keras_version = '1'
if 'backend' in f.attrs:
original_backend = f.attrs['backend'].decode('utf8')
else:
original_backend = None
filtered_layers = []
for layer in layers:
weights = layer.weights
if weights:
filtered_layers.append(layer)
layer_names = [n.decode('utf8') for n in f.attrs['layer_names']]
filtered_layer_names = []
for name in layer_names:
g = f[name]
weight_names = [n.decode('utf8') for n in g.attrs['weight_names']]
if weight_names:
filtered_layer_names.append(name)
layer_names = filtered_layer_names
if len(layer_names) != len(filtered_layers):
raise ValueError('You are trying to load a weight file '
'containing ' + str(len(layer_names)) +
' layers into a model with ' +
str(len(filtered_layers)) + ' layers.')
# We batch weight value assignments in a single backend call
# which provides a speedup in TensorFlow.
weight_value_tuples = []
for k, name in enumerate(layer_names):
g = f[name]
weight_names = [n.decode('utf8') for n in g.attrs['weight_names']]
weight_values = [g[weight_name] for weight_name in weight_names]
layer = filtered_layers[k]
symbolic_weights = layer.weights
weight_values = preprocess_weights_for_loading(layer,
weight_values,
original_keras_version,
original_backend)
if len(weight_values) != len(symbolic_weights):
raise ValueError('Layer #' + str(k) +
' (named "' + layer.name +
'" in the current model) was found to '
'correspond to layer ' + name +
' in the save file. '
'However the new layer ' + layer.name +
' expects ' + str(len(symbolic_weights)) +
' weights, but the saved weights have ' +
str(len(weight_values)) +
' elements.')
weight_value_tuples += zip(symbolic_weights, weight_values)
K.batch_set_value(weight_value_tuples)
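
A usage sketch, assuming `model` is an already-built Keras model and 'weights.h5' was written by model.save_weights (both names are illustrative):

import h5py

with h5py.File('weights.h5', mode='r') as f:
    # Files written by save_weights() keep the layer groups at the root.
    load_weights_from_hdf5_group(f, model.layers)
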
https://github.com/xmengli/H-DenseUNet/blob/06cc436a43196310fe933d114a353839907cc176/Keras-2.0.8/keras/engine/topology.py#L3038-L3104

nwo: wolever/pip2pi | sha: 7dd84371221a38b9dc934e6ea386969356d4a5a7
path: libpip2pi/commands.py | language: python
identifier: Pip2PiOptionParser.add_index_options | parameters: (self)

def add_index_options(self):
self.add_option(
'-n', '--normalize-package-names', dest="normalize_package_names",
default=True, action="store_true",
help=dedent("""
Normalize package names in the simple index using whatever
scheme is supported by the most recent version of pip (default).
"""))
self.add_option(
'-N', '--no-normalize-package-names', dest="normalize_package_names",
action="store_false")
self.add_option(
'-a', '--aggressive-normalization', dest="aggressive_normalization",
default=False, action="store_true",
help=dedent("""
            Aggressively create symlinks to many different package name
normalizations to support multiple pip versions (for a package
named "Spam_Eggs--cheese.shop", the following normalizations
will be used: "spam-eggs-cheese-shop" (PEP-503),
"spam-eggs-cheese.shop" (pip 6, pip 7),
"Spam_Eggs--cheese.shop" (pip < 6)).
"""))
self.add_option(
'-x', '--no-build-html', dest="build_html", action="store_false",
default=True, help=dedent("""
            Don't build index files for each directory; you might want
            to disable this if your server dynamically builds index files
using something like mod_autoindex and you want to make older
package/module versions continue to be available.
"""))
self.add_option(
'-z', '--also-get-source', dest="get_source", action="store_true",
default=False, help=dedent("""
In addition to downloading wheels, eggs or any other package
format also download the source tar.gz files in case the
platform using this index does not support the wheel/egg/etc
"""))
self.add_option(
'-s', '--symlink', dest="use_symlink",
default=OS_HAS_SYMLINK, action="store_true",
help=dedent("""
Use symlinks in PACKAGE_DIR/simple/ rather than copying files.
Default: %default
"""))
self.add_option(
'-S', '--no-symlink', dest="use_symlink", action="store_false")
self.add_option(
'-v', '--verbose', dest="verbose", action="store_true")
|
[
"def",
"add_index_options",
"(",
"self",
")",
":",
"self",
".",
"add_option",
"(",
"'-n'",
",",
"'--normalize-package-names'",
",",
"dest",
"=",
"\"normalize_package_names\"",
",",
"default",
"=",
"True",
",",
"action",
"=",
"\"store_true\"",
",",
"help",
"=",
"dedent",
"(",
"\"\"\"\n Normalize package names in the simple index using whatever\n scheme is supported by the most recent version of pip (default).\n \"\"\"",
")",
")",
"self",
".",
"add_option",
"(",
"'-N'",
",",
"'--no-normalize-package-names'",
",",
"dest",
"=",
"\"normalize_package_names\"",
",",
"action",
"=",
"\"store_false\"",
")",
"self",
".",
"add_option",
"(",
"'-a'",
",",
"'--aggressive-normalization'",
",",
"dest",
"=",
"\"aggressive_normalization\"",
",",
"default",
"=",
"False",
",",
"action",
"=",
"\"store_true\"",
",",
"help",
"=",
"dedent",
"(",
"\"\"\"\n Aggressive create symlinks to many different package name\n normalizations to support multiple pip versions (for a package\n named \"Spam_Eggs--cheese.shop\", the following normalizations\n will be used: \"spam-eggs-cheese-shop\" (PEP-503),\n \"spam-eggs-cheese.shop\" (pip 6, pip 7),\n \"Spam_Eggs--cheese.shop\" (pip < 6)).\n \"\"\"",
")",
")",
"self",
".",
"add_option",
"(",
"'-x'",
",",
"'--no-build-html'",
",",
"dest",
"=",
"\"build_html\"",
",",
"action",
"=",
"\"store_false\"",
",",
"default",
"=",
"True",
",",
"help",
"=",
"dedent",
"(",
"\"\"\"\n Don't build index files for each directory, you might want to\n to disable this if your server dynamically builds index files\n using something like mod_autoindex and you want to make older\n package/module versions continue to be available.\n \"\"\"",
")",
")",
"self",
".",
"add_option",
"(",
"'-z'",
",",
"'--also-get-source'",
",",
"dest",
"=",
"\"get_source\"",
",",
"action",
"=",
"\"store_true\"",
",",
"default",
"=",
"False",
",",
"help",
"=",
"dedent",
"(",
"\"\"\"\n In addition to downloading wheels, eggs or any other package\n format also download the source tar.gz files in case the\n platform using this index does not support the wheel/egg/etc\n \"\"\"",
")",
")",
"self",
".",
"add_option",
"(",
"'-s'",
",",
"'--symlink'",
",",
"dest",
"=",
"\"use_symlink\"",
",",
"default",
"=",
"OS_HAS_SYMLINK",
",",
"action",
"=",
"\"store_true\"",
",",
"help",
"=",
"dedent",
"(",
"\"\"\"\n Use symlinks in PACKAGE_DIR/simple/ rather than copying files.\n Default: %default\n \"\"\"",
")",
")",
"self",
".",
"add_option",
"(",
"'-S'",
",",
"'--no-symlink'",
",",
"dest",
"=",
"\"use_symlink\"",
",",
"action",
"=",
"\"store_false\"",
")",
"self",
".",
"add_option",
"(",
"'-v'",
",",
"'--verbose'",
",",
"dest",
"=",
"\"verbose\"",
",",
"action",
"=",
"\"store_true\"",
")"
] |
https://github.com/wolever/pip2pi/blob/7dd84371221a38b9dc934e6ea386969356d4a5a7/libpip2pi/commands.py#L202-L249

nwo: sphinx-doc/sphinx | sha: e79681c76843c1339863b365747079b2d662d0c1
path: sphinx/application.py | language: python
identifier: Sphinx.add_event | parameters: (self, name: str)

def add_event(self, name: str) -> None:
"""Register an event called *name*.
This is needed to be able to emit it.
:param name: The name of the event
"""
logger.debug('[app] adding event: %r', name)
self.events.add(name)
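
An extension would typically register its event from setup(); a minimal sketch (the event name is illustrative):

def setup(app):
    # Once registered, the extension can fire the event later with
    # app.emit('my-extension-event', ...).
    app.add_event('my-extension-event')
    return {'version': '0.1', 'parallel_read_safe': True}
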
https://github.com/sphinx-doc/sphinx/blob/e79681c76843c1339863b365747079b2d662d0c1/sphinx/application.py#L521-L529

nwo: zachwill/flask-engine | sha: 7c8ad4bfe36382a8c9286d873ec7b785715832a4
path: libs/werkzeug/contrib/sessions.py | language: python
identifier: FilesystemSessionStore.list | parameters: (self)
return_statement: return result

def list(self):
"""Lists all sessions in the store.
.. versionadded:: 0.6
"""
before, after = self.filename_template.split('%s', 1)
filename_re = re.compile(r'%s(.{5,})%s$' % (re.escape(before),
re.escape(after)))
result = []
for filename in os.listdir(self.path):
#: this is a session that is still being saved.
if filename.endswith(_fs_transaction_suffix):
continue
match = filename_re.match(filename)
if match is not None:
result.append(match.group(1))
return result
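
A usage sketch against the (since-removed) werkzeug.contrib.sessions API vendored here; the directory is illustrative:

from werkzeug.contrib.sessions import FilesystemSessionStore

store = FilesystemSessionStore('/tmp/app-sessions')
session = store.new()
session['user'] = 'alice'
store.save(session)
print(store.list())  # e.g. [session.sid]
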
https://github.com/zachwill/flask-engine/blob/7c8ad4bfe36382a8c9286d873ec7b785715832a4/libs/werkzeug/contrib/sessions.py#L273-L289

nwo: postgres/pgadmin4 | sha: 374c5e952fa594d749fadf1f88076c1cba8c5f64
path: web/pgadmin/tools/debugger/__init__.py | language: python
identifier: set_clear_breakpoint | parameters: (trans_id, line_no, set_type)
return_statement: return make_json_response(data={'status': status, 'result': result})

def set_clear_breakpoint(trans_id, line_no, set_type):
"""
set_clear_breakpoint(trans_id, line_no, set_type)
    This method is responsible for setting and clearing the breakpoint
Parameters:
trans_id
- Transaction ID
line_no
- Line number to set
set_type
- 0 - clear the breakpoint, 1 - set the breakpoint
"""
de_inst = DebuggerInstance(trans_id)
if de_inst.debugger_data is None:
return make_json_response(
data={
'status': False,
'result': SERVER_CONNECTION_CLOSED
}
)
manager = get_driver(PG_DEFAULT_DRIVER).connection_manager(
de_inst.debugger_data['server_id'])
conn = manager.connection(
did=de_inst.debugger_data['database_id'],
conn_id=de_inst.debugger_data['exe_conn_id'])
# find the debugger version and execute the query accordingly
dbg_version = de_inst.debugger_data['debugger_version']
if dbg_version <= 2:
template_path = DEBUGGER_SQL_V1_PATH
else:
template_path = DEBUGGER_SQL_V3_PATH
query_type = ''
    # We need to find the function OID before sending the foid to set the
    # breakpoint, because the function being debugged may itself call
    # functions at deeper levels; we save the debug level to a session
    # variable and pass the appropriate foid to set the breakpoint.
sql_ = render_template(
"/".join([template_path, "get_stack_info.sql"]),
session_id=de_inst.debugger_data['session_id']
)
status, res_stack = execute_dict_search_path(
conn, sql_, de_inst.debugger_data['search_path'])
if not status:
return internal_server_error(errormsg=res_stack)
# For multilevel function debugging, we need to fetch current selected
# frame's function oid for setting the breakpoint. For single function
# the frame id will be 0.
foid = res_stack['rows'][de_inst.debugger_data['frame_id']]['func']
# Check the result of the stack before setting the breakpoint
if conn.connected():
if set_type == 1:
query_type = 'set_breakpoint'
else:
query_type = 'clear_breakpoint'
sql = render_template(
"/".join([template_path, query_type + ".sql"]),
session_id=de_inst.debugger_data['session_id'],
foid=foid, line_number=line_no
)
status, result = execute_dict_search_path(
conn, sql, de_inst.debugger_data['search_path'])
result = result['rows']
if not status:
return internal_server_error(errormsg=result)
else:
status = False
result = SERVER_CONNECTION_CLOSED
return make_json_response(
data={'status': status, 'result': result}
)
https://github.com/postgres/pgadmin4/blob/374c5e952fa594d749fadf1f88076c1cba8c5f64/web/pgadmin/tools/debugger/__init__.py#L1443-L1525

nwo: google/rekall | sha: 55d1925f2df9759a989b35271b4fa48fc54a1c86
path: rekall-agent/rekall_agent/locations/cloud.py | language: python
identifier: GCSOAuth2BasedLocation.list_files | parameters: (self, completion_routine=None, paging=100, max_results=100, **kwargs)

def list_files(self, completion_routine=None, paging=100,
max_results=100, **kwargs):
"""A generator of Location object below this one."""
_, params, headers, _ = self._get_parameters(**kwargs)
url_endpoint = ("https://www.googleapis.com/storage/v1/b/%s/o" %
self.bucket)
params["prefix"] = utils.join_path(self.path)
params["maxResults"] = paging
count = 0
while count < max_results:
resp = self.get_requests_session().get(
url_endpoint, params=params, headers=headers)
if not resp.ok:
self._report_error(completion_routine, resp)
return
data = json.loads(resp.text)
items = data.get("items", [])
for item in items:
sublocation = self.copy()
sublocation.path = item["name"]
sublocation.generation = item["generation"]
count += 1
yield location.LocationStat.from_keywords(
session=self._session,
location=sublocation,
size=item["size"],
generation=item["generation"],
created=arrow.get(item["timeCreated"]).timestamp,
updated=arrow.get(item["updated"]).timestamp)
next_page_token = data.get("nextPageToken")
if not next_page_token or not items:
break
params["pageToken"] = next_page_token
https://github.com/google/rekall/blob/55d1925f2df9759a989b35271b4fa48fc54a1c86/rekall-agent/rekall_agent/locations/cloud.py#L632-L670

nwo: visionml/pytracking | sha: 3e6a8980db7a2275252abcc398ed0c2494f0ceab
path: ltr/data/loader.py | language: python
identifier: ltr_collate | parameters: (batch)

def ltr_collate(batch):
"""Puts each data field into a tensor with outer dimension batch size"""
error_msg = "batch must contain tensors, numbers, dicts or lists; found {}"
elem_type = type(batch[0])
if isinstance(batch[0], torch.Tensor):
out = None
if _check_use_shared_memory():
# If we're in a background process, concatenate directly into a
# shared memory tensor to avoid an extra copy
numel = sum([x.numel() for x in batch])
storage = batch[0].storage()._new_shared(numel)
out = batch[0].new(storage)
return torch.stack(batch, 0, out=out)
# if batch[0].dim() < 4:
# return torch.stack(batch, 0, out=out)
# return torch.cat(batch, 0, out=out)
elif elem_type.__module__ == 'numpy' and elem_type.__name__ != 'str_' \
and elem_type.__name__ != 'string_':
elem = batch[0]
if elem_type.__name__ == 'ndarray':
# array of string classes and object
if torch.utils.data.dataloader.re.search('[SaUO]', elem.dtype.str) is not None:
raise TypeError(error_msg.format(elem.dtype))
return torch.stack([torch.from_numpy(b) for b in batch], 0)
if elem.shape == (): # scalars
py_type = float if elem.dtype.name.startswith('float') else int
return torch.utils.data.dataloader.numpy_type_map[elem.dtype.name](list(map(py_type, batch)))
elif isinstance(batch[0], int_classes):
return torch.LongTensor(batch)
elif isinstance(batch[0], float):
return torch.DoubleTensor(batch)
elif isinstance(batch[0], string_classes):
return batch
elif isinstance(batch[0], TensorDict):
return TensorDict({key: ltr_collate([d[key] for d in batch]) for key in batch[0]})
elif isinstance(batch[0], collections.Mapping):
return {key: ltr_collate([d[key] for d in batch]) for key in batch[0]}
elif isinstance(batch[0], TensorList):
transposed = zip(*batch)
return TensorList([ltr_collate(samples) for samples in transposed])
elif isinstance(batch[0], collections.Sequence):
transposed = zip(*batch)
return [ltr_collate(samples) for samples in transposed]
elif batch[0] is None:
return batch
raise TypeError((error_msg.format(type(batch[0]))))
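
This collate function is meant to be handed to a PyTorch DataLoader; a minimal sketch, where `dataset` is assumed to yield TensorDict samples:

from torch.utils.data import DataLoader

# Each field of the TensorDict samples is stacked into a tensor whose
# first dimension is the batch.
loader = DataLoader(dataset, batch_size=8, collate_fn=ltr_collate)
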
https://github.com/visionml/pytracking/blob/3e6a8980db7a2275252abcc398ed0c2494f0ceab/ltr/data/loader.py#L18-L66

nwo: kvazis/homeassistant | sha: aca227a780f806d861342e3611025a52a3bb4366
path: custom_components/localtuya/pytuya/__init__.py | language: python
identifier: TuyaLoggingAdapter.process | parameters: (self, msg, kwargs)
return_statement: return f"[{dev_id[0:3]}...{dev_id[-3:]}] {msg}", kwargs

def process(self, msg, kwargs):
"""Process log point and return output."""
dev_id = self.extra["device_id"]
return f"[{dev_id[0:3]}...{dev_id[-3:]}] {msg}", kwargs
https://github.com/kvazis/homeassistant/blob/aca227a780f806d861342e3611025a52a3bb4366/custom_components/localtuya/pytuya/__init__.py#L105-L108

nwo: oilshell/oil | sha: 94388e7d44a9ad879b12615f6203b38596b5a2d3
path: tools/find/ast.py | language: python
identifier: _path | parameters: (glob)
return_statement: return asdl.expr.PathTest(asdl.pathAccessor_e.FullPath, asdl.predicate.GlobMatch(glob))

def _path(glob):
return asdl.expr.PathTest(
asdl.pathAccessor_e.FullPath,
asdl.predicate.GlobMatch(glob)
)
https://github.com/oilshell/oil/blob/94388e7d44a9ad879b12615f6203b38596b5a2d3/tools/find/ast.py#L36-L40

nwo: chapmanb/cloudbiolinux | sha: f6d0414bdba495944aaccf19ae55ba50b13da892
path: cloudbio/custom/bio_proteomics.py | language: python
identifier: install_openms | parameters: (env)

def install_openms(env):
"""
See comments above, working on getting this to compile from source. In
the meantime installing from deb will have to do.
"""
default_version = "1.10.0"
version = env.get("tool_version", default_version)
dot_version = version[0:version.rindex('.')]
url = 'http://downloads.sourceforge.net/project/open-ms/OpenMS/OpenMS-%s/OpenMS-%s.tar.gz' % (dot_version, version)
def _make(env):
with cd("contrib"):
env.safe_run("cmake -DINSTALL_PREFIX=%s ." % env.get('system_install'))
env.safe_run("make")
env.safe_run("cmake -DINSTALL_PREFIX=%s ." % env.get('system_install'))
env.safe_run("make")
env.safe_sudo("make install")
_get_install(url, env, _make)
https://github.com/chapmanb/cloudbiolinux/blob/f6d0414bdba495944aaccf19ae55ba50b13da892/cloudbio/custom/bio_proteomics.py#L75-L92

nwo: PowerScript/KatanaFramework | sha: 0f6ad90a88de865d58ec26941cb4460501e75496
path: lib/setuptools/setuptools/archive_util.py | language: python
identifier: unpack_tarfile | parameters: (filename, extract_dir, progress_filter=default_filter)

def unpack_tarfile(filename, extract_dir, progress_filter=default_filter):
"""Unpack tar/tar.gz/tar.bz2 `filename` to `extract_dir`
Raises ``UnrecognizedFormat`` if `filename` is not a tarfile (as determined
by ``tarfile.open()``). See ``unpack_archive()`` for an explanation
of the `progress_filter` argument.
"""
try:
tarobj = tarfile.open(filename)
except tarfile.TarError:
raise UnrecognizedFormat(
"%s is not a compressed or uncompressed tar file" % (filename,)
)
with contextlib.closing(tarobj):
# don't do any chowning!
tarobj.chown = lambda *args: None
for member in tarobj:
name = member.name
# don't extract absolute paths or ones with .. in them
if not name.startswith('/') and '..' not in name.split('/'):
prelim_dst = os.path.join(extract_dir, *name.split('/'))
                # resolve any links and extract the link targets as normal
# files
while member is not None and (member.islnk() or member.issym()):
linkpath = member.linkname
if member.issym():
base = posixpath.dirname(member.name)
linkpath = posixpath.join(base, linkpath)
linkpath = posixpath.normpath(linkpath)
member = tarobj._getmember(linkpath)
if member is not None and (member.isfile() or member.isdir()):
final_dst = progress_filter(name, prelim_dst)
if final_dst:
if final_dst.endswith(os.sep):
final_dst = final_dst[:-1]
try:
# XXX Ugh
tarobj._extract_member(member, final_dst)
except tarfile.ExtractError:
# chown/chmod/mkfifo/mknode/makedev failed
pass
return True
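
A usage sketch (paths illustrative; the default progress_filter simply returns the destination path unchanged):

# Extract pkg.tar.gz under ./build, skipping absolute paths and any
# member whose path contains '..'.
unpack_tarfile('pkg.tar.gz', 'build')
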
|
[
"def",
"unpack_tarfile",
"(",
"filename",
",",
"extract_dir",
",",
"progress_filter",
"=",
"default_filter",
")",
":",
"try",
":",
"tarobj",
"=",
"tarfile",
".",
"open",
"(",
"filename",
")",
"except",
"tarfile",
".",
"TarError",
":",
"raise",
"UnrecognizedFormat",
"(",
"\"%s is not a compressed or uncompressed tar file\"",
"%",
"(",
"filename",
",",
")",
")",
"with",
"contextlib",
".",
"closing",
"(",
"tarobj",
")",
":",
"# don't do any chowning!",
"tarobj",
".",
"chown",
"=",
"lambda",
"*",
"args",
":",
"None",
"for",
"member",
"in",
"tarobj",
":",
"name",
"=",
"member",
".",
"name",
"# don't extract absolute paths or ones with .. in them",
"if",
"not",
"name",
".",
"startswith",
"(",
"'/'",
")",
"and",
"'..'",
"not",
"in",
"name",
".",
"split",
"(",
"'/'",
")",
":",
"prelim_dst",
"=",
"os",
".",
"path",
".",
"join",
"(",
"extract_dir",
",",
"*",
"name",
".",
"split",
"(",
"'/'",
")",
")",
"# resolve any links and to extract the link targets as normal",
"# files",
"while",
"member",
"is",
"not",
"None",
"and",
"(",
"member",
".",
"islnk",
"(",
")",
"or",
"member",
".",
"issym",
"(",
")",
")",
":",
"linkpath",
"=",
"member",
".",
"linkname",
"if",
"member",
".",
"issym",
"(",
")",
":",
"base",
"=",
"posixpath",
".",
"dirname",
"(",
"member",
".",
"name",
")",
"linkpath",
"=",
"posixpath",
".",
"join",
"(",
"base",
",",
"linkpath",
")",
"linkpath",
"=",
"posixpath",
".",
"normpath",
"(",
"linkpath",
")",
"member",
"=",
"tarobj",
".",
"_getmember",
"(",
"linkpath",
")",
"if",
"member",
"is",
"not",
"None",
"and",
"(",
"member",
".",
"isfile",
"(",
")",
"or",
"member",
".",
"isdir",
"(",
")",
")",
":",
"final_dst",
"=",
"progress_filter",
"(",
"name",
",",
"prelim_dst",
")",
"if",
"final_dst",
":",
"if",
"final_dst",
".",
"endswith",
"(",
"os",
".",
"sep",
")",
":",
"final_dst",
"=",
"final_dst",
"[",
":",
"-",
"1",
"]",
"try",
":",
"# XXX Ugh",
"tarobj",
".",
"_extract_member",
"(",
"member",
",",
"final_dst",
")",
"except",
"tarfile",
".",
"ExtractError",
":",
"# chown/chmod/mkfifo/mknode/makedev failed",
"pass",
"return",
"True"
] |
https://github.com/PowerScript/KatanaFramework/blob/0f6ad90a88de865d58ec26941cb4460501e75496/lib/setuptools/setuptools/archive_util.py#L128-L171
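A minimal usage sketch for unpack_tarfile above, assuming the function and its module imports (tarfile, contextlib, os, posixpath) are in scope; the paths are hypothetical and default_filter is the pass-through progress filter.

import os
import tarfile
import tempfile

# build a tiny archive to unpack (hypothetical demo paths)
workdir = tempfile.mkdtemp()
src = os.path.join(workdir, "hello.txt")
with open(src, "w") as f:
    f.write("hello\n")
archive = os.path.join(workdir, "demo.tar.gz")
with tarfile.open(archive, "w:gz") as t:
    t.add(src, arcname="hello.txt")

dest = os.path.join(workdir, "out")
unpack_tarfile(archive, dest)   # raises UnrecognizedFormat for non-tar input
print(os.listdir(dest))         # -> ['hello.txt']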
|
||
virtualabs/btlejack
|
4e3014f90a55d0e7068f2580dfd6cac3e149114b
|
btlejack/ui.py
|
python
|
CLIConnectionSniffer.on_ll_packet
|
(self, packet)
|
A BLE LL packet has been captured.
|
A BLE LL packet has been captured.
|
[
"A",
"BLE",
"LL",
"packet",
"has",
"been",
"captured",
"."
] |
def on_ll_packet(self, packet):
"""
A BLE LL packet has been captured.
"""
timestamp = time()
ts_sec = int(timestamp)
ts_usec = int((timestamp - ts_sec)*1000000)
if self.output is not None:
# Is it a Nordic Tap output format ?
if isinstance(self.output, PcapNordicTapWriter) or isinstance(self.output, PcapBlePHDRWriter):
self.output.write_packet(ts_sec, ts_usec, self.access_address, packet.data)
else:
self.output.write_packet(ts_sec, ts_usec, self.access_address, packet.data[10:])
pkt_hex = ' '.join(['%02x' % c for c in packet.data[10:]])
print('LL Data: ' + pkt_hex)
|
[
"def",
"on_ll_packet",
"(",
"self",
",",
"packet",
")",
":",
"timestamp",
"=",
"time",
"(",
")",
"ts_sec",
"=",
"int",
"(",
"timestamp",
")",
"ts_usec",
"=",
"int",
"(",
"(",
"timestamp",
"-",
"ts_sec",
")",
"*",
"1000000",
")",
"if",
"self",
".",
"output",
"is",
"not",
"None",
":",
"# Is it a Nordic Tap output format ?",
"if",
"isinstance",
"(",
"self",
".",
"output",
",",
"PcapNordicTapWriter",
")",
"or",
"isinstance",
"(",
"self",
".",
"output",
",",
"PcapBlePHDRWriter",
")",
":",
"self",
".",
"output",
".",
"write_packet",
"(",
"ts_sec",
",",
"ts_usec",
",",
"self",
".",
"access_address",
",",
"packet",
".",
"data",
")",
"else",
":",
"self",
".",
"output",
".",
"write_packet",
"(",
"ts_sec",
",",
"ts_usec",
",",
"self",
".",
"access_address",
",",
"packet",
".",
"data",
"[",
"10",
":",
"]",
")",
"pkt_hex",
"=",
"' '",
".",
"join",
"(",
"[",
"'%02x'",
"%",
"c",
"for",
"c",
"in",
"packet",
".",
"data",
"[",
"10",
":",
"]",
"]",
")",
"print",
"(",
"'LL Data: '",
"+",
"pkt_hex",
")"
] |
https://github.com/virtualabs/btlejack/blob/4e3014f90a55d0e7068f2580dfd6cac3e149114b/btlejack/ui.py#L403-L419
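A standalone sketch of the timestamp split used by on_ll_packet above: pcap record headers store the capture time as whole seconds plus microseconds.

from time import time

timestamp = time()
ts_sec = int(timestamp)                        # whole seconds
ts_usec = int((timestamp - ts_sec) * 1000000)  # fractional part as microseconds
assert 0 <= ts_usec < 1000000
print(ts_sec, ts_usec)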
|
||
webpy/webpy
|
62245f7da4aab8f8607c192b98d5ef93873f995b
|
web/template.py
|
python
|
CodeNode.emit
|
(self, indent, text_indent="")
|
return rx.sub(indent, self.code).rstrip(" ")
|
[] |
def emit(self, indent, text_indent=""):
import re
rx = re.compile("^", re.M)
return rx.sub(indent, self.code).rstrip(" ")
|
[
"def",
"emit",
"(",
"self",
",",
"indent",
",",
"text_indent",
"=",
"\"\"",
")",
":",
"import",
"re",
"rx",
"=",
"re",
".",
"compile",
"(",
"\"^\"",
",",
"re",
".",
"M",
")",
"return",
"rx",
".",
"sub",
"(",
"indent",
",",
"self",
".",
"code",
")",
".",
"rstrip",
"(",
"\" \"",
")"
] |
https://github.com/webpy/webpy/blob/62245f7da4aab8f8607c192b98d5ef93873f995b/web/template.py#L658-L662
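A standalone sketch of the indentation trick in emit above: re.compile("^", re.M) matches the start of every line, so sub() prefixes each line; the trailing rstrip(" ") drops the stray indent inserted after the final newline.

import re

rx = re.compile("^", re.M)
code = "x = 1\nprint(x)\n"
print(rx.sub("    ", code).rstrip(" "))
# ->     x = 1
#        print(x)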
|
|||
securesystemslab/zippy
|
ff0e84ac99442c2c55fe1d285332cfd4e185e089
|
zippy/benchmarks/src/benchmarks/whoosh/src/whoosh/filedb/filetables.py
|
python
|
OrderedBase.ranges_from
|
(self, key)
|
Yields a series of ``(keypos, keylen, datapos, datalen)`` tuples
for the ordered series of keys equal or greater than the given key.
|
Yields a series of ``(keypos, keylen, datapos, datalen)`` tuples
for the ordered series of keys equal or greater than the given key.
|
[
"Yields",
"a",
"series",
"of",
"(",
"keypos",
"keylen",
"datapos",
"datalen",
")",
"tuples",
"for",
"the",
"ordered",
"series",
"of",
"keys",
"equal",
"or",
"greater",
"than",
"the",
"given",
"key",
"."
] |
def ranges_from(self, key):
"""Yields a series of ``(keypos, keylen, datapos, datalen)`` tuples
for the ordered series of keys equal or greater than the given key.
"""
pos = self._closest_key_pos(key)
if pos is None:
return
for item in self._ranges(pos=pos):
yield item
|
[
"def",
"ranges_from",
"(",
"self",
",",
"key",
")",
":",
"pos",
"=",
"self",
".",
"_closest_key_pos",
"(",
"key",
")",
"if",
"pos",
"is",
"None",
":",
"return",
"for",
"item",
"in",
"self",
".",
"_ranges",
"(",
"pos",
"=",
"pos",
")",
":",
"yield",
"item"
] |
https://github.com/securesystemslab/zippy/blob/ff0e84ac99442c2c55fe1d285332cfd4e185e089/zippy/benchmarks/src/benchmarks/whoosh/src/whoosh/filedb/filetables.py#L473-L483
|
||
conan-io/conan
|
28ec09f6cbf1d7e27ec27393fd7bbc74891e74a8
|
conans/server/rest/controller/v1/file_upload_download.py
|
python
|
ConanFileUpload.filename
|
(self)
|
return fname[:255] or 'empty'
|
Name of the file on the client file system, but normalized to ensure
file system compatibility. An empty filename is returned as 'empty'.
Only ASCII letters, digits, dashes, underscores and dots are
allowed in the final filename. Accents are removed, if possible.
Whitespace is replaced by a single dash. Leading or tailing dots
or dashes are removed. The filename is limited to 255 characters.
|
Name of the file on the client file system, but normalized to ensure
file system compatibility. An empty filename is returned as 'empty'.
|
[
"Name",
"of",
"the",
"file",
"on",
"the",
"client",
"file",
"system",
"but",
"normalized",
"to",
"ensure",
"file",
"system",
"compatibility",
".",
"An",
"empty",
"filename",
"is",
"returned",
"as",
"empty",
"."
] |
def filename(self):
""" Name of the file on the client file system, but normalized to ensure
file system compatibility. An empty filename is returned as 'empty'.
Only ASCII letters, digits, dashes, underscores and dots are
allowed in the final filename. Accents are removed, if possible.
Whitespace is replaced by a single dash. Leading or trailing dots
or dashes are removed. The filename is limited to 255 characters.
"""
fname = self.raw_filename
if six.PY2:
if not isinstance(fname, unicode):
fname = fname.decode('utf8', 'ignore')
fname = normalize('NFKD', fname).encode('ASCII', 'ignore').decode('ASCII')
fname = os.path.basename(fname.replace('\\', os.path.sep))
# fname = re.sub(r'[^a-zA-Z0-9-_.\s]', '', fname).strip()
# fname = re.sub(r'[-\s]+', '-', fname).strip('.-')
return fname[:255] or 'empty'
|
[
"def",
"filename",
"(",
"self",
")",
":",
"fname",
"=",
"self",
".",
"raw_filename",
"if",
"six",
".",
"PY2",
":",
"if",
"not",
"isinstance",
"(",
"fname",
",",
"unicode",
")",
":",
"fname",
"=",
"fname",
".",
"decode",
"(",
"'utf8'",
",",
"'ignore'",
")",
"fname",
"=",
"normalize",
"(",
"'NFKD'",
",",
"fname",
")",
".",
"encode",
"(",
"'ASCII'",
",",
"'ignore'",
")",
".",
"decode",
"(",
"'ASCII'",
")",
"fname",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"fname",
".",
"replace",
"(",
"'\\\\'",
",",
"os",
".",
"path",
".",
"sep",
")",
")",
"# fname = re.sub(r'[^a-zA-Z0-9-_.\\s]', '', fname).strip()",
"# fname = re.sub(r'[-\\s]+', '-', fname).strip('.-')",
"return",
"fname",
"[",
":",
"255",
"]",
"or",
"'empty'"
] |
https://github.com/conan-io/conan/blob/28ec09f6cbf1d7e27ec27393fd7bbc74891e74a8/conans/server/rest/controller/v1/file_upload_download.py#L46-L63
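A standalone sketch of the accent-stripping step in filename above: NFKD decomposition splits base letters from combining accents, which the ASCII encode/decode round trip then drops.

from unicodedata import normalize

fname = "r\u00e9sum\u00e9.pdf"  # 'résumé.pdf'
ascii_name = normalize("NFKD", fname).encode("ASCII", "ignore").decode("ASCII")
print(ascii_name)  # -> resume.pdf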
|
|
tp4a/teleport
|
1fafd34f1f775d2cf80ea4af6e44468d8e0b24ad
|
server/www/packages/packages-linux/x64/pymysql/optionfile.py
|
python
|
Parser.get
|
(self, section, option)
|
return self.__remove_quotes(value)
|
[] |
def get(self, section, option):
value = configparser.RawConfigParser.get(self, section, option)
return self.__remove_quotes(value)
|
[
"def",
"get",
"(",
"self",
",",
"section",
",",
"option",
")",
":",
"value",
"=",
"configparser",
".",
"RawConfigParser",
".",
"get",
"(",
"self",
",",
"section",
",",
"option",
")",
"return",
"self",
".",
"__remove_quotes",
"(",
"value",
")"
] |
https://github.com/tp4a/teleport/blob/1fafd34f1f775d2cf80ea4af6e44468d8e0b24ad/server/www/packages/packages-linux/x64/pymysql/optionfile.py#L21-L23
|
|||
ales-tsurko/cells
|
4cf7e395cd433762bea70cdc863a346f3a6fe1d0
|
packaging/macos/python/lib/python3.7/site-packages/pip/_internal/utils/ui.py
|
python
|
InterruptibleMixin.finish
|
(self)
|
Restore the original SIGINT handler after finishing.
This should happen regardless of whether the progress display finishes
normally, or gets interrupted.
|
Restore the original SIGINT handler after finishing.
|
[
"Restore",
"the",
"original",
"SIGINT",
"handler",
"after",
"finishing",
"."
] |
def finish(self):
"""
Restore the original SIGINT handler after finishing.
This should happen regardless of whether the progress display finishes
normally, or gets interrupted.
"""
super(InterruptibleMixin, self).finish()
signal(SIGINT, self.original_handler)
|
[
"def",
"finish",
"(",
"self",
")",
":",
"super",
"(",
"InterruptibleMixin",
",",
"self",
")",
".",
"finish",
"(",
")",
"signal",
"(",
"SIGINT",
",",
"self",
".",
"original_handler",
")"
] |
https://github.com/ales-tsurko/cells/blob/4cf7e395cd433762bea70cdc863a346f3a6fe1d0/packaging/macos/python/lib/python3.7/site-packages/pip/_internal/utils/ui.py#L97-L105
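A minimal sketch of the save/restore pattern behind finish above, outside the mixin: remember the current SIGINT handler, install a temporary one, and always restore the original.

from signal import SIGINT, getsignal, signal

original_handler = getsignal(SIGINT)        # remember the current handler
signal(SIGINT, lambda signum, frame: None)  # temporary no-op handler swallows Ctrl-C
try:
    pass  # ... long-running progress display ...
finally:
    signal(SIGINT, original_handler)        # restore regardless of how we exit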
|
||
CPqD/RouteFlow
|
3f406b9c1a0796f40a86eb1194990cdd2c955f4d
|
pox/tools/reindent-pox.py
|
python
|
_rstrip
|
(line, JUNK='\n \t')
|
return line[:i]
|
Return line stripped of trailing spaces, tabs, newlines.
Note that line.rstrip() instead also strips sundry control characters,
but at least one known Emacs user expects to keep junk like that, not
mentioning Barry by name or anything <wink>.
|
Return line stripped of trailing spaces, tabs, newlines.
|
[
"Return",
"line",
"stripped",
"of",
"trailing",
"spaces",
"tabs",
"newlines",
"."
] |
def _rstrip(line, JUNK='\n \t'):
"""Return line stripped of trailing spaces, tabs, newlines.
Note that line.rstrip() instead also strips sundry control characters,
but at least one known Emacs user expects to keep junk like that, not
mentioning Barry by name or anything <wink>.
"""
i = len(line)
while i > 0 and line[i-1] in JUNK:
i -= 1
return line[:i]
|
[
"def",
"_rstrip",
"(",
"line",
",",
"JUNK",
"=",
"'\\n \\t'",
")",
":",
"i",
"=",
"len",
"(",
"line",
")",
"while",
"i",
">",
"0",
"and",
"line",
"[",
"i",
"-",
"1",
"]",
"in",
"JUNK",
":",
"i",
"-=",
"1",
"return",
"line",
"[",
":",
"i",
"]"
] |
https://github.com/CPqD/RouteFlow/blob/3f406b9c1a0796f40a86eb1194990cdd2c955f4d/pox/tools/reindent-pox.py#L140-L151
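A usage sketch for _rstrip above, assuming the function is in scope: unlike str.rstrip(), only the characters listed in JUNK are stripped, so other trailing control characters survive.

print(repr(_rstrip("value \t\n")))   # -> 'value'
print(repr(_rstrip("page\x0c\n")))   # -> 'page\x0c' (form feed kept)
print(repr("page\x0c\n".rstrip()))   # -> 'page' (str.rstrip drops it too)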
|
|
openshift/openshift-tools
|
1188778e728a6e4781acf728123e5b356380fe6f
|
openshift/installer/vendored/openshift-ansible-3.11.28-1/roles/lib_vendored_deps/library/oc_serviceaccount.py
|
python
|
Yedit.separator
|
(self, inc_sep)
|
setter method for separator
|
setter method for separator
|
[
"setter",
"method",
"for",
"separator"
] |
def separator(self, inc_sep):
''' setter method for separator '''
self._separator = inc_sep
|
[
"def",
"separator",
"(",
"self",
",",
"inc_sep",
")",
":",
"self",
".",
"_separator",
"=",
"inc_sep"
] |
https://github.com/openshift/openshift-tools/blob/1188778e728a6e4781acf728123e5b356380fe6f/openshift/installer/vendored/openshift-ansible-3.11.28-1/roles/lib_vendored_deps/library/oc_serviceaccount.py#L169-L171
|
||
ganglia/gmond_python_modules
|
2f7fcab3d27926ef4a2feb1b53c09af16a43e729
|
scribe/python_modules/scribe_stats.py
|
python
|
metric_cleanup
|
()
|
Clean up the metric module.
|
Clean up the metric module.
|
[
"Clean",
"up",
"the",
"metric",
"module",
"."
] |
def metric_cleanup():
'''Clean up the metric module.'''
pass
|
[
"def",
"metric_cleanup",
"(",
")",
":",
"pass"
] |
https://github.com/ganglia/gmond_python_modules/blob/2f7fcab3d27926ef4a2feb1b53c09af16a43e729/scribe/python_modules/scribe_stats.py#L84-L86
|
||
pywinauto/pywinauto
|
7235e6f83edfd96a7aeb8bbf9fef7b8f3d912512
|
pywinauto/controls/win32_controls.py
|
python
|
ComboBoxWrapper.selected_text
|
(self)
|
return self.item_texts()[self.selected_index()]
|
Return the selected text
|
Return the selected text
|
[
"Return",
"the",
"selected",
"text"
] |
def selected_text(self):
"""Return the selected text"""
return self.item_texts()[self.selected_index()]
|
[
"def",
"selected_text",
"(",
"self",
")",
":",
"return",
"self",
".",
"item_texts",
"(",
")",
"[",
"self",
".",
"selected_index",
"(",
")",
"]"
] |
https://github.com/pywinauto/pywinauto/blob/7235e6f83edfd96a7aeb8bbf9fef7b8f3d912512/pywinauto/controls/win32_controls.py#L343-L345
|
|
vatlab/sos
|
5f4dd45cc1b2f244354de2e23ea6f47011e2db31
|
src/sos/targets_r.py
|
python
|
R_library._install
|
(self, name, version, repos)
|
return ret_val
|
Check existence and version match of R library.
cran and bioc packages are unique yet might overlap with github.
Therefore if the input name is {repo}@{pkg} the package will be
installed from github if not available, else from cran or bioc
|
Check existence and version match of R library.
cran and bioc packages are unique yet might overlap with github.
Therefore if the input name is {repo}@{pkg} the package will be
installed from github if not available, else from cran or bioc
|
[
"Check",
"existence",
"and",
"version",
"match",
"of",
"R",
"library",
".",
"cran",
"and",
"bioc",
"packages",
"are",
"unique",
"yet",
"might",
"overlap",
"with",
"github",
".",
"Therefore",
"if",
"the",
"input",
"name",
"is",
"{",
"repo",
"}",
"/",
"{",
"pkg",
"}",
"the",
"package",
"will",
"be",
"installed",
"from",
"github",
"if",
"not",
"available",
"else",
"from",
"cran",
"or",
"bioc"
] |
def _install(self, name, version, repos):
"""Check existence and version match of R library.
cran and bioc packages are unique yet might overlap with github.
Therefore if the input name is {repo}@{pkg} the package will be
installed from github if not available, else from cran or bioc
"""
import subprocess
import tempfile
from sos.pattern import glob_wildcards
output_file = tempfile.NamedTemporaryFile(
mode="w+t", suffix=".txt", delete=False).name
script_file = tempfile.NamedTemporaryFile(
mode="w+t", suffix=".R", delete=False).name
#
package_loaded = (
"suppressMessages(require(package, character.only=TRUE, quietly=TRUE))"
)
version_satisfied = "TRUE"
for opt in ("==", ">=", ">", "<=", "<", "!="):
if opt in name:
if version is not None:
raise ValueError(
f"Specifying 'version=' option in addition to '{name}' is not allowed"
)
name, version = [x.strip() for x in name.split(opt, 1)]
if "," in version:
raise ValueError(
f"SoS does not yet support multiple version comparisons. {version} provided"
)
version = (opt + version,)
break
if version is not None:
version = list(version)
operators = []
for idx, value in enumerate(version):
value = str(value)
if value.startswith(">="):
operators.append(">=")
version[idx] = value[2:]
elif value.startswith(">"):
operators.append(">")
version[idx] = value[1:]
elif value.startswith("<="):
operators.append("<=")
version[idx] = value[2:]
elif value.startswith("<"):
operators.append("<")
version[idx] = value[1:]
elif value.startswith("=="):
operators.append("==")
version[idx] = value[2:]
elif value.startswith("!="):
operators.append("!=")
version[idx] = value[2:]
else:
operators.append("==")
# check version and mark version mismatch
# if current version satisfies any of the
# requirement the check program quits
version_satisfied = "||".join([
f"(cur_version {y} {repr(x)})"
for x, y in zip(version, operators)
])
#
if len(glob_wildcards("{repo}@{pkg}", [name])["repo"]):
# package is from github
self._install("remotes>=2.0.0", None, repos)
install_script = f"""
options(warn=-1)
package_repo <-strsplit("{name}", split="@")[[1]][2]
package <-strsplit("{name}", split="@")[[1]][1]
if ({package_loaded}) cur_version <- packageVersion(package) else cur_version <- NULL
if (!is.null(cur_version) && {version_satisfied}) {{
write(paste(package, cur_version, "AVAILABLE"), file={repr(output_file)})
}} else if ({"TRUE" if self._autoinstall else "FALSE"}) {{
remotes::install_github(package_repo, force=TRUE, upgrade="never")
if ({package_loaded}) cur_version <- packageVersion(package) else cur_version <- NULL
# if it still does not exist, write the package name to output
if (!is.null(cur_version)) {{
if ({version_satisfied}) write(paste(package, cur_version, "INSTALLED"), file={repr(output_file)})
else write(paste(package, cur_version, "VERSION_MISMATCH"), file={repr(output_file)})
}} else {{
write(paste(package, "NA", "MISSING"), file={repr(output_file)})
quit("no")
}}
}} else {{
if (!is.null(cur_version)) write(paste(package, cur_version, "VERSION_MISMATCH"), file={repr(output_file)}) else write(paste(package, cur_version, "UNAVAILABLE"), file={repr(output_file)})
}}
"""
else:
# package is from cran or bioc
install_script = f"""
options(warn=-1)
package <- "{name}"
if ({package_loaded}) cur_version <- packageVersion(package) else cur_version <- NULL
if (!is.null(cur_version) && {version_satisfied}) {{
write(paste(package, cur_version, "AVAILABLE"), file={repr(output_file)})
}} else if ({"TRUE" if self._autoinstall else "FALSE"}) {{
install.packages(package, repos="{repos}", quiet=FALSE)
# if the package still does not exist
if (!{package_loaded}) {{
source("http://bioconductor.org/biocLite.R")
biocLite(package, suppressUpdates=TRUE, suppressAutoUpdate=TRUE, ask=FALSE)
}}
if ({package_loaded}) cur_version <- packageVersion(package) else cur_version <- NULL
# if it still does not exist, write the package name to output
if (!is.null(cur_version)) {{
if ({version_satisfied}) write(paste(package, cur_version, "INSTALLED"), file={repr(output_file)}) else write(paste(package, cur_version, "VERSION_MISMATCH"), file={repr(output_file)})
}} else {{
write(paste(package, "NA", "MISSING"), file={repr(output_file)})
quit("no")
}}
}} else {{
if (!is.null(cur_version)) write(paste(package, cur_version, "VERSION_MISMATCH"), file={repr(output_file)}) else write(paste(package, cur_version, "UNAVAILABLE"), file={repr(output_file)})
}}
"""
# temporarily change the run mode to run to execute script
try:
with open(script_file, "w") as sfile:
sfile.write(install_script)
#
p = subprocess.Popen(
["Rscript", "--default-packages=utils", script_file])
ret = p.wait()
if ret != 0:
env.logger.warning(
f"Failed to detect or install R library {name}")
return False
except Exception as e:
env.logger.error(f"Failed to execute script: {e}")
return False
finally:
os.remove(script_file)
ret_val = False
with open(output_file) as tmp:
for line in tmp:
lib, cur_version, status = line.split(" ", 2)
if status.strip() == "MISSING":
env.logger.error(
f"R library {lib} is not available and cannot be installed."
)
elif status.strip() == "UNAVAILABLE":
env.logger.error(f"R library {lib} is not available.")
elif status.strip() == "AVAILABLE":
env.logger.debug(
f"R library {lib} ({cur_version}) is available")
ret_val = True
elif status.strip() == "INSTALLED":
env.logger.debug(
f"R library {lib} ({cur_version}) has been installed")
ret_val = True
elif status.strip() == "VERSION_MISMATCH":
env.logger.error(
f'R library {lib} ({cur_version}) does not satisfy version requirement ({"/".join(version)})!'
)
else:
raise RuntimeError(f"This should not happen: {line}")
try:
os.remove(output_file)
except Exception:
pass
return ret_val
|
[
"def",
"_install",
"(",
"self",
",",
"name",
",",
"version",
",",
"repos",
")",
":",
"import",
"subprocess",
"import",
"tempfile",
"from",
"sos",
".",
"pattern",
"import",
"glob_wildcards",
"output_file",
"=",
"tempfile",
".",
"NamedTemporaryFile",
"(",
"mode",
"=",
"\"w+t\"",
",",
"suffix",
"=",
"\".txt\"",
",",
"delete",
"=",
"False",
")",
".",
"name",
"script_file",
"=",
"tempfile",
".",
"NamedTemporaryFile",
"(",
"mode",
"=",
"\"w+t\"",
",",
"suffix",
"=",
"\".R\"",
",",
"delete",
"=",
"False",
")",
".",
"name",
"#",
"package_loaded",
"=",
"(",
"\"suppressMessages(require(package, character.only=TRUE, quietly=TRUE))\"",
")",
"version_satisfied",
"=",
"\"TRUE\"",
"for",
"opt",
"in",
"(",
"\"==\"",
",",
"\">=\"",
",",
"\">\"",
",",
"\"<=\"",
",",
"\"<\"",
",",
"\"!=\"",
")",
":",
"if",
"opt",
"in",
"name",
":",
"if",
"version",
"is",
"not",
"None",
":",
"raise",
"ValueError",
"(",
"f\"Specifying 'version=' option in addition to '{name}' is not allowed\"",
")",
"name",
",",
"version",
"=",
"[",
"x",
".",
"strip",
"(",
")",
"for",
"x",
"in",
"name",
".",
"split",
"(",
"opt",
",",
"1",
")",
"]",
"if",
"\",\"",
"in",
"version",
":",
"raise",
"ValueError",
"(",
"f\"SoS does not yet support multiple version comparisons. {version} provided\"",
")",
"version",
"=",
"(",
"opt",
"+",
"version",
",",
")",
"break",
"if",
"version",
"is",
"not",
"None",
":",
"version",
"=",
"list",
"(",
"version",
")",
"operators",
"=",
"[",
"]",
"for",
"idx",
",",
"value",
"in",
"enumerate",
"(",
"version",
")",
":",
"value",
"=",
"str",
"(",
"value",
")",
"if",
"value",
".",
"startswith",
"(",
"\">=\"",
")",
":",
"operators",
".",
"append",
"(",
"\">=\"",
")",
"version",
"[",
"idx",
"]",
"=",
"value",
"[",
"2",
":",
"]",
"elif",
"value",
".",
"startswith",
"(",
"\">\"",
")",
":",
"operators",
".",
"append",
"(",
"\">\"",
")",
"version",
"[",
"idx",
"]",
"=",
"value",
"[",
"1",
":",
"]",
"elif",
"value",
".",
"startswith",
"(",
"\"<=\"",
")",
":",
"operators",
".",
"append",
"(",
"\"<=\"",
")",
"version",
"[",
"idx",
"]",
"=",
"value",
"[",
"2",
":",
"]",
"elif",
"value",
".",
"startswith",
"(",
"\"<\"",
")",
":",
"operators",
".",
"append",
"(",
"\"<\"",
")",
"version",
"[",
"idx",
"]",
"=",
"value",
"[",
"1",
":",
"]",
"elif",
"value",
".",
"startswith",
"(",
"\"==\"",
")",
":",
"operators",
".",
"append",
"(",
"\"==\"",
")",
"version",
"[",
"idx",
"]",
"=",
"value",
"[",
"2",
":",
"]",
"elif",
"value",
".",
"startswith",
"(",
"\"!=\"",
")",
":",
"operators",
".",
"append",
"(",
"\"!=\"",
")",
"version",
"[",
"idx",
"]",
"=",
"value",
"[",
"2",
":",
"]",
"else",
":",
"operators",
".",
"append",
"(",
"\"==\"",
")",
"# check version and mark version mismatch",
"# if current version satisfies any of the",
"# requirement the check program quits",
"version_satisfied",
"=",
"\"||\"",
".",
"join",
"(",
"[",
"f\"(cur_version {y} {repr(x)})\"",
"for",
"x",
",",
"y",
"in",
"zip",
"(",
"version",
",",
"operators",
")",
"]",
")",
"#",
"if",
"len",
"(",
"glob_wildcards",
"(",
"\"{repo}@{pkg}\"",
",",
"[",
"name",
"]",
")",
"[",
"\"repo\"",
"]",
")",
":",
"# package is from github",
"self",
".",
"_install",
"(",
"\"remotes>=2.0.0\"",
",",
"None",
",",
"repos",
")",
"install_script",
"=",
"f\"\"\"\n options(warn=-1)\n package_repo <-strsplit(\"{name}\", split=\"@\")[[1]][2]\n package <-strsplit(\"{name}\", split=\"@\")[[1]][1]\n if ({package_loaded}) cur_version <- packageVersion(package) else cur_version <- NULL\n if (!is.null(cur_version) && {version_satisfied}) {{\n write(paste(package, cur_version, \"AVAILABLE\"), file={repr(output_file)})\n }} else if ({\"TRUE\" if self._autoinstall else \"FALSE\"}) {{\n remotes::install_github(package_repo, force=TRUE, upgrade=\"never\")\n if ({package_loaded}) cur_version <- packageVersion(package) else cur_version <- NULL\n # if it still does not exist, write the package name to output\n if (!is.null(cur_version)) {{\n if ({version_satisfied}) write(paste(package, cur_version, \"INSTALLED\"), file={repr(output_file)})\n else write(paste(package, cur_version, \"VERSION_MISMATCH\"), file={repr(output_file)})\n }} else {{\n write(paste(package, \"NA\", \"MISSING\"), file={repr(output_file)})\n quit(\"no\")\n }}\n }} else {{\n if (!is.null(cur_version)) write(paste(package, cur_version, \"VERSION_MISMATCH\"), file={repr(output_file)}) else write(paste(package, cur_version, \"UNAVAILABLE\"), file={repr(output_file)})\n }}\n \"\"\"",
"else",
":",
"# package is from cran or bioc",
"install_script",
"=",
"f\"\"\"\n options(warn=-1)\n package <- \"{name}\"\n if ({package_loaded}) cur_version <- packageVersion(package) else cur_version <- NULL\n if (!is.null(cur_version) && {version_satisfied}) {{\n write(paste(package, cur_version, \"AVAILABLE\"), file={repr(output_file)})\n }} else if ({\"TRUE\" if self._autoinstall else \"FALSE\"}) {{\n install.packages(package, repos=\"{repos}\", quiet=FALSE)\n # if the package still does not exist\n if (!{package_loaded}) {{\n source(\"http://bioconductor.org/biocLite.R\")\n biocLite(package, suppressUpdates=TRUE, suppressAutoUpdate=TRUE, ask=FALSE)\n }}\n if ({package_loaded}) cur_version <- packageVersion(package) else cur_version <- NULL\n # if it still does not exist, write the package name to output\n if (!is.null(cur_version)) {{\n if ({version_satisfied}) write(paste(package, cur_version, \"INSTALLED\"), file={repr(output_file)}) else write(paste(package, cur_version, \"VERSION_MISMATCH\"), file={repr(output_file)})\n }} else {{\n write(paste(package, \"NA\", \"MISSING\"), file={repr(output_file)})\n quit(\"no\")\n }}\n }} else {{\n if (!is.null(cur_version)) write(paste(package, cur_version, \"VERSION_MISMATCH\"), file={repr(output_file)}) else write(paste(package, cur_version, \"UNAVAILABLE\"), file={repr(output_file)})\n }}\n \"\"\"",
"# temporarily change the run mode to run to execute script",
"try",
":",
"with",
"open",
"(",
"script_file",
",",
"\"w\"",
")",
"as",
"sfile",
":",
"sfile",
".",
"write",
"(",
"install_script",
")",
"#",
"p",
"=",
"subprocess",
".",
"Popen",
"(",
"[",
"\"Rscript\"",
",",
"\"--default-packages=utils\"",
",",
"script_file",
"]",
")",
"ret",
"=",
"p",
".",
"wait",
"(",
")",
"if",
"ret",
"!=",
"0",
":",
"env",
".",
"logger",
".",
"warning",
"(",
"f\"Failed to detect or install R library {name}\"",
")",
"return",
"False",
"except",
"Exception",
"as",
"e",
":",
"env",
".",
"logger",
".",
"error",
"(",
"f\"Failed to execute script: {e}\"",
")",
"return",
"False",
"finally",
":",
"os",
".",
"remove",
"(",
"script_file",
")",
"ret_val",
"=",
"False",
"with",
"open",
"(",
"output_file",
")",
"as",
"tmp",
":",
"for",
"line",
"in",
"tmp",
":",
"lib",
",",
"cur_version",
",",
"status",
"=",
"line",
".",
"split",
"(",
"\" \"",
",",
"2",
")",
"if",
"status",
".",
"strip",
"(",
")",
"==",
"\"MISSING\"",
":",
"env",
".",
"logger",
".",
"error",
"(",
"f\"R library {lib} is not available and cannot be installed.\"",
")",
"elif",
"status",
".",
"strip",
"(",
")",
"==",
"\"UNAVAILABLE\"",
":",
"env",
".",
"logger",
".",
"error",
"(",
"f\"R library {lib} is not available.\"",
")",
"elif",
"status",
".",
"strip",
"(",
")",
"==",
"\"AVAILABLE\"",
":",
"env",
".",
"logger",
".",
"debug",
"(",
"f\"R library {lib} ({cur_version}) is available\"",
")",
"ret_val",
"=",
"True",
"elif",
"status",
".",
"strip",
"(",
")",
"==",
"\"INSTALLED\"",
":",
"env",
".",
"logger",
".",
"debug",
"(",
"f\"R library {lib} ({cur_version}) has been installed\"",
")",
"ret_val",
"=",
"True",
"elif",
"status",
".",
"strip",
"(",
")",
"==",
"\"VERSION_MISMATCH\"",
":",
"env",
".",
"logger",
".",
"error",
"(",
"f'R library {lib} ({cur_version}) does not satisfy version requirement ({\"/\".join(version)})!'",
")",
"else",
":",
"raise",
"RuntimeError",
"(",
"f\"This should not happen: {line}\"",
")",
"try",
":",
"os",
".",
"remove",
"(",
"output_file",
")",
"except",
"Exception",
":",
"pass",
"return",
"ret_val"
] |
https://github.com/vatlab/sos/blob/5f4dd45cc1b2f244354de2e23ea6f47011e2db31/src/sos/targets_r.py#L33-L197
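A condensed, standalone sketch of the constraint handling at the top of _install above (the real code also rewrites prefixed operators inside an explicit version list): split a spec such as "ggplot2>=3.0.0" into a package name and an R comparison expression.

def parse_spec(name):
    # two-character operators are tried before their one-character prefixes
    for opt in ("==", ">=", ">", "<=", "<", "!="):
        if opt in name:
            pkg, ver = [x.strip() for x in name.split(opt, 1)]
            return pkg, "(cur_version %s %r)" % (opt, ver)
    return name, "TRUE"  # no constraint: any installed version satisfies

print(parse_spec("ggplot2>=3.0.0"))  # -> ('ggplot2', "(cur_version >= '3.0.0')")
print(parse_spec("dplyr"))           # -> ('dplyr', 'TRUE')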
|
|
openshift/openshift-tools
|
1188778e728a6e4781acf728123e5b356380fe6f
|
ansible/roles/lib_oa_openshift/library/oc_label.py
|
python
|
Utils.create_tmpfile
|
(prefix='tmp')
|
Generates and returns a temporary file name
|
Generates and returns a temporary file name
|
[
"Generates",
"and",
"returns",
"a",
"temporary",
"file",
"name"
] |
def create_tmpfile(prefix='tmp'):
''' Generates and returns a temporary file name '''
with tempfile.NamedTemporaryFile(prefix=prefix, delete=False) as tmp:
return tmp.name
|
[
"def",
"create_tmpfile",
"(",
"prefix",
"=",
"'tmp'",
")",
":",
"with",
"tempfile",
".",
"NamedTemporaryFile",
"(",
"prefix",
"=",
"prefix",
",",
"delete",
"=",
"False",
")",
"as",
"tmp",
":",
"return",
"tmp",
".",
"name"
] |
https://github.com/openshift/openshift-tools/blob/1188778e728a6e4781acf728123e5b356380fe6f/ansible/roles/lib_oa_openshift/library/oc_label.py#L1224-L1228
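A usage sketch for create_tmpfile above, assuming the function and its tempfile import are in scope: delete=False means the file outlives the with-block, so the returned name stays valid and the caller cleans up.

import os

path = create_tmpfile(prefix="demo")
print(os.path.exists(path))  # -> True
os.remove(path)              # caller is responsible for cleanup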
|
||
adamcaudill/EquationGroupLeak
|
52fa871c89008566c27159bd48f2a8641260c984
|
Firewall/EXPLOITS/EXBA/scapy/packet.py
|
python
|
Packet.sprintf
|
(self, fmt, relax=1)
|
return s
|
sprintf(format, [relax=1]) -> str
where format is a string that can include directives. A directive begins and
ends by % and has the following format %[fmt[r],][cls[:nb].]field%.
fmt is a classic printf directive, "r" can be appended for raw substitution
(ex: IP.flags=0x18 instead of SA), nb is the number of the layer we want
(ex: for IP/IP packets, IP:2.src is the src of the upper IP layer).
Special case : "%.time%" is the creation time.
Ex : p.sprintf("%.time% %-15s,IP.src% -> %-15s,IP.dst% %IP.chksum% "
"%03xr,IP.proto% %r,TCP.flags%")
Moreover, the format string can include conditional statements. A conditional
statement looks like : {layer:string} where layer is a layer name, and string
is the string to insert in place of the condition if it is true, i.e. if layer
is present. If layer is preceded by a "!", the result is inverted. Conditions
can be nested. A valid statement can be :
p.sprintf("This is a{TCP: TCP}{UDP: UDP}{ICMP:n ICMP} packet")
p.sprintf("{IP:%IP.dst% {ICMP:%ICMP.type%}{TCP:%TCP.dport%}}")
A side effect is that, to obtain "{" and "}" characters, you must use
"%(" and "%)".
|
sprintf(format, [relax=1]) -> str
where format is a string that can include directives. A directive begins and
ends by % and has the following format %[fmt[r],][cls[:nb].]field%.
|
[
"sprintf",
"(",
"format",
"[",
"relax",
"=",
"1",
"]",
")",
"-",
">",
"str",
"where",
"format",
"is",
"a",
"string",
"that",
"can",
"include",
"directives",
".",
"A",
"directive",
"begins",
"and",
"ends",
"by",
"%",
"and",
"has",
"the",
"following",
"format",
"%",
"[",
"fmt",
"[",
"r",
"]",
"]",
"[",
"cls",
"[",
":",
"nb",
"]",
".",
"]",
"field%",
"."
] |
def sprintf(self, fmt, relax=1):
"""sprintf(format, [relax=1]) -> str
where format is a string that can include directives. A directive begins and
ends by % and has the following format %[fmt[r],][cls[:nb].]field%.
fmt is a classic printf directive, "r" can be appended for raw substitution
(ex: IP.flags=0x18 instead of SA), nb is the number of the layer we want
(ex: for IP/IP packets, IP:2.src is the src of the upper IP layer).
Special case : "%.time%" is the creation time.
Ex : p.sprintf("%.time% %-15s,IP.src% -> %-15s,IP.dst% %IP.chksum% "
"%03xr,IP.proto% %r,TCP.flags%")
Moreover, the format string can include conditional statements. A conditional
statement looks like : {layer:string} where layer is a layer name, and string
is the string to insert in place of the condition if it is true, i.e. if layer
is present. If layer is preceded by a "!", the result is inverted. Conditions
can be nested. A valid statement can be :
p.sprintf("This is a{TCP: TCP}{UDP: UDP}{ICMP:n ICMP} packet")
p.sprintf("{IP:%IP.dst% {ICMP:%ICMP.type%}{TCP:%TCP.dport%}}")
A side effect is that, to obtain "{" and "}" characters, you must use
"%(" and "%)".
"""
escape = { "%": "%",
"(": "{",
")": "}" }
# Evaluate conditions
while "{" in fmt:
i = fmt.rindex("{")
j = fmt[i+1:].index("}")
cond = fmt[i+1:i+j+1]
k = cond.find(":")
if k < 0:
raise Scapy_Exception("Bad condition in format string: [%s] (read sprintf doc!)"%cond)
cond,format = cond[:k],cond[k+1:]
res = False
if cond[0] == "!":
res = True
cond = cond[1:]
if self.haslayer(cond):
res = not res
if not res:
format = ""
fmt = fmt[:i]+format+fmt[i+j+2:]
# Evaluate directives
s = ""
while "%" in fmt:
i = fmt.index("%")
s += fmt[:i]
fmt = fmt[i+1:]
if fmt and fmt[0] in escape:
s += escape[fmt[0]]
fmt = fmt[1:]
continue
try:
i = fmt.index("%")
sfclsfld = fmt[:i]
fclsfld = sfclsfld.split(",")
if len(fclsfld) == 1:
f = "s"
clsfld = fclsfld[0]
elif len(fclsfld) == 2:
f,clsfld = fclsfld
else:
raise Scapy_Exception
if "." in clsfld:
cls,fld = clsfld.split(".")
else:
cls = self.__class__.__name__
fld = clsfld
num = 1
if ":" in cls:
cls,num = cls.split(":")
num = int(num)
fmt = fmt[i+1:]
except:
raise Scapy_Exception("Bad format string [%%%s%s]" % (fmt[:25], fmt[25:] and "..."))
else:
if fld == "time":
val = time.strftime("%H:%M:%S.%%06i", time.localtime(self.time)) % int((self.time-int(self.time))*1000000)
elif cls == self.__class__.__name__ and hasattr(self, fld):
if num > 1:
val = self.payload.sprintf("%%%s,%s:%s.%s%%" % (f,cls,num-1,fld), relax)
f = "s"
elif f[-1] == "r": # Raw field value
val = getattr(self,fld)
f = f[:-1]
if not f:
f = "s"
else:
val = getattr(self,fld)
if fld in self.fieldtype:
val = self.fieldtype[fld].i2repr(self,val)
else:
val = self.payload.sprintf("%%%s%%" % sfclsfld, relax)
f = "s"
s += ("%"+f) % val
s += fmt
return s
|
[
"def",
"sprintf",
"(",
"self",
",",
"fmt",
",",
"relax",
"=",
"1",
")",
":",
"escape",
"=",
"{",
"\"%\"",
":",
"\"%\"",
",",
"\"(\"",
":",
"\"{\"",
",",
"\")\"",
":",
"\"}\"",
"}",
"# Evaluate conditions ",
"while",
"\"{\"",
"in",
"fmt",
":",
"i",
"=",
"fmt",
".",
"rindex",
"(",
"\"{\"",
")",
"j",
"=",
"fmt",
"[",
"i",
"+",
"1",
":",
"]",
".",
"index",
"(",
"\"}\"",
")",
"cond",
"=",
"fmt",
"[",
"i",
"+",
"1",
":",
"i",
"+",
"j",
"+",
"1",
"]",
"k",
"=",
"cond",
".",
"find",
"(",
"\":\"",
")",
"if",
"k",
"<",
"0",
":",
"raise",
"Scapy_Exception",
"(",
"\"Bad condition in format string: [%s] (read sprintf doc!)\"",
"%",
"cond",
")",
"cond",
",",
"format",
"=",
"cond",
"[",
":",
"k",
"]",
",",
"cond",
"[",
"k",
"+",
"1",
":",
"]",
"res",
"=",
"False",
"if",
"cond",
"[",
"0",
"]",
"==",
"\"!\"",
":",
"res",
"=",
"True",
"cond",
"=",
"cond",
"[",
"1",
":",
"]",
"if",
"self",
".",
"haslayer",
"(",
"cond",
")",
":",
"res",
"=",
"not",
"res",
"if",
"not",
"res",
":",
"format",
"=",
"\"\"",
"fmt",
"=",
"fmt",
"[",
":",
"i",
"]",
"+",
"format",
"+",
"fmt",
"[",
"i",
"+",
"j",
"+",
"2",
":",
"]",
"# Evaluate directives",
"s",
"=",
"\"\"",
"while",
"\"%\"",
"in",
"fmt",
":",
"i",
"=",
"fmt",
".",
"index",
"(",
"\"%\"",
")",
"s",
"+=",
"fmt",
"[",
":",
"i",
"]",
"fmt",
"=",
"fmt",
"[",
"i",
"+",
"1",
":",
"]",
"if",
"fmt",
"and",
"fmt",
"[",
"0",
"]",
"in",
"escape",
":",
"s",
"+=",
"escape",
"[",
"fmt",
"[",
"0",
"]",
"]",
"fmt",
"=",
"fmt",
"[",
"1",
":",
"]",
"continue",
"try",
":",
"i",
"=",
"fmt",
".",
"index",
"(",
"\"%\"",
")",
"sfclsfld",
"=",
"fmt",
"[",
":",
"i",
"]",
"fclsfld",
"=",
"sfclsfld",
".",
"split",
"(",
"\",\"",
")",
"if",
"len",
"(",
"fclsfld",
")",
"==",
"1",
":",
"f",
"=",
"\"s\"",
"clsfld",
"=",
"fclsfld",
"[",
"0",
"]",
"elif",
"len",
"(",
"fclsfld",
")",
"==",
"2",
":",
"f",
",",
"clsfld",
"=",
"fclsfld",
"else",
":",
"raise",
"Scapy_Exception",
"if",
"\".\"",
"in",
"clsfld",
":",
"cls",
",",
"fld",
"=",
"clsfld",
".",
"split",
"(",
"\".\"",
")",
"else",
":",
"cls",
"=",
"self",
".",
"__class__",
".",
"__name__",
"fld",
"=",
"clsfld",
"num",
"=",
"1",
"if",
"\":\"",
"in",
"cls",
":",
"cls",
",",
"num",
"=",
"cls",
".",
"split",
"(",
"\":\"",
")",
"num",
"=",
"int",
"(",
"num",
")",
"fmt",
"=",
"fmt",
"[",
"i",
"+",
"1",
":",
"]",
"except",
":",
"raise",
"Scapy_Exception",
"(",
"\"Bad format string [%%%s%s]\"",
"%",
"(",
"fmt",
"[",
":",
"25",
"]",
",",
"fmt",
"[",
"25",
":",
"]",
"and",
"\"...\"",
")",
")",
"else",
":",
"if",
"fld",
"==",
"\"time\"",
":",
"val",
"=",
"time",
".",
"strftime",
"(",
"\"%H:%M:%S.%%06i\"",
",",
"time",
".",
"localtime",
"(",
"self",
".",
"time",
")",
")",
"%",
"int",
"(",
"(",
"self",
".",
"time",
"-",
"int",
"(",
"self",
".",
"time",
")",
")",
"*",
"1000000",
")",
"elif",
"cls",
"==",
"self",
".",
"__class__",
".",
"__name__",
"and",
"hasattr",
"(",
"self",
",",
"fld",
")",
":",
"if",
"num",
">",
"1",
":",
"val",
"=",
"self",
".",
"payload",
".",
"sprintf",
"(",
"\"%%%s,%s:%s.%s%%\"",
"%",
"(",
"f",
",",
"cls",
",",
"num",
"-",
"1",
",",
"fld",
")",
",",
"relax",
")",
"f",
"=",
"\"s\"",
"elif",
"f",
"[",
"-",
"1",
"]",
"==",
"\"r\"",
":",
"# Raw field value",
"val",
"=",
"getattr",
"(",
"self",
",",
"fld",
")",
"f",
"=",
"f",
"[",
":",
"-",
"1",
"]",
"if",
"not",
"f",
":",
"f",
"=",
"\"s\"",
"else",
":",
"val",
"=",
"getattr",
"(",
"self",
",",
"fld",
")",
"if",
"fld",
"in",
"self",
".",
"fieldtype",
":",
"val",
"=",
"self",
".",
"fieldtype",
"[",
"fld",
"]",
".",
"i2repr",
"(",
"self",
",",
"val",
")",
"else",
":",
"val",
"=",
"self",
".",
"payload",
".",
"sprintf",
"(",
"\"%%%s%%\"",
"%",
"sfclsfld",
",",
"relax",
")",
"f",
"=",
"\"s\"",
"s",
"+=",
"(",
"\"%\"",
"+",
"f",
")",
"%",
"val",
"s",
"+=",
"fmt",
"return",
"s"
] |
https://github.com/adamcaudill/EquationGroupLeak/blob/52fa871c89008566c27159bd48f2a8641260c984/Firewall/EXPLOITS/EXBA/scapy/packet.py#L813-L916
|
|
devitocodes/devito
|
6abd441e3f5f091775ad332be6b95e017b8cbd16
|
examples/misc/linalg.py
|
python
|
cli_chain_contractions
|
(mat_shape, optimize, **kwargs)
|
``AB + AC = D, DE = F``.
|
``AB + AC = D, DE = F``.
|
[
"AB",
"+",
"AC",
"=",
"D",
"DE",
"=",
"F",
"."
] |
def cli_chain_contractions(mat_shape, optimize, **kwargs):
"""``AB + AC = D, DE = F``."""
i, j, k, l = dimensions('i j k l')
A = Function(name='A', shape=mat_shape, dimensions=(i, j))
B = Function(name='B', shape=mat_shape, dimensions=(j, k))
C = Function(name='C', shape=mat_shape, dimensions=(j, k))
D = Function(name='D', shape=mat_shape, dimensions=(i, k))
E = Function(name='E', shape=mat_shape, dimensions=(k, l))
F = Function(name='F', shape=mat_shape, dimensions=(i, l))
chain_contractions(A, B, C, D, E, F, optimize)
|
[
"def",
"cli_chain_contractions",
"(",
"mat_shape",
",",
"optimize",
",",
"*",
"*",
"kwargs",
")",
":",
"i",
",",
"j",
",",
"k",
",",
"l",
"=",
"dimensions",
"(",
"'i j k l'",
")",
"A",
"=",
"Function",
"(",
"name",
"=",
"'A'",
",",
"shape",
"=",
"mat_shape",
",",
"dimensions",
"=",
"(",
"i",
",",
"j",
")",
")",
"B",
"=",
"Function",
"(",
"name",
"=",
"'B'",
",",
"shape",
"=",
"mat_shape",
",",
"dimensions",
"=",
"(",
"j",
",",
"k",
")",
")",
"C",
"=",
"Function",
"(",
"name",
"=",
"'C'",
",",
"shape",
"=",
"mat_shape",
",",
"dimensions",
"=",
"(",
"j",
",",
"k",
")",
")",
"D",
"=",
"Function",
"(",
"name",
"=",
"'D'",
",",
"shape",
"=",
"mat_shape",
",",
"dimensions",
"=",
"(",
"i",
",",
"k",
")",
")",
"E",
"=",
"Function",
"(",
"name",
"=",
"'E'",
",",
"shape",
"=",
"mat_shape",
",",
"dimensions",
"=",
"(",
"k",
",",
"l",
")",
")",
"F",
"=",
"Function",
"(",
"name",
"=",
"'F'",
",",
"shape",
"=",
"mat_shape",
",",
"dimensions",
"=",
"(",
"i",
",",
"l",
")",
")",
"chain_contractions",
"(",
"A",
",",
"B",
",",
"C",
",",
"D",
",",
"E",
",",
"F",
",",
"optimize",
")"
] |
https://github.com/devitocodes/devito/blob/6abd441e3f5f091775ad332be6b95e017b8cbd16/examples/misc/linalg.py#L90-L99
|
||
holzschu/Carnets
|
44effb10ddfc6aa5c8b0687582a724ba82c6b547
|
Library/lib/python3.7/site-packages/sympy/physics/continuum_mechanics/beam.py
|
python
|
Beam.bending_moment
|
(self)
|
return integrate(self.shear_force(), x)
|
Returns a Singularity Function expression which represents
the bending moment curve of the Beam object.
Examples
========
There is a beam of length 30 meters. A moment of magnitude 120 Nm is
applied in the clockwise direction at the end of the beam. A point load
of magnitude 8 N is applied from the top of the beam at the starting
point. There are two simple supports below the beam. One at the end
and another one at a distance of 10 meters from the start. The
deflection is restricted at both the supports.
Using the sign convention of upward forces and clockwise moment
being positive.
>>> from sympy.physics.continuum_mechanics.beam import Beam
>>> from sympy import symbols
>>> E, I = symbols('E, I')
>>> R1, R2 = symbols('R1, R2')
>>> b = Beam(30, E, I)
>>> b.apply_load(-8, 0, -1)
>>> b.apply_load(R1, 10, -1)
>>> b.apply_load(R2, 30, -1)
>>> b.apply_load(120, 30, -2)
>>> b.bc_deflection = [(10, 0), (30, 0)]
>>> b.solve_for_reaction_loads(R1, R2)
>>> b.bending_moment()
-8*SingularityFunction(x, 0, 1) + 6*SingularityFunction(x, 10, 1) + 120*SingularityFunction(x, 30, 0) + 2*SingularityFunction(x, 30, 1)
|
Returns a Singularity Function expression which represents
the bending moment curve of the Beam object.
|
[
"Returns",
"a",
"Singularity",
"Function",
"expression",
"which",
"represents",
"the",
"bending",
"moment",
"curve",
"of",
"the",
"Beam",
"object",
"."
] |
def bending_moment(self):
"""
Returns a Singularity Function expression which represents
the bending moment curve of the Beam object.
Examples
========
There is a beam of length 30 meters. A moment of magnitude 120 Nm is
applied in the clockwise direction at the end of the beam. A point load
of magnitude 8 N is applied from the top of the beam at the starting
point. There are two simple supports below the beam. One at the end
and another one at a distance of 10 meters from the start. The
deflection is restricted at both the supports.
Using the sign convention of upward forces and clockwise moment
being positive.
>>> from sympy.physics.continuum_mechanics.beam import Beam
>>> from sympy import symbols
>>> E, I = symbols('E, I')
>>> R1, R2 = symbols('R1, R2')
>>> b = Beam(30, E, I)
>>> b.apply_load(-8, 0, -1)
>>> b.apply_load(R1, 10, -1)
>>> b.apply_load(R2, 30, -1)
>>> b.apply_load(120, 30, -2)
>>> b.bc_deflection = [(10, 0), (30, 0)]
>>> b.solve_for_reaction_loads(R1, R2)
>>> b.bending_moment()
-8*SingularityFunction(x, 0, 1) + 6*SingularityFunction(x, 10, 1) + 120*SingularityFunction(x, 30, 0) + 2*SingularityFunction(x, 30, 1)
"""
x = self.variable
return integrate(self.shear_force(), x)
|
[
"def",
"bending_moment",
"(",
"self",
")",
":",
"x",
"=",
"self",
".",
"variable",
"return",
"integrate",
"(",
"self",
".",
"shear_force",
"(",
")",
",",
"x",
")"
] |
https://github.com/holzschu/Carnets/blob/44effb10ddfc6aa5c8b0687582a724ba82c6b547/Library/lib/python3.7/site-packages/sympy/physics/continuum_mechanics/beam.py#L870-L902
|
|
ambujraj/hacktoberfest2018
|
53df2cac8b3404261131a873352ec4f2ffa3544d
|
MAC_changer/venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/requests/utils.py
|
python
|
get_environ_proxies
|
(url, no_proxy=None)
|
Return a dict of environment proxies.
:rtype: dict
|
Return a dict of environment proxies.
|
[
"Return",
"a",
"dict",
"of",
"environment",
"proxies",
"."
] |
def get_environ_proxies(url, no_proxy=None):
"""
Return a dict of environment proxies.
:rtype: dict
"""
if should_bypass_proxies(url, no_proxy=no_proxy):
return {}
else:
return getproxies()
|
[
"def",
"get_environ_proxies",
"(",
"url",
",",
"no_proxy",
"=",
"None",
")",
":",
"if",
"should_bypass_proxies",
"(",
"url",
",",
"no_proxy",
"=",
"no_proxy",
")",
":",
"return",
"{",
"}",
"else",
":",
"return",
"getproxies",
"(",
")"
] |
https://github.com/ambujraj/hacktoberfest2018/blob/53df2cac8b3404261131a873352ec4f2ffa3544d/MAC_changer/venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/requests/utils.py#L686-L695
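A usage sketch for get_environ_proxies above, assuming the surrounding requests.utils module is in scope; the proxy URL is hypothetical and the exact proxy map depends on the caller's environment.

import os

os.environ["HTTP_PROXY"] = "http://proxy.example:3128"  # hypothetical proxy
print(get_environ_proxies("http://example.com/"))
# -> {'http': 'http://proxy.example:3128', ...}
print(get_environ_proxies("http://internal.local/", no_proxy="internal.local"))
# -> {} (host matches no_proxy, so proxies are bypassed)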
|
||
XX-net/XX-Net
|
a9898cfcf0084195fb7e69b6bc834e59aecdf14f
|
python3.8.2/Lib/site-packages/pip/_vendor/requests/utils.py
|
python
|
parse_list_header
|
(value)
|
return result
|
Parse lists as described by RFC 2068 Section 2.
In particular, parse comma-separated lists where the elements of
the list may include quoted-strings. A quoted-string could
contain a comma. A non-quoted string could have quotes in the
middle. Quotes are removed automatically after parsing.
It basically works like :func:`parse_set_header` just that items
may appear multiple times and case sensitivity is preserved.
The return value is a standard :class:`list`:
>>> parse_list_header('token, "quoted value"')
['token', 'quoted value']
To create a header from the :class:`list` again, use the
:func:`dump_header` function.
:param value: a string with a list header.
:return: :class:`list`
:rtype: list
|
Parse lists as described by RFC 2068 Section 2.
|
[
"Parse",
"lists",
"as",
"described",
"by",
"RFC",
"2068",
"Section",
"2",
"."
] |
def parse_list_header(value):
"""Parse lists as described by RFC 2068 Section 2.
In particular, parse comma-separated lists where the elements of
the list may include quoted-strings. A quoted-string could
contain a comma. A non-quoted string could have quotes in the
middle. Quotes are removed automatically after parsing.
It basically works like :func:`parse_set_header` just that items
may appear multiple times and case sensitivity is preserved.
The return value is a standard :class:`list`:
>>> parse_list_header('token, "quoted value"')
['token', 'quoted value']
To create a header from the :class:`list` again, use the
:func:`dump_header` function.
:param value: a string with a list header.
:return: :class:`list`
:rtype: list
"""
result = []
for item in _parse_list_header(value):
if item[:1] == item[-1:] == '"':
item = unquote_header_value(item[1:-1])
result.append(item)
return result
|
[
"def",
"parse_list_header",
"(",
"value",
")",
":",
"result",
"=",
"[",
"]",
"for",
"item",
"in",
"_parse_list_header",
"(",
"value",
")",
":",
"if",
"item",
"[",
":",
"1",
"]",
"==",
"item",
"[",
"-",
"1",
":",
"]",
"==",
"'\"'",
":",
"item",
"=",
"unquote_header_value",
"(",
"item",
"[",
"1",
":",
"-",
"1",
"]",
")",
"result",
".",
"append",
"(",
"item",
")",
"return",
"result"
] |
https://github.com/XX-net/XX-Net/blob/a9898cfcf0084195fb7e69b6bc834e59aecdf14f/python3.8.2/Lib/site-packages/pip/_vendor/requests/utils.py#L312-L340
|
|
bruderstein/PythonScript
|
df9f7071ddf3a079e3a301b9b53a6dc78cf1208f
|
PythonLib/min/ipaddress.py
|
python
|
IPv4Interface.ip
|
(self)
|
return IPv4Address(self._ip)
|
[] |
def ip(self):
return IPv4Address(self._ip)
|
[
"def",
"ip",
"(",
"self",
")",
":",
"return",
"IPv4Address",
"(",
"self",
".",
"_ip",
")"
] |
https://github.com/bruderstein/PythonScript/blob/df9f7071ddf3a079e3a301b9b53a6dc78cf1208f/PythonLib/min/ipaddress.py#L1434-L1435
|
|||
cloudant/bigcouch
|
8e9c1ec0ed1676ff152f10658f5c83a1a91fa8fe
|
couchjs/scons/scons-local-2.0.1/SCons/Builder.py
|
python
|
BuilderBase.subst_src_suffixes
|
(self, env)
|
return suffixes
|
The suffix list may contain construction variable expansions,
so we have to evaluate the individual strings. To avoid doing
this over and over, we memoize the results for each construction
environment.
|
The suffix list may contain construction variable expansions,
so we have to evaluate the individual strings. To avoid doing
this over and over, we memoize the results for each construction
environment.
|
[
"The",
"suffix",
"list",
"may",
"contain",
"construction",
"variable",
"expansions",
"so",
"we",
"have",
"to",
"evaluate",
"the",
"individual",
"strings",
".",
"To",
"avoid",
"doing",
"this",
"over",
"and",
"over",
"we",
"memoize",
"the",
"results",
"for",
"each",
"construction",
"environment",
"."
] |
def subst_src_suffixes(self, env):
"""
The suffix list may contain construction variable expansions,
so we have to evaluate the individual strings. To avoid doing
this over and over, we memoize the results for each construction
environment.
"""
memo_key = id(env)
try:
memo_dict = self._memo['subst_src_suffixes']
except KeyError:
memo_dict = {}
self._memo['subst_src_suffixes'] = memo_dict
else:
try:
return memo_dict[memo_key]
except KeyError:
pass
suffixes = [env.subst(x) for x in self.src_suffix]
memo_dict[memo_key] = suffixes
return suffixes
|
[
"def",
"subst_src_suffixes",
"(",
"self",
",",
"env",
")",
":",
"memo_key",
"=",
"id",
"(",
"env",
")",
"try",
":",
"memo_dict",
"=",
"self",
".",
"_memo",
"[",
"'subst_src_suffixes'",
"]",
"except",
"KeyError",
":",
"memo_dict",
"=",
"{",
"}",
"self",
".",
"_memo",
"[",
"'subst_src_suffixes'",
"]",
"=",
"memo_dict",
"else",
":",
"try",
":",
"return",
"memo_dict",
"[",
"memo_key",
"]",
"except",
"KeyError",
":",
"pass",
"suffixes",
"=",
"[",
"env",
".",
"subst",
"(",
"x",
")",
"for",
"x",
"in",
"self",
".",
"src_suffix",
"]",
"memo_dict",
"[",
"memo_key",
"]",
"=",
"suffixes",
"return",
"suffixes"
] |
https://github.com/cloudant/bigcouch/blob/8e9c1ec0ed1676ff152f10658f5c83a1a91fa8fe/couchjs/scons/scons-local-2.0.1/SCons/Builder.py#L801-L821
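A standalone sketch of the memoization pattern in subst_src_suffixes above: cache results keyed by id(env) so the substitution runs once per construction environment; env.subst is stood in for by a simple string replace.

_memo = {}

def subst_suffixes(env, suffixes):
    memo_key = id(env)  # one cache slot per environment object
    if memo_key in _memo:
        return _memo[memo_key]
    result = [s.replace("$X", env.get("X", "")) for s in suffixes]  # stand-in for env.subst
    _memo[memo_key] = result
    return result

env = {"X": "c"}
print(subst_suffixes(env, [".$X", ".h"]))  # computed -> ['.c', '.h']
print(subst_suffixes(env, [".$X", ".h"]))  # served from the cache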
|
|
savon-noir/python-libnmap
|
8f442747a7a16969309d6f7653ad1b13a3a99bae
|
libnmap/objects/report.py
|
python
|
NmapReport.hosts_up
|
(self)
|
return rval
|
Accessor returning the number of hosts detected
as 'up' during the scan.
:return: integer (0 >= or -1)
|
Accessor returning the number of hosts detected
as 'up' during the scan.
|
[
"Accessor",
"returning",
"the",
"numer",
"of",
"host",
"detected",
"as",
"up",
"during",
"the",
"scan",
"."
] |
def hosts_up(self):
"""
Accessor returning the number of hosts detected
as 'up' during the scan.
:return: integer (0 >= or -1)
"""
rval = -1
try:
s_up = self._runstats["hosts"]["up"]
rval = int(s_up)
except (KeyError, TypeError, ValueError):
rval = -1
return rval
|
[
"def",
"hosts_up",
"(",
"self",
")",
":",
"rval",
"=",
"-",
"1",
"try",
":",
"s_up",
"=",
"self",
".",
"_runstats",
"[",
"\"hosts\"",
"]",
"[",
"\"up\"",
"]",
"rval",
"=",
"int",
"(",
"s_up",
")",
"except",
"(",
"KeyError",
",",
"TypeError",
",",
"ValueError",
")",
":",
"rval",
"=",
"-",
"1",
"return",
"rval"
] |
https://github.com/savon-noir/python-libnmap/blob/8f442747a7a16969309d6f7653ad1b13a3a99bae/libnmap/objects/report.py#L255-L268
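A standalone sketch of the defensive conversion in hosts_up above: missing keys, None containers and non-numeric strings all collapse to the -1 sentinel.

def safe_hosts_up(runstats):
    try:
        return int(runstats["hosts"]["up"])
    except (KeyError, TypeError, ValueError):
        return -1

print(safe_hosts_up({"hosts": {"up": "3"}}))  # -> 3
print(safe_hosts_up({}))                      # -> -1 (KeyError)
print(safe_hosts_up({"hosts": None}))         # -> -1 (TypeError)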
|
|
openstack/keystone
|
771c943ad2116193e7bb118c74993c829d93bd71
|
keystone/api/s3tokens.py
|
python
|
_calculate_signature_v4
|
(string_to_sign, secret_key)
|
return signature.hexdigest()
|
Calculate a v4 signature.
:param bytes string_to_sign: String that contains request params and
is used to calculate the signature of the request
:param text secret_key: Second auth key of EC2 account that is used to
sign requests
|
Calculate a v4 signature.
|
[
"Calculate",
"a",
"v4",
"signature",
"."
] |
def _calculate_signature_v4(string_to_sign, secret_key):
"""Calculate a v4 signature.
:param bytes string_to_sign: String that contains request params and
is used to calculate the signature of the request
:param text secret_key: Second auth key of EC2 account that is used to
sign requests
"""
parts = string_to_sign.split(b'\n')
if len(parts) != 4 or parts[0] != b'AWS4-HMAC-SHA256':
raise exception.Unauthorized(message=_('Invalid EC2 signature.'))
scope = parts[2].split(b'/')
if len(scope) != 4 or scope[3] != b'aws4_request':
raise exception.Unauthorized(message=_('Invalid EC2 signature.'))
allowed_services = [b's3', b'iam', b'sts']
if scope[2] not in allowed_services:
raise exception.Unauthorized(message=_('Invalid EC2 signature.'))
def _sign(key, msg):
return hmac.new(key, msg, hashlib.sha256).digest()
signed = _sign(('AWS4' + secret_key).encode('utf-8'), scope[0])
signed = _sign(signed, scope[1])
signed = _sign(signed, scope[2])
signed = _sign(signed, b'aws4_request')
signature = hmac.new(signed, string_to_sign, hashlib.sha256)
return signature.hexdigest()
|
[
"def",
"_calculate_signature_v4",
"(",
"string_to_sign",
",",
"secret_key",
")",
":",
"parts",
"=",
"string_to_sign",
".",
"split",
"(",
"b'\\n'",
")",
"if",
"len",
"(",
"parts",
")",
"!=",
"4",
"or",
"parts",
"[",
"0",
"]",
"!=",
"b'AWS4-HMAC-SHA256'",
":",
"raise",
"exception",
".",
"Unauthorized",
"(",
"message",
"=",
"_",
"(",
"'Invalid EC2 signature.'",
")",
")",
"scope",
"=",
"parts",
"[",
"2",
"]",
".",
"split",
"(",
"b'/'",
")",
"if",
"len",
"(",
"scope",
")",
"!=",
"4",
"or",
"scope",
"[",
"3",
"]",
"!=",
"b'aws4_request'",
":",
"raise",
"exception",
".",
"Unauthorized",
"(",
"message",
"=",
"_",
"(",
"'Invalid EC2 signature.'",
")",
")",
"allowed_services",
"=",
"[",
"b's3'",
",",
"b'iam'",
",",
"b'sts'",
"]",
"if",
"scope",
"[",
"2",
"]",
"not",
"in",
"allowed_services",
":",
"raise",
"exception",
".",
"Unauthorized",
"(",
"message",
"=",
"_",
"(",
"'Invalid EC2 signature.'",
")",
")",
"def",
"_sign",
"(",
"key",
",",
"msg",
")",
":",
"return",
"hmac",
".",
"new",
"(",
"key",
",",
"msg",
",",
"hashlib",
".",
"sha256",
")",
".",
"digest",
"(",
")",
"signed",
"=",
"_sign",
"(",
"(",
"'AWS4'",
"+",
"secret_key",
")",
".",
"encode",
"(",
"'utf-8'",
")",
",",
"scope",
"[",
"0",
"]",
")",
"signed",
"=",
"_sign",
"(",
"signed",
",",
"scope",
"[",
"1",
"]",
")",
"signed",
"=",
"_sign",
"(",
"signed",
",",
"scope",
"[",
"2",
"]",
")",
"signed",
"=",
"_sign",
"(",
"signed",
",",
"b'aws4_request'",
")",
"signature",
"=",
"hmac",
".",
"new",
"(",
"signed",
",",
"string_to_sign",
",",
"hashlib",
".",
"sha256",
")",
"return",
"signature",
".",
"hexdigest",
"(",
")"
] |
https://github.com/openstack/keystone/blob/771c943ad2116193e7bb118c74993c829d93bd71/keystone/api/s3tokens.py#L47-L74
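A standalone sketch of the AWS4 key-derivation chain inside _calculate_signature_v4 above: each HMAC step folds the next scope component into the signing key. The secret and scope values are hypothetical.

import hashlib
import hmac

def _sign(key, msg):
    return hmac.new(key, msg, hashlib.sha256).digest()

secret_key = "wJalrXUtnFEMI"  # hypothetical EC2 secret
date, region, service = b"20150830", b"us-east-1", b"s3"
signed = _sign(("AWS4" + secret_key).encode("utf-8"), date)
signed = _sign(signed, region)
signed = _sign(signed, service)
signing_key = _sign(signed, b"aws4_request")
print(signing_key.hex())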
|
|
IronLanguages/main
|
a949455434b1fda8c783289e897e78a9a0caabb5
|
External.LCA_RESTRICTED/Languages/CPython/27/Lib/multiprocessing/process.py
|
python
|
Process.is_alive
|
(self)
|
return self._popen.returncode is None
|
Return whether process is alive
|
Return whether process is alive
|
[
"Return",
"whether",
"process",
"is",
"alive"
] |
def is_alive(self):
'''
Return whether process is alive
'''
if self is _current_process:
return True
assert self._parent_pid == os.getpid(), 'can only test a child process'
if self._popen is None:
return False
self._popen.poll()
return self._popen.returncode is None
|
[
"def",
"is_alive",
"(",
"self",
")",
":",
"if",
"self",
"is",
"_current_process",
":",
"return",
"True",
"assert",
"self",
".",
"_parent_pid",
"==",
"os",
".",
"getpid",
"(",
")",
",",
"'can only test a child process'",
"if",
"self",
".",
"_popen",
"is",
"None",
":",
"return",
"False",
"self",
".",
"_popen",
".",
"poll",
"(",
")",
"return",
"self",
".",
"_popen",
".",
"returncode",
"is",
"None"
] |
https://github.com/IronLanguages/main/blob/a949455434b1fda8c783289e897e78a9a0caabb5/External.LCA_RESTRICTED/Languages/CPython/27/Lib/multiprocessing/process.py#L149-L159
|
|
securesystemslab/zippy
|
ff0e84ac99442c2c55fe1d285332cfd4e185e089
|
zippy/benchmarks/src/benchmarks/whoosh/src/whoosh/sorting.py
|
python
|
MultiFacet.add_score
|
(self)
|
return self
|
[] |
def add_score(self):
self.facets.append(ScoreFacet())
return self
|
[
"def",
"add_score",
"(",
"self",
")",
":",
"self",
".",
"facets",
".",
"append",
"(",
"ScoreFacet",
"(",
")",
")",
"return",
"self"
] |
https://github.com/securesystemslab/zippy/blob/ff0e84ac99442c2c55fe1d285332cfd4e185e089/zippy/benchmarks/src/benchmarks/whoosh/src/whoosh/sorting.py#L817-L819
|
|||
lonePatient/BERT-NER-Pytorch
|
bc9276185539c59bbd3d310ea808fea2b618fedf
|
tools/convert_albert_tf_checkpoint_to_pytorch.py
|
python
|
convert_tf_checkpoint_to_pytorch
|
(tf_checkpoint_path, bert_config_file, pytorch_dump_path)
|
[] |
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, bert_config_file, pytorch_dump_path):
# Initialise PyTorch model
config = AlbertConfig.from_pretrained(bert_config_file)
# print("Building PyTorch model from configuration: {}".format(str(config)))
model = AlbertForPreTraining(config)
# Load weights from tf checkpoint
load_tf_weights_in_albert(model, config, tf_checkpoint_path)
# Save pytorch-model
print("Save PyTorch model to {}".format(pytorch_dump_path))
torch.save(model.state_dict(), pytorch_dump_path)
|
[
"def",
"convert_tf_checkpoint_to_pytorch",
"(",
"tf_checkpoint_path",
",",
"bert_config_file",
",",
"pytorch_dump_path",
")",
":",
"# Initialise PyTorch model",
"config",
"=",
"AlbertConfig",
".",
"from_pretrained",
"(",
"bert_config_file",
")",
"# print(\"Building PyTorch model from configuration: {}\".format(str(config)))",
"model",
"=",
"AlbertForPreTraining",
"(",
"config",
")",
"# Load weights from tf checkpoint",
"load_tf_weights_in_albert",
"(",
"model",
",",
"config",
",",
"tf_checkpoint_path",
")",
"# Save pytorch-model",
"print",
"(",
"\"Save PyTorch model to {}\"",
".",
"format",
"(",
"pytorch_dump_path",
")",
")",
"torch",
".",
"save",
"(",
"model",
".",
"state_dict",
"(",
")",
",",
"pytorch_dump_path",
")"
] |
https://github.com/lonePatient/BERT-NER-Pytorch/blob/bc9276185539c59bbd3d310ea808fea2b618fedf/tools/convert_albert_tf_checkpoint_to_pytorch.py#L14-L24
|
||||
django/django
|
0a17666045de6739ae1c2ac695041823d5f827f7
|
django/contrib/gis/gdal/geometries.py
|
python
|
OGRGeometry.intersection
|
(self, other)
|
return self._geomgen(capi.geom_intersection, other)
|
Return a new geometry consisting of the region of intersection of this
geometry and the other.
|
Return a new geometry consisting of the region of intersection of this
geometry and the other.
|
[
"Return",
"a",
"new",
"geometry",
"consisting",
"of",
"the",
"region",
"of",
"intersection",
"of",
"this",
"geometry",
"and",
"the",
"other",
"."
] |
def intersection(self, other):
"""
Return a new geometry consisting of the region of intersection of this
geometry and the other.
"""
return self._geomgen(capi.geom_intersection, other)
|
[
"def",
"intersection",
"(",
"self",
",",
"other",
")",
":",
"return",
"self",
".",
"_geomgen",
"(",
"capi",
".",
"geom_intersection",
",",
"other",
")"
] |
https://github.com/django/django/blob/0a17666045de6739ae1c2ac695041823d5f827f7/django/contrib/gis/gdal/geometries.py#L479-L484
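A hedged usage sketch for intersection() (requires GDAL to be installed; the two WKT squares are arbitrary example geometries):

from django.contrib.gis.gdal import OGRGeometry

a = OGRGeometry('POLYGON((0 0, 4 0, 4 4, 0 4, 0 0))')
b = OGRGeometry('POLYGON((2 2, 6 2, 6 6, 2 6, 2 2))')
overlap = a.intersection(b)  # new OGRGeometry for the shared region
print(overlap.area)          # 4.0 -- the 2x2 overlap of the two squares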
|
|
n374/dmusic-plugin-NeteaseCloudMusic
|
503701ce6c2c4d94f1fcd40a158c7a0077861793
|
neteasecloudmusic/netease_music_player.py
|
python
|
MusicPlayer.save_lyric
|
(self, data, sid, name, artist)
|
return lrc_path
|
[] |
def save_lyric(self, data, sid, name, artist):
save_path = os.path.expanduser(config.get("lyrics", "save_lrc_path"))
if not os.path.exists(save_path):
utils.makedirs(save_path)
try:
lrc = data['lrc']['lyric']
except:
lrc = "[00:00.00] "+name+' - '+artist+"\n[99:59:99] No lyric found\n"
# deepin music 好像不支持tlyric, tlyric应该是英文歌词的翻译
# 最好能把英文和翻译合并起来
#try:
#tlyric = data['tlyric']['lyric']
#except:
#tlyric = None
#try:
#klyric = data['klyric']['lyric']
#except:
#klyric = None
#lrc_content = klyric or lrc or tlyric
lrc_content = lrc
lrc_path = os.path.join(save_path, str(sid)+'.lrc')
if not os.path.exists(lrc_path) and lrc_content:
with open(lrc_path, 'w') as f:
f.write(str(lrc_content))
return lrc_path
|
[
"def",
"save_lyric",
"(",
"self",
",",
"data",
",",
"sid",
",",
"name",
",",
"artist",
")",
":",
"save_path",
"=",
"os",
".",
"path",
".",
"expanduser",
"(",
"config",
".",
"get",
"(",
"\"lyrics\"",
",",
"\"save_lrc_path\"",
")",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"save_path",
")",
":",
"utils",
".",
"makedirs",
"(",
"save_path",
")",
"try",
":",
"lrc",
"=",
"data",
"[",
"'lrc'",
"]",
"[",
"'lyric'",
"]",
"except",
":",
"lrc",
"=",
"\"[00:00.00] \"",
"+",
"name",
"+",
"' - '",
"+",
"artist",
"+",
"\"\\n[99:59:99] No lyric found\\n\"",
"# deepin music 好像不支持tlyric, tlyric应该是英文歌词的翻译",
"# 最好能把英文和翻译合并起来",
"#try:",
"#tlyric = data['tlyric']['lyric']",
"#except:",
"#tlyric = None",
"#try:",
"#klyric = data['klyric']['lyric']",
"#except:",
"#klyric = None",
"#lrc_content = klyric or lrc or tlyric",
"lrc_content",
"=",
"lrc",
"lrc_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"save_path",
",",
"str",
"(",
"sid",
")",
"+",
"'.lrc'",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"lrc_path",
")",
"and",
"lrc_content",
":",
"with",
"open",
"(",
"lrc_path",
",",
"'w'",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"str",
"(",
"lrc_content",
")",
")",
"return",
"lrc_path"
] |
https://github.com/n374/dmusic-plugin-NeteaseCloudMusic/blob/503701ce6c2c4d94f1fcd40a158c7a0077861793/neteasecloudmusic/netease_music_player.py#L42-L68
|
|||
jplusplus/detective.io
|
cbda4f1b81036e9e1e711af92632b478ead6bdd9
|
app/detective/utils.py
|
python
|
create_node_model
|
(name, fields=None, app_label='', module='', options=None)
|
return cls
|
Create specified model
|
Create specified model
|
[
"Create",
"specified",
"model"
] |
def create_node_model(name, fields=None, app_label='', module='', options=None):
"""
Create specified model
"""
from app.detective.models import update_topic_cache, delete_entity
from neo4django.db import models
from django.db.models.loading import AppCache
# Django use a cache by model
cache = AppCache()
# If we already create a model for this app
if app_label in cache.app_models and name.lower() in cache.app_models[app_label]:
# We just delete it quietly
del cache.app_models[app_label][name.lower()]
class Meta:
# Using type('Meta', ...) gives a dictproxy error during model creation
pass
if app_label:
# app_label must be set using the Meta inner class
setattr(Meta, 'app_label', app_label)
# Update Meta with any options that were provided
if options is not None:
for key, value in options.iteritems():
setattr(Meta, key, value)
# Set up a dictionary to simulate declarations within a class
attrs = {'__module__': module, 'Meta': Meta}
# Add in any fields that were provided
if fields: attrs.update(fields)
# Create the class, which automatically triggers ModelBase processing
cls = type(name, (FluidNodeModel,), attrs)
signals.post_save.connect(update_topic_cache, sender=cls)
signals.post_delete.connect(delete_entity, sender=cls)
return cls
|
[
"def",
"create_node_model",
"(",
"name",
",",
"fields",
"=",
"None",
",",
"app_label",
"=",
"''",
",",
"module",
"=",
"''",
",",
"options",
"=",
"None",
")",
":",
"from",
"app",
".",
"detective",
".",
"models",
"import",
"update_topic_cache",
",",
"delete_entity",
"from",
"neo4django",
".",
"db",
"import",
"models",
"from",
"django",
".",
"db",
".",
"models",
".",
"loading",
"import",
"AppCache",
"# Django use a cache by model",
"cache",
"=",
"AppCache",
"(",
")",
"# If we already create a model for this app",
"if",
"app_label",
"in",
"cache",
".",
"app_models",
"and",
"name",
".",
"lower",
"(",
")",
"in",
"cache",
".",
"app_models",
"[",
"app_label",
"]",
":",
"# We just delete it quietly",
"del",
"cache",
".",
"app_models",
"[",
"app_label",
"]",
"[",
"name",
".",
"lower",
"(",
")",
"]",
"class",
"Meta",
":",
"# Using type('Meta', ...) gives a dictproxy error during model creation",
"pass",
"if",
"app_label",
":",
"# app_label must be set using the Meta inner class",
"setattr",
"(",
"Meta",
",",
"'app_label'",
",",
"app_label",
")",
"# Update Meta with any options that were provided",
"if",
"options",
"is",
"not",
"None",
":",
"for",
"key",
",",
"value",
"in",
"options",
".",
"iteritems",
"(",
")",
":",
"setattr",
"(",
"Meta",
",",
"key",
",",
"value",
")",
"# Set up a dictionary to simulate declarations within a class",
"attrs",
"=",
"{",
"'__module__'",
":",
"module",
",",
"'Meta'",
":",
"Meta",
"}",
"# Add in any fields that were provided",
"if",
"fields",
":",
"attrs",
".",
"update",
"(",
"fields",
")",
"# Create the class, which automatically triggers ModelBase processing",
"cls",
"=",
"type",
"(",
"name",
",",
"(",
"FluidNodeModel",
",",
")",
",",
"attrs",
")",
"signals",
".",
"post_save",
".",
"connect",
"(",
"update_topic_cache",
",",
"sender",
"=",
"cls",
")",
"signals",
".",
"post_delete",
".",
"connect",
"(",
"delete_entity",
",",
"sender",
"=",
"cls",
")",
"return",
"cls"
] |
https://github.com/jplusplus/detective.io/blob/cbda4f1b81036e9e1e711af92632b478ead6bdd9/app/detective/utils.py#L54-L85
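The record above builds a model class at runtime with type(); a framework-free sketch of that pattern (the class and attribute names here are invented for the example):

# Build a class at runtime with type(name, bases, attrs),
# including an inner Meta holder as in the record above.
class Meta:
    app_label = 'demo'

attrs = {'__module__': __name__, 'Meta': Meta, 'greet': lambda self: 'hi'}
Dynamic = type('Dynamic', (object,), attrs)

obj = Dynamic()
print(obj.greet())             # 'hi'
print(Dynamic.Meta.app_label)  # 'demo'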
|
|
twilio/twilio-python
|
6e1e811ea57a1edfadd5161ace87397c563f6915
|
twilio/rest/trunking/v1/trunk/ip_access_control_list.py
|
python
|
IpAccessControlListContext.__repr__
|
(self)
|
return '<Twilio.Trunking.V1.IpAccessControlListContext {}>'.format(context)
|
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
|
Provide a friendly representation
|
[
"Provide",
"a",
"friendly",
"representation"
] |
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
return '<Twilio.Trunking.V1.IpAccessControlListContext {}>'.format(context)
|
[
"def",
"__repr__",
"(",
"self",
")",
":",
"context",
"=",
"' '",
".",
"join",
"(",
"'{}={}'",
".",
"format",
"(",
"k",
",",
"v",
")",
"for",
"k",
",",
"v",
"in",
"self",
".",
"_solution",
".",
"items",
"(",
")",
")",
"return",
"'<Twilio.Trunking.V1.IpAccessControlListContext {}>'",
".",
"format",
"(",
"context",
")"
] |
https://github.com/twilio/twilio-python/blob/6e1e811ea57a1edfadd5161ace87397c563f6915/twilio/rest/trunking/v1/trunk/ip_access_control_list.py#L242-L250
|
|
openhatch/oh-mainline
|
ce29352a034e1223141dcc2f317030bbc3359a51
|
vendor/packages/whoosh/src/whoosh/analysis.py
|
python
|
Token.__init__
|
(self, positions=False, chars=False, removestops=True, mode='',
**kwargs)
|
:param positions: Whether tokens should have the token position in the
'pos' attribute.
:param chars: Whether tokens should have character offsets in the
'startchar' and 'endchar' attributes.
:param removestops: whether to remove stop words from the stream (if
the tokens pass through a stop filter).
:param mode: contains a string describing the purpose for which the
analyzer is being called, i.e. 'index' or 'query'.
|
:param positions: Whether tokens should have the token position in the
'pos' attribute.
:param chars: Whether tokens should have character offsets in the
'startchar' and 'endchar' attributes.
:param removestops: whether to remove stop words from the stream (if
the tokens pass through a stop filter).
:param mode: contains a string describing the purpose for which the
analyzer is being called, i.e. 'index' or 'query'.
|
[
":",
"param",
"positions",
":",
"Whether",
"tokens",
"should",
"have",
"the",
"token",
"position",
"in",
"the",
"pos",
"attribute",
".",
":",
"param",
"chars",
":",
"Whether",
"tokens",
"should",
"have",
"character",
"offsets",
"in",
"the",
"startchar",
"and",
"endchar",
"attributes",
".",
":",
"param",
"removestops",
":",
"whether",
"to",
"remove",
"stop",
"words",
"from",
"the",
"stream",
"(",
"if",
"the",
"tokens",
"pass",
"through",
"a",
"stop",
"filter",
")",
".",
":",
"param",
"mode",
":",
"contains",
"a",
"string",
"describing",
"the",
"purpose",
"for",
"which",
"the",
"analyzer",
"is",
"being",
"called",
"i",
".",
"e",
".",
"index",
"or",
"query",
"."
] |
def __init__(self, positions=False, chars=False, removestops=True, mode='',
**kwargs):
"""
:param positions: Whether tokens should have the token position in the
'pos' attribute.
:param chars: Whether tokens should have character offsets in the
'startchar' and 'endchar' attributes.
:param removestops: whether to remove stop words from the stream (if
the tokens pass through a stop filter).
:param mode: contains a string describing the purpose for which the
analyzer is being called, i.e. 'index' or 'query'.
"""
self.positions = positions
self.chars = chars
self.stopped = False
self.boost = 1.0
self.removestops = removestops
self.mode = mode
self.__dict__.update(kwargs)
|
[
"def",
"__init__",
"(",
"self",
",",
"positions",
"=",
"False",
",",
"chars",
"=",
"False",
",",
"removestops",
"=",
"True",
",",
"mode",
"=",
"''",
",",
"*",
"*",
"kwargs",
")",
":",
"self",
".",
"positions",
"=",
"positions",
"self",
".",
"chars",
"=",
"chars",
"self",
".",
"stopped",
"=",
"False",
"self",
".",
"boost",
"=",
"1.0",
"self",
".",
"removestops",
"=",
"removestops",
"self",
".",
"mode",
"=",
"mode",
"self",
".",
"__dict__",
".",
"update",
"(",
"kwargs",
")"
] |
https://github.com/openhatch/oh-mainline/blob/ce29352a034e1223141dcc2f317030bbc3359a51/vendor/packages/whoosh/src/whoosh/analysis.py#L168-L187
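A small sketch of the self.__dict__.update(kwargs) idiom the initializer above ends with: any extra keyword argument becomes a plain instance attribute (the Tok class and its arguments are invented here):

class Tok:
    def __init__(self, positions=False, chars=False, **kwargs):
        self.positions = positions
        self.chars = chars
        self.__dict__.update(kwargs)  # extra kwargs become attributes

t = Tok(positions=True, text="hello", pos=3)
print(t.text, t.pos)  # hello 3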
|
||
google/grr
|
8ad8a4d2c5a93c92729206b7771af19d92d4f915
|
grr/server/grr_response_server/databases/db.py
|
python
|
Database.UpdateHuntOutputPluginState
|
(self, hunt_id, state_index, update_fn)
|
Updates hunt output plugin state for a given output plugin.
Args:
hunt_id: Id of the hunt to be updated.
state_index: Index of a state in ReadHuntOutputPluginsStates-returned
list.
update_fn: A function accepting a (descriptor, state) arguments, where
descriptor is OutputPluginDescriptor and state is an AttributedDict. The
function is expected to return a modified state (it's ok to modify it
in-place).
Returns:
An updated AttributedDict object corresponding to an updated plugin state
(result of the update_fn function call).
Raises:
UnknownHuntError: if a hunt with a given hunt id does not exist.
UnknownHuntOutputPluginStateError: if a state with a given index does
not exist.
|
Updates hunt output plugin state for a given output plugin.
|
[
"Updates",
"hunt",
"output",
"plugin",
"state",
"for",
"a",
"given",
"output",
"plugin",
"."
] |
def UpdateHuntOutputPluginState(self, hunt_id, state_index, update_fn):
"""Updates hunt output plugin state for a given output plugin.
Args:
hunt_id: Id of the hunt to be updated.
state_index: Index of a state in ReadHuntOutputPluginsStates-returned
list.
update_fn: A function accepting a (descriptor, state) arguments, where
descriptor is OutputPluginDescriptor and state is an AttributedDict. The
function is expected to return a modified state (it's ok to modify it
in-place).
Returns:
An updated AttributedDict object corresponding to an updated plugin state
(result of the update_fn function call).
Raises:
UnknownHuntError: if a hunt with a given hunt id does not exist.
UnknownHuntOutputPluginStateError: if a state with a given index does
not exist.
"""
|
[
"def",
"UpdateHuntOutputPluginState",
"(",
"self",
",",
"hunt_id",
",",
"state_index",
",",
"update_fn",
")",
":"
] |
https://github.com/google/grr/blob/8ad8a4d2c5a93c92729206b7771af19d92d4f915/grr/server/grr_response_server/databases/db.py#L2531-L2551
|
||
Trusted-AI/adversarial-robustness-toolbox
|
9fabffdbb92947efa1ecc5d825d634d30dfbaf29
|
art/estimators/certification/abstain.py
|
python
|
AbstainPredictorMixin.__init__
|
(self, **kwargs)
|
Creates a predictor that can abstain from predictions
|
Creates a predictor that can abstain from predictions
|
[
"Creates",
"a",
"predictor",
"that",
"can",
"abstain",
"from",
"predictions"
] |
def __init__(self, **kwargs):
"""
Creates a predictor that can abstain from predictions
"""
super().__init__(**kwargs)
|
[
"def",
"__init__",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"super",
"(",
")",
".",
"__init__",
"(",
"*",
"*",
"kwargs",
")"
] |
https://github.com/Trusted-AI/adversarial-robustness-toolbox/blob/9fabffdbb92947efa1ecc5d825d634d30dfbaf29/art/estimators/certification/abstain.py#L38-L43
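Why the mixin forwards **kwargs to super().__init__(): under cooperative multiple inheritance each class in the MRO consumes its own keywords and passes the rest along. A self-contained sketch (all class names invented):

class AbstainMixin:
    def __init__(self, **kwargs):
        self.can_abstain = True
        super().__init__(**kwargs)  # keep the MRO chain going

class Estimator:
    def __init__(self, model=None):
        self.model = model

class AbstainingEstimator(AbstainMixin, Estimator):
    pass

e = AbstainingEstimator(model="m")
print(e.can_abstain, e.model)  # True m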
|
||
sahana/eden
|
1696fa50e90ce967df69f66b571af45356cc18da
|
modules/s3db/pr.py
|
python
|
pr_get_role_paths
|
(pe_id, roles=None, role_types=None)
|
return role_paths
|
Get the ancestor paths of the ancestor OUs this person entity
is affiliated with, sorted by roles.
Used by:
gis.set_config()
Args:
pe_id: the person entity ID
roles: list of roles to limit the search
role_types: list of role types to limit the search
Returns:
Storage() of S3MultiPaths with the role names as keys
Note:
role_types is ignored if roles is specified
|
Get the ancestor paths of the ancestor OUs this person entity
is affiliated with, sorted by roles.
|
[
"Get",
"the",
"ancestor",
"paths",
"of",
"the",
"ancestor",
"OUs",
"this",
"person",
"entity",
"is",
"affiliated",
"with",
"sorted",
"by",
"roles",
"."
] |
def pr_get_role_paths(pe_id, roles=None, role_types=None):
"""
Get the ancestor paths of the ancestor OUs this person entity
is affiliated with, sorted by roles.
Used by:
gis.set_config()
Args:
pe_id: the person entity ID
roles: list of roles to limit the search
role_types: list of role types to limit the search
Returns:
Storage() of S3MultiPaths with the role names as keys
Note:
role_types is ignored if roles is specified
"""
s3db = current.s3db
atable = s3db.pr_affiliation
rtable = s3db.pr_role
query = (atable.deleted != True) & \
(atable.role_id == rtable.id) & \
(atable.pe_id == pe_id) & \
(rtable.deleted != True)
if roles is not None:
# Limit the lookup to these roles
if not isinstance(roles, (list, tuple)):
roles = [roles]
query &= (rtable.role.belongs(roles))
elif role_types is not None:
# Limit the lookup to these types of roles
if not isinstance(role_types, (list, tuple)):
role_types = [role_types]
query &= (rtable.role_type.belongs(role_types))
rows = current.db(query).select(rtable.role,
rtable.path,
rtable.pe_id,
)
role_paths = Storage()
for role in rows:
name = role.role
if name in role_paths:
multipath = role_paths[name]
multipath.append([role.pe_id])
else:
multipath = S3MultiPath([role.pe_id])
path = pr_get_path(role.pe_id)
multipath.extend(role.pe_id, path, cut=pe_id)
role_paths[name] = multipath.clean()
return role_paths
|
[
"def",
"pr_get_role_paths",
"(",
"pe_id",
",",
"roles",
"=",
"None",
",",
"role_types",
"=",
"None",
")",
":",
"s3db",
"=",
"current",
".",
"s3db",
"atable",
"=",
"s3db",
".",
"pr_affiliation",
"rtable",
"=",
"s3db",
".",
"pr_role",
"query",
"=",
"(",
"atable",
".",
"deleted",
"!=",
"True",
")",
"&",
"(",
"atable",
".",
"role_id",
"==",
"rtable",
".",
"id",
")",
"&",
"(",
"atable",
".",
"pe_id",
"==",
"pe_id",
")",
"&",
"(",
"rtable",
".",
"deleted",
"!=",
"True",
")",
"if",
"roles",
"is",
"not",
"None",
":",
"# Limit the lookup to these roles",
"if",
"not",
"isinstance",
"(",
"roles",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"roles",
"=",
"[",
"roles",
"]",
"query",
"&=",
"(",
"rtable",
".",
"role",
".",
"belongs",
"(",
"roles",
")",
")",
"elif",
"role_types",
"is",
"not",
"None",
":",
"# Limit the lookup to these types of roles",
"if",
"not",
"isinstance",
"(",
"role_types",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"role_types",
"=",
"[",
"role_types",
"]",
"query",
"&=",
"(",
"rtable",
".",
"role_type",
".",
"belongs",
"(",
"role_types",
")",
")",
"rows",
"=",
"current",
".",
"db",
"(",
"query",
")",
".",
"select",
"(",
"rtable",
".",
"role",
",",
"rtable",
".",
"path",
",",
"rtable",
".",
"pe_id",
",",
")",
"role_paths",
"=",
"Storage",
"(",
")",
"for",
"role",
"in",
"rows",
":",
"name",
"=",
"role",
".",
"role",
"if",
"name",
"in",
"role_paths",
":",
"multipath",
"=",
"role_paths",
"[",
"name",
"]",
"multipath",
".",
"append",
"(",
"[",
"role",
".",
"pe_id",
"]",
")",
"else",
":",
"multipath",
"=",
"S3MultiPath",
"(",
"[",
"role",
".",
"pe_id",
"]",
")",
"path",
"=",
"pr_get_path",
"(",
"role",
".",
"pe_id",
")",
"multipath",
".",
"extend",
"(",
"role",
".",
"pe_id",
",",
"path",
",",
"cut",
"=",
"pe_id",
")",
"role_paths",
"[",
"name",
"]",
"=",
"multipath",
".",
"clean",
"(",
")",
"return",
"role_paths"
] |
https://github.com/sahana/eden/blob/1696fa50e90ce967df69f66b571af45356cc18da/modules/s3db/pr.py#L9728-L9784
|
|
isnowfy/pydown
|
71ecc891868cd2a34b7e5fe662c99474f2d0fd7f
|
pygments/token.py
|
python
|
is_token_subtype
|
(ttype, other)
|
return ttype in other
|
Return True if ``ttype`` is a subtype of ``other``.
Exists for backwards compatibility; use ``ttype in other`` now.
|
Return True if ``ttype`` is a subtype of ``other``.
|
[
"Return",
"True",
"if",
"ttype",
"is",
"a",
"subtype",
"of",
"other",
"."
] |
def is_token_subtype(ttype, other):
"""
Return True if ``ttype`` is a subtype of ``other``.
Exists for backwards compatibility; use ``ttype in other`` now.
"""
return ttype in other
|
[
"def",
"is_token_subtype",
"(",
"ttype",
",",
"other",
")",
":",
"return",
"ttype",
"in",
"other"
] |
https://github.com/isnowfy/pydown/blob/71ecc891868cd2a34b7e5fe662c99474f2d0fd7f/pygments/token.py#L76-L82
|
|
caiiiac/Machine-Learning-with-Python
|
1a26c4467da41ca4ebc3d5bd789ea942ef79422f
|
MachineLearning/venv/lib/python3.5/site-packages/pandas/core/dtypes/dtypes.py
|
python
|
ExtensionDtype.__str__
|
(self)
|
return self.__bytes__()
|
Return a string representation for a particular Object
Invoked by str(df) in both py2/py3.
Yields Bytestring in Py2, Unicode String in py3.
|
Return a string representation for a particular Object
|
[
"Return",
"a",
"string",
"representation",
"for",
"a",
"particular",
"Object"
] |
def __str__(self):
"""
Return a string representation for a particular Object
Invoked by str(df) in both py2/py3.
Yields Bytestring in Py2, Unicode String in py3.
"""
if compat.PY3:
return self.__unicode__()
return self.__bytes__()
|
[
"def",
"__str__",
"(",
"self",
")",
":",
"if",
"compat",
".",
"PY3",
":",
"return",
"self",
".",
"__unicode__",
"(",
")",
"return",
"self",
".",
"__bytes__",
"(",
")"
] |
https://github.com/caiiiac/Machine-Learning-with-Python/blob/1a26c4467da41ca4ebc3d5bd789ea942ef79422f/MachineLearning/venv/lib/python3.5/site-packages/pandas/core/dtypes/dtypes.py#L32-L42
|
|
Jrohy/multi-v2ray
|
935547d0f39680885267558dfae68b228adeb28f
|
v2ray_util/util_core/writer.py
|
python
|
GroupWriter.__init__
|
(self, group_tag, group_index)
|
[] |
def __init__(self, group_tag, group_index):
super(GroupWriter, self).__init__(group_tag, group_index)
|
[
"def",
"__init__",
"(",
"self",
",",
"group_tag",
",",
"group_index",
")",
":",
"super",
"(",
"GroupWriter",
",",
"self",
")",
".",
"__init__",
"(",
"group_tag",
",",
"group_index",
")"
] |
https://github.com/Jrohy/multi-v2ray/blob/935547d0f39680885267558dfae68b228adeb28f/v2ray_util/util_core/writer.py#L340-L341
|
||||
jbjorne/TEES
|
caf19a4a1352ac59f5dc13a8684cc42ce4342d9d
|
ExampleBuilders/FeatureBuilders/GiulianoFeatureBuilder.py
|
python
|
GiulianoFeatureBuilder.addToPattern
|
(self, pattern, tokenText, prevTokenText, prevToken2Text)
|
[] |
def addToPattern(self, pattern, tokenText, prevTokenText, prevToken2Text):
if not pattern.has_key(tokenText):
pattern[tokenText] = 0
pattern[tokenText] += 1
# Should the n-grams be unordered?
if prevTokenText != None:
ngram1 = prevTokenText + "_" + tokenText
if not pattern.has_key(ngram1):
pattern[ngram1] = 0
pattern[ngram1] += 1
if prevToken2Text != None:
ngram2 = prevToken2Text + "_" + ngram1
if not pattern.has_key(ngram2):
pattern[ngram2] = 0
pattern[ngram2] += 1
|
[
"def",
"addToPattern",
"(",
"self",
",",
"pattern",
",",
"tokenText",
",",
"prevTokenText",
",",
"prevToken2Text",
")",
":",
"if",
"not",
"pattern",
".",
"has_key",
"(",
"tokenText",
")",
":",
"pattern",
"[",
"tokenText",
"]",
"=",
"0",
"pattern",
"[",
"tokenText",
"]",
"+=",
"1",
"# Should the n-grams be unordered?",
"if",
"prevTokenText",
"!=",
"None",
":",
"ngram1",
"=",
"prevTokenText",
"+",
"\"_\"",
"+",
"tokenText",
"if",
"not",
"pattern",
".",
"has_key",
"(",
"ngram1",
")",
":",
"pattern",
"[",
"ngram1",
"]",
"=",
"0",
"pattern",
"[",
"ngram1",
"]",
"+=",
"1",
"if",
"prevToken2Text",
"!=",
"None",
":",
"ngram2",
"=",
"prevToken2Text",
"+",
"\"_\"",
"+",
"ngram1",
"if",
"not",
"pattern",
".",
"has_key",
"(",
"ngram2",
")",
":",
"pattern",
"[",
"ngram2",
"]",
"=",
"0",
"pattern",
"[",
"ngram2",
"]",
"+=",
"1"
] |
https://github.com/jbjorne/TEES/blob/caf19a4a1352ac59f5dc13a8684cc42ce4342d9d/ExampleBuilders/FeatureBuilders/GiulianoFeatureBuilder.py#L140-L156
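The has_key calls above are Python 2; a Python 3 sketch of the same uni-/bi-/tri-gram counting using collections.defaultdict (function and variable names are invented):

from collections import defaultdict

def add_to_pattern(pattern, token, prev_token=None, prev_token2=None):
    # pattern maps n-gram -> count; defaultdict removes the
    # has_key / initialise-to-zero dance of the original.
    pattern[token] += 1
    if prev_token is not None:
        ngram1 = prev_token + "_" + token
        pattern[ngram1] += 1
        if prev_token2 is not None:
            pattern[prev_token2 + "_" + ngram1] += 1

pattern = defaultdict(int)
add_to_pattern(pattern, "b", "a")
add_to_pattern(pattern, "c", "b", "a")
print(dict(pattern))  # {'b': 1, 'a_b': 1, 'c': 1, 'b_c': 1, 'a_b_c': 1}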
|
||||
rytilahti/python-miio
|
b6e53dd16fac77915426e7592e2528b78ef65190
|
miio/huizuo.py
|
python
|
HuizuoStatus.brightness
|
(self)
|
return self.data["brightness"]
|
Return current brightness.
|
Return current brightness.
|
[
"Return",
"current",
"brightness",
"."
] |
def brightness(self) -> int:
"""Return current brightness."""
return self.data["brightness"]
|
[
"def",
"brightness",
"(",
"self",
")",
"->",
"int",
":",
"return",
"self",
".",
"data",
"[",
"\"brightness\"",
"]"
] |
https://github.com/rytilahti/python-miio/blob/b6e53dd16fac77915426e7592e2528b78ef65190/miio/huizuo.py#L130-L132
|
|
tp4a/teleport
|
1fafd34f1f775d2cf80ea4af6e44468d8e0b24ad
|
server/www/packages/packages-darwin/x64/mako/template.py
|
python
|
_compile_module_file
|
(template, text, filename, outputpath, module_writer)
|
[] |
def _compile_module_file(template, text, filename, outputpath, module_writer):
source, lexer = _compile(template, text, filename,
generate_magic_comment=True)
if isinstance(source, compat.text_type):
source = source.encode(lexer.encoding or 'ascii')
if module_writer:
module_writer(source, outputpath)
else:
# make tempfiles in the same location as the ultimate
# location. this ensures they're on the same filesystem,
# avoiding synchronization issues.
(dest, name) = tempfile.mkstemp(dir=os.path.dirname(outputpath))
os.write(dest, source)
os.close(dest)
shutil.move(name, outputpath)
|
[
"def",
"_compile_module_file",
"(",
"template",
",",
"text",
",",
"filename",
",",
"outputpath",
",",
"module_writer",
")",
":",
"source",
",",
"lexer",
"=",
"_compile",
"(",
"template",
",",
"text",
",",
"filename",
",",
"generate_magic_comment",
"=",
"True",
")",
"if",
"isinstance",
"(",
"source",
",",
"compat",
".",
"text_type",
")",
":",
"source",
"=",
"source",
".",
"encode",
"(",
"lexer",
".",
"encoding",
"or",
"'ascii'",
")",
"if",
"module_writer",
":",
"module_writer",
"(",
"source",
",",
"outputpath",
")",
"else",
":",
"# make tempfiles in the same location as the ultimate",
"# location. this ensures they're on the same filesystem,",
"# avoiding synchronization issues.",
"(",
"dest",
",",
"name",
")",
"=",
"tempfile",
".",
"mkstemp",
"(",
"dir",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"outputpath",
")",
")",
"os",
".",
"write",
"(",
"dest",
",",
"source",
")",
"os",
".",
"close",
"(",
"dest",
")",
"shutil",
".",
"move",
"(",
"name",
",",
"outputpath",
")"
] |
https://github.com/tp4a/teleport/blob/1fafd34f1f775d2cf80ea4af6e44468d8e0b24ad/server/www/packages/packages-darwin/x64/mako/template.py#L718-L735
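A standalone sketch of the tempfile-then-move idiom in the else branch above: writing to a temp file in the destination directory and renaming it into place means readers never see a half-written file (atomic_write is an invented name):

import os
import shutil
import tempfile

def atomic_write(data: bytes, outputpath: str) -> None:
    fd, tmp = tempfile.mkstemp(dir=os.path.dirname(outputpath) or ".")
    try:
        os.write(fd, data)
    finally:
        os.close(fd)
    shutil.move(tmp, outputpath)  # same filesystem, so this is a rename

atomic_write(b"print('hi')\n", "generated_module.py")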
|
||||
f-dangel/backpack
|
1da7e53ebb2c490e2b7dd9f79116583641f3cca1
|
backpack/context.py
|
python
|
CTX.set_extension_hook
|
(extension_hook: Callable[[Module], None])
|
Set the current extension hook.
Args:
extension_hook: the extension hook to run after all other extensions
|
Set the current extension hook.
|
[
"Set",
"the",
"current",
"extension",
"hook",
"."
] |
def set_extension_hook(extension_hook: Callable[[Module], None]) -> None:
"""Set the current extension hook.
Args:
extension_hook: the extension hook to run after all other extensions
"""
CTX.extension_hook = extension_hook
|
[
"def",
"set_extension_hook",
"(",
"extension_hook",
":",
"Callable",
"[",
"[",
"Module",
"]",
",",
"None",
"]",
")",
"->",
"None",
":",
"CTX",
".",
"extension_hook",
"=",
"extension_hook"
] |
https://github.com/f-dangel/backpack/blob/1da7e53ebb2c490e2b7dd9f79116583641f3cca1/backpack/context.py#L94-L100
|
||
prody/ProDy
|
b24bbf58aa8fffe463c8548ae50e3955910e5b7f
|
prody/apps/prody_apps/prody_fetch.py
|
python
|
prody_fetch
|
(*pdb, **kwargs)
|
Fetch PDB files from PDB FTP server.
:arg pdbs: PDB identifier(s) or filename(s)
:arg dir: target directory for saving PDB file(s), default is ``'.'``
:arg gzip: gzip fetched files or not, default is **True**
|
Fetch PDB files from PDB FTP server.
|
[
"Fetch",
"PDB",
"files",
"from",
"PDB",
"FTP",
"server",
"."
] |
def prody_fetch(*pdb, **kwargs):
"""Fetch PDB files from PDB FTP server.
:arg pdbs: PDB identifier(s) or filename(s)
:arg dir: target directory for saving PDB file(s), default is ``'.'``
:arg gzip: gzip fetched files or not, default is **True**"""
import prody
pdblist = list(pdb)  # args arrive as a tuple; copy to a list so append() below works
if len(pdblist) == 1 and os.path.isfile(pdblist[0]):
from prody.utilities import openFile
with openFile(pdblist[0]) as inp:
for item in inp.read().strip().split():
for pdb in item.split(','):
if len(pdb) == 4 and pdb.isalnum():
pdblist.append(pdb)
prody.fetchPDB(*pdblist, folder=kwargs.get('folder', '.'),
compressed=kwargs.get('gzip', False),
copy=True)
|
[
"def",
"prody_fetch",
"(",
"*",
"pdb",
",",
"*",
"*",
"kwargs",
")",
":",
"import",
"prody",
"pdblist",
"=",
"pdb",
"if",
"len",
"(",
"pdblist",
")",
"==",
"1",
"and",
"os",
".",
"path",
".",
"isfile",
"(",
"pdblist",
"[",
"0",
"]",
")",
":",
"from",
"prody",
".",
"utilities",
"import",
"openFile",
"with",
"openFile",
"(",
"pdblist",
"[",
"0",
"]",
")",
"as",
"inp",
":",
"for",
"item",
"in",
"inp",
".",
"read",
"(",
")",
".",
"strip",
"(",
")",
".",
"split",
"(",
")",
":",
"for",
"pdb",
"in",
"item",
".",
"split",
"(",
"','",
")",
":",
"if",
"len",
"(",
"pdb",
")",
"==",
"4",
"and",
"pdb",
".",
"isalnum",
"(",
")",
":",
"pdblist",
".",
"append",
"(",
"pdb",
")",
"prody",
".",
"fetchPDB",
"(",
"*",
"pdblist",
",",
"folder",
"=",
"kwargs",
".",
"get",
"(",
"'folder'",
",",
"'.'",
")",
",",
"compressed",
"=",
"kwargs",
".",
"get",
"(",
"'gzip'",
",",
"False",
")",
",",
"copy",
"=",
"True",
")"
] |
https://github.com/prody/ProDy/blob/b24bbf58aa8fffe463c8548ae50e3955910e5b7f/prody/apps/prody_apps/prody_fetch.py#L9-L32
|
||
Source-Python-Dev-Team/Source.Python
|
d0ffd8ccbd1e9923c9bc44936f20613c1c76b7fb
|
addons/source-python/packages/site-packages/sqlalchemy/sql/compiler.py
|
python
|
IdentifierPreparer.format_column
|
(self, column, use_table=False,
name=None, table_name=None)
|
Prepare a quoted column name.
|
Prepare a quoted column name.
|
[
"Prepare",
"a",
"quoted",
"column",
"name",
"."
] |
def format_column(self, column, use_table=False,
name=None, table_name=None):
"""Prepare a quoted column name."""
if name is None:
name = column.name
if not getattr(column, 'is_literal', False):
if use_table:
return self.format_table(
column.table, use_schema=False,
name=table_name) + "." + self.quote(name)
else:
return self.quote(name)
else:
# literal textual elements get stuck into ColumnClause a lot,
# which shouldn't get quoted
if use_table:
return self.format_table(
column.table, use_schema=False,
name=table_name) + '.' + name
else:
return name
|
[
"def",
"format_column",
"(",
"self",
",",
"column",
",",
"use_table",
"=",
"False",
",",
"name",
"=",
"None",
",",
"table_name",
"=",
"None",
")",
":",
"if",
"name",
"is",
"None",
":",
"name",
"=",
"column",
".",
"name",
"if",
"not",
"getattr",
"(",
"column",
",",
"'is_literal'",
",",
"False",
")",
":",
"if",
"use_table",
":",
"return",
"self",
".",
"format_table",
"(",
"column",
".",
"table",
",",
"use_schema",
"=",
"False",
",",
"name",
"=",
"table_name",
")",
"+",
"\".\"",
"+",
"self",
".",
"quote",
"(",
"name",
")",
"else",
":",
"return",
"self",
".",
"quote",
"(",
"name",
")",
"else",
":",
"# literal textual elements get stuck into ColumnClause a lot,",
"# which shouldn't get quoted",
"if",
"use_table",
":",
"return",
"self",
".",
"format_table",
"(",
"column",
".",
"table",
",",
"use_schema",
"=",
"False",
",",
"name",
"=",
"table_name",
")",
"+",
"'.'",
"+",
"name",
"else",
":",
"return",
"name"
] |
https://github.com/Source-Python-Dev-Team/Source.Python/blob/d0ffd8ccbd1e9923c9bc44936f20613c1c76b7fb/addons/source-python/packages/site-packages/sqlalchemy/sql/compiler.py#L2762-L2784
|
||
morganstanley/treadmill
|
f18267c665baf6def4374d21170198f63ff1cde4
|
lib/python/treadmill/zkdatacache.py
|
python
|
ZkDataCache.cached
|
(self)
|
return self._cached
|
Dictionary of name to list of ZkCachedEntry.
|
Dictionary of name to list of ZkCachedEntry.
|
[
"Dictionary",
"of",
"name",
"to",
"list",
"of",
"ZkCachedEntry",
"."
] |
def cached(self):
"""Dictionary of name to list of ZkCachedEntry.
"""
return self._cached
|
[
"def",
"cached",
"(",
"self",
")",
":",
"return",
"self",
".",
"_cached"
] |
https://github.com/morganstanley/treadmill/blob/f18267c665baf6def4374d21170198f63ff1cde4/lib/python/treadmill/zkdatacache.py#L75-L78
|
|
TengXiaoDai/DistributedCrawling
|
f5c2439e6ce68dd9b49bde084d76473ff9ed4963
|
Lib/ntpath.py
|
python
|
normpath
|
(path)
|
return prefix + sep.join(comps)
|
Normalize path, eliminating double slashes, etc.
|
Normalize path, eliminating double slashes, etc.
|
[
"Normalize",
"path",
"eliminating",
"double",
"slashes",
"etc",
"."
] |
def normpath(path):
"""Normalize path, eliminating double slashes, etc."""
if isinstance(path, bytes):
sep = b'\\'
altsep = b'/'
curdir = b'.'
pardir = b'..'
special_prefixes = (b'\\\\.\\', b'\\\\?\\')
else:
sep = '\\'
altsep = '/'
curdir = '.'
pardir = '..'
special_prefixes = ('\\\\.\\', '\\\\?\\')
if path.startswith(special_prefixes):
# in the case of paths with these prefixes:
# \\.\ -> device names
# \\?\ -> literal paths
# do not do any normalization, but return the path unchanged
return path
path = path.replace(altsep, sep)
prefix, path = splitdrive(path)
# collapse initial backslashes
if path.startswith(sep):
prefix += sep
path = path.lstrip(sep)
comps = path.split(sep)
i = 0
while i < len(comps):
if not comps[i] or comps[i] == curdir:
del comps[i]
elif comps[i] == pardir:
if i > 0 and comps[i-1] != pardir:
del comps[i-1:i+1]
i -= 1
elif i == 0 and prefix.endswith(sep):
del comps[i]
else:
i += 1
else:
i += 1
# If the path is now empty, substitute '.'
if not prefix and not comps:
comps.append(curdir)
return prefix + sep.join(comps)
|
[
"def",
"normpath",
"(",
"path",
")",
":",
"if",
"isinstance",
"(",
"path",
",",
"bytes",
")",
":",
"sep",
"=",
"b'\\\\'",
"altsep",
"=",
"b'/'",
"curdir",
"=",
"b'.'",
"pardir",
"=",
"b'..'",
"special_prefixes",
"=",
"(",
"b'\\\\\\\\.\\\\'",
",",
"b'\\\\\\\\?\\\\'",
")",
"else",
":",
"sep",
"=",
"'\\\\'",
"altsep",
"=",
"'/'",
"curdir",
"=",
"'.'",
"pardir",
"=",
"'..'",
"special_prefixes",
"=",
"(",
"'\\\\\\\\.\\\\'",
",",
"'\\\\\\\\?\\\\'",
")",
"if",
"path",
".",
"startswith",
"(",
"special_prefixes",
")",
":",
"# in the case of paths with these prefixes:",
"# \\\\.\\ -> device names",
"# \\\\?\\ -> literal paths",
"# do not do any normalization, but return the path unchanged",
"return",
"path",
"path",
"=",
"path",
".",
"replace",
"(",
"altsep",
",",
"sep",
")",
"prefix",
",",
"path",
"=",
"splitdrive",
"(",
"path",
")",
"# collapse initial backslashes",
"if",
"path",
".",
"startswith",
"(",
"sep",
")",
":",
"prefix",
"+=",
"sep",
"path",
"=",
"path",
".",
"lstrip",
"(",
"sep",
")",
"comps",
"=",
"path",
".",
"split",
"(",
"sep",
")",
"i",
"=",
"0",
"while",
"i",
"<",
"len",
"(",
"comps",
")",
":",
"if",
"not",
"comps",
"[",
"i",
"]",
"or",
"comps",
"[",
"i",
"]",
"==",
"curdir",
":",
"del",
"comps",
"[",
"i",
"]",
"elif",
"comps",
"[",
"i",
"]",
"==",
"pardir",
":",
"if",
"i",
">",
"0",
"and",
"comps",
"[",
"i",
"-",
"1",
"]",
"!=",
"pardir",
":",
"del",
"comps",
"[",
"i",
"-",
"1",
":",
"i",
"+",
"1",
"]",
"i",
"-=",
"1",
"elif",
"i",
"==",
"0",
"and",
"prefix",
".",
"endswith",
"(",
"sep",
")",
":",
"del",
"comps",
"[",
"i",
"]",
"else",
":",
"i",
"+=",
"1",
"else",
":",
"i",
"+=",
"1",
"# If the path is now empty, substitute '.'",
"if",
"not",
"prefix",
"and",
"not",
"comps",
":",
"comps",
".",
"append",
"(",
"curdir",
")",
"return",
"prefix",
"+",
"sep",
".",
"join",
"(",
"comps",
")"
] |
https://github.com/TengXiaoDai/DistributedCrawling/blob/f5c2439e6ce68dd9b49bde084d76473ff9ed4963/Lib/ntpath.py#L465-L511
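The same function ships in the standard library as ntpath.normpath and can be exercised on any platform:

import ntpath

print(ntpath.normpath(r"C:\a\.\b\..\c"))     # C:\a\c
print(ntpath.normpath("C:/mixed/../seps"))   # C:\seps
print(ntpath.normpath(r"\\?\C:\literal\."))  # special prefix: returned unchanged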
|
|
openstack/cinder
|
23494a6d6c51451688191e1847a458f1d3cdcaa5
|
cinder/brick/local_dev/lvm.py
|
python
|
LVM.supports_thin_provisioning
|
(root_helper)
|
return LVM.get_lvm_version(root_helper) >= (2, 2, 95)
|
Static method to check for thin LVM support on a system.
:param root_helper: root_helper to use for execute
:returns: True if supported, False otherwise
|
Static method to check for thin LVM support on a system.
|
[
"Static",
"method",
"to",
"check",
"for",
"thin",
"LVM",
"support",
"on",
"a",
"system",
"."
] |
def supports_thin_provisioning(root_helper):
"""Static method to check for thin LVM support on a system.
:param root_helper: root_helper to use for execute
:returns: True if supported, False otherwise
"""
return LVM.get_lvm_version(root_helper) >= (2, 2, 95)
|
[
"def",
"supports_thin_provisioning",
"(",
"root_helper",
")",
":",
"return",
"LVM",
".",
"get_lvm_version",
"(",
"root_helper",
")",
">=",
"(",
"2",
",",
"2",
",",
"95",
")"
] |
https://github.com/openstack/cinder/blob/23494a6d6c51451688191e1847a458f1d3cdcaa5/cinder/brick/local_dev/lvm.py#L243-L251
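The version gate works because Python compares tuples element by element; a quick sketch:

def supports_thin(version):
    return version >= (2, 2, 95)  # lexicographic tuple comparison

print(supports_thin((2, 2, 95)))  # True  (exactly the minimum)
print(supports_thin((2, 3, 0)))   # True
print(supports_thin((2, 2, 94)))  # False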
|
|
tav/pylibs
|
3c16b843681f54130ee6a022275289cadb2f2a69
|
mapreduce/input_readers.py
|
python
|
BlobstoreLineInputReader._extract_record
|
(self, newline_neg1_offset)
|
return record
|
Returns the string containing the current record.
The current record's boundaries are defined by
_current_record_start_0_offset inclusive and newline_neg1_offset exclusive.
|
Returns the string containing the current record.
|
[
"Returns",
"the",
"string",
"containing",
"the",
"current",
"record",
"."
] |
def _extract_record(self, newline_neg1_offset):
"""Returns the string containing the current record.
The current record's boundaries are defined by
_current_record_start_0_offset inclusive and newline_neg1_offset exclusive.
"""
if len(self._data) == 1:
record = self._data[0][
self._current_record_start_0_offset:newline_neg1_offset]
elif len(self._data) > 1:
remaining_blocks = self._data[1:-1]
remaining_blocks.append(self._data[-1][:newline_neg1_offset])
record = "".join([self._data[0][self._current_record_start_0_offset:]] +
remaining_blocks)
return record
|
[
"def",
"_extract_record",
"(",
"self",
",",
"newline_neg1_offset",
")",
":",
"if",
"len",
"(",
"self",
".",
"_data",
")",
"==",
"1",
":",
"record",
"=",
"self",
".",
"_data",
"[",
"0",
"]",
"[",
"self",
".",
"_current_record_start_0_offset",
":",
"newline_neg1_offset",
"]",
"elif",
"len",
"(",
"self",
".",
"_data",
")",
">",
"1",
":",
"remaining_blocks",
"=",
"self",
".",
"_data",
"[",
"1",
":",
"-",
"1",
"]",
"remaining_blocks",
".",
"append",
"(",
"self",
".",
"_data",
"[",
"-",
"1",
"]",
"[",
":",
"newline_neg1_offset",
"]",
")",
"record",
"=",
"\"\"",
".",
"join",
"(",
"[",
"self",
".",
"_data",
"[",
"0",
"]",
"[",
"self",
".",
"_current_record_start_0_offset",
":",
"]",
"]",
"+",
"remaining_blocks",
")",
"return",
"record"
] |
https://github.com/tav/pylibs/blob/3c16b843681f54130ee6a022275289cadb2f2a69/mapreduce/input_readers.py#L348-L362
|
|
timothyb0912/pylogit
|
cffc9c523b5368966ef2481c7dc30f0a5d296de8
|
src/pylogit/bootstrap_abc.py
|
python
|
ensure_rows_to_obs_validity
|
(rows_to_obs)
|
return None
|
Ensure that `rows_to_obs` is None or a 2D scipy sparse CSR matrix. Raises a
helpful ValueError if otherwise.
|
Ensure that `rows_to_obs` is None or a 2D scipy sparse CSR matrix. Raises a
helpful ValueError if otherwise.
|
[
"Ensure",
"that",
"rows_to_obs",
"is",
"None",
"or",
"a",
"2D",
"scipy",
"sparse",
"CSR",
"matrix",
".",
"Raises",
"a",
"helpful",
"ValueError",
"if",
"otherwise",
"."
] |
def ensure_rows_to_obs_validity(rows_to_obs):
"""
Ensure that `rows_to_obs` is None or a 2D scipy sparse CSR matrix. Raises a
helpful ValueError if otherwise.
"""
if rows_to_obs is not None and not isspmatrix_csr(rows_to_obs):
msg = "rows_to_obs MUST be a 2D scipy sparse row matrix."
raise ValueError(msg)
return None
|
[
"def",
"ensure_rows_to_obs_validity",
"(",
"rows_to_obs",
")",
":",
"if",
"rows_to_obs",
"is",
"not",
"None",
"and",
"not",
"isspmatrix_csr",
"(",
"rows_to_obs",
")",
":",
"msg",
"=",
"\"rows_to_obs MUST be a 2D scipy sparse row matrix.\"",
"raise",
"ValueError",
"(",
"msg",
")",
"return",
"None"
] |
https://github.com/timothyb0912/pylogit/blob/cffc9c523b5368966ef2481c7dc30f0a5d296de8/src/pylogit/bootstrap_abc.py#L43-L51
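A usage sketch, assuming scipy is installed: only CSR matrices pass the check.

import numpy as np
from scipy.sparse import csr_matrix, isspmatrix_csr

rows_to_obs = csr_matrix(np.eye(3))
print(isspmatrix_csr(rows_to_obs))  # True
print(isspmatrix_csr(np.eye(3)))    # False -- dense arrays would raise in the validator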
|
|
brightmart/nlu_sim
|
c99b2edad338f3771fb7153898fce84f225cfea1
|
a1_dual_bilstm_cnn_model.py
|
python
|
DualBilstmCnnModel.self_attention
|
(self,sequences_original,scope,reuse_flag=False)
|
return weighted_sum
|
self attention apply to sequences, get a final features
:param sequences: [batch_size,sequence_length,dimension]
:return: [batch_size,dimension]
|
self attention apply to sequences, get a final features
:param sequences: [batch_size,sequence_length,dimension]
:return: [batch_size,dimension]
|
[
"self",
"attention",
"apply",
"to",
"sequences",
"get",
"a",
"final",
"features",
":",
"param",
"sequences",
":",
"[",
"batch_size",
"sequence_length",
"dimension",
"]",
":",
"return",
":",
"[",
"batch_size",
"dimension",
"]"
] |
def self_attention(self,sequences_original,scope,reuse_flag=False) :
"""
self attention apply to sequences, get a final features
:param sequences: [batch_size,sequence_length,dimension]
:return: [batch_size,dimension]
"""
dimension=sequences_original.get_shape().as_list()[-1]
#0. use one-layer feed forward to transform original sequences.
sequences=tf.layers.dense(sequences_original,dimension,activation=tf.nn.tanh,use_bias=True)
#1.get weights sequences:[batch_size,sequence_length,dimension]; attention_weight=[dimension]=>after attention, we should get [batch_size,dimension]
with tf.variable_scope("self_attention_"+str(scope), reuse=reuse_flag):
attention_vector=tf.get_variable("attention_vector", [dimension],initializer=self.initializer)
weights=tf.reduce_sum(tf.multiply(sequences,attention_vector),axis=-1) #[batch_size,sequence_length]
#2.get score by normalize each weight
score=tf.nn.softmax(weights,axis=-1) #[batch_size, sequence_length]
#3.get weighted sum. sequences=[batch_size,sequence_length,dimension];score=[batch_size, sequence_length]. after operation, we need to get:[ batch_size,dimension]
score=tf.expand_dims(score,axis=-1) #[batch_size,sequence_length,1]
weighted_sum=tf.multiply(sequences_original,score) # [batch_size,sequence_length,dimension]
weighted_sum=tf.reduce_sum(weighted_sum,axis=1) #[batch_size,dimension]
return weighted_sum
|
[
"def",
"self_attention",
"(",
"self",
",",
"sequences_original",
",",
"scope",
",",
"reuse_flag",
"=",
"False",
")",
":",
"dimension",
"=",
"sequences_original",
".",
"get_shape",
"(",
")",
".",
"as_list",
"(",
")",
"[",
"-",
"1",
"]",
"#0. use one-layer feed forward to transform orginal sequences.",
"sequences",
"=",
"tf",
".",
"layers",
".",
"dense",
"(",
"sequences_original",
",",
"dimension",
",",
"activation",
"=",
"tf",
".",
"nn",
".",
"tanh",
",",
"use_bias",
"=",
"True",
")",
"#1.get weights sequences:[batch_size,sequence_length,dimension]; attention_weight=[dimension]=>after attention, we should get [batch_size,dimension]",
"with",
"tf",
".",
"variable_scope",
"(",
"\"self_attention_\"",
"+",
"str",
"(",
"scope",
")",
",",
"reuse",
"=",
"reuse_flag",
")",
":",
"attention_vector",
"=",
"tf",
".",
"get_variable",
"(",
"\"attention_vector\"",
",",
"[",
"dimension",
"]",
",",
"initializer",
"=",
"self",
".",
"initializer",
")",
"weights",
"=",
"tf",
".",
"reduce_sum",
"(",
"tf",
".",
"multiply",
"(",
"sequences",
",",
"attention_vector",
")",
",",
"axis",
"=",
"-",
"1",
")",
"#[batch_size,sequence_length]",
"#2.get score by normalize each weight",
"score",
"=",
"tf",
".",
"nn",
".",
"softmax",
"(",
"weights",
",",
"axis",
"=",
"-",
"1",
")",
"#[batch_size, sequence_length]",
"#3.get weighted sum. sequences=[batch_size,sequence_length,dimension];score=[batch_size, sequence_length]. after operation, we need to get:[ batch_size,dimension]",
"score",
"=",
"tf",
".",
"expand_dims",
"(",
"score",
",",
"axis",
"=",
"-",
"1",
")",
"#[batch_size,sequence_length,1]",
"weighted_sum",
"=",
"tf",
".",
"multiply",
"(",
"sequences_original",
",",
"score",
")",
"# [batch_size,sequence_length,dimension]",
"weighted_sum",
"=",
"tf",
".",
"reduce_sum",
"(",
"weighted_sum",
",",
"axis",
"=",
"1",
")",
"#[batch_size,dimension]",
"return",
"weighted_sum"
] |
https://github.com/brightmart/nlu_sim/blob/c99b2edad338f3771fb7153898fce84f225cfea1/a1_dual_bilstm_cnn_model.py#L255-L277
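A NumPy sketch of the arithmetic in the record (softmax over the time axis, then a weighted sum); np.tanh stands in for the one-layer dense transform, and the shapes follow the docstring:

import numpy as np

def self_attention(seq, attention_vector):
    # seq: [batch, length, dim]; attention_vector: [dim]
    weights = (np.tanh(seq) * attention_vector).sum(axis=-1)  # [batch, length]
    weights -= weights.max(axis=-1, keepdims=True)            # numerically stable softmax
    score = np.exp(weights)
    score /= score.sum(axis=-1, keepdims=True)                # [batch, length]
    return (seq * score[..., None]).sum(axis=1)               # [batch, dim]

seq = np.random.rand(2, 5, 4)
print(self_attention(seq, np.ones(4)).shape)  # (2, 4)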
|
|
aptonic/dropzone4-actions
|
936ab89868ba8c79094a3577c2055fe376bfc488
|
YouTube Downloader.dzbundle/youtube-dl/youtube_dl/utils.py
|
python
|
smuggle_url
|
(url, data)
|
return url + '#' + sdata
|
Pass additional data in a URL for internal use.
|
Pass additional data in a URL for internal use.
|
[
"Pass",
"additional",
"data",
"in",
"a",
"URL",
"for",
"internal",
"use",
"."
] |
def smuggle_url(url, data):
""" Pass additional data in a URL for internal use. """
url, idata = unsmuggle_url(url, {})
data.update(idata)
sdata = compat_urllib_parse_urlencode(
{'__youtubedl_smuggle': json.dumps(data)})
return url + '#' + sdata
|
[
"def",
"smuggle_url",
"(",
"url",
",",
"data",
")",
":",
"url",
",",
"idata",
"=",
"unsmuggle_url",
"(",
"url",
",",
"{",
"}",
")",
"data",
".",
"update",
"(",
"idata",
")",
"sdata",
"=",
"compat_urllib_parse_urlencode",
"(",
"{",
"'__youtubedl_smuggle'",
":",
"json",
".",
"dumps",
"(",
"data",
")",
"}",
")",
"return",
"url",
"+",
"'#'",
"+",
"sdata"
] |
https://github.com/aptonic/dropzone4-actions/blob/936ab89868ba8c79094a3577c2055fe376bfc488/YouTube Downloader.dzbundle/youtube-dl/youtube_dl/utils.py#L3372-L3379
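A generic round-trip sketch of the fragment-smuggling idea using only the standard library (the '#smuggle=' marker is invented for this sketch; youtube-dl uses its own '__youtubedl_smuggle' key, as above):

import json
from urllib.parse import quote, unquote

def smuggle(url, data):
    return url + "#smuggle=" + quote(json.dumps(data))

def unsmuggle(url):
    if "#smuggle=" not in url:
        return url, {}
    url, _, payload = url.partition("#smuggle=")
    return url, json.loads(unquote(payload))

u = smuggle("https://example.com/v", {"referer": "https://example.org"})
print(unsmuggle(u))  # ('https://example.com/v', {'referer': 'https://example.org'})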
|
|
sfepy/sfepy
|
02ec7bb2ab39ee1dfe1eb4cd509f0ffb7dcc8b25
|
sfepy/discrete/dg/fields.py
|
python
|
DGField.get_region_info
|
(region)
|
return dim, n_cell, n_el_facets
|
Extracts information about region needed in various methods of DGField
Parameters
----------
region : sfepy.discrete.common.region.Region
Returns
-------
dim, n_cell, n_el_facets
|
Extracts information about region needed in various methods of DGField
|
[
"Extracts",
"information",
"about",
"region",
"needed",
"in",
"various",
"methods",
"of",
"DGField"
] |
def get_region_info(region):
"""
Extracts information about region needed in various methods of DGField
Parameters
----------
region : sfepy.discrete.common.region.Region
Returns
-------
dim, n_cell, n_el_facets
"""
if not region.has_cells():
raise ValueError("Region {} has no cells".format(region.name))
n_cell = region.get_n_cells()
dim = region.tdim
gel = get_gel(region)
n_el_facets = dim + 1 if gel.is_simplex else 2 ** dim
return dim, n_cell, n_el_facets
|
[
"def",
"get_region_info",
"(",
"region",
")",
":",
"if",
"not",
"region",
".",
"has_cells",
"(",
")",
":",
"raise",
"ValueError",
"(",
"\"Region {} has no cells\"",
".",
"format",
"(",
"region",
".",
"name",
")",
")",
"n_cell",
"=",
"region",
".",
"get_n_cells",
"(",
")",
"dim",
"=",
"region",
".",
"tdim",
"gel",
"=",
"get_gel",
"(",
"region",
")",
"n_el_facets",
"=",
"dim",
"+",
"1",
"if",
"gel",
".",
"is_simplex",
"else",
"2",
"**",
"dim",
"return",
"dim",
",",
"n_cell",
",",
"n_el_facets"
] |
https://github.com/sfepy/sfepy/blob/02ec7bb2ab39ee1dfe1eb4cd509f0ffb7dcc8b25/sfepy/discrete/dg/fields.py#L661-L680
|
|
the4thdoctor/pg_chameleon
|
9d80212541559c8d0a42b3e7c1b2c67bb7606411
|
pg_chameleon/lib/mysql_lib.py
|
python
|
mysql_source.get_table_list
|
(self)
|
The method pulls the table list from the information_schema.
The list is stored in a dictionary which key is the table's schema.
|
The method pulls the table list from the information_schema.
The list is stored in a dictionary which key is the table's schema.
|
[
"The",
"method",
"pulls",
"the",
"table",
"list",
"from",
"the",
"information_schema",
".",
"The",
"list",
"is",
"stored",
"in",
"a",
"dictionary",
"which",
"key",
"is",
"the",
"table",
"s",
"schema",
"."
] |
def get_table_list(self):
"""
The method pulls the table list from the information_schema.
The list is stored in a dictionary which key is the table's schema.
"""
sql_tables="""
SELECT
table_name as table_name
FROM
information_schema.TABLES
WHERE
table_type='BASE TABLE'
AND table_schema=%s
;
"""
for schema in self.schema_list:
self.cursor_buffered.execute(sql_tables, (schema))
table_list = [table["table_name"] for table in self.cursor_buffered.fetchall()]
try:
limit_tables = self.limit_tables[schema]
if len(limit_tables) > 0:
table_list = [table for table in table_list if table in limit_tables]
except KeyError:
pass
try:
skip_tables = self.skip_tables[schema]
if len(skip_tables) > 0:
table_list = [table for table in table_list if table not in skip_tables]
except KeyError:
pass
self.schema_tables[schema] = table_list
|
[
"def",
"get_table_list",
"(",
"self",
")",
":",
"sql_tables",
"=",
"\"\"\"\n SELECT\n table_name as table_name\n FROM\n information_schema.TABLES\n WHERE\n table_type='BASE TABLE'\n AND table_schema=%s\n ;\n \"\"\"",
"for",
"schema",
"in",
"self",
".",
"schema_list",
":",
"self",
".",
"cursor_buffered",
".",
"execute",
"(",
"sql_tables",
",",
"(",
"schema",
")",
")",
"table_list",
"=",
"[",
"table",
"[",
"\"table_name\"",
"]",
"for",
"table",
"in",
"self",
".",
"cursor_buffered",
".",
"fetchall",
"(",
")",
"]",
"try",
":",
"limit_tables",
"=",
"self",
".",
"limit_tables",
"[",
"schema",
"]",
"if",
"len",
"(",
"limit_tables",
")",
">",
"0",
":",
"table_list",
"=",
"[",
"table",
"for",
"table",
"in",
"table_list",
"if",
"table",
"in",
"limit_tables",
"]",
"except",
"KeyError",
":",
"pass",
"try",
":",
"skip_tables",
"=",
"self",
".",
"skip_tables",
"[",
"schema",
"]",
"if",
"len",
"(",
"skip_tables",
")",
">",
"0",
":",
"table_list",
"=",
"[",
"table",
"for",
"table",
"in",
"table_list",
"if",
"table",
"not",
"in",
"skip_tables",
"]",
"except",
"KeyError",
":",
"pass",
"self",
".",
"schema_tables",
"[",
"schema",
"]",
"=",
"table_list"
] |
https://github.com/the4thdoctor/pg_chameleon/blob/9d80212541559c8d0a42b3e7c1b2c67bb7606411/pg_chameleon/lib/mysql_lib.py#L246-L277
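One DB-API detail worth calling out in the execute() call above: (schema) is just schema with parentheses around it, while (schema,) is the one-element tuple most drivers expect for a %s placeholder. A quick sketch:

schema = "sakila"  # invented schema name
print(type((schema)).__name__)   # str   -- parentheses alone do not make a tuple
print(type((schema,)).__name__)  # tuple -- the trailing comma does
# cursor.execute(sql_tables, (schema,))  # hypothetical driver call with a real tuple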
|
||
nadineproject/nadine
|
c41c8ef7ffe18f1853029c97eecc329039b4af6c
|
doors/core.py
|
python
|
DoorController.add_cardholder
|
(self, cardholder)
|
Add the given cardholder to the door.
|
Add the given cardholder to the door.
|
[
"Add",
"the",
"given",
"cardholder",
"to",
"the",
"door",
"."
] |
def add_cardholder(self, cardholder):
"""Add the given cardholder to the door."""
|
[
"def",
"add_cardholder",
"(",
"self",
",",
"cardholder",
")",
":"
] |
https://github.com/nadineproject/nadine/blob/c41c8ef7ffe18f1853029c97eecc329039b4af6c/doors/core.py#L305-L306
|
||
TheAlgorithms/Python
|
9af2eef9b3761bf51580dedfb6fa7136ca0c5c2c
|
blockchain/diophantine_equation.py
|
python
|
diophantine
|
(a: int, b: int, c: int)
|
return (r * x, r * y)
|
Diophantine Equation : Given integers a,b,c ( at least one of a and b != 0), the
diophantine equation a*x + b*y = c has a solution (where x and y are integers)
iff gcd(a,b) divides c.
GCD ( Greatest Common Divisor ) or HCF ( Highest Common Factor )
>>> diophantine(10,6,14)
(-7.0, 14.0)
>>> diophantine(391,299,-69)
(9.0, -12.0)
But the above equation has one more solution, i.e., x = -4, y = 5.
That's why we need diophantine all solution function.
|
Diophantine Equation : Given integers a,b,c ( at least one of a and b != 0), the
diophantine equation a*x + b*y = c has a solution (where x and y are integers)
iff gcd(a,b) divides c.
|
[
"Diophantine",
"Equation",
":",
"Given",
"integers",
"a",
"b",
"c",
"(",
"at",
"least",
"one",
"of",
"a",
"and",
"b",
"!",
"=",
"0",
")",
"the",
"diophantine",
"equation",
"a",
"*",
"x",
"+",
"b",
"*",
"y",
"=",
"c",
"has",
"a",
"solution",
"(",
"where",
"x",
"and",
"y",
"are",
"integers",
")",
"iff",
"gcd",
"(",
"a",
"b",
")",
"divides",
"c",
"."
] |
def diophantine(a: int, b: int, c: int) -> tuple[float, float]:
"""
Diophantine Equation : Given integers a,b,c ( at least one of a and b != 0), the
diophantine equation a*x + b*y = c has a solution (where x and y are integers)
iff gcd(a,b) divides c.
GCD ( Greatest Common Divisor ) or HCF ( Highest Common Factor )
>>> diophantine(10,6,14)
(-7.0, 14.0)
>>> diophantine(391,299,-69)
(9.0, -12.0)
But the above equation has one more solution, i.e., x = -4, y = 5.
That's why we need diophantine all solution function.
"""
assert (
c % greatest_common_divisor(a, b) == 0
) # greatest_common_divisor(a,b) function implemented below
(d, x, y) = extended_gcd(a, b) # extended_gcd(a,b) function implemented below
r = c / d
return (r * x, r * y)
|
[
"def",
"diophantine",
"(",
"a",
":",
"int",
",",
"b",
":",
"int",
",",
"c",
":",
"int",
")",
"->",
"tuple",
"[",
"float",
",",
"float",
"]",
":",
"assert",
"(",
"c",
"%",
"greatest_common_divisor",
"(",
"a",
",",
"b",
")",
"==",
"0",
")",
"# greatest_common_divisor(a,b) function implemented below",
"(",
"d",
",",
"x",
",",
"y",
")",
"=",
"extended_gcd",
"(",
"a",
",",
"b",
")",
"# extended_gcd(a,b) function implemented below",
"r",
"=",
"c",
"/",
"d",
"return",
"(",
"r",
"*",
"x",
",",
"r",
"*",
"y",
")"
] |
https://github.com/TheAlgorithms/Python/blob/9af2eef9b3761bf51580dedfb6fa7136ca0c5c2c/blockchain/diophantine_equation.py#L4-L28
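A quick check of the docstring examples: a solution (x, y) must satisfy a*x + b*y == c, and gcd(a, b) must divide c for one to exist at all.

from math import gcd

for a, b, c, x, y in [(10, 6, 14, -7.0, 14.0), (391, 299, -69, 9.0, -12.0)]:
    assert c % gcd(a, b) == 0  # solvability condition from the docstring
    assert a * x + b * y == c  # the returned pair really solves the equation
print("both docstring solutions check out")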
|
|
networkx/networkx
|
1620568e36702b1cfeaf1c0277b167b6cb93e48d
|
networkx/generators/small.py
|
python
|
frucht_graph
|
(create_using=None)
|
return G
|
Returns the Frucht Graph.
The Frucht Graph is the smallest cubical graph whose
automorphism group consists only of the identity element.
|
Returns the Frucht Graph.
|
[
"Returns",
"the",
"Frucht",
"Graph",
"."
] |
def frucht_graph(create_using=None):
"""Returns the Frucht Graph.
The Frucht Graph is the smallest cubical graph whose
automorphism group consists only of the identity element.
"""
G = cycle_graph(7, create_using)
G.add_edges_from(
[
[0, 7],
[1, 7],
[2, 8],
[3, 9],
[4, 9],
[5, 10],
[6, 10],
[7, 11],
[8, 11],
[8, 9],
[10, 11],
]
)
G.name = "Frucht Graph"
return G
|
[
"def",
"frucht_graph",
"(",
"create_using",
"=",
"None",
")",
":",
"G",
"=",
"cycle_graph",
"(",
"7",
",",
"create_using",
")",
"G",
".",
"add_edges_from",
"(",
"[",
"[",
"0",
",",
"7",
"]",
",",
"[",
"1",
",",
"7",
"]",
",",
"[",
"2",
",",
"8",
"]",
",",
"[",
"3",
",",
"9",
"]",
",",
"[",
"4",
",",
"9",
"]",
",",
"[",
"5",
",",
"10",
"]",
",",
"[",
"6",
",",
"10",
"]",
",",
"[",
"7",
",",
"11",
"]",
",",
"[",
"8",
",",
"11",
"]",
",",
"[",
"8",
",",
"9",
"]",
",",
"[",
"10",
",",
"11",
"]",
",",
"]",
")",
"G",
".",
"name",
"=",
"\"Frucht Graph\"",
"return",
"G"
] |
https://github.com/networkx/networkx/blob/1620568e36702b1cfeaf1c0277b167b6cb93e48d/networkx/generators/small.py#L267-L292
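A usage sketch: the generator is exposed as networkx.frucht_graph(), and the Frucht graph is cubic on 12 vertices, hence 18 edges.

import networkx as nx

G = nx.frucht_graph()
print(G.number_of_nodes(), G.number_of_edges())  # 12 18
assert all(d == 3 for _, d in G.degree())        # every vertex has degree 3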
|
|
1040003585/WebScrapingWithPython
|
a770fa5b03894076c8c9539b1ffff34424ffc016
|
portia_examle/lib/python2.7/site-packages/pip/_vendor/pyparsing.py
|
python
|
Keyword.setDefaultKeywordChars
|
( chars )
|
Overrides the default Keyword chars
|
Overrides the default Keyword chars
|
[
"Overrides",
"the",
"default",
"Keyword",
"chars"
] |
def setDefaultKeywordChars( chars ):
"""Overrides the default Keyword chars
"""
Keyword.DEFAULT_KEYWORD_CHARS = chars
|
[
"def",
"setDefaultKeywordChars",
"(",
"chars",
")",
":",
"Keyword",
".",
"DEFAULT_KEYWORD_CHARS",
"=",
"chars"
] |
https://github.com/1040003585/WebScrapingWithPython/blob/a770fa5b03894076c8c9539b1ffff34424ffc016/portia_examle/lib/python2.7/site-packages/pip/_vendor/pyparsing.py#L2458-L2461
|
||
google/ci_edit
|
ffaa52473673cc7ec2080bc59996d61414d662c9
|
app/actions.py
|
python
|
Actions.get_bookmark_color
|
(self)
|
return goodColorIndices[self.nextBookmarkColorPos]
|
Returns a new color by cycling through a predefined section of the
color palette.
Args:
None.
Returns:
A color (int) for a new bookmark.
|
Returns a new color by cycling through a predefined section of the
color palette.
|
[
"Returns",
"a",
"new",
"color",
"by",
"cycling",
"through",
"a",
"predefined",
"section",
"of",
"the",
"color",
"palette",
"."
] |
def get_bookmark_color(self):
"""Returns a new color by cycling through a predefined section of the
color palette.
Args:
None.
Returns:
A color (int) for a new bookmark.
"""
if self.program.prefs.startup[u"numColors"] == 8:
goodColorIndices = [1, 2, 3, 4, 5]
else:
goodColorIndices = [97, 98, 113, 117, 127]
self.nextBookmarkColorPos = (self.nextBookmarkColorPos + 1) % len(
goodColorIndices
)
return goodColorIndices[self.nextBookmarkColorPos]
|
[
"def",
"get_bookmark_color",
"(",
"self",
")",
":",
"if",
"self",
".",
"program",
".",
"prefs",
".",
"startup",
"[",
"u\"numColors\"",
"]",
"==",
"8",
":",
"goodColorIndices",
"=",
"[",
"1",
",",
"2",
",",
"3",
",",
"4",
",",
"5",
"]",
"else",
":",
"goodColorIndices",
"=",
"[",
"97",
",",
"98",
",",
"113",
",",
"117",
",",
"127",
"]",
"self",
".",
"nextBookmarkColorPos",
"=",
"(",
"self",
".",
"nextBookmarkColorPos",
"+",
"1",
")",
"%",
"len",
"(",
"goodColorIndices",
")",
"return",
"goodColorIndices",
"[",
"self",
".",
"nextBookmarkColorPos",
"]"
] |
https://github.com/google/ci_edit/blob/ffaa52473673cc7ec2080bc59996d61414d662c9/app/actions.py#L186-L203
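The cycling itself is just modular arithmetic over the palette; a minimal sketch (palette values copied from the >8-color branch above):

palette = [97, 98, 113, 117, 127]
pos = 0
picks = []
for _ in range(7):
    pos = (pos + 1) % len(palette)  # advance and wrap around
    picks.append(palette[pos])
print(picks)  # [98, 113, 117, 127, 97, 98, 113]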
|
|
buke/GreenOdoo
|
3d8c55d426fb41fdb3f2f5a1533cfe05983ba1df
|
runtime/python/lib/python2.7/site-packages/Mako-0.7.3-py2.7.egg/mako/util.py
|
python
|
SetLikeDict.union
|
(self, other)
|
return x
|
produce a 'union' of this dict and another (at the key level).
values in the second dict take precedence over that of the first
|
produce a 'union' of this dict and another (at the key level).
|
[
"produce",
"a",
"union",
"of",
"this",
"dict",
"and",
"another",
"(",
"at",
"the",
"key",
"level",
")",
"."
] |
def union(self, other):
"""produce a 'union' of this dict and another (at the key level).
values in the second dict take precedence over that of the first"""
x = SetLikeDict(**self)
x.update(other)
return x
|
[
"def",
"union",
"(",
"self",
",",
"other",
")",
":",
"x",
"=",
"SetLikeDict",
"(",
"*",
"*",
"self",
")",
"x",
".",
"update",
"(",
"other",
")",
"return",
"x"
] |
https://github.com/buke/GreenOdoo/blob/3d8c55d426fb41fdb3f2f5a1533cfe05983ba1df/runtime/python/lib/python2.7/site-packages/Mako-0.7.3-py2.7.egg/mako/util.py#L173-L179
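A modern-Python equivalent of union(): since 3.9, plain dicts support the same right-hand-side-wins merge with the | operator.

a = {"x": 1, "y": 2}
b = {"y": 20, "z": 3}
print(a | b)       # {'x': 1, 'y': 20, 'z': 3} -- b takes precedence, as documented
print({**a, **b})  # pre-3.9 spelling of the same merge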
|
|
numba/numba
|
bf480b9e0da858a65508c2b17759a72ee6a44c51
|
numba/core/caching.py
|
python
|
_CacheLocator.get_source_stamp
|
(self)
|
Get a timestamp representing the source code's freshness.
Can return any picklable Python object.
|
Get a timestamp representing the source code's freshness.
Can return any picklable Python object.
|
[
"Get",
"a",
"timestamp",
"representing",
"the",
"source",
"code",
"s",
"freshness",
".",
"Can",
"return",
"any",
"picklable",
"Python",
"object",
"."
] |
def get_source_stamp(self):
"""
Get a timestamp representing the source code's freshness.
Can return any picklable Python object.
"""
|
[
"def",
"get_source_stamp",
"(",
"self",
")",
":"
] |
https://github.com/numba/numba/blob/bf480b9e0da858a65508c2b17759a72ee6a44c51/numba/core/caching.py#L129-L133
|
||
apple/ccs-calendarserver
|
13c706b985fb728b9aab42dc0fef85aae21921c3
|
twistedcaldav/ical.py
|
python
|
Property.removeParameter
|
(self, paramname)
|
[] |
def removeParameter(self, paramname):
self._pycalendar.removeParameters(paramname)
self._markAsDirty()
|
[
"def",
"removeParameter",
"(",
"self",
",",
"paramname",
")",
":",
"self",
".",
"_pycalendar",
".",
"removeParameters",
"(",
"paramname",
")",
"self",
".",
"_markAsDirty",
"(",
")"
] |
https://github.com/apple/ccs-calendarserver/blob/13c706b985fb728b9aab42dc0fef85aae21921c3/twistedcaldav/ical.py#L310-L312
|
||||
tobegit3hub/deep_image_model
|
8a53edecd9e00678b278bb10f6fb4bdb1e4ee25e
|
java_predict_client/src/main/proto/tensorflow/contrib/learn/python/learn/estimators/linear.py
|
python
|
LinearRegressor.fit
|
(self, x=None, y=None, input_fn=None, steps=None, batch_size=None,
monitors=None, max_steps=None)
|
return result
|
See trainable.Trainable.
|
See trainable.Trainable.
|
[
"See",
"trainable",
".",
"Trainable",
"."
] |
def fit(self, x=None, y=None, input_fn=None, steps=None, batch_size=None,
monitors=None, max_steps=None):
"""See trainable.Trainable."""
# TODO(roumposg): Remove when deprecated monitors are removed.
if monitors is None:
monitors = []
deprecated_monitors = [
m for m in monitors
if not isinstance(m, session_run_hook.SessionRunHook)
]
for monitor in deprecated_monitors:
monitor.set_estimator(self)
monitor._lock_estimator() # pylint: disable=protected-access
if self._additional_run_hook:
monitors.append(self._additional_run_hook)
result = self._estimator.fit(x=x, y=y, input_fn=input_fn, steps=steps,
batch_size=batch_size, monitors=monitors,
max_steps=max_steps)
for monitor in deprecated_monitors:
monitor._unlock_estimator() # pylint: disable=protected-access
return result
|
[
"def",
"fit",
"(",
"self",
",",
"x",
"=",
"None",
",",
"y",
"=",
"None",
",",
"input_fn",
"=",
"None",
",",
"steps",
"=",
"None",
",",
"batch_size",
"=",
"None",
",",
"monitors",
"=",
"None",
",",
"max_steps",
"=",
"None",
")",
":",
"# TODO(roumposg): Remove when deprecated monitors are removed.",
"if",
"monitors",
"is",
"None",
":",
"monitors",
"=",
"[",
"]",
"deprecated_monitors",
"=",
"[",
"m",
"for",
"m",
"in",
"monitors",
"if",
"not",
"isinstance",
"(",
"m",
",",
"session_run_hook",
".",
"SessionRunHook",
")",
"]",
"for",
"monitor",
"in",
"deprecated_monitors",
":",
"monitor",
".",
"set_estimator",
"(",
"self",
")",
"monitor",
".",
"_lock_estimator",
"(",
")",
"# pylint: disable=protected-access",
"if",
"self",
".",
"_additional_run_hook",
":",
"monitors",
".",
"append",
"(",
"self",
".",
"_additional_run_hook",
")",
"result",
"=",
"self",
".",
"_estimator",
".",
"fit",
"(",
"x",
"=",
"x",
",",
"y",
"=",
"y",
",",
"input_fn",
"=",
"input_fn",
",",
"steps",
"=",
"steps",
",",
"batch_size",
"=",
"batch_size",
",",
"monitors",
"=",
"monitors",
",",
"max_steps",
"=",
"max_steps",
")",
"for",
"monitor",
"in",
"deprecated_monitors",
":",
"monitor",
".",
"_unlock_estimator",
"(",
")",
"# pylint: disable=protected-access",
"return",
"result"
] |
https://github.com/tobegit3hub/deep_image_model/blob/8a53edecd9e00678b278bb10f6fb4bdb1e4ee25e/java_predict_client/src/main/proto/tensorflow/contrib/learn/python/learn/estimators/linear.py#L697-L720
|
|
cloudera/impyla
|
0c736af4cad2bade9b8e313badc08ec50e81c948
|
impala/_thrift_gen/hive_metastore/ThriftHiveMetastore.py
|
python
|
Iface.create_type
|
(self, type)
|
Parameters:
- type
|
Parameters:
- type
|
[
"Parameters",
":",
"-",
"type"
] |
def create_type(self, type):
"""
Parameters:
- type
"""
pass
|
[
"def",
"create_type",
"(",
"self",
",",
"type",
")",
":",
"pass"
] |
https://github.com/cloudera/impyla/blob/0c736af4cad2bade9b8e313badc08ec50e81c948/impala/_thrift_gen/hive_metastore/ThriftHiveMetastore.py#L88-L93
|
||
volatilityfoundation/volatility3
|
168b0d0b053ab97a7cb096ef2048795cc54d885f
|
volatility3/framework/symbols/windows/pdbutil.py
|
python
|
PDBUtility.symbol_table_from_pdb
|
(cls, context: interfaces.context.ContextInterface, config_path: str, layer_name: str,
pdb_name: str, module_offset: int, module_size: int)
|
return cls.load_windows_symbol_table(context,
guid["GUID"],
guid["age"],
guid["pdb_name"],
"volatility3.framework.symbols.intermed.IntermediateSymbolTable",
config_path = config_path)
|
Creates symbol table for a module in the specified layer_name.
Searches the memory section of the loaded module for its PDB GUID
and loads the associated symbol table into the symbol space.
Args:
context: The context to retrieve required elements (layers, symbol tables) from
config_path: The config path where to find symbol files
layer_name: The name of the layer on which to operate
module_offset: This memory dump's module image offset
module_size: The size of the module for this dump
Returns:
The name of the constructed and loaded symbol table
|
Creates symbol table for a module in the specified layer_name.
|
[
"Creates",
"symbol",
"table",
"for",
"a",
"module",
"in",
"the",
"specified",
"layer_name",
"."
] |
def symbol_table_from_pdb(cls, context: interfaces.context.ContextInterface, config_path: str, layer_name: str,
pdb_name: str, module_offset: int, module_size: int) -> str:
"""Creates symbol table for a module in the specified layer_name.
Searches the memory section of the loaded module for its PDB GUID
and loads the associated symbol table into the symbol space.
Args:
context: The context to retrieve required elements (layers, symbol tables) from
config_path: The config path where to find symbol files
layer_name: The name of the layer on which to operate
module_offset: This memory dump's module image offset
module_size: The size of the module for this dump
Returns:
The name of the constructed and loaded symbol table
"""
guids = list(
cls.pdbname_scan(context,
layer_name,
context.layers[layer_name].page_size, [bytes(pdb_name, 'latin-1')],
start = module_offset,
end = module_offset + module_size))
if not guids:
raise exceptions.VolatilityException(
f"Did not find GUID of {pdb_name} in module @ 0x{module_offset:x}!")
guid = guids[0]
vollog.debug(f"Found {guid['pdb_name']}: {guid['GUID']}-{guid['age']}")
return cls.load_windows_symbol_table(context,
guid["GUID"],
guid["age"],
guid["pdb_name"],
"volatility3.framework.symbols.intermed.IntermediateSymbolTable",
config_path = config_path)
|
[
"def",
"symbol_table_from_pdb",
"(",
"cls",
",",
"context",
":",
"interfaces",
".",
"context",
".",
"ContextInterface",
",",
"config_path",
":",
"str",
",",
"layer_name",
":",
"str",
",",
"pdb_name",
":",
"str",
",",
"module_offset",
":",
"int",
",",
"module_size",
":",
"int",
")",
"->",
"str",
":",
"guids",
"=",
"list",
"(",
"cls",
".",
"pdbname_scan",
"(",
"context",
",",
"layer_name",
",",
"context",
".",
"layers",
"[",
"layer_name",
"]",
".",
"page_size",
",",
"[",
"bytes",
"(",
"pdb_name",
",",
"'latin-1'",
")",
"]",
",",
"start",
"=",
"module_offset",
",",
"end",
"=",
"module_offset",
"+",
"module_size",
")",
")",
"if",
"not",
"guids",
":",
"raise",
"exceptions",
".",
"VolatilityException",
"(",
"f\"Did not find GUID of {pdb_name} in module @ 0x{module_offset:x}!\"",
")",
"guid",
"=",
"guids",
"[",
"0",
"]",
"vollog",
".",
"debug",
"(",
"f\"Found {guid['pdb_name']}: {guid['GUID']}-{guid['age']}\"",
")",
"return",
"cls",
".",
"load_windows_symbol_table",
"(",
"context",
",",
"guid",
"[",
"\"GUID\"",
"]",
",",
"guid",
"[",
"\"age\"",
"]",
",",
"guid",
"[",
"\"pdb_name\"",
"]",
",",
"\"volatility3.framework.symbols.intermed.IntermediateSymbolTable\"",
",",
"config_path",
"=",
"config_path",
")"
] |
https://github.com/volatilityfoundation/volatility3/blob/168b0d0b053ab97a7cb096ef2048795cc54d885f/volatility3/framework/symbols/windows/pdbutil.py#L287-L325
|
|
qutip/qutip
|
52d01da181a21b810c3407812c670f35fdc647e8
|
qutip/qip/circuit.py
|
python
|
QubitCircuit._to_qasm
|
(self, qasm_out)
|
Pipe output of circuit object to QasmOutput object.
Parameters
----------
qasm_out: QasmOutput
object to store QASM output.
|
Pipe output of circuit object to QasmOutput object.
|
[
"Pipe",
"output",
"of",
"circuit",
"object",
"to",
"QasmOutput",
"object",
"."
] |
def _to_qasm(self, qasm_out):
"""
Pipe output of circuit object to QasmOutput object.
Parameters
----------
qasm_out: QasmOutput
object to store QASM output.
"""
qasm_out.output("qreg q[{}];".format(self.N))
if self.num_cbits:
qasm_out.output("creg c[{}];".format(self.num_cbits))
qasm_out.output(n=1)
for op in self.gates:
if ((not isinstance(op, Measurement))
and not qasm_out.is_defined(op.name)):
qasm_out._qasm_defns(op)
for op in self.gates:
op._to_qasm(qasm_out)
|
[
"def",
"_to_qasm",
"(",
"self",
",",
"qasm_out",
")",
":",
"qasm_out",
".",
"output",
"(",
"\"qreg q[{}];\"",
".",
"format",
"(",
"self",
".",
"N",
")",
")",
"if",
"self",
".",
"num_cbits",
":",
"qasm_out",
".",
"output",
"(",
"\"creg c[{}];\"",
".",
"format",
"(",
"self",
".",
"num_cbits",
")",
")",
"qasm_out",
".",
"output",
"(",
"n",
"=",
"1",
")",
"for",
"op",
"in",
"self",
".",
"gates",
":",
"if",
"(",
"(",
"not",
"isinstance",
"(",
"op",
",",
"Measurement",
")",
")",
"and",
"not",
"qasm_out",
".",
"is_defined",
"(",
"op",
".",
"name",
")",
")",
":",
"qasm_out",
".",
"_qasm_defns",
"(",
"op",
")",
"for",
"op",
"in",
"self",
".",
"gates",
":",
"op",
".",
"_to_qasm",
"(",
"qasm_out",
")"
] |
https://github.com/qutip/qutip/blob/52d01da181a21b810c3407812c670f35fdc647e8/qutip/qip/circuit.py#L1795-L1816
|
||
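_to_qasm is internal; in the same qutip generation the public route is the qasm module, which constructs the QasmOutput and calls _to_qasm under the hood. A hedged usage sketch (assumes qutip.qip.qasm.circuit_to_qasm_str is available, as in qutip 4.5+):

from qutip.qip.circuit import QubitCircuit
from qutip.qip.qasm import circuit_to_qasm_str

qc = QubitCircuit(2, num_cbits=1)
qc.add_gate("SNOT", targets=0)             # Hadamard, in qutip's gate naming
qc.add_gate("CNOT", controls=0, targets=1)
print(circuit_to_qasm_str(qc))             # qreg q[2]; creg c[1]; then the gate list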
pyg-team/pytorch_geometric
|
b920e9a3a64e22c8356be55301c88444ff051cae
|
torch_geometric/nn/conv/gravnet_conv.py
|
python
|
GravNetConv.__repr__
|
(self)
|
return (f'{self.__class__.__name__}({self.in_channels}, '
f'{self.out_channels}, k={self.k})')
|
[] |
def __repr__(self) -> str:
return (f'{self.__class__.__name__}({self.in_channels}, '
f'{self.out_channels}, k={self.k})')
|
[
"def",
"__repr__",
"(",
"self",
")",
"->",
"str",
":",
"return",
"(",
"f'{self.__class__.__name__}({self.in_channels}, '",
"f'{self.out_channels}, k={self.k})'",
")"
] |
https://github.com/pyg-team/pytorch_geometric/blob/b920e9a3a64e22c8356be55301c88444ff051cae/torch_geometric/nn/conv/gravnet_conv.py#L130-L132
|
|||
GoogleCloudPlatform/gsutil
|
5be882803e76608e2fd29cf8c504ccd1fe0a7746
|
gslib/resumable_streaming_upload.py
|
python
|
ResumableStreamingJsonUploadWrapper.seekable
|
(self)
|
return True
|
Returns true since limited seek support exists.
|
Returns true since limited seek support exists.
|
[
"Returns",
"true",
"since",
"limited",
"seek",
"support",
"exists",
"."
] |
def seekable(self): # pylint: disable=invalid-name
"""Returns true since limited seek support exists."""
return True
|
[
"def",
"seekable",
"(",
"self",
")",
":",
"# pylint: disable=invalid-name",
"return",
"True"
] |
https://github.com/GoogleCloudPlatform/gsutil/blob/5be882803e76608e2fd29cf8c504ccd1fe0a7746/gslib/resumable_streaming_upload.py#L177-L179
|
|
openstack/neutron
|
fb229fb527ac8b95526412f7762d90826ac41428
|
neutron/agent/common/ovs_lib.py
|
python
|
OVSBridge.get_local_port_mac
|
(self)
|
Retrieve the mac of the bridge's local port.
|
Retrieve the mac of the bridge's local port.
|
[
"Retrieve",
"the",
"mac",
"of",
"the",
"bridge",
"s",
"local",
"port",
"."
] |
def get_local_port_mac(self):
"""Retrieve the mac of the bridge's local port."""
address = ip_lib.IPDevice(self.br_name).link.address
if address:
return address
else:
msg = _('Unable to determine mac address for %s') % self.br_name
raise Exception(msg)
|
[
"def",
"get_local_port_mac",
"(",
"self",
")",
":",
"address",
"=",
"ip_lib",
".",
"IPDevice",
"(",
"self",
".",
"br_name",
")",
".",
"link",
".",
"address",
"if",
"address",
":",
"return",
"address",
"else",
":",
"msg",
"=",
"_",
"(",
"'Unable to determine mac address for %s'",
")",
"%",
"self",
".",
"br_name",
"raise",
"Exception",
"(",
"msg",
")"
] |
https://github.com/openstack/neutron/blob/fb229fb527ac8b95526412f7762d90826ac41428/neutron/agent/common/ovs_lib.py#L720-L727
|
||
invesalius/invesalius3
|
0616d3e73bfe0baf7525877dbf6acab697395eb9
|
invesalius/gui/task_generic.py
|
python
|
InnerTaskPanel.__bind_wx_events
|
(self)
|
Bind wx general events
|
Bind wx general events
|
[
"Bind",
"wx",
"general",
"events"
] |
def __bind_wx_events(self):
"""
Bind wx general events
"""
# Example: self.Bind(wx.EVT_BUTTON, self.OnButton)
self.link_test.Bind(hl.EVT_HYPERLINK_LEFT, self.OnTest)
|
[
"def",
"__bind_wx_events",
"(",
"self",
")",
":",
"# Example: self.Bind(wx.EVT_BUTTON, self.OnButton)",
"self",
".",
"link_test",
".",
"Bind",
"(",
"hl",
".",
"EVT_HYPERLINK_LEFT",
",",
"self",
".",
"OnTest",
")"
] |
https://github.com/invesalius/invesalius3/blob/0616d3e73bfe0baf7525877dbf6acab697395eb9/invesalius/gui/task_generic.py#L85-L90
|
||
oilshell/oil
|
94388e7d44a9ad879b12615f6203b38596b5a2d3
|
osh/builtin_misc.py
|
python
|
Help.__init__
|
(self, loader, errfmt)
|
[] |
def __init__(self, loader, errfmt):
# type: (_ResourceLoader, ErrorFormatter) -> None
self.loader = loader
self.errfmt = errfmt
|
[
"def",
"__init__",
"(",
"self",
",",
"loader",
",",
"errfmt",
")",
":",
"# type: (_ResourceLoader, ErrorFormatter) -> None",
"self",
".",
"loader",
"=",
"loader",
"self",
".",
"errfmt",
"=",
"errfmt"
] |
https://github.com/oilshell/oil/blob/94388e7d44a9ad879b12615f6203b38596b5a2d3/osh/builtin_misc.py#L763-L766
|
||||
jgagneastro/coffeegrindsize
|
22661ebd21831dba4cf32bfc6ba59fe3d49f879c
|
App/venv/lib/python3.7/site-packages/pip/_vendor/webencodings/__init__.py
|
python
|
encode
|
(input, encoding=UTF8, errors='strict')
|
return _get_encoding(encoding).codec_info.encode(input, errors)[0]
|
Encode a single string.
:param input: A Unicode string.
:param encoding: An :class:`Encoding` object or a label string.
:param errors: Type of error handling. See :func:`codecs.register`.
:raises: :exc:`~exceptions.LookupError` for an unknown encoding label.
:return: A byte string.
|
Encode a single string.
|
[
"Encode",
"a",
"single",
"string",
"."
] |
def encode(input, encoding=UTF8, errors='strict'):
"""
Encode a single string.
:param input: A Unicode string.
:param encoding: An :class:`Encoding` object or a label string.
:param errors: Type of error handling. See :func:`codecs.register`.
:raises: :exc:`~exceptions.LookupError` for an unknown encoding label.
:return: A byte string.
"""
return _get_encoding(encoding).codec_info.encode(input, errors)[0]
|
[
"def",
"encode",
"(",
"input",
",",
"encoding",
"=",
"UTF8",
",",
"errors",
"=",
"'strict'",
")",
":",
"return",
"_get_encoding",
"(",
"encoding",
")",
".",
"codec_info",
".",
"encode",
"(",
"input",
",",
"errors",
")",
"[",
"0",
"]"
] |
https://github.com/jgagneastro/coffeegrindsize/blob/22661ebd21831dba4cf32bfc6ba59fe3d49f879c/App/venv/lib/python3.7/site-packages/pip/_vendor/webencodings/__init__.py#L172-L183
|
|
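A quick runnable check of the encode function above (any environment where the webencodings package is importable works; the copy shown here is pip's vendored one):

from webencodings import encode

print(encode(u'caf\u00e9'))            # b'caf\xc3\xa9' -- UTF-8 is the default
print(encode(u'caf\u00e9', 'latin1'))  # b'caf\xe9' (WHATWG maps latin1 to windows-1252)
# An unknown label raises LookupError, as the docstring promises.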
etetoolkit/ete
|
2b207357dc2a40ccad7bfd8f54964472c72e4726
|
ete3/phyloxml/_phyloxml.py
|
python
|
Phylogeny.get_id
|
(self)
|
return self.id
|
[] |
def get_id(self): return self.id
|
[
"def",
"get_id",
"(",
"self",
")",
":",
"return",
"self",
".",
"id"
] |
https://github.com/etetoolkit/ete/blob/2b207357dc2a40ccad7bfd8f54964472c72e4726/ete3/phyloxml/_phyloxml.py#L523-L523
|
|||
buke/GreenOdoo
|
3d8c55d426fb41fdb3f2f5a1533cfe05983ba1df
|
runtime/python/lib/python2.7/rfc822.py
|
python
|
Message.getdate
|
(self, name)
|
return parsedate(data)
|
Retrieve a date field from a header.
Retrieves a date field from the named header, returning a tuple
compatible with time.mktime().
|
Retrieve a date field from a header.
|
[
"Retrieve",
"a",
"date",
"field",
"from",
"a",
"header",
"."
] |
def getdate(self, name):
"""Retrieve a date field from a header.
Retrieves a date field from the named header, returning a tuple
compatible with time.mktime().
"""
try:
data = self[name]
except KeyError:
return None
return parsedate(data)
|
[
"def",
"getdate",
"(",
"self",
",",
"name",
")",
":",
"try",
":",
"data",
"=",
"self",
"[",
"name",
"]",
"except",
"KeyError",
":",
"return",
"None",
"return",
"parsedate",
"(",
"data",
")"
] |
https://github.com/buke/GreenOdoo/blob/3d8c55d426fb41fdb3f2f5a1533cfe05983ba1df/runtime/python/lib/python2.7/rfc822.py#L355-L365
|
|
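rfc822 exists only on Python 2; the same lookup-then-parse pattern on Python 3 goes through email.utils (a close equivalent of getdate, not a drop-in replacement):

from email.parser import Parser
from email.utils import parsedate

msg = Parser().parsestr("Date: Tue, 01 Jan 2019 10:00:00 +0000\n\nbody")
data = msg["Date"]
# parsedate returns a 9-tuple compatible with time.mktime(), or None.
print(parsedate(data) if data is not None else None)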
missionpinball/mpf
|
8e6b74cff4ba06d2fec9445742559c1068b88582
|
mpf/core/config_loader.py
|
python
|
MpfConfig.get_show_config
|
(self, show_name)
|
Return a show.
|
Return a show.
|
[
"Return",
"a",
"show",
"."
] |
def get_show_config(self, show_name):
"""Return a show."""
try:
return self._show_config[show_name]
except KeyError:
raise AssertionError("No config found for show '{}'.".format(show_name))
|
[
"def",
"get_show_config",
"(",
"self",
",",
"show_name",
")",
":",
"try",
":",
"return",
"self",
".",
"_show_config",
"[",
"show_name",
"]",
"except",
"KeyError",
":",
"raise",
"AssertionError",
"(",
"\"No config found for show '{}'.\"",
".",
"format",
"(",
"show_name",
")",
")"
] |
https://github.com/missionpinball/mpf/blob/8e6b74cff4ba06d2fec9445742559c1068b88582/mpf/core/config_loader.py#L68-L73
|
||
Miserlou/Zappa
|
5a11c17f5ecf0568bdb73b4baf6fb08ff0184f39
|
zappa/cli.py
|
python
|
ZappaCLI.format_invoke_command
|
(self, string)
|
return formated_response
|
Formats correctly the string output from the invoke() method,
replacing line breaks and tabs when necessary.
|
Formats correctly the string output from the invoke() method,
replacing line breaks and tabs when necessary.
|
[
"Formats",
"correctly",
"the",
"string",
"output",
"from",
"the",
"invoke",
"()",
"method",
"replacing",
"line",
"breaks",
"and",
"tabs",
"when",
"necessary",
"."
] |
def format_invoke_command(self, string):
"""
Formats correctly the string output from the invoke() method,
replacing line breaks and tabs when necessary.
"""
string = string.replace('\\n', '\n')
formated_response = ''
for line in string.splitlines():
if line.startswith('REPORT'):
line = line.replace('\t', '\n')
if line.startswith('[DEBUG]'):
line = line.replace('\t', ' ')
formated_response += line + '\n'
formated_response = formated_response.replace('\n\n', '\n')
return formated_response
|
[
"def",
"format_invoke_command",
"(",
"self",
",",
"string",
")",
":",
"string",
"=",
"string",
".",
"replace",
"(",
"'\\\\n'",
",",
"'\\n'",
")",
"formated_response",
"=",
"''",
"for",
"line",
"in",
"string",
".",
"splitlines",
"(",
")",
":",
"if",
"line",
".",
"startswith",
"(",
"'REPORT'",
")",
":",
"line",
"=",
"line",
".",
"replace",
"(",
"'\\t'",
",",
"'\\n'",
")",
"if",
"line",
".",
"startswith",
"(",
"'[DEBUG]'",
")",
":",
"line",
"=",
"line",
".",
"replace",
"(",
"'\\t'",
",",
"' '",
")",
"formated_response",
"+=",
"line",
"+",
"'\\n'",
"formated_response",
"=",
"formated_response",
".",
"replace",
"(",
"'\\n\\n'",
",",
"'\\n'",
")",
"return",
"formated_response"
] |
https://github.com/Miserlou/Zappa/blob/5a11c17f5ecf0568bdb73b4baf6fb08ff0184f39/zappa/cli.py#L1311-L1328
|
|
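format_invoke_command touches no instance state, so it can be exercised standalone on a typical Lambda log tail (the sample string is illustrative, not captured Zappa output):

def format_invoke_command(string):
    string = string.replace('\\n', '\n')
    formated_response = ''
    for line in string.splitlines():
        if line.startswith('REPORT'):
            line = line.replace('\t', '\n')   # break the REPORT summary into lines
        if line.startswith('[DEBUG]'):
            line = line.replace('\t', ' ')    # keep debug output on one line
        formated_response += line + '\n'
    return formated_response.replace('\n\n', '\n')

raw = 'START RequestId: 42\\nREPORT RequestId: 42\tDuration: 12.3 ms\tBilled Duration: 100 ms\\n'
print(format_invoke_command(raw))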
edwardlib/observations
|
2c8b1ac31025938cb17762e540f2f592e302d5de
|
observations/r/nsw74psid3.py
|
python
|
nsw74psid3
|
(path)
|
return x_train, metadata
|
Labour Training Evaluation Data
These data are pertinent to an investigation of the way that earnings
changed, between 1974-1975 and 1978, in the absence of training. The
data frame combines data for the experimental treatment group (NSW, 185
observations), using as control data results from the PSID (Panel Study
of Income Dynamics) study (128 observations). The latter were chosen to
mimic the characteristics of the NSW training and control groups. These
are a subset of the `nsw74psid1` data.
This data frame contains the following columns:
trt
a numeric vector identifying the study in which the subjects were
enrolled (0 = PSID, 1 = NSW)
age
age (in years)
educ
years of education
black
(0 = not black, 1 = black)
hisp
(0 = not hispanic, 1 = hispanic)
marr
(0 = not married, 1 = married)
nodeg
(0 = completed high school, 1 = dropout)
re74
real earnings in 1974
re75
real earnings in 1975
re78
real earnings in 1978
http://www.columbia.edu/~rd247/nswdata.html
Args:
path: str.
Path to directory which either stores file or otherwise file will
be downloaded and extracted there.
Filename is `nsw74psid3.csv`.
Returns:
Tuple of np.ndarray `x_train` with 313 rows and 10 columns and
dictionary `metadata` of column headers (feature names).
|
Labour Training Evaluation Data
|
[
"Labour",
"Training",
"Evaluation",
"Data"
] |
def nsw74psid3(path):
"""Labour Training Evaluation Data
These data are pertinent to an investigation of the way that earnings
changed, between 1974-1975 and 1978, in the absence of training. The
data frame combines data for the experimental treatment group (NSW, 185
observations), using as control data results from the PSID (Panel Study
of Income Dynamics) study (128 observations). The latter were chosen to
mimic the characteristics of the NSW training and control groups. These
are a subset of the `nsw74psid1` data.
This data frame contains the following columns:
trt
a numeric vector identifying the study in which the subjects were
enrolled (0 = PSID, 1 = NSW)
age
age (in years)
educ
years of education
black
(0 = not black, 1 = black)
hisp
(0 = not hispanic, 1 = hispanic)
marr
(0 = not married, 1 = married)
nodeg
(0 = completed high school, 1 = dropout)
re74
real earnings in 1974
re75
real earnings in 1975
re78
real earnings in 1978
http://www.columbia.edu/~rd247/nswdata.html
Args:
path: str.
Path to directory which either stores file or otherwise file will
be downloaded and extracted there.
Filename is `nsw74psid3.csv`.
Returns:
Tuple of np.ndarray `x_train` with 313 rows and 10 columns and
dictionary `metadata` of column headers (feature names).
"""
import pandas as pd
path = os.path.expanduser(path)
filename = 'nsw74psid3.csv'
if not os.path.exists(os.path.join(path, filename)):
url = 'http://dustintran.com/data/r/DAAG/nsw74psid3.csv'
maybe_download_and_extract(path, url,
save_file_name='nsw74psid3.csv',
resume=False)
data = pd.read_csv(os.path.join(path, filename), index_col=0,
parse_dates=True)
x_train = data.values
metadata = {'columns': data.columns}
return x_train, metadata
|
[
"def",
"nsw74psid3",
"(",
"path",
")",
":",
"import",
"pandas",
"as",
"pd",
"path",
"=",
"os",
".",
"path",
".",
"expanduser",
"(",
"path",
")",
"filename",
"=",
"'nsw74psid3.csv'",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"filename",
")",
")",
":",
"url",
"=",
"'http://dustintran.com/data/r/DAAG/nsw74psid3.csv'",
"maybe_download_and_extract",
"(",
"path",
",",
"url",
",",
"save_file_name",
"=",
"'nsw74psid3.csv'",
",",
"resume",
"=",
"False",
")",
"data",
"=",
"pd",
".",
"read_csv",
"(",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"filename",
")",
",",
"index_col",
"=",
"0",
",",
"parse_dates",
"=",
"True",
")",
"x_train",
"=",
"data",
".",
"values",
"metadata",
"=",
"{",
"'columns'",
":",
"data",
".",
"columns",
"}",
"return",
"x_train",
",",
"metadata"
] |
https://github.com/edwardlib/observations/blob/2c8b1ac31025938cb17762e540f2f592e302d5de/observations/r/nsw74psid3.py#L14-L85
|
|
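Typical usage of nsw74psid3, hedged (requires the observations package plus pandas, and downloads the CSV on first call):

from observations.r.nsw74psid3 import nsw74psid3

x_train, metadata = nsw74psid3('~/data')
print(x_train.shape)               # (313, 10)
print(list(metadata['columns']))   # ['trt', 'age', 'educ', ..., 're78']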
JinpengLI/deep_ocr
|
450148c0c51b3565a96ac2f3c94ee33022e55307
|
deep_ocr/ocrolib/lstm.py
|
python
|
LSTM1
|
(Ni,Ns,No)
|
return stacked
|
An LSTM layer with a `Logreg` layer for the output.
|
An LSTM layer with a `Logreg` layer for the output.
|
[
"An",
"LSTM",
"layer",
"with",
"a",
"Logreg",
"layer",
"for",
"the",
"output",
"."
] |
def LSTM1(Ni,Ns,No):
"""An LSTM layer with a `Logreg` layer for the output."""
lstm = LSTM(Ni,Ns)
if No==1:
logreg = Logreg(Ns,No)
else:
logreg = Softmax(Ns,No)
stacked = Stacked([lstm,logreg])
return stacked
|
[
"def",
"LSTM1",
"(",
"Ni",
",",
"Ns",
",",
"No",
")",
":",
"lstm",
"=",
"LSTM",
"(",
"Ni",
",",
"Ns",
")",
"if",
"No",
"==",
"1",
":",
"logreg",
"=",
"Logreg",
"(",
"Ns",
",",
"No",
")",
"else",
":",
"logreg",
"=",
"Softmax",
"(",
"Ns",
",",
"No",
")",
"stacked",
"=",
"Stacked",
"(",
"[",
"lstm",
",",
"logreg",
"]",
")",
"return",
"stacked"
] |
https://github.com/JinpengLI/deep_ocr/blob/450148c0c51b3565a96ac2f3c94ee33022e55307/deep_ocr/ocrolib/lstm.py#L702-L710
|
|
karpathy/ulogme
|
416163e9c2399cc0f2f271630ccc6e15fe63a9fd
|
osx/dist/ulogme_osx.app/Contents/Resources/lib/python2.7/email/parser.py
|
python
|
Parser.__init__
|
(self, *args, **kws)
|
Parser of RFC 2822 and MIME email messages.
Creates an in-memory object tree representing the email message, which
can then be manipulated and turned over to a Generator to return the
textual representation of the message.
The string must be formatted as a block of RFC 2822 headers and header
continuation lines, optionally preceded by a `Unix-from' header. The
header block is terminated either by the end of the string or by a
blank line.
_class is the class to instantiate for new message objects when they
must be created. This class must have a constructor that can take
zero arguments. Default is Message.Message.
|
Parser of RFC 2822 and MIME email messages.
|
[
"Parser",
"of",
"RFC",
"2822",
"and",
"MIME",
"email",
"messages",
"."
] |
def __init__(self, *args, **kws):
"""Parser of RFC 2822 and MIME email messages.
Creates an in-memory object tree representing the email message, which
can then be manipulated and turned over to a Generator to return the
textual representation of the message.
The string must be formatted as a block of RFC 2822 headers and header
continuation lines, optionally preceded by a `Unix-from' header. The
header block is terminated either by the end of the string or by a
blank line.
_class is the class to instantiate for new message objects when they
must be created. This class must have a constructor that can take
zero arguments. Default is Message.Message.
"""
if len(args) >= 1:
if '_class' in kws:
raise TypeError("Multiple values for keyword arg '_class'")
kws['_class'] = args[0]
if len(args) == 2:
if 'strict' in kws:
raise TypeError("Multiple values for keyword arg 'strict'")
kws['strict'] = args[1]
if len(args) > 2:
raise TypeError('Too many arguments')
if '_class' in kws:
self._class = kws['_class']
del kws['_class']
else:
self._class = Message
if 'strict' in kws:
warnings.warn("'strict' argument is deprecated (and ignored)",
DeprecationWarning, 2)
del kws['strict']
if kws:
raise TypeError('Unexpected keyword arguments')
|
[
"def",
"__init__",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kws",
")",
":",
"if",
"len",
"(",
"args",
")",
">=",
"1",
":",
"if",
"'_class'",
"in",
"kws",
":",
"raise",
"TypeError",
"(",
"\"Multiple values for keyword arg '_class'\"",
")",
"kws",
"[",
"'_class'",
"]",
"=",
"args",
"[",
"0",
"]",
"if",
"len",
"(",
"args",
")",
"==",
"2",
":",
"if",
"'strict'",
"in",
"kws",
":",
"raise",
"TypeError",
"(",
"\"Multiple values for keyword arg 'strict'\"",
")",
"kws",
"[",
"'strict'",
"]",
"=",
"args",
"[",
"1",
"]",
"if",
"len",
"(",
"args",
")",
">",
"2",
":",
"raise",
"TypeError",
"(",
"'Too many arguments'",
")",
"if",
"'_class'",
"in",
"kws",
":",
"self",
".",
"_class",
"=",
"kws",
"[",
"'_class'",
"]",
"del",
"kws",
"[",
"'_class'",
"]",
"else",
":",
"self",
".",
"_class",
"=",
"Message",
"if",
"'strict'",
"in",
"kws",
":",
"warnings",
".",
"warn",
"(",
"\"'strict' argument is deprecated (and ignored)\"",
",",
"DeprecationWarning",
",",
"2",
")",
"del",
"kws",
"[",
"'strict'",
"]",
"if",
"kws",
":",
"raise",
"TypeError",
"(",
"'Unexpected keyword arguments'",
")"
] |
https://github.com/karpathy/ulogme/blob/416163e9c2399cc0f2f271630ccc6e15fe63a9fd/osx/dist/ulogme_osx.app/Contents/Resources/lib/python2.7/email/parser.py#L18-L54
|
||
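Typical use of this Parser; the parsestr entry point is unchanged in Python 3's email.parser, minus the deprecated strict flag:

from email.parser import Parser

msg = Parser().parsestr("From: a@example.com\nSubject: hi\n\nbody text\n")
print(msg["Subject"])      # hi
print(msg.get_payload())   # body text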
xdress/xdress
|
eb7f0a02b3edf617d401939ede7f0d713a88917f
|
xdress/doxygen.py
|
python
|
func_docstr
|
(func_dict, is_method=False)
|
return msg
|
Generate the docstring for a function given a dictionary of the
parsed dOxygen xml.
Parameters
----------
func_dict : dict
This is a dictionary that should be the return value of the
function parse_function defined in this module. If this is a
class method it can be a sub-dictionary of the return value of
the parse_class function.
is_method : bool, optional(default=False)
Whether or not the function is a class method. If it is,
the text will be wrapped 4 spaces earlier to offset additional
indentation
Returns
-------
msg : str
The docstring to be inserted into the desc dictionary for the
function.
|
Generate the docstring for a function given a dictionary of the
parsed dOxygen xml.
|
[
"Generate",
"the",
"docstring",
"for",
"a",
"function",
"given",
"a",
"dictionary",
"of",
"the",
"parsed",
"dOxygen",
"xml",
"."
] |
def func_docstr(func_dict, is_method=False):
"""Generate the docstring for a function given a dictionary of the
parsed dOxygen xml.
Parameters
----------
func_dict : dict
This is a dictionary that should be the return value of the
function parse_function defined in this module. If this is a
class method it can be a sub-dictionary of the return value of
the parse_class function.
is_method : bool, optional(default=False)
Whether or not the function is a class method. If it is,
the text will be wrapped 4 spaces earlier to offset additional
indentation
Returns
-------
msg : str
The docstring to be inserted into the desc dictionary for the
function.
"""
if is_method:
wrapper = wrap_64
else:
wrapper = wrap_68
detailed_desc = func_dict['detaileddescription']
brief_desc = func_dict['briefdescription']
if detailed_desc is None or brief_desc is None:
desc = "\n\n"
else:
desc = '\n\n'.join([brief_desc, detailed_desc]).strip()
args = func_dict['args']
if args is None:
params = ['None']
else:
params = []
for arg in args:
arg_str = "%s : %s" % (arg, args[arg]['type'])
if 'desc' in args[arg]:
arg_str += '\n%s' % (args[arg]['desc'])
params.append(arg_str)
params = tuple(params)
returning = func_dict['ret_type']
if returning is None:
rets = ['None']
else:
rets = []
i = 1
if isinstance(returning, str):
rets.append('res%i : ' % i + returning)
else:
for ret in returning:
rets.append('res%i : ' % i + ret)
i += 1
# put main section in
msg = wrapper.fill(desc)
# skip a line and begin parameters section
msg += '\n\n'
msg += wrapper.fill('Parameters')
msg += '\n'
msg += wrapper.fill('----------')
msg += '\n'
# add parameters
for p in params:
lines = str.splitlines(p)
msg += wrapper.fill(lines[0])
msg += '\n'
more = False
for i in range(1, len(lines)):
more = True
l = lines[i]
msg += wrapper.fill(l)
if more:
msg += '\n\n'
else:
msg += '\n'
# skip a line and begin returns section
msg += wrapper.fill('Returns')
msg += '\n'
msg += wrapper.fill('-------')
msg += '\n'
# add return values
for r in rets:
lines = str.splitlines(r)
msg += wrapper.fill(lines[0])
msg += '\n'
for i in range(1, len(lines)):
l = lines[i]
msg += wrapper.fill(l)
msg += '\n'
# TODO: add notes section like in class function above.
# # skip a line and begin notes section
# msg += wrapper.fill('Notes')
# msg += '\n'
# msg += wrapper.fill('-----')
# msg += '\n'
return msg
|
[
"def",
"func_docstr",
"(",
"func_dict",
",",
"is_method",
"=",
"False",
")",
":",
"if",
"is_method",
":",
"wrapper",
"=",
"wrap_64",
"else",
":",
"wrapper",
"=",
"wrap_68",
"detailed_desc",
"=",
"func_dict",
"[",
"'detaileddescription'",
"]",
"brief_desc",
"=",
"func_dict",
"[",
"'briefdescription'",
"]",
"if",
"detailed_desc",
"is",
"None",
"or",
"brief_desc",
"is",
"None",
":",
"desc",
"=",
"\"\\n\\n\"",
"else",
":",
"desc",
"=",
"'\\n\\n'",
".",
"join",
"(",
"[",
"brief_desc",
",",
"detailed_desc",
"]",
")",
".",
"strip",
"(",
")",
"args",
"=",
"func_dict",
"[",
"'args'",
"]",
"if",
"args",
"is",
"None",
":",
"params",
"=",
"[",
"'None'",
"]",
"else",
":",
"params",
"=",
"[",
"]",
"for",
"arg",
"in",
"args",
":",
"arg_str",
"=",
"\"%s : %s\"",
"%",
"(",
"arg",
",",
"args",
"[",
"arg",
"]",
"[",
"'type'",
"]",
")",
"if",
"'desc'",
"in",
"args",
"[",
"arg",
"]",
":",
"arg_str",
"+=",
"'\\n%s'",
"%",
"(",
"args",
"[",
"arg",
"]",
"[",
"'desc'",
"]",
")",
"params",
".",
"append",
"(",
"arg_str",
")",
"params",
"=",
"tuple",
"(",
"params",
")",
"returning",
"=",
"func_dict",
"[",
"'ret_type'",
"]",
"if",
"returning",
"is",
"None",
":",
"rets",
"=",
"[",
"'None'",
"]",
"else",
":",
"rets",
"=",
"[",
"]",
"i",
"=",
"1",
"if",
"isinstance",
"(",
"returning",
",",
"str",
")",
":",
"rets",
".",
"append",
"(",
"'res%i : '",
"%",
"i",
"+",
"returning",
")",
"else",
":",
"for",
"ret",
"in",
"returning",
":",
"rets",
".",
"append",
"(",
"'res%i : '",
"%",
"i",
"+",
"ret",
")",
"i",
"+=",
"1",
"# put main section in",
"msg",
"=",
"wrapper",
".",
"fill",
"(",
"desc",
")",
"# skip a line and begin parameters section",
"msg",
"+=",
"'\\n\\n'",
"msg",
"+=",
"wrapper",
".",
"fill",
"(",
"'Parameters'",
")",
"msg",
"+=",
"'\\n'",
"msg",
"+=",
"wrapper",
".",
"fill",
"(",
"'----------'",
")",
"msg",
"+=",
"'\\n'",
"# add parameters",
"for",
"p",
"in",
"params",
":",
"lines",
"=",
"str",
".",
"splitlines",
"(",
"p",
")",
"msg",
"+=",
"wrapper",
".",
"fill",
"(",
"lines",
"[",
"0",
"]",
")",
"msg",
"+=",
"'\\n'",
"more",
"=",
"False",
"for",
"i",
"in",
"range",
"(",
"1",
",",
"len",
"(",
"lines",
")",
")",
":",
"more",
"=",
"True",
"l",
"=",
"lines",
"[",
"i",
"]",
"msg",
"+=",
"wrapper",
".",
"fill",
"(",
"l",
")",
"if",
"more",
":",
"msg",
"+=",
"'\\n\\n'",
"else",
":",
"msg",
"+=",
"'\\n'",
"# skip a line and begin returns section",
"msg",
"+=",
"wrapper",
".",
"fill",
"(",
"'Returns'",
")",
"msg",
"+=",
"'\\n'",
"msg",
"+=",
"wrapper",
".",
"fill",
"(",
"'-------'",
")",
"msg",
"+=",
"'\\n'",
"# add return values",
"for",
"r",
"in",
"rets",
":",
"lines",
"=",
"str",
".",
"splitlines",
"(",
"r",
")",
"msg",
"+=",
"wrapper",
".",
"fill",
"(",
"lines",
"[",
"0",
"]",
")",
"msg",
"+=",
"'\\n'",
"for",
"i",
"in",
"range",
"(",
"1",
",",
"len",
"(",
"lines",
")",
")",
":",
"l",
"=",
"lines",
"[",
"i",
"]",
"msg",
"+=",
"wrapper",
".",
"fill",
"(",
"l",
")",
"msg",
"+=",
"'\\n'",
"# TODO: add notes section like in class function above.",
"# # skip a line and begin notes section",
"# msg += wrapper.fill('Notes')",
"# msg += '\\n'",
"# msg += wrapper.fill('-----')",
"# msg += '\\n'",
"return",
"msg"
] |
https://github.com/xdress/xdress/blob/eb7f0a02b3edf617d401939ede7f0d713a88917f/xdress/doxygen.py#L259-L370
|
|
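The shape func_dict must take can be read off the code above. A hedged driver (assumes xdress is importable, since func_docstr relies on the module-level wrap_64/wrap_68 text wrappers; the dict values are made up for illustration):

from xdress.doxygen import func_docstr

func_dict = {
    'briefdescription': 'Add two numbers.',
    'detaileddescription': 'Performs exact integer addition.',
    'args': {'a': {'type': 'int', 'desc': 'first addend'},
             'b': {'type': 'int'}},
    'ret_type': 'int',
}
print(func_docstr(func_dict))  # NumPy-style Parameters/Returns sections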
sebastianruder/learn-to-select-data
|
4d3659cdfa097dea9cfd13c3260ae373a0461181
|
similarity.py
|
python
|
kl_divergence
|
(repr1, repr2)
|
return sim
|
Calculates Kullback-Leibler divergence (https://en.wikipedia.org/wiki/Kullback%E2%80%93Leibler_divergence).
|
Calculates Kullback-Leibler divergence (https://en.wikipedia.org/wiki/Kullback%E2%80%93Leibler_divergence).
|
[
"Calculates",
"Kullback",
"-",
"Leibler",
"divergence",
"(",
"https",
":",
"//",
"en",
".",
"wikipedia",
".",
"org",
"/",
"wiki",
"/",
"Kullback%E2%80%93Leibler_divergence",
")",
"."
] |
def kl_divergence(repr1, repr2):
"""Calculates Kullback-Leibler divergence (https://en.wikipedia.org/wiki/Kullback%E2%80%93Leibler_divergence)."""
sim = scipy.stats.entropy(repr1, repr2)
return sim
|
[
"def",
"kl_divergence",
"(",
"repr1",
",",
"repr2",
")",
":",
"sim",
"=",
"scipy",
".",
"stats",
".",
"entropy",
"(",
"repr1",
",",
"repr2",
")",
"return",
"sim"
] |
https://github.com/sebastianruder/learn-to-select-data/blob/4d3659cdfa097dea9cfd13c3260ae373a0461181/similarity.py#L63-L66
|
|
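A runnable sanity check for kl_divergence: scipy.stats.entropy normalizes its inputs, so raw counts are fine; KL divergence is zero only for identical distributions and is not symmetric:

import scipy.stats

p, q = [9, 1], [5, 5]
print(scipy.stats.entropy(p, q))  # ~0.368, the same value kl_divergence(p, q) returns
print(scipy.stats.entropy(p, p))  # 0.0
print(scipy.stats.entropy(q, p))  # ~0.511 -- asymmetry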
mkusner/grammarVAE
|
ffffe272a8cf1772578dfc92254c55c224cddc02
|
Theano-master/theano/gof/link.py
|
python
|
WrapLinkerMany
|
(linkers, wrappers)
|
return WrapLinker(linkers, wrapper)
|
Variant on WrapLinker that runs a series of wrapper functions instead of
just one.
|
Variant on WrapLinker that runs a series of wrapper functions instead of
just one.
|
[
"Variant",
"on",
"WrapLinker",
"that",
"runs",
"a",
"series",
"of",
"wrapper",
"functions",
"instead",
"of",
"just",
"one",
"."
] |
def WrapLinkerMany(linkers, wrappers):
"""
Variant on WrapLinker that runs a series of wrapper functions instead of
just one.
"""
def wrapper(*args):
for f in wrappers:
f(*args)
return WrapLinker(linkers, wrapper)
|
[
"def",
"WrapLinkerMany",
"(",
"linkers",
",",
"wrappers",
")",
":",
"def",
"wrapper",
"(",
"*",
"args",
")",
":",
"for",
"f",
"in",
"wrappers",
":",
"f",
"(",
"*",
"args",
")",
"return",
"WrapLinker",
"(",
"linkers",
",",
"wrapper",
")"
] |
https://github.com/mkusner/grammarVAE/blob/ffffe272a8cf1772578dfc92254c55c224cddc02/Theano-master/theano/gof/link.py#L1020-L1029
|
|
prawn-cake/vk-requests
|
bdd796b4d421b0fb125f2fa28468c67fc6124d69
|
vk_requests/session.py
|
python
|
VKSession._send_api_request
|
(self, request, captcha_response=None)
|
return response
|
Prepare and send HTTP API request
:param request: vk_requests.api.Request instance
:param captcha_response: None or dict
:return: HTTP response
|
Prepare and send HTTP API request
|
[
"Prepare",
"and",
"send",
"HTTP",
"API",
"request"
] |
def _send_api_request(self, request, captcha_response=None):
"""Prepare and send HTTP API request
:param request: vk_requests.api.Request instance
:param captcha_response: None or dict
:return: HTTP response
"""
url = self.API_URL + request.method_name
# Prepare request arguments
method_kwargs = {'v': self.api_version}
# Shape up the request data
for values in (request.method_args,):
method_kwargs.update(stringify_values(values))
if self.is_token_required() or self._service_token:
# Auth api call if access_token hadn't been gotten earlier
method_kwargs['access_token'] = self.access_token
if captcha_response:
method_kwargs['captcha_sid'] = captcha_response['sid']
method_kwargs['captcha_key'] = captcha_response['key']
http_params = dict(url=url,
data=method_kwargs,
**request.http_params)
logger.debug('send_api_request:http_params: %s', http_params)
response = self.http_session.post(**http_params)
return response
|
[
"def",
"_send_api_request",
"(",
"self",
",",
"request",
",",
"captcha_response",
"=",
"None",
")",
":",
"url",
"=",
"self",
".",
"API_URL",
"+",
"request",
".",
"method_name",
"# Prepare request arguments",
"method_kwargs",
"=",
"{",
"'v'",
":",
"self",
".",
"api_version",
"}",
"# Shape up the request data",
"for",
"values",
"in",
"(",
"request",
".",
"method_args",
",",
")",
":",
"method_kwargs",
".",
"update",
"(",
"stringify_values",
"(",
"values",
")",
")",
"if",
"self",
".",
"is_token_required",
"(",
")",
"or",
"self",
".",
"_service_token",
":",
"# Auth api call if access_token hadn't been gotten earlier",
"method_kwargs",
"[",
"'access_token'",
"]",
"=",
"self",
".",
"access_token",
"if",
"captcha_response",
":",
"method_kwargs",
"[",
"'captcha_sid'",
"]",
"=",
"captcha_response",
"[",
"'sid'",
"]",
"method_kwargs",
"[",
"'captcha_key'",
"]",
"=",
"captcha_response",
"[",
"'key'",
"]",
"http_params",
"=",
"dict",
"(",
"url",
"=",
"url",
",",
"data",
"=",
"method_kwargs",
",",
"*",
"*",
"request",
".",
"http_params",
")",
"logger",
".",
"debug",
"(",
"'send_api_request:http_params: %s'",
",",
"http_params",
")",
"response",
"=",
"self",
".",
"http_session",
".",
"post",
"(",
"*",
"*",
"http_params",
")",
"return",
"response"
] |
https://github.com/prawn-cake/vk-requests/blob/bdd796b4d421b0fb125f2fa28468c67fc6124d69/vk_requests/session.py#L444-L473
|
|
google/python-gflags
|
4f06c3d0d6cbe9b1fb90ee9fb1c082b3bf9285f6
|
gflags/flagvalues.py
|
python
|
FlagValues.FlagsByModuleDict
|
(self)
|
return self.__dict__['__flags_by_module']
|
Returns the dictionary of module_name -> list of defined flags.
Returns:
A dictionary. Its keys are module names (strings). Its values
are lists of Flag objects.
|
Returns the dictionary of module_name -> list of defined flags.
|
[
"Returns",
"the",
"dictionary",
"of",
"module_name",
"-",
">",
"list",
"of",
"defined",
"flags",
"."
] |
def FlagsByModuleDict(self):
"""Returns the dictionary of module_name -> list of defined flags.
Returns:
A dictionary. Its keys are module names (strings). Its values
are lists of Flag objects.
"""
return self.__dict__['__flags_by_module']
|
[
"def",
"FlagsByModuleDict",
"(",
"self",
")",
":",
"return",
"self",
".",
"__dict__",
"[",
"'__flags_by_module'",
"]"
] |
https://github.com/google/python-gflags/blob/4f06c3d0d6cbe9b1fb90ee9fb1c082b3bf9285f6/gflags/flagvalues.py#L160-L167
|
|
facelessuser/pymdown-extensions
|
7a9d548ed3aa921e77fbedd202947ba884cca04c
|
pymdownx/emoji.py
|
python
|
EmojiExtension.__init__
|
(self, *args, **kwargs)
|
Initialize.
|
Initialize.
|
[
"Initialize",
"."
] |
def __init__(self, *args, **kwargs):
"""Initialize."""
self.config = {
'emoji_index': [
emojione,
"Function that returns the desired emoji index. - Default: 'pymdownx.emoji.emojione'"
],
'emoji_generator': [
to_png,
"Emoji generator method. - Default: pymdownx.emoji.to_png"
],
'title': [
'short',
"What title to use on images. You can use 'long' which shows the long name, "
"'short' which shows the shortname (:short:), or 'none' which shows no title. "
"- Default: 'short'"
],
'alt': [
'unicode',
"Control alt form. 'short' sets alt to the shortname (:short:), 'uniocde' sets "
"alt to the raw Unicode value, and 'html_entity' sets alt to the HTML entity. "
"- Default: 'unicode'"
],
'remove_variation_selector': [
False,
"Remove variation selector 16 from unicode. - Default: False"
],
'options': [
{},
"Emoji options see documentation for options for github and emojione."
]
}
super(EmojiExtension, self).__init__(*args, **kwargs)
|
[
"def",
"__init__",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"self",
".",
"config",
"=",
"{",
"'emoji_index'",
":",
"[",
"emojione",
",",
"\"Function that returns the desired emoji index. - Default: 'pymdownx.emoji.emojione'\"",
"]",
",",
"'emoji_generator'",
":",
"[",
"to_png",
",",
"\"Emoji generator method. - Default: pymdownx.emoji.to_png\"",
"]",
",",
"'title'",
":",
"[",
"'short'",
",",
"\"What title to use on images. You can use 'long' which shows the long name, \"",
"\"'short' which shows the shortname (:short:), or 'none' which shows no title. \"",
"\"- Default: 'short'\"",
"]",
",",
"'alt'",
":",
"[",
"'unicode'",
",",
"\"Control alt form. 'short' sets alt to the shortname (:short:), 'uniocde' sets \"",
"\"alt to the raw Unicode value, and 'html_entity' sets alt to the HTML entity. \"",
"\"- Default: 'unicode'\"",
"]",
",",
"'remove_variation_selector'",
":",
"[",
"False",
",",
"\"Remove variation selector 16 from unicode. - Default: False\"",
"]",
",",
"'options'",
":",
"[",
"{",
"}",
",",
"\"Emoji options see documentation for options for github and emojione.\"",
"]",
"}",
"super",
"(",
"EmojiExtension",
",",
"self",
")",
".",
"__init__",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] |
https://github.com/facelessuser/pymdown-extensions/blob/7a9d548ed3aa921e77fbedd202947ba884cca04c/pymdownx/emoji.py#L350-L383
|
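Typical configuration of the extension from user code, hedged (markdown and pymdownx installed; to_svg is another generator shipped alongside to_png):

import markdown
from pymdownx import emoji

html = markdown.markdown(
    ":smile:",
    extensions=["pymdownx.emoji"],
    extension_configs={"pymdownx.emoji": {"emoji_generator": emoji.to_svg,
                                          "title": "long"}},
)
print(html)  # a paragraph wrapping an <img> emoji tag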