nwo | sha | path | language | identifier | parameters | argument_list | return_statement | docstring | docstring_summary | docstring_tokens | function | function_tokens | url
---|---|---|---|---|---|---|---|---|---|---|---|---|---
twilio/twilio-python
|
6e1e811ea57a1edfadd5161ace87397c563f6915
|
twilio/rest/ip_messaging/v1/service/user/__init__.py
|
python
|
UserList.stream
|
(self, limit=None, page_size=None)
|
return self._version.stream(page, limits['limit'])
|
Streams UserInstance records from the API as a generator stream.
This operation lazily loads records as efficiently as possible until the limit
is reached.
The results are returned as a generator, so this operation is memory efficient.
:param int limit: Upper limit for the number of records to return. stream()
guarantees to never return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, stream() will attempt to read the
limit with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.ip_messaging.v1.service.user.UserInstance]
|
Streams UserInstance records from the API as a generator stream.
This operation lazily loads records as efficiently as possible until the limit
is reached.
The results are returned as a generator, so this operation is memory efficient.
|
[
"Streams",
"UserInstance",
"records",
"from",
"the",
"API",
"as",
"a",
"generator",
"stream",
".",
"This",
"operation",
"lazily",
"loads",
"records",
"as",
"efficiently",
"as",
"possible",
"until",
"the",
"limit",
"is",
"reached",
".",
"The",
"results",
"are",
"returned",
"as",
"a",
"generator",
"so",
"this",
"operation",
"is",
"memory",
"efficient",
"."
] |
def stream(self, limit=None, page_size=None):
"""
Streams UserInstance records from the API as a generator stream.
This operation lazily loads records as efficiently as possible until the limit
is reached.
The results are returned as a generator, so this operation is memory efficient.
:param int limit: Upper limit for the number of records to return. stream()
guarantees to never return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, stream() will attempt to read the
limit with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.ip_messaging.v1.service.user.UserInstance]
"""
limits = self._version.read_limits(limit, page_size)
page = self.page(page_size=limits['page_size'], )
return self._version.stream(page, limits['limit'])
|
[
"def",
"stream",
"(",
"self",
",",
"limit",
"=",
"None",
",",
"page_size",
"=",
"None",
")",
":",
"limits",
"=",
"self",
".",
"_version",
".",
"read_limits",
"(",
"limit",
",",
"page_size",
")",
"page",
"=",
"self",
".",
"page",
"(",
"page_size",
"=",
"limits",
"[",
"'page_size'",
"]",
",",
")",
"return",
"self",
".",
"_version",
".",
"stream",
"(",
"page",
",",
"limits",
"[",
"'limit'",
"]",
")"
] |
https://github.com/twilio/twilio-python/blob/6e1e811ea57a1edfadd5161ace87397c563f6915/twilio/rest/ip_messaging/v1/service/user/__init__.py#L60-L81
|
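The UserList.stream record above describes the usual lazy-pagination contract: fetch pages of `page_size` records and stop once `limit` items have been yielded. Below is a minimal, library-agnostic sketch of that pattern; the `fetch_page` callable and its parameters are illustrative stand-ins, not Twilio's API.

```python
from itertools import islice
from typing import Callable, Iterable, Iterator, Optional

def stream(fetch_page: Callable[[int, int], Iterable],
           limit: Optional[int] = None,
           page_size: int = 50) -> Iterator:
    """Lazily yield records page by page until `limit` items are produced."""
    def pages() -> Iterator:
        offset = 0
        while True:
            page = list(fetch_page(offset, page_size))
            if not page:
                return
            yield from page
            offset += len(page)

    gen = pages()
    return islice(gen, limit) if limit is not None else gen

# In-memory "API" standing in for the remote service.
records = list(range(137))
fetch = lambda offset, size: records[offset:offset + size]
assert len(list(stream(fetch, limit=60, page_size=25))) == 60
```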
|
goace/personal-file-sharing-center
|
4a5b903b003f2db1306e77c5e51b6660fc5dbc6a
|
web/template.py
|
python
|
GAE_Render._load_template
|
(self, name)
|
[] |
def _load_template(self, name):
t = getattr(self.mod, name)
import types
if isinstance(t, types.ModuleType):
return GAE_Render(t, cache=self._cache is not None, base=self._base, **self._keywords)
else:
return t
|
[
"def",
"_load_template",
"(",
"self",
",",
"name",
")",
":",
"t",
"=",
"getattr",
"(",
"self",
".",
"mod",
",",
"name",
")",
"import",
"types",
"if",
"isinstance",
"(",
"t",
",",
"types",
".",
"ModuleType",
")",
":",
"return",
"GAE_Render",
"(",
"t",
",",
"cache",
"=",
"self",
".",
"_cache",
"is",
"not",
"None",
",",
"base",
"=",
"self",
".",
"_base",
",",
"*",
"*",
"self",
".",
"_keywords",
")",
"else",
":",
"return",
"t"
] |
https://github.com/goace/personal-file-sharing-center/blob/4a5b903b003f2db1306e77c5e51b6660fc5dbc6a/web/template.py#L1042-L1048
|
||||
JacksonWuxs/DaPy
|
b2bf72707ffcc92d05af1ac890e0786d5787816e
|
DaPy/core/base/BaseSheet.py
|
python
|
BaseSheet._quickly_append_col
|
(self, col, seq, miss, pos=None)
|
return self
|
append a new column to the sheet without checking
|
append a new column to the sheet without checking
|
[
"append",
"a",
"new",
"column",
"to",
"the",
"sheet",
"without",
"checking"
] |
def _quickly_append_col(self, col, seq, miss, pos=None):
'''append a new column to the sheet without checking'''
col = self._check_col_new_name(col)
if pos is None:
pos = len(self.columns)
self._data[col] = seq
self._columns.insert(pos, col)
self._missing.insert(pos, miss)
self._dim = SHEET_DIM(len(seq), self._dim.Col + 1)
return self
|
[
"def",
"_quickly_append_col",
"(",
"self",
",",
"col",
",",
"seq",
",",
"miss",
",",
"pos",
"=",
"None",
")",
":",
"col",
"=",
"self",
".",
"_check_col_new_name",
"(",
"col",
")",
"if",
"pos",
"is",
"None",
":",
"pos",
"=",
"len",
"(",
"self",
".",
"columns",
")",
"self",
".",
"_data",
"[",
"col",
"]",
"=",
"seq",
"self",
".",
"_columns",
".",
"insert",
"(",
"pos",
",",
"col",
")",
"self",
".",
"_missing",
".",
"insert",
"(",
"pos",
",",
"miss",
")",
"self",
".",
"_dim",
"=",
"SHEET_DIM",
"(",
"len",
"(",
"seq",
")",
",",
"self",
".",
"_dim",
".",
"Col",
"+",
"1",
")",
"return",
"self"
] |
https://github.com/JacksonWuxs/DaPy/blob/b2bf72707ffcc92d05af1ac890e0786d5787816e/DaPy/core/base/BaseSheet.py#L1244-L1253
|
|
nodejs/node-gyp
|
a2f298870692022302fa27a1d42363c4a72df407
|
gyp/pylib/gyp/generator/msvs.py
|
python
|
_VerifySourcesExist
|
(sources, root_dir)
|
return missing_sources
|
Verifies that all source files exist on disk.
Checks that all regular source files, i.e. not created at run time,
exist on disk. Missing files cause needless recompilation but no otherwise
visible errors.
Arguments:
sources: A recursive list of Filter/file names.
root_dir: The root directory for the relative path names.
Returns:
A list of source files that cannot be found on disk.
|
Verifies that all source files exist on disk.
|
[
"Verifies",
"that",
"all",
"source",
"files",
"exist",
"on",
"disk",
"."
] |
def _VerifySourcesExist(sources, root_dir):
"""Verifies that all source files exist on disk.
Checks that all regular source files, i.e. not created at run time,
exist on disk. Missing files cause needless recompilation but no otherwise
visible errors.
Arguments:
sources: A recursive list of Filter/file names.
root_dir: The root directory for the relative path names.
Returns:
A list of source files that cannot be found on disk.
"""
missing_sources = []
for source in sources:
if isinstance(source, MSVSProject.Filter):
missing_sources.extend(_VerifySourcesExist(source.contents, root_dir))
else:
if "$" not in source:
full_path = os.path.join(root_dir, source)
if not os.path.exists(full_path):
missing_sources.append(full_path)
return missing_sources
|
[
"def",
"_VerifySourcesExist",
"(",
"sources",
",",
"root_dir",
")",
":",
"missing_sources",
"=",
"[",
"]",
"for",
"source",
"in",
"sources",
":",
"if",
"isinstance",
"(",
"source",
",",
"MSVSProject",
".",
"Filter",
")",
":",
"missing_sources",
".",
"extend",
"(",
"_VerifySourcesExist",
"(",
"source",
".",
"contents",
",",
"root_dir",
")",
")",
"else",
":",
"if",
"\"$\"",
"not",
"in",
"source",
":",
"full_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"root_dir",
",",
"source",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"full_path",
")",
":",
"missing_sources",
".",
"append",
"(",
"full_path",
")",
"return",
"missing_sources"
] |
https://github.com/nodejs/node-gyp/blob/a2f298870692022302fa27a1d42363c4a72df407/gyp/pylib/gyp/generator/msvs.py#L3474-L3496
|
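_VerifySourcesExist above walks a nested structure of filters and file names. A plain-Python sketch of the same recursive existence check follows, with nested lists standing in for MSVSProject.Filter objects (that data layout is an assumption made for illustration):

```python
import os

def find_missing_sources(sources, root_dir):
    """Return the paths under root_dir that do not exist on disk.

    Nested lists stand in for filter groups; strings are file names.
    """
    missing = []
    for source in sources:
        if isinstance(source, list):          # a "filter" grouping
            missing.extend(find_missing_sources(source, root_dir))
        elif "$" not in source:               # skip generated/variable paths
            full_path = os.path.join(root_dir, source)
            if not os.path.exists(full_path):
                missing.append(full_path)
    return missing

print(find_missing_sources(["a.c", ["sub/b.c"], "$(OutDir)/gen.c"], "/tmp"))
```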
|
aceisace/Inkycal
|
552744bc5d80769c1015d48fd8b13201683ee679
|
inkycal/display/drivers/epdconfig.py
|
python
|
JetsonNano.digital_write
|
(self, pin, value)
|
[] |
def digital_write(self, pin, value):
self.GPIO.output(pin, value)
|
[
"def",
"digital_write",
"(",
"self",
",",
"pin",
",",
"value",
")",
":",
"self",
".",
"GPIO",
".",
"output",
"(",
"pin",
",",
"value",
")"
] |
https://github.com/aceisace/Inkycal/blob/552744bc5d80769c1015d48fd8b13201683ee679/inkycal/display/drivers/epdconfig.py#L114-L115
|
||||
pypa/setuptools
|
9f37366aab9cd8f6baa23e6a77cfdb8daf97757e
|
setuptools/_distutils/ccompiler.py
|
python
|
CCompiler.create_static_lib
|
(self, objects, output_libname, output_dir=None,
debug=0, target_lang=None)
|
Link a bunch of stuff together to create a static library file.
The "bunch of stuff" consists of the list of object files supplied
as 'objects', the extra object files supplied to
'add_link_object()' and/or 'set_link_objects()', the libraries
supplied to 'add_library()' and/or 'set_libraries()', and the
libraries supplied as 'libraries' (if any).
'output_libname' should be a library name, not a filename; the
filename will be inferred from the library name. 'output_dir' is
the directory where the library file will be put.
'debug' is a boolean; if true, debugging information will be
included in the library (note that on most platforms, it is the
compile step where this matters: the 'debug' flag is included here
just for consistency).
'target_lang' is the target language for which the given objects
are being compiled. This allows specific linkage time treatment of
certain languages.
Raises LibError on failure.
|
Link a bunch of stuff together to create a static library file.
The "bunch of stuff" consists of the list of object files supplied
as 'objects', the extra object files supplied to
'add_link_object()' and/or 'set_link_objects()', the libraries
supplied to 'add_library()' and/or 'set_libraries()', and the
libraries supplied as 'libraries' (if any).
|
[
"Link",
"a",
"bunch",
"of",
"stuff",
"together",
"to",
"create",
"a",
"static",
"library",
"file",
".",
"The",
"bunch",
"of",
"stuff",
"consists",
"of",
"the",
"list",
"of",
"object",
"files",
"supplied",
"as",
"objects",
"the",
"extra",
"object",
"files",
"supplied",
"to",
"add_link_object",
"()",
"and",
"/",
"or",
"set_link_objects",
"()",
"the",
"libraries",
"supplied",
"to",
"add_library",
"()",
"and",
"/",
"or",
"set_libraries",
"()",
"and",
"the",
"libraries",
"supplied",
"as",
"libraries",
"(",
"if",
"any",
")",
"."
] |
def create_static_lib(self, objects, output_libname, output_dir=None,
debug=0, target_lang=None):
"""Link a bunch of stuff together to create a static library file.
The "bunch of stuff" consists of the list of object files supplied
as 'objects', the extra object files supplied to
'add_link_object()' and/or 'set_link_objects()', the libraries
supplied to 'add_library()' and/or 'set_libraries()', and the
libraries supplied as 'libraries' (if any).
'output_libname' should be a library name, not a filename; the
filename will be inferred from the library name. 'output_dir' is
the directory where the library file will be put.
'debug' is a boolean; if true, debugging information will be
included in the library (note that on most platforms, it is the
compile step where this matters: the 'debug' flag is included here
just for consistency).
'target_lang' is the target language for which the given objects
are being compiled. This allows specific linkage time treatment of
certain languages.
Raises LibError on failure.
"""
pass
|
[
"def",
"create_static_lib",
"(",
"self",
",",
"objects",
",",
"output_libname",
",",
"output_dir",
"=",
"None",
",",
"debug",
"=",
"0",
",",
"target_lang",
"=",
"None",
")",
":",
"pass"
] |
https://github.com/pypa/setuptools/blob/9f37366aab9cd8f6baa23e6a77cfdb8daf97757e/setuptools/_distutils/ccompiler.py#L585-L609
|
||
intel/virtual-storage-manager
|
00706ab9701acbd0d5e04b19cc80c6b66a2973b8
|
source/vsm/vsm/api/openstack/wsgi.py
|
python
|
Controller.__init__
|
(self, view_builder=None)
|
Initialize controller with a view builder instance.
|
Initialize controller with a view builder instance.
|
[
"Initialize",
"controller",
"with",
"a",
"view",
"builder",
"instance",
"."
] |
def __init__(self, view_builder=None):
"""Initialize controller with a view builder instance."""
if view_builder:
self._view_builder = view_builder
elif self._view_builder_class:
self._view_builder = self._view_builder_class()
else:
self._view_builder = None
|
[
"def",
"__init__",
"(",
"self",
",",
"view_builder",
"=",
"None",
")",
":",
"if",
"view_builder",
":",
"self",
".",
"_view_builder",
"=",
"view_builder",
"elif",
"self",
".",
"_view_builder_class",
":",
"self",
".",
"_view_builder",
"=",
"self",
".",
"_view_builder_class",
"(",
")",
"else",
":",
"self",
".",
"_view_builder",
"=",
"None"
] |
https://github.com/intel/virtual-storage-manager/blob/00706ab9701acbd0d5e04b19cc80c6b66a2973b8/source/vsm/vsm/api/openstack/wsgi.py#L991-L998
|
||
xtiankisutsa/MARA_Framework
|
ac4ac88bfd38f33ae8780a606ed09ab97177c562
|
tools/AndroBugs/tools/modified/androguard/core/bytecodes/dvm.py
|
python
|
EncodedMethod.is_native
|
(self)
|
return False
|
Return whether the access_flag is boolean
:rtype: boolean
|
Return whether the access_flag is boolean
|
[
"Return",
"whether",
"the",
"access_flag",
"is",
"boolean"
] |
def is_native(self):
"""
Return whether the access_flag is boolean
:rtype: boolean
"""
if 0x100 & self.get_access_flags():
return True
return False
|
[
"def",
"is_native",
"(",
"self",
")",
":",
"if",
"0x100",
"&",
"self",
".",
"get_access_flags",
"(",
")",
":",
"return",
"True",
"return",
"False"
] |
https://github.com/xtiankisutsa/MARA_Framework/blob/ac4ac88bfd38f33ae8780a606ed09ab97177c562/tools/AndroBugs/tools/modified/androguard/core/bytecodes/dvm.py#L2713-L2721
|
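EncodedMethod.is_native above tests the 0x100 bit of the access flags; in the DEX/JVM access-flag encoding that bit is ACC_NATIVE. A small standalone sketch of the same bitmask test:

```python
ACC_NATIVE = 0x100  # access-flag bit for native methods

def is_native(access_flags: int) -> bool:
    """Return True when the ACC_NATIVE bit is set."""
    return bool(access_flags & ACC_NATIVE)

print(is_native(0x101))  # True
print(is_native(0x001))  # False
```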
|
NeuralEnsemble/python-neo
|
34d4db8fb0dc950dbbc6defd7fb75e99ea877286
|
neo/io/basefromrawio.py
|
python
|
BaseFromRaw.get_sub_signal_streams
|
(self, signal_group_mode='group-by-same-units')
|
return sub_streams
|
When signal streams don't have homogeneous SI units across channels,
they have to be split in sub streams to construct AnalogSignal objects with unique units.
For backward compatibility (neo version <= 0.5) sub-streams can also be
used to generate one AnalogSignal per channel.
|
When signal streams don't have homogeneous SI units across channels,
they have to be split in sub streams to construct AnalogSignal objects with unique units.
|
[
"When",
"signal",
"streams",
"don",
"t",
"have",
"homogeneous",
"SI",
"units",
"across",
"channels",
"they",
"have",
"to",
"be",
"split",
"in",
"sub",
"streams",
"to",
"construct",
"AnalogSignal",
"objects",
"with",
"unique",
"units",
"."
] |
def get_sub_signal_streams(self, signal_group_mode='group-by-same-units'):
"""
When signal streams don't have homogeneous SI units across channels,
they have to be split in sub streams to construct AnalogSignal objects with unique units.
For backward compatibility (neo version <= 0.5) sub-streams can also be
used to generate one AnalogSignal per channel.
"""
signal_streams = self.header['signal_streams']
signal_channels = self.header['signal_channels']
sub_streams = []
for stream_index in range(len(signal_streams)):
stream_id = signal_streams[stream_index]['id']
stream_name = signal_streams[stream_index]['name']
mask = signal_channels['stream_id'] == stream_id
channels = signal_channels[mask]
if signal_group_mode == 'group-by-same-units':
# this does not keep the original order
_, idx = np.unique(channels['units'], return_index=True)
all_units = channels['units'][np.sort(idx)]
if len(all_units) == 1:
# no substream
# None iwill be transform as slice later
inner_stream_channels = None
name = stream_name
sub_stream = (stream_index, inner_stream_channels, name)
sub_streams.append(sub_stream)
else:
for units in all_units:
inner_stream_channels, = np.nonzero(channels['units'] == units)
chan_names = channels[inner_stream_channels]['name']
name = 'Channels: (' + ' '.join(chan_names) + ')'
sub_stream = (stream_index, inner_stream_channels, name)
sub_streams.append(sub_stream)
elif signal_group_mode == 'split-all':
# mimic all neo <= 0.5 behavior
for i, channel in enumerate(channels):
inner_stream_channels = [i]
name = channels[i]['name']
sub_stream = (stream_index, inner_stream_channels, name)
sub_streams.append(sub_stream)
else:
raise (NotImplementedError)
return sub_streams
|
[
"def",
"get_sub_signal_streams",
"(",
"self",
",",
"signal_group_mode",
"=",
"'group-by-same-units'",
")",
":",
"signal_streams",
"=",
"self",
".",
"header",
"[",
"'signal_streams'",
"]",
"signal_channels",
"=",
"self",
".",
"header",
"[",
"'signal_channels'",
"]",
"sub_streams",
"=",
"[",
"]",
"for",
"stream_index",
"in",
"range",
"(",
"len",
"(",
"signal_streams",
")",
")",
":",
"stream_id",
"=",
"signal_streams",
"[",
"stream_index",
"]",
"[",
"'id'",
"]",
"stream_name",
"=",
"signal_streams",
"[",
"stream_index",
"]",
"[",
"'name'",
"]",
"mask",
"=",
"signal_channels",
"[",
"'stream_id'",
"]",
"==",
"stream_id",
"channels",
"=",
"signal_channels",
"[",
"mask",
"]",
"if",
"signal_group_mode",
"==",
"'group-by-same-units'",
":",
"# this does not keep the original order",
"_",
",",
"idx",
"=",
"np",
".",
"unique",
"(",
"channels",
"[",
"'units'",
"]",
",",
"return_index",
"=",
"True",
")",
"all_units",
"=",
"channels",
"[",
"'units'",
"]",
"[",
"np",
".",
"sort",
"(",
"idx",
")",
"]",
"if",
"len",
"(",
"all_units",
")",
"==",
"1",
":",
"# no substream",
"# None iwill be transform as slice later",
"inner_stream_channels",
"=",
"None",
"name",
"=",
"stream_name",
"sub_stream",
"=",
"(",
"stream_index",
",",
"inner_stream_channels",
",",
"name",
")",
"sub_streams",
".",
"append",
"(",
"sub_stream",
")",
"else",
":",
"for",
"units",
"in",
"all_units",
":",
"inner_stream_channels",
",",
"=",
"np",
".",
"nonzero",
"(",
"channels",
"[",
"'units'",
"]",
"==",
"units",
")",
"chan_names",
"=",
"channels",
"[",
"inner_stream_channels",
"]",
"[",
"'name'",
"]",
"name",
"=",
"'Channels: ('",
"+",
"' '",
".",
"join",
"(",
"chan_names",
")",
"+",
"')'",
"sub_stream",
"=",
"(",
"stream_index",
",",
"inner_stream_channels",
",",
"name",
")",
"sub_streams",
".",
"append",
"(",
"sub_stream",
")",
"elif",
"signal_group_mode",
"==",
"'split-all'",
":",
"# mimic all neo <= 0.5 behavior",
"for",
"i",
",",
"channel",
"in",
"enumerate",
"(",
"channels",
")",
":",
"inner_stream_channels",
"=",
"[",
"i",
"]",
"name",
"=",
"channels",
"[",
"i",
"]",
"[",
"'name'",
"]",
"sub_stream",
"=",
"(",
"stream_index",
",",
"inner_stream_channels",
",",
"name",
")",
"sub_streams",
".",
"append",
"(",
"sub_stream",
")",
"else",
":",
"raise",
"(",
"NotImplementedError",
")",
"return",
"sub_streams"
] |
https://github.com/NeuralEnsemble/python-neo/blob/34d4db8fb0dc950dbbc6defd7fb75e99ea877286/neo/io/basefromrawio.py#L284-L330
|
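The group-by-same-units branch above relies on a common NumPy idiom: `np.unique` with `return_index=True` yields the first-occurrence indices, and sorting those indices restores the original order of the unique values. A self-contained sketch:

```python
import numpy as np

units = np.array(["uV", "mV", "uV", "pA", "mV"])
_, first_idx = np.unique(units, return_index=True)
ordered_unique = units[np.sort(first_idx)]
print(ordered_unique)  # ['uV' 'mV' 'pA'] -- original order, not alphabetical
```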
|
golemhq/golem
|
84f51478b169cdeab73fc7e2a22a64d0a2a29263
|
golem/webdriver/extended_driver.py
|
python
|
GolemExtendedDriver.wait_for_element_text_not_contains
|
(self, element, text, timeout)
|
return element.wait_text_not_contains(text, timeout)
|
Wait for element to not contain text
:Args:
- element: an element tuple, a CSS string, an XPath string or a WebElement object
- text: expected text to not be contained in element
- timeout: time to wait (in seconds)
|
Wait for element to not contain text
|
[
"Wait",
"for",
"element",
"to",
"not",
"contain",
"text"
] |
def wait_for_element_text_not_contains(self, element, text, timeout):
"""Wait for element to not contain text
:Args:
- element: an element tuple, a CSS string, an XPath string or a WebElement object
- text: expected text to not be contained in element
- timeout: time to wait (in seconds)
"""
element = self.find(element, timeout=0)
return element.wait_text_not_contains(text, timeout)
|
[
"def",
"wait_for_element_text_not_contains",
"(",
"self",
",",
"element",
",",
"text",
",",
"timeout",
")",
":",
"element",
"=",
"self",
".",
"find",
"(",
"element",
",",
"timeout",
"=",
"0",
")",
"return",
"element",
".",
"wait_text_not_contains",
"(",
"text",
",",
"timeout",
")"
] |
https://github.com/golemhq/golem/blob/84f51478b169cdeab73fc7e2a22a64d0a2a29263/golem/webdriver/extended_driver.py#L458-L467
|
|
magmax/python-inquirer
|
ef7487247b46f33032f54a1547c9b8d9b8287c2b
|
noxfile.py
|
python
|
typeguard
|
(session: Session)
|
Runtime type checking using Typeguard.
|
Runtime type checking using Typeguard.
|
[
"Runtime",
"type",
"checking",
"using",
"Typeguard",
"."
] |
def typeguard(session: Session) -> None:
"""Runtime type checking using Typeguard."""
session.install(".")
session.install("pytest", "typeguard", "pygments")
session.run("pytest", f"--typeguard-packages={package}", *session.posargs)
|
[
"def",
"typeguard",
"(",
"session",
":",
"Session",
")",
"->",
"None",
":",
"session",
".",
"install",
"(",
"\".\"",
")",
"session",
".",
"install",
"(",
"\"pytest\"",
",",
"\"typeguard\"",
",",
"\"pygments\"",
")",
"session",
".",
"run",
"(",
"\"pytest\"",
",",
"f\"--typeguard-packages={package}\"",
",",
"*",
"session",
".",
"posargs",
")"
] |
https://github.com/magmax/python-inquirer/blob/ef7487247b46f33032f54a1547c9b8d9b8287c2b/noxfile.py#L168-L172
|
||
aws/aws-sam-cli
|
2aa7bf01b2e0b0864ef63b1898a8b30577443acc
|
samcli/lib/sync/flows/layer_sync_flow.py
|
python
|
AbstractLayerSyncFlow._publish_new_layer_version
|
(self)
|
Publish new layer version and keep new layer version arn so that we can update related functions
|
Publish new layer version and keep new layer version arn so that we can update related functions
|
[
"Publish",
"new",
"layer",
"version",
"and",
"keep",
"new",
"layer",
"version",
"arn",
"so",
"that",
"we",
"can",
"update",
"related",
"functions"
] |
def _publish_new_layer_version(self) -> int:
"""
Publish new layer version and keep new layer version arn so that we can update related functions
"""
compatible_runtimes = self._get_compatible_runtimes()
with open(cast(str, self._zip_file), "rb") as zip_file:
data = zip_file.read()
layer_publish_result = self._lambda_client.publish_layer_version(
LayerName=self._layer_arn, Content={"ZipFile": data}, CompatibleRuntimes=compatible_runtimes
)
LOG.debug("%sPublish Layer Version Result %s", self.log_prefix, layer_publish_result)
return int(layer_publish_result.get("Version"))
|
[
"def",
"_publish_new_layer_version",
"(",
"self",
")",
"->",
"int",
":",
"compatible_runtimes",
"=",
"self",
".",
"_get_compatible_runtimes",
"(",
")",
"with",
"open",
"(",
"cast",
"(",
"str",
",",
"self",
".",
"_zip_file",
")",
",",
"\"rb\"",
")",
"as",
"zip_file",
":",
"data",
"=",
"zip_file",
".",
"read",
"(",
")",
"layer_publish_result",
"=",
"self",
".",
"_lambda_client",
".",
"publish_layer_version",
"(",
"LayerName",
"=",
"self",
".",
"_layer_arn",
",",
"Content",
"=",
"{",
"\"ZipFile\"",
":",
"data",
"}",
",",
"CompatibleRuntimes",
"=",
"compatible_runtimes",
")",
"LOG",
".",
"debug",
"(",
"\"%sPublish Layer Version Result %s\"",
",",
"self",
".",
"log_prefix",
",",
"layer_publish_result",
")",
"return",
"int",
"(",
"layer_publish_result",
".",
"get",
"(",
"\"Version\"",
")",
")"
] |
https://github.com/aws/aws-sam-cli/blob/2aa7bf01b2e0b0864ef63b1898a8b30577443acc/samcli/lib/sync/flows/layer_sync_flow.py#L118-L129
|
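The layer-sync record above ultimately calls Lambda's PublishLayerVersion API. A hedged boto3 sketch of the same call is shown below; the layer name, zip path, and runtime list are placeholders, and real use requires AWS credentials to be configured.

```python
import boto3

lambda_client = boto3.client("lambda")

with open("layer.zip", "rb") as zip_file:   # placeholder artifact
    payload = zip_file.read()

response = lambda_client.publish_layer_version(
    LayerName="my-shared-layer",            # placeholder name
    Content={"ZipFile": payload},
    CompatibleRuntimes=["python3.9"],
)
print(response["Version"])                  # integer layer version number
```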
||
twilio/twilio-python
|
6e1e811ea57a1edfadd5161ace87397c563f6915
|
twilio/rest/api/v2010/account/sip/credential_list/credential.py
|
python
|
CredentialContext.__init__
|
(self, version, account_sid, credential_list_sid, sid)
|
Initialize the CredentialContext
:param Version version: Version that contains the resource
:param account_sid: The unique id of the Account that is responsible for this resource.
:param credential_list_sid: The unique id that identifies the credential list that contains the desired credential
:param sid: The unique id that identifies the resource to fetch.
:returns: twilio.rest.api.v2010.account.sip.credential_list.credential.CredentialContext
:rtype: twilio.rest.api.v2010.account.sip.credential_list.credential.CredentialContext
|
Initialize the CredentialContext
|
[
"Initialize",
"the",
"CredentialContext"
] |
def __init__(self, version, account_sid, credential_list_sid, sid):
"""
Initialize the CredentialContext
:param Version version: Version that contains the resource
:param account_sid: The unique id of the Account that is responsible for this resource.
:param credential_list_sid: The unique id that identifies the credential list that contains the desired credential
:param sid: The unique id that identifies the resource to fetch.
:returns: twilio.rest.api.v2010.account.sip.credential_list.credential.CredentialContext
:rtype: twilio.rest.api.v2010.account.sip.credential_list.credential.CredentialContext
"""
super(CredentialContext, self).__init__(version)
# Path Solution
self._solution = {
'account_sid': account_sid,
'credential_list_sid': credential_list_sid,
'sid': sid,
}
self._uri = '/Accounts/{account_sid}/SIP/CredentialLists/{credential_list_sid}/Credentials/{sid}.json'.format(**self._solution)
|
[
"def",
"__init__",
"(",
"self",
",",
"version",
",",
"account_sid",
",",
"credential_list_sid",
",",
"sid",
")",
":",
"super",
"(",
"CredentialContext",
",",
"self",
")",
".",
"__init__",
"(",
"version",
")",
"# Path Solution",
"self",
".",
"_solution",
"=",
"{",
"'account_sid'",
":",
"account_sid",
",",
"'credential_list_sid'",
":",
"credential_list_sid",
",",
"'sid'",
":",
"sid",
",",
"}",
"self",
".",
"_uri",
"=",
"'/Accounts/{account_sid}/SIP/CredentialLists/{credential_list_sid}/Credentials/{sid}.json'",
".",
"format",
"(",
"*",
"*",
"self",
".",
"_solution",
")"
] |
https://github.com/twilio/twilio-python/blob/6e1e811ea57a1edfadd5161ace87397c563f6915/twilio/rest/api/v2010/account/sip/credential_list/credential.py#L223-L243
|
||
dimagi/commcare-hq
|
d67ff1d3b4c51fa050c19e60c3253a79d3452a39
|
corehq/apps/app_manager/views/app_summary.py
|
python
|
DownloadCaseSummaryView._get_load_question_row
|
(self, prop, form, language, load_question)
|
return PropertyRow(
prop.name,
form.form_id,
_get_translated_form_name(self.app, form.form_id, language),
load_question.question.value,
"{} {} {}".format(
load_question.condition.question,
load_question.condition.operator,
load_question.condition.answer
) if load_question.condition else "",
None,
None,
None,
)
|
[] |
def _get_load_question_row(self, prop, form, language, load_question):
return PropertyRow(
prop.name,
form.form_id,
_get_translated_form_name(self.app, form.form_id, language),
load_question.question.value,
"{} {} {}".format(
load_question.condition.question,
load_question.condition.operator,
load_question.condition.answer
) if load_question.condition else "",
None,
None,
None,
)
|
[
"def",
"_get_load_question_row",
"(",
"self",
",",
"prop",
",",
"form",
",",
"language",
",",
"load_question",
")",
":",
"return",
"PropertyRow",
"(",
"prop",
".",
"name",
",",
"form",
".",
"form_id",
",",
"_get_translated_form_name",
"(",
"self",
".",
"app",
",",
"form",
".",
"form_id",
",",
"language",
")",
",",
"load_question",
".",
"question",
".",
"value",
",",
"\"{} {} {}\"",
".",
"format",
"(",
"load_question",
".",
"condition",
".",
"question",
",",
"load_question",
".",
"condition",
".",
"operator",
",",
"load_question",
".",
"condition",
".",
"answer",
")",
"if",
"load_question",
".",
"condition",
"else",
"\"\"",
",",
"None",
",",
"None",
",",
"None",
",",
")"
] |
https://github.com/dimagi/commcare-hq/blob/d67ff1d3b4c51fa050c19e60c3253a79d3452a39/corehq/apps/app_manager/views/app_summary.py#L574-L588
|
|||
pypa/pip
|
7f8a6844037fb7255cfd0d34ff8e8cf44f2598d4
|
src/pip/_vendor/resolvelib/resolvers.py
|
python
|
Resolution._backtrack
|
(self)
|
return False
|
Perform backtracking.
When we enter here, the stack is like this::
[ state Z ]
[ state Y ]
[ state X ]
.... earlier states are irrelevant.
1. No pins worked for Z, so it does not have a pin.
2. We want to reset state Y to unpinned, and pin another candidate.
3. State X holds what state Y was before the pin, but does not
have the incompatibility information gathered in state Y.
Each iteration of the loop will:
1. Discard Z.
2. Discard Y but remember its incompatibility information gathered
previously, and the failure we're dealing with right now.
3. Push a new state Y' based on X, and apply the incompatibility
information from Y to Y'.
4a. If this causes Y' to conflict, we need to backtrack again. Make Y'
the new Z and go back to step 2.
4b. If the incompatibilities apply cleanly, end backtracking.
|
Perform backtracking.
|
[
"Perform",
"backtracking",
"."
] |
def _backtrack(self):
"""Perform backtracking.
When we enter here, the stack is like this::
[ state Z ]
[ state Y ]
[ state X ]
.... earlier states are irrelevant.
1. No pins worked for Z, so it does not have a pin.
2. We want to reset state Y to unpinned, and pin another candidate.
3. State X holds what state Y was before the pin, but does not
have the incompatibility information gathered in state Y.
Each iteration of the loop will:
1. Discard Z.
2. Discard Y but remember its incompatibility information gathered
previously, and the failure we're dealing with right now.
3. Push a new state Y' based on X, and apply the incompatibility
information from Y to Y'.
4a. If this causes Y' to conflict, we need to backtrack again. Make Y'
the new Z and go back to step 2.
4b. If the incompatibilities apply cleanly, end backtracking.
"""
while len(self._states) >= 3:
# Remove the state that triggered backtracking.
del self._states[-1]
# Retrieve the last candidate pin and known incompatibilities.
broken_state = self._states.pop()
name, candidate = broken_state.mapping.popitem()
incompatibilities_from_broken = [
(k, list(v.incompatibilities))
for k, v in broken_state.criteria.items()
]
# Also mark the newly known incompatibility.
incompatibilities_from_broken.append((name, [candidate]))
self._r.backtracking(candidate=candidate)
# Create a new state from the last known-to-work one, and apply
# the previously gathered incompatibility information.
def _patch_criteria():
for k, incompatibilities in incompatibilities_from_broken:
if not incompatibilities:
continue
try:
criterion = self.state.criteria[k]
except KeyError:
continue
matches = self._p.find_matches(
identifier=k,
requirements=IteratorMapping(
self.state.criteria,
operator.methodcaller("iter_requirement"),
),
incompatibilities=IteratorMapping(
self.state.criteria,
operator.attrgetter("incompatibilities"),
{k: incompatibilities},
),
)
candidates = build_iter_view(matches)
if not candidates:
return False
incompatibilities.extend(criterion.incompatibilities)
self.state.criteria[k] = Criterion(
candidates=candidates,
information=list(criterion.information),
incompatibilities=incompatibilities,
)
return True
self._push_new_state()
success = _patch_criteria()
# It works! Let's work on this new state.
if success:
return True
# State does not work after applying known incompatibilities.
# Try the still previous state.
# No way to backtrack anymore.
return False
|
[
"def",
"_backtrack",
"(",
"self",
")",
":",
"while",
"len",
"(",
"self",
".",
"_states",
")",
">=",
"3",
":",
"# Remove the state that triggered backtracking.",
"del",
"self",
".",
"_states",
"[",
"-",
"1",
"]",
"# Retrieve the last candidate pin and known incompatibilities.",
"broken_state",
"=",
"self",
".",
"_states",
".",
"pop",
"(",
")",
"name",
",",
"candidate",
"=",
"broken_state",
".",
"mapping",
".",
"popitem",
"(",
")",
"incompatibilities_from_broken",
"=",
"[",
"(",
"k",
",",
"list",
"(",
"v",
".",
"incompatibilities",
")",
")",
"for",
"k",
",",
"v",
"in",
"broken_state",
".",
"criteria",
".",
"items",
"(",
")",
"]",
"# Also mark the newly known incompatibility.",
"incompatibilities_from_broken",
".",
"append",
"(",
"(",
"name",
",",
"[",
"candidate",
"]",
")",
")",
"self",
".",
"_r",
".",
"backtracking",
"(",
"candidate",
"=",
"candidate",
")",
"# Create a new state from the last known-to-work one, and apply",
"# the previously gathered incompatibility information.",
"def",
"_patch_criteria",
"(",
")",
":",
"for",
"k",
",",
"incompatibilities",
"in",
"incompatibilities_from_broken",
":",
"if",
"not",
"incompatibilities",
":",
"continue",
"try",
":",
"criterion",
"=",
"self",
".",
"state",
".",
"criteria",
"[",
"k",
"]",
"except",
"KeyError",
":",
"continue",
"matches",
"=",
"self",
".",
"_p",
".",
"find_matches",
"(",
"identifier",
"=",
"k",
",",
"requirements",
"=",
"IteratorMapping",
"(",
"self",
".",
"state",
".",
"criteria",
",",
"operator",
".",
"methodcaller",
"(",
"\"iter_requirement\"",
")",
",",
")",
",",
"incompatibilities",
"=",
"IteratorMapping",
"(",
"self",
".",
"state",
".",
"criteria",
",",
"operator",
".",
"attrgetter",
"(",
"\"incompatibilities\"",
")",
",",
"{",
"k",
":",
"incompatibilities",
"}",
",",
")",
",",
")",
"candidates",
"=",
"build_iter_view",
"(",
"matches",
")",
"if",
"not",
"candidates",
":",
"return",
"False",
"incompatibilities",
".",
"extend",
"(",
"criterion",
".",
"incompatibilities",
")",
"self",
".",
"state",
".",
"criteria",
"[",
"k",
"]",
"=",
"Criterion",
"(",
"candidates",
"=",
"candidates",
",",
"information",
"=",
"list",
"(",
"criterion",
".",
"information",
")",
",",
"incompatibilities",
"=",
"incompatibilities",
",",
")",
"return",
"True",
"self",
".",
"_push_new_state",
"(",
")",
"success",
"=",
"_patch_criteria",
"(",
")",
"# It works! Let's work on this new state.",
"if",
"success",
":",
"return",
"True",
"# State does not work after applying known incompatibilities.",
"# Try the still previous state.",
"# No way to backtrack anymore.",
"return",
"False"
] |
https://github.com/pypa/pip/blob/7f8a6844037fb7255cfd0d34ff8e8cf44f2598d4/src/pip/_vendor/resolvelib/resolvers.py#L243-L330
|
|
punchagan/cinspect
|
23834b9d02511a88cba8ca0aa1397eef927822c3
|
cinspect/vendor/clang/cindex.py
|
python
|
Cursor.is_bitfield
|
(self)
|
return conf.lib.clang_Cursor_isBitField(self)
|
Check if the field is a bitfield.
|
Check if the field is a bitfield.
|
[
"Check",
"if",
"the",
"field",
"is",
"a",
"bitfield",
"."
] |
def is_bitfield(self):
"""
Check if the field is a bitfield.
"""
return conf.lib.clang_Cursor_isBitField(self)
|
[
"def",
"is_bitfield",
"(",
"self",
")",
":",
"return",
"conf",
".",
"lib",
".",
"clang_Cursor_isBitField",
"(",
"self",
")"
] |
https://github.com/punchagan/cinspect/blob/23834b9d02511a88cba8ca0aa1397eef927822c3/cinspect/vendor/clang/cindex.py#L1408-L1412
|
|
gnuradio/SigMF
|
3f60b653d8e6a529962b58c97267924702dd9dea
|
sigmf/sigmffile.py
|
python
|
SigMFFile._validate_dict_in_section
|
(self, entries, section_key)
|
Checks a dictionary for validity.
Throws if not.
|
Checks a dictionary for validity.
Throws if not.
|
[
"Checks",
"a",
"dictionary",
"for",
"validity",
".",
"Throws",
"if",
"not",
"."
] |
def _validate_dict_in_section(self, entries, section_key):
"""
Checks a dictionary for validity.
Throws if not.
"""
schema_section = self.get_schema()[section_key]
for key, value in entries.items():
validate.validate_key_throw(
value, schema_section.get(key, {}), schema_section, key
)
|
[
"def",
"_validate_dict_in_section",
"(",
"self",
",",
"entries",
",",
"section_key",
")",
":",
"schema_section",
"=",
"self",
".",
"get_schema",
"(",
")",
"[",
"section_key",
"]",
"for",
"key",
",",
"value",
"in",
"entries",
".",
"items",
"(",
")",
":",
"validate",
".",
"validate_key_throw",
"(",
"value",
",",
"schema_section",
".",
"get",
"(",
"key",
",",
"{",
"}",
")",
",",
"schema_section",
",",
"key",
")"
] |
https://github.com/gnuradio/SigMF/blob/3f60b653d8e6a529962b58c97267924702dd9dea/sigmf/sigmffile.py#L159-L168
|
||
pyvista/pyvista
|
012dbb95a9aae406c3cd4cd94fc8c477f871e426
|
pyvista/plotting/tools.py
|
python
|
system_supports_plotting
|
()
|
return SUPPORTS_PLOTTING
|
Check if the environment supports plotting.
Returns
-------
bool
``True`` when system supports plotting.
|
Check if the environment supports plotting.
|
[
"Check",
"if",
"the",
"environment",
"supports",
"plotting",
"."
] |
def system_supports_plotting():
"""Check if the environment supports plotting.
Returns
-------
bool
``True`` when system supports plotting.
"""
global SUPPORTS_PLOTTING
if SUPPORTS_PLOTTING is None:
SUPPORTS_PLOTTING = _system_supports_plotting()
# always use the cached response
return SUPPORTS_PLOTTING
|
[
"def",
"system_supports_plotting",
"(",
")",
":",
"global",
"SUPPORTS_PLOTTING",
"if",
"SUPPORTS_PLOTTING",
"is",
"None",
":",
"SUPPORTS_PLOTTING",
"=",
"_system_supports_plotting",
"(",
")",
"# always use the cached response",
"return",
"SUPPORTS_PLOTTING"
] |
https://github.com/pyvista/pyvista/blob/012dbb95a9aae406c3cd4cd94fc8c477f871e426/pyvista/plotting/tools.py#L76-L90
|
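system_supports_plotting above caches an expensive probe in a module-level global so it runs only once per process. A minimal sketch of that caching pattern, with a stand-in probe function; `functools.lru_cache(maxsize=None)` gives the same behaviour without the explicit global.

```python
_SUPPORTS_PLOTTING = None  # module-level cache

def _probe() -> bool:
    # Stand-in for the real (expensive) display/VTK check.
    print("probing once...")
    return True

def system_supports_plotting() -> bool:
    """Return the cached probe result, computing it on first use."""
    global _SUPPORTS_PLOTTING
    if _SUPPORTS_PLOTTING is None:
        _SUPPORTS_PLOTTING = _probe()
    return _SUPPORTS_PLOTTING

system_supports_plotting()  # prints "probing once..."
system_supports_plotting()  # served from the cache
```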
|
kbandla/ImmunityDebugger
|
2abc03fb15c8f3ed0914e1175c4d8933977c73e3
|
1.83/Libs/immlib.py
|
python
|
Debugger.createWindow
|
(self, title, col_titles)
|
return self.createTable( title, col_titles )
|
Creates a custom window.
@type title: STRING
@param title: Window title
@type col_titles: LIST OF STRING
@param col_titles: Column titles list
@return HWND: Handler of created table
|
Creates a custom window.
|
[
"Creates",
"a",
"custom",
"window",
"."
] |
def createWindow(self, title, col_titles):
"""
Creates a custom window.
@type title: STRING
@param title: Window title
@type col_titles: LIST OF STRING
@param col_titles: Column titles list
@return HWND: Handler of created table
"""
return self.createTable( title, col_titles )
|
[
"def",
"createWindow",
"(",
"self",
",",
"title",
",",
"col_titles",
")",
":",
"return",
"self",
".",
"createTable",
"(",
"title",
",",
"col_titles",
")"
] |
https://github.com/kbandla/ImmunityDebugger/blob/2abc03fb15c8f3ed0914e1175c4d8933977c73e3/1.83/Libs/immlib.py#L1647-L1659
|
|
linuxscout/pyarabic
|
010bddadb7c9b5c6bd24cc02d4aeddde0c4a10c4
|
pyarabic/araby.py
|
python
|
is_vocalized
|
(word)
|
return True
|
Checks if the arabic word is vocalized.
the word musn't have any spaces and pounctuations.
@param word: arabic unicode char
@type word: unicode
@return: if the word is vocalized
@rtype:Boolean
|
Checks if the arabic word is vocalized.
the word musn't have any spaces and pounctuations.
|
[
"Checks",
"if",
"the",
"arabic",
"word",
"is",
"vocalized",
".",
"the",
"word",
"musn",
"t",
"have",
"any",
"spaces",
"and",
"pounctuations",
"."
] |
def is_vocalized(word):
"""Checks if the arabic word is vocalized.
the word musn't have any spaces and pounctuations.
@param word: arabic unicode char
@type word: unicode
@return: if the word is vocalized
@rtype:Boolean
"""
if word.isalpha():
return False
for char in word:
if is_tashkeel(char):
break
else:
return False
return True
|
[
"def",
"is_vocalized",
"(",
"word",
")",
":",
"if",
"word",
".",
"isalpha",
"(",
")",
":",
"return",
"False",
"for",
"char",
"in",
"word",
":",
"if",
"is_tashkeel",
"(",
"char",
")",
":",
"break",
"else",
":",
"return",
"False",
"return",
"True"
] |
https://github.com/linuxscout/pyarabic/blob/010bddadb7c9b5c6bd24cc02d4aeddde0c4a10c4/pyarabic/araby.py#L559-L574
|
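A short usage sketch for is_vocalized above, assuming pyarabic is installed; the sample words are illustrative (the first carries tashkeel marks, the second does not), so the expected outputs are stated tentatively.

```python
from pyarabic import araby

vocalized = "كَتَبَ"   # word with short-vowel (tashkeel) marks
bare = "كتب"           # same word without diacritics

print(araby.is_vocalized(vocalized))  # expected: True
print(araby.is_vocalized(bare))       # expected: False
```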
|
Cadene/tensorflow-model-zoo.torch
|
990b10ffc22d4c8eacb2a502f20415b4f70c74c2
|
models/research/object_detection/core/box_list.py
|
python
|
BoxList.set_field
|
(self, field, value)
|
Sets the value of a field.
Updates the field of a box_list with a given value.
Args:
field: (string) name of the field to set value.
value: the value to assign to the field.
Raises:
ValueError: if the box_list does not have specified field.
|
Sets the value of a field.
|
[
"Sets",
"the",
"value",
"of",
"a",
"field",
"."
] |
def set_field(self, field, value):
"""Sets the value of a field.
Updates the field of a box_list with a given value.
Args:
field: (string) name of the field to set value.
value: the value to assign to the field.
Raises:
ValueError: if the box_list does not have specified field.
"""
if not self.has_field(field):
raise ValueError('field %s does not exist' % field)
self.data[field] = value
|
[
"def",
"set_field",
"(",
"self",
",",
"field",
",",
"value",
")",
":",
"if",
"not",
"self",
".",
"has_field",
"(",
"field",
")",
":",
"raise",
"ValueError",
"(",
"'field %s does not exist'",
"%",
"field",
")",
"self",
".",
"data",
"[",
"field",
"]",
"=",
"value"
] |
https://github.com/Cadene/tensorflow-model-zoo.torch/blob/990b10ffc22d4c8eacb2a502f20415b4f70c74c2/models/research/object_detection/core/box_list.py#L142-L156
|
||
uclnlp/jack
|
9e5ffbd4fb2b0bd6b816fe6e14b9045ac776bb8e
|
jack/core/tensorport.py
|
python
|
TensorPort.torch_to_numpy
|
(value)
|
Convenience method that produces a tensor given the value of the defined type.
Returns: a torch tensor of same type.
|
Convenience method that produces a tensor given the value of the defined type.
|
[
"Convenience",
"method",
"that",
"produces",
"a",
"tensor",
"given",
"the",
"value",
"of",
"the",
"defined",
"type",
"."
] |
def torch_to_numpy(value):
"""Convenience method that produces a tensor given the value of the defined type.
Returns: a torch tensor of same type.
"""
if isinstance(value, torch.autograd.Variable):
value = value.data
if torch.is_tensor(value):
return value.cpu().numpy()
elif isinstance(value, np.ndarray):
return value
else:
return np.ndarray(value)
|
[
"def",
"torch_to_numpy",
"(",
"value",
")",
":",
"if",
"isinstance",
"(",
"value",
",",
"torch",
".",
"autograd",
".",
"Variable",
")",
":",
"value",
"=",
"value",
".",
"data",
"if",
"torch",
".",
"is_tensor",
"(",
"value",
")",
":",
"return",
"value",
".",
"cpu",
"(",
")",
".",
"numpy",
"(",
")",
"elif",
"isinstance",
"(",
"value",
",",
"np",
".",
"ndarray",
")",
":",
"return",
"value",
"else",
":",
"return",
"np",
".",
"ndarray",
"(",
"value",
")"
] |
https://github.com/uclnlp/jack/blob/9e5ffbd4fb2b0bd6b816fe6e14b9045ac776bb8e/jack/core/tensorport.py#L80-L92
|
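torch_to_numpy above handles the tensor-or-array cases; with current PyTorch the Variable wrapper is gone, so the conversion reduces to the sketch below. Note two deliberate substitutions of my own: `detach()` replaces the Variable check, and `np.asarray` replaces the original's `np.ndarray` call for plain sequences.

```python
import numpy as np
import torch

def to_numpy(value):
    """Return a numpy array for a torch tensor, ndarray, or nested sequence."""
    if torch.is_tensor(value):
        return value.detach().cpu().numpy()
    if isinstance(value, np.ndarray):
        return value
    return np.asarray(value)

print(to_numpy(torch.ones(2, 3)).shape)   # (2, 3)
print(to_numpy([[1, 2], [3, 4]]).dtype)   # integer dtype (platform dependent)
```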
||
bcbio/bcbio-nextgen
|
c80f9b6b1be3267d1f981b7035e3b72441d258f2
|
bcbio/variation/freebayes.py
|
python
|
clean_vcf_output
|
(orig_file, clean_fn, config, name="clean")
|
Provide framework to clean a file in-place, with the specified clean
function.
|
Provide framework to clean a file in-place, with the specified clean
function.
|
[
"Provide",
"framework",
"to",
"clean",
"a",
"file",
"in",
"-",
"place",
"with",
"the",
"specified",
"clean",
"function",
"."
] |
def clean_vcf_output(orig_file, clean_fn, config, name="clean"):
"""Provide framework to clean a file in-place, with the specified clean
function.
"""
base, ext = utils.splitext_plus(orig_file)
out_file = "{0}-{1}{2}".format(base, name, ext)
if not utils.file_exists(out_file):
with open(orig_file) as in_handle:
with file_transaction(config, out_file) as tx_out_file:
with open(tx_out_file, "w") as out_handle:
for line in in_handle:
update_line = clean_fn(line)
if update_line:
out_handle.write(update_line)
move_vcf(orig_file, "{0}.orig".format(orig_file))
move_vcf(out_file, orig_file)
with open(out_file, "w") as out_handle:
out_handle.write("Moved to {0}".format(orig_file))
|
[
"def",
"clean_vcf_output",
"(",
"orig_file",
",",
"clean_fn",
",",
"config",
",",
"name",
"=",
"\"clean\"",
")",
":",
"base",
",",
"ext",
"=",
"utils",
".",
"splitext_plus",
"(",
"orig_file",
")",
"out_file",
"=",
"\"{0}-{1}{2}\"",
".",
"format",
"(",
"base",
",",
"name",
",",
"ext",
")",
"if",
"not",
"utils",
".",
"file_exists",
"(",
"out_file",
")",
":",
"with",
"open",
"(",
"orig_file",
")",
"as",
"in_handle",
":",
"with",
"file_transaction",
"(",
"config",
",",
"out_file",
")",
"as",
"tx_out_file",
":",
"with",
"open",
"(",
"tx_out_file",
",",
"\"w\"",
")",
"as",
"out_handle",
":",
"for",
"line",
"in",
"in_handle",
":",
"update_line",
"=",
"clean_fn",
"(",
"line",
")",
"if",
"update_line",
":",
"out_handle",
".",
"write",
"(",
"update_line",
")",
"move_vcf",
"(",
"orig_file",
",",
"\"{0}.orig\"",
".",
"format",
"(",
"orig_file",
")",
")",
"move_vcf",
"(",
"out_file",
",",
"orig_file",
")",
"with",
"open",
"(",
"out_file",
",",
"\"w\"",
")",
"as",
"out_handle",
":",
"out_handle",
".",
"write",
"(",
"\"Moved to {0}\"",
".",
"format",
"(",
"orig_file",
")",
")"
] |
https://github.com/bcbio/bcbio-nextgen/blob/c80f9b6b1be3267d1f981b7035e3b72441d258f2/bcbio/variation/freebayes.py#L355-L372
|
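clean_vcf_output above is bcbio's transactional take on rewriting a file in place through a line-filtering function. A standard-library-only sketch of the same idea, writing to a temporary file and atomically replacing the original (file names are illustrative):

```python
import os
import tempfile

def clean_file_in_place(path, clean_fn):
    """Rewrite `path` through clean_fn(line); lines mapped to None are dropped."""
    dir_name = os.path.dirname(os.path.abspath(path))
    with open(path) as in_handle, \
         tempfile.NamedTemporaryFile("w", dir=dir_name, delete=False) as tmp:
        for line in in_handle:
            cleaned = clean_fn(line)
            if cleaned:
                tmp.write(cleaned)
        tmp_name = tmp.name
    os.replace(tmp_name, path)  # atomic rename over the original

# Example: drop header lines starting with '##'
# clean_file_in_place("calls.vcf", lambda l: None if l.startswith("##") else l)
```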
||
holzschu/Carnets
|
44effb10ddfc6aa5c8b0687582a724ba82c6b547
|
Library/lib/python3.7/site-packages/numpy-1.16.0-py3.7-macosx-10.9-x86_64.egg/numpy/lib/financial.py
|
python
|
_rbl
|
(rate, per, pmt, pv, when)
|
return fv(rate, (per - 1), pmt, pv, when)
|
This function is here to simply have a different name for the 'fv'
function to not interfere with the 'fv' keyword argument within the 'ipmt'
function. It is the 'remaining balance on loan' which might be useful as
it's own function, but is easily calculated with the 'fv' function.
|
This function is here to simply have a different name for the 'fv'
function to not interfere with the 'fv' keyword argument within the 'ipmt'
function. It is the 'remaining balance on loan' which might be useful as
it's own function, but is easily calculated with the 'fv' function.
|
[
"This",
"function",
"is",
"here",
"to",
"simply",
"have",
"a",
"different",
"name",
"for",
"the",
"fv",
"function",
"to",
"not",
"interfere",
"with",
"the",
"fv",
"keyword",
"argument",
"within",
"the",
"ipmt",
"function",
".",
"It",
"is",
"the",
"remaining",
"balance",
"on",
"loan",
"which",
"might",
"be",
"useful",
"as",
"it",
"s",
"own",
"function",
"but",
"is",
"easily",
"calculated",
"with",
"the",
"fv",
"function",
"."
] |
def _rbl(rate, per, pmt, pv, when):
"""
This function is here to simply have a different name for the 'fv'
function to not interfere with the 'fv' keyword argument within the 'ipmt'
function. It is the 'remaining balance on loan' which might be useful as
it's own function, but is easily calculated with the 'fv' function.
"""
return fv(rate, (per - 1), pmt, pv, when)
|
[
"def",
"_rbl",
"(",
"rate",
",",
"per",
",",
"pmt",
",",
"pv",
",",
"when",
")",
":",
"return",
"fv",
"(",
"rate",
",",
"(",
"per",
"-",
"1",
")",
",",
"pmt",
",",
"pv",
",",
"when",
")"
] |
https://github.com/holzschu/Carnets/blob/44effb10ddfc6aa5c8b0687582a724ba82c6b547/Library/lib/python3.7/site-packages/numpy-1.16.0-py3.7-macosx-10.9-x86_64.egg/numpy/lib/financial.py#L414-L421
|
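_rbl above is simply fv evaluated at per - 1 periods. The financial routines were removed from NumPy after 1.17 and now live in the separate numpy-financial package, so the sketch below uses that package instead; the loan figures are made up.

```python
import numpy_financial as npf

rate = 0.05 / 12      # monthly interest rate
pv = 10_000           # amount borrowed
pmt = -300            # fixed monthly payment

def remaining_balance(per):
    """Balance just before payment number `per`, via fv at per - 1 periods."""
    return npf.fv(rate, per - 1, pmt, pv)

# Negative result means money is still owed under fv's sign convention.
print(round(remaining_balance(12), 2))
```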
|
albertz/music-player
|
d23586f5bf657cbaea8147223be7814d117ae73d
|
mac/pyobjc-framework-Cocoa/Examples/AppKit/DragItemAround/DragItemAround.py
|
python
|
DraggableItemView.keyDown_
|
(self, event)
|
.
|
.
|
[
"."
] |
def keyDown_(self, event):
"""."""
handled = False
characters = event.charactersIgnoringModifiers()
if characters.isEqual_('r'):
handled = True
self.setItemPropertiesToDefault_(self)
if handled is False:
super(DraggableItemView, self).keyDown_(event)
|
[
"def",
"keyDown_",
"(",
"self",
",",
"event",
")",
":",
"handled",
"=",
"False",
"characters",
"=",
"event",
".",
"charactersIgnoringModifiers",
"(",
")",
"if",
"characters",
".",
"isEqual_",
"(",
"'r'",
")",
":",
"handled",
"=",
"True",
"self",
".",
"setItemPropertiesToDefault_",
"(",
"self",
")",
"if",
"handled",
"is",
"False",
":",
"super",
"(",
"DraggableItemView",
",",
"self",
")",
".",
"keyDown_",
"(",
"event",
")"
] |
https://github.com/albertz/music-player/blob/d23586f5bf657cbaea8147223be7814d117ae73d/mac/pyobjc-framework-Cocoa/Examples/AppKit/DragItemAround/DragItemAround.py#L84-L92
|
||
dmlc/gluon-cv
|
709bc139919c02f7454cb411311048be188cde64
|
gluoncv/model_zoo/deeplabv3.py
|
python
|
get_deeplab_resnet101_voc
|
(**kwargs)
|
return get_deeplab('pascal_voc', 'resnet101', **kwargs)
|
r"""DeepLabV3
Parameters
----------
pretrained : bool or str
Boolean value controls whether to load the default pretrained weights for model.
String value represents the hashtag for a certain version of pretrained weights.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
Examples
--------
>>> model = get_deeplab_resnet101_voc(pretrained=True)
>>> print(model)
|
r"""DeepLabV3
Parameters
----------
pretrained : bool or str
Boolean value controls whether to load the default pretrained weights for model.
String value represents the hashtag for a certain version of pretrained weights.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
|
[
"r",
"DeepLabV3",
"Parameters",
"----------",
"pretrained",
":",
"bool",
"or",
"str",
"Boolean",
"value",
"controls",
"whether",
"to",
"load",
"the",
"default",
"pretrained",
"weights",
"for",
"model",
".",
"String",
"value",
"represents",
"the",
"hashtag",
"for",
"a",
"certain",
"version",
"of",
"pretrained",
"weights",
".",
"ctx",
":",
"Context",
"default",
"CPU",
"The",
"context",
"in",
"which",
"to",
"load",
"the",
"pretrained",
"weights",
".",
"root",
":",
"str",
"default",
"~",
"/",
".",
"mxnet",
"/",
"models",
"Location",
"for",
"keeping",
"the",
"model",
"parameters",
"."
] |
def get_deeplab_resnet101_voc(**kwargs):
r"""DeepLabV3
Parameters
----------
pretrained : bool or str
Boolean value controls whether to load the default pretrained weights for model.
String value represents the hashtag for a certain version of pretrained weights.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
Examples
--------
>>> model = get_deeplab_resnet101_voc(pretrained=True)
>>> print(model)
"""
return get_deeplab('pascal_voc', 'resnet101', **kwargs)
|
[
"def",
"get_deeplab_resnet101_voc",
"(",
"*",
"*",
"kwargs",
")",
":",
"return",
"get_deeplab",
"(",
"'pascal_voc'",
",",
"'resnet101'",
",",
"*",
"*",
"kwargs",
")"
] |
https://github.com/dmlc/gluon-cv/blob/709bc139919c02f7454cb411311048be188cde64/gluoncv/model_zoo/deeplabv3.py#L260-L277
|
|
Tuxemon/Tuxemon
|
ee80708090525391c1dfc43849a6348aca636b22
|
tuxemon/states/combat/combat.py
|
python
|
CombatState.enqueue_action
|
(
self,
user: Union[NPC, Monster, None],
technique: Technique,
target: Monster,
)
|
Add some technique or status to the action queue.
Parameters:
user: The user of the technique.
technique: The technique used.
target: The target of the action.
|
Add some technique or status to the action queue.
|
[
"Add",
"some",
"technique",
"or",
"status",
"to",
"the",
"action",
"queue",
"."
] |
def enqueue_action(
self,
user: Union[NPC, Monster, None],
technique: Technique,
target: Monster,
) -> None:
"""
Add some technique or status to the action queue.
Parameters:
user: The user of the technique.
technique: The technique used.
target: The target of the action.
"""
self._action_queue.append(EnqueuedAction(user, technique, target))
|
[
"def",
"enqueue_action",
"(",
"self",
",",
"user",
":",
"Union",
"[",
"NPC",
",",
"Monster",
",",
"None",
"]",
",",
"technique",
":",
"Technique",
",",
"target",
":",
"Monster",
",",
")",
"->",
"None",
":",
"self",
".",
"_action_queue",
".",
"append",
"(",
"EnqueuedAction",
"(",
"user",
",",
"technique",
",",
"target",
")",
")"
] |
https://github.com/Tuxemon/Tuxemon/blob/ee80708090525391c1dfc43849a6348aca636b22/tuxemon/states/combat/combat.py#L715-L730
|
||
holzschu/Carnets
|
44effb10ddfc6aa5c8b0687582a724ba82c6b547
|
Library/lib/python3.7/site-packages/numpy-1.16.0-py3.7-macosx-10.9-x86_64.egg/numpy/core/fromnumeric.py
|
python
|
around
|
(a, decimals=0, out=None)
|
return _wrapfunc(a, 'round', decimals=decimals, out=out)
|
Evenly round to the given number of decimals.
Parameters
----------
a : array_like
Input data.
decimals : int, optional
Number of decimal places to round to (default: 0). If
decimals is negative, it specifies the number of positions to
the left of the decimal point.
out : ndarray, optional
Alternative output array in which to place the result. It must have
the same shape as the expected output, but the type of the output
values will be cast if necessary. See `doc.ufuncs` (Section
"Output arguments") for details.
Returns
-------
rounded_array : ndarray
An array of the same type as `a`, containing the rounded values.
Unless `out` was specified, a new array is created. A reference to
the result is returned.
The real and imaginary parts of complex numbers are rounded
separately. The result of rounding a float is a float.
See Also
--------
ndarray.round : equivalent method
ceil, fix, floor, rint, trunc
Notes
-----
For values exactly halfway between rounded decimal values, NumPy
rounds to the nearest even value. Thus 1.5 and 2.5 round to 2.0,
-0.5 and 0.5 round to 0.0, etc. Results may also be surprising due
to the inexact representation of decimal fractions in the IEEE
floating point standard [1]_ and errors introduced when scaling
by powers of ten.
References
----------
.. [1] "Lecture Notes on the Status of IEEE 754", William Kahan,
https://people.eecs.berkeley.edu/~wkahan/ieee754status/IEEE754.PDF
.. [2] "How Futile are Mindless Assessments of
Roundoff in Floating-Point Computation?", William Kahan,
https://people.eecs.berkeley.edu/~wkahan/Mindless.pdf
Examples
--------
>>> np.around([0.37, 1.64])
array([ 0., 2.])
>>> np.around([0.37, 1.64], decimals=1)
array([ 0.4, 1.6])
>>> np.around([.5, 1.5, 2.5, 3.5, 4.5]) # rounds to nearest even value
array([ 0., 2., 2., 4., 4.])
>>> np.around([1,2,3,11], decimals=1) # ndarray of ints is returned
array([ 1, 2, 3, 11])
>>> np.around([1,2,3,11], decimals=-1)
array([ 0, 0, 0, 10])
|
Evenly round to the given number of decimals.
|
[
"Evenly",
"round",
"to",
"the",
"given",
"number",
"of",
"decimals",
"."
] |
def around(a, decimals=0, out=None):
"""
Evenly round to the given number of decimals.
Parameters
----------
a : array_like
Input data.
decimals : int, optional
Number of decimal places to round to (default: 0). If
decimals is negative, it specifies the number of positions to
the left of the decimal point.
out : ndarray, optional
Alternative output array in which to place the result. It must have
the same shape as the expected output, but the type of the output
values will be cast if necessary. See `doc.ufuncs` (Section
"Output arguments") for details.
Returns
-------
rounded_array : ndarray
An array of the same type as `a`, containing the rounded values.
Unless `out` was specified, a new array is created. A reference to
the result is returned.
The real and imaginary parts of complex numbers are rounded
separately. The result of rounding a float is a float.
See Also
--------
ndarray.round : equivalent method
ceil, fix, floor, rint, trunc
Notes
-----
For values exactly halfway between rounded decimal values, NumPy
rounds to the nearest even value. Thus 1.5 and 2.5 round to 2.0,
-0.5 and 0.5 round to 0.0, etc. Results may also be surprising due
to the inexact representation of decimal fractions in the IEEE
floating point standard [1]_ and errors introduced when scaling
by powers of ten.
References
----------
.. [1] "Lecture Notes on the Status of IEEE 754", William Kahan,
https://people.eecs.berkeley.edu/~wkahan/ieee754status/IEEE754.PDF
.. [2] "How Futile are Mindless Assessments of
Roundoff in Floating-Point Computation?", William Kahan,
https://people.eecs.berkeley.edu/~wkahan/Mindless.pdf
Examples
--------
>>> np.around([0.37, 1.64])
array([ 0., 2.])
>>> np.around([0.37, 1.64], decimals=1)
array([ 0.4, 1.6])
>>> np.around([.5, 1.5, 2.5, 3.5, 4.5]) # rounds to nearest even value
array([ 0., 2., 2., 4., 4.])
>>> np.around([1,2,3,11], decimals=1) # ndarray of ints is returned
array([ 1, 2, 3, 11])
>>> np.around([1,2,3,11], decimals=-1)
array([ 0, 0, 0, 10])
"""
return _wrapfunc(a, 'round', decimals=decimals, out=out)
|
[
"def",
"around",
"(",
"a",
",",
"decimals",
"=",
"0",
",",
"out",
"=",
"None",
")",
":",
"return",
"_wrapfunc",
"(",
"a",
",",
"'round'",
",",
"decimals",
"=",
"decimals",
",",
"out",
"=",
"out",
")"
] |
https://github.com/holzschu/Carnets/blob/44effb10ddfc6aa5c8b0687582a724ba82c6b547/Library/lib/python3.7/site-packages/numpy-1.16.0-py3.7-macosx-10.9-x86_64.egg/numpy/core/fromnumeric.py#L2941-L3007
|
|
CLUEbenchmark/CLUEPretrainedModels
|
b384fd41665a8261f9c689c940cf750b3bc21fce
|
baselines/models/classifier_utils.py
|
python
|
CMNLIProcessor.get_train_examples
|
(self, data_dir)
|
return self._create_examples_json(os.path.join(data_dir, "train.json"), "train")
|
See base class.
|
See base class.
|
[
"See",
"base",
"class",
"."
] |
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples_json(os.path.join(data_dir, "train.json"), "train")
|
[
"def",
"get_train_examples",
"(",
"self",
",",
"data_dir",
")",
":",
"return",
"self",
".",
"_create_examples_json",
"(",
"os",
".",
"path",
".",
"join",
"(",
"data_dir",
",",
"\"train.json\"",
")",
",",
"\"train\"",
")"
] |
https://github.com/CLUEbenchmark/CLUEPretrainedModels/blob/b384fd41665a8261f9c689c940cf750b3bc21fce/baselines/models/classifier_utils.py#L370-L372
|
|
fossasia/x-mario-center
|
fe67afe28d995dcf4e2498e305825a4859566172
|
build/lib.linux-i686-2.7/softwarecenter/utils.py
|
python
|
decode_xml_char_reference
|
(s)
|
return p.sub(r"\u\1", s).decode("unicode-escape")
|
takes a string like
'Search…'
and converts it to
'Search...'
|
takes a string like
'Search…'
and converts it to
'Search...'
|
[
"takes",
"a",
"string",
"like",
"Search…",
";",
"and",
"converts",
"it",
"to",
"Search",
"..."
] |
def decode_xml_char_reference(s):
""" takes a string like
'Search…'
and converts it to
'Search...'
"""
p = re.compile("\&\#x(\d\d\d\d);")
return p.sub(r"\u\1", s).decode("unicode-escape")
|
[
"def",
"decode_xml_char_reference",
"(",
"s",
")",
":",
"p",
"=",
"re",
".",
"compile",
"(",
"\"\\&\\#x(\\d\\d\\d\\d);\"",
")",
"return",
"p",
".",
"sub",
"(",
"r\"\\u\\1\"",
",",
"s",
")",
".",
"decode",
"(",
"\"unicode-escape\"",
")"
] |
https://github.com/fossasia/x-mario-center/blob/fe67afe28d995dcf4e2498e305825a4859566172/build/lib.linux-i686-2.7/softwarecenter/utils.py#L294-L301
|
|
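The helper above is Python 2 code (str.decode('unicode-escape') no longer exists on str in Python 3). A hedged sketch of the same idea using only the standard library's html module; note the entity resolves to the actual ellipsis character rather than three dots:

import html

# Numeric XML character references such as &#x2026; are resolved directly.
print(html.unescape("Search&#x2026;"))   # 'Search…'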
chribsen/simple-machine-learning-examples
|
dc94e52a4cebdc8bb959ff88b81ff8cfeca25022
|
venv/lib/python2.7/site-packages/pandas/core/frame.py
|
python
|
DataFrame.assign
|
(self, **kwargs)
|
return data
|
Assign new columns to a DataFrame, returning a new object
(a copy) with all the original columns in addition to the new ones.
.. versionadded:: 0.16.0
Parameters
----------
kwargs : keyword, value pairs
keywords are the column names. If the values are
callable, they are computed on the DataFrame and
assigned to the new columns. The callable must not
change input DataFrame (though pandas doesn't check it).
If the values are not callable, (e.g. a Series, scalar, or array),
they are simply assigned.
Returns
-------
df : DataFrame
A new DataFrame with the new columns in addition to
all the existing columns.
Notes
-----
Since ``kwargs`` is a dictionary, the order of your
arguments may not be preserved. To make things predictable,
the columns are inserted in alphabetical order, at the end of
your DataFrame. Assigning multiple columns within the same
``assign`` is possible, but you cannot reference other columns
created within the same ``assign`` call.
Examples
--------
>>> df = DataFrame({'A': range(1, 11), 'B': np.random.randn(10)})
Where the value is a callable, evaluated on `df`:
>>> df.assign(ln_A = lambda x: np.log(x.A))
A B ln_A
0 1 0.426905 0.000000
1 2 -0.780949 0.693147
2 3 -0.418711 1.098612
3 4 -0.269708 1.386294
4 5 -0.274002 1.609438
5 6 -0.500792 1.791759
6 7 1.649697 1.945910
7 8 -1.495604 2.079442
8 9 0.549296 2.197225
9 10 -0.758542 2.302585
Where the value already exists and is inserted:
>>> newcol = np.log(df['A'])
>>> df.assign(ln_A=newcol)
A B ln_A
0 1 0.426905 0.000000
1 2 -0.780949 0.693147
2 3 -0.418711 1.098612
3 4 -0.269708 1.386294
4 5 -0.274002 1.609438
5 6 -0.500792 1.791759
6 7 1.649697 1.945910
7 8 -1.495604 2.079442
8 9 0.549296 2.197225
9 10 -0.758542 2.302585
|
Assign new columns to a DataFrame, returning a new object
(a copy) with all the original columns in addition to the new ones.
|
[
"Assign",
"new",
"columns",
"to",
"a",
"DataFrame",
"returning",
"a",
"new",
"object",
"(",
"a",
"copy",
")",
"with",
"all",
"the",
"original",
"columns",
"in",
"addition",
"to",
"the",
"new",
"ones",
"."
] |
def assign(self, **kwargs):
"""
Assign new columns to a DataFrame, returning a new object
(a copy) with all the original columns in addition to the new ones.
.. versionadded:: 0.16.0
Parameters
----------
kwargs : keyword, value pairs
keywords are the column names. If the values are
callable, they are computed on the DataFrame and
assigned to the new columns. The callable must not
change input DataFrame (though pandas doesn't check it).
If the values are not callable, (e.g. a Series, scalar, or array),
they are simply assigned.
Returns
-------
df : DataFrame
A new DataFrame with the new columns in addition to
all the existing columns.
Notes
-----
Since ``kwargs`` is a dictionary, the order of your
arguments may not be preserved. To make things predictable,
the columns are inserted in alphabetical order, at the end of
your DataFrame. Assigning multiple columns within the same
``assign`` is possible, but you cannot reference other columns
created within the same ``assign`` call.
Examples
--------
>>> df = DataFrame({'A': range(1, 11), 'B': np.random.randn(10)})
Where the value is a callable, evaluated on `df`:
>>> df.assign(ln_A = lambda x: np.log(x.A))
A B ln_A
0 1 0.426905 0.000000
1 2 -0.780949 0.693147
2 3 -0.418711 1.098612
3 4 -0.269708 1.386294
4 5 -0.274002 1.609438
5 6 -0.500792 1.791759
6 7 1.649697 1.945910
7 8 -1.495604 2.079442
8 9 0.549296 2.197225
9 10 -0.758542 2.302585
Where the value already exists and is inserted:
>>> newcol = np.log(df['A'])
>>> df.assign(ln_A=newcol)
A B ln_A
0 1 0.426905 0.000000
1 2 -0.780949 0.693147
2 3 -0.418711 1.098612
3 4 -0.269708 1.386294
4 5 -0.274002 1.609438
5 6 -0.500792 1.791759
6 7 1.649697 1.945910
7 8 -1.495604 2.079442
8 9 0.549296 2.197225
9 10 -0.758542 2.302585
"""
data = self.copy()
# do all calculations first...
results = {}
for k, v in kwargs.items():
results[k] = com._apply_if_callable(v, data)
# ... and then assign
for k, v in sorted(results.items()):
data[k] = v
return data
|
[
"def",
"assign",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"data",
"=",
"self",
".",
"copy",
"(",
")",
"# do all calculations first...",
"results",
"=",
"{",
"}",
"for",
"k",
",",
"v",
"in",
"kwargs",
".",
"items",
"(",
")",
":",
"results",
"[",
"k",
"]",
"=",
"com",
".",
"_apply_if_callable",
"(",
"v",
",",
"data",
")",
"# ... and then assign",
"for",
"k",
",",
"v",
"in",
"sorted",
"(",
"results",
".",
"items",
"(",
")",
")",
":",
"data",
"[",
"k",
"]",
"=",
"v",
"return",
"data"
] |
https://github.com/chribsen/simple-machine-learning-examples/blob/dc94e52a4cebdc8bb959ff88b81ff8cfeca25022/venv/lib/python2.7/site-packages/pandas/core/frame.py#L2513-L2591
|
|
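A short sketch of the "multiple columns within the same assign" note from the docstring above, written against a current pandas install; the column names ln_A and B are arbitrary:

import numpy as np
import pandas as pd

df = pd.DataFrame({'A': [1, 2, 3]})
# Callable values are evaluated on the copied frame; plain values are assigned as-is.
out = df.assign(ln_A=lambda x: np.log(x.A), B=10)
print(out)   # df itself is left untouched; out carries A, ln_A and B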
Chaffelson/nipyapi
|
d3b186fd701ce308c2812746d98af9120955e810
|
nipyapi/nifi/models/processor_status_dto.py
|
python
|
ProcessorStatusDTO.aggregate_snapshot
|
(self, aggregate_snapshot)
|
Sets the aggregate_snapshot of this ProcessorStatusDTO.
A status snapshot that represents the aggregate stats of all nodes in the cluster. If the NiFi instance is a standalone instance, rather than a cluster, this represents the stats of the single instance.
:param aggregate_snapshot: The aggregate_snapshot of this ProcessorStatusDTO.
:type: ProcessorStatusSnapshotDTO
|
Sets the aggregate_snapshot of this ProcessorStatusDTO.
A status snapshot that represents the aggregate stats of all nodes in the cluster. If the NiFi instance is a standalone instance, rather than a cluster, this represents the stats of the single instance.
|
[
"Sets",
"the",
"aggregate_snapshot",
"of",
"this",
"ProcessorStatusDTO",
".",
"A",
"status",
"snapshot",
"that",
"represents",
"the",
"aggregate",
"stats",
"of",
"all",
"nodes",
"in",
"the",
"cluster",
".",
"If",
"the",
"NiFi",
"instance",
"is",
"a",
"standalone",
"instance",
"rather",
"than",
"a",
"cluster",
"this",
"represents",
"the",
"stats",
"of",
"the",
"single",
"instance",
"."
] |
def aggregate_snapshot(self, aggregate_snapshot):
"""
Sets the aggregate_snapshot of this ProcessorStatusDTO.
A status snapshot that represents the aggregate stats of all nodes in the cluster. If the NiFi instance is a standalone instance, rather than a cluster, this represents the stats of the single instance.
:param aggregate_snapshot: The aggregate_snapshot of this ProcessorStatusDTO.
:type: ProcessorStatusSnapshotDTO
"""
self._aggregate_snapshot = aggregate_snapshot
|
[
"def",
"aggregate_snapshot",
"(",
"self",
",",
"aggregate_snapshot",
")",
":",
"self",
".",
"_aggregate_snapshot",
"=",
"aggregate_snapshot"
] |
https://github.com/Chaffelson/nipyapi/blob/d3b186fd701ce308c2812746d98af9120955e810/nipyapi/nifi/models/processor_status_dto.py#L242-L251
|
||
microsoft/botbuilder-python
|
3d410365461dc434df59bdfeaa2f16d28d9df868
|
libraries/botbuilder-core/botbuilder/core/teams/teams_activity_handler.py
|
python
|
TeamsActivityHandler.on_teams_members_removed_dispatch
|
( # pylint: disable=unused-argument
self,
members_removed: [ChannelAccount],
team_info: TeamInfo,
turn_context: TurnContext,
)
|
return await self.on_teams_members_removed(
teams_members_removed, team_info, turn_context
)
|
Override this in a derived class to provide logic for when members other than the bot
leave the channel, such as your bot's good-bye logic.
It will get the associated members with the provided accounts.
:param members_removed: A list of all the accounts removed from the channel, as
described by the conversation update activity.
:param team_info: The team info object representing the team.
:param turn_context: A context object for this turn.
:returns: A task that represents the work queued to execute.
|
Override this in a derived class to provide logic for when members other than the bot
leave the channel, such as your bot's good-bye logic.
It will get the associated members with the provided accounts.
|
[
"Override",
"this",
"in",
"a",
"derived",
"class",
"to",
"provide",
"logic",
"for",
"when",
"members",
"other",
"than",
"the",
"bot",
"leave",
"the",
"channel",
"such",
"as",
"your",
"bot",
"s",
"good",
"-",
"bye",
"logic",
".",
"It",
"will",
"get",
"the",
"associated",
"members",
"with",
"the",
"provided",
"accounts",
"."
] |
async def on_teams_members_removed_dispatch( # pylint: disable=unused-argument
self,
members_removed: [ChannelAccount],
team_info: TeamInfo,
turn_context: TurnContext,
):
"""
Override this in a derived class to provide logic for when members other than the bot
leave the channel, such as your bot's good-bye logic.
It will get the associated members with the provided accounts.
:param members_removed: A list of all the accounts removed from the channel, as
described by the conversation update activity.
:param team_info: The team info object representing the team.
:param turn_context: A context object for this turn.
:returns: A task that represents the work queued to execute.
"""
teams_members_removed = []
for member in members_removed:
new_account_json = member.serialize()
if "additional_properties" in new_account_json:
del new_account_json["additional_properties"]
teams_members_removed.append(
TeamsChannelAccount().deserialize(new_account_json)
)
return await self.on_teams_members_removed(
teams_members_removed, team_info, turn_context
)
|
[
"async",
"def",
"on_teams_members_removed_dispatch",
"(",
"# pylint: disable=unused-argument",
"self",
",",
"members_removed",
":",
"[",
"ChannelAccount",
"]",
",",
"team_info",
":",
"TeamInfo",
",",
"turn_context",
":",
"TurnContext",
",",
")",
":",
"teams_members_removed",
"=",
"[",
"]",
"for",
"member",
"in",
"members_removed",
":",
"new_account_json",
"=",
"member",
".",
"serialize",
"(",
")",
"if",
"\"additional_properties\"",
"in",
"new_account_json",
":",
"del",
"new_account_json",
"[",
"\"additional_properties\"",
"]",
"teams_members_removed",
".",
"append",
"(",
"TeamsChannelAccount",
"(",
")",
".",
"deserialize",
"(",
"new_account_json",
")",
")",
"return",
"await",
"self",
".",
"on_teams_members_removed",
"(",
"teams_members_removed",
",",
"team_info",
",",
"turn_context",
")"
] |
https://github.com/microsoft/botbuilder-python/blob/3d410365461dc434df59bdfeaa2f16d28d9df868/libraries/botbuilder-core/botbuilder/core/teams/teams_activity_handler.py#L784-L813
|
|
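A hedged sketch of where the dispatch above lands: a derived handler overriding on_teams_members_removed, which receives exactly the three arguments the dispatch body passes. The class name, greeting text and import path are illustrative assumptions, not part of the source:

from botbuilder.core.teams import TeamsActivityHandler

class GoodbyeBot(TeamsActivityHandler):
    async def on_teams_members_removed(
        self, teams_members_removed, team_info, turn_context
    ):
        # Say goodbye for every member the dispatcher deserialized and handed over.
        for member in teams_members_removed:
            await turn_context.send_activity(f"Goodbye, {member.id}")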
learningequality/ka-lite
|
571918ea668013dcf022286ea85eff1c5333fb8b
|
kalite/packages/bundled/django/db/backends/__init__.py
|
python
|
BaseDatabaseWrapper.enter_transaction_management
|
(self, managed=True)
|
Enters transaction management for a running thread. It must be balanced with
the appropriate leave_transaction_management call, since the actual state is
managed as a stack.
The state and dirty flag are carried over from the surrounding block or
from the settings, if there is no surrounding block (dirty is always false
when no current block is running).
|
Enters transaction management for a running thread. It must be balanced with
the appropriate leave_transaction_management call, since the actual state is
managed as a stack.
|
[
"Enters",
"transaction",
"management",
"for",
"a",
"running",
"thread",
".",
"It",
"must",
"be",
"balanced",
"with",
"the",
"appropriate",
"leave_transaction_management",
"call",
"since",
"the",
"actual",
"state",
"is",
"managed",
"as",
"a",
"stack",
"."
] |
def enter_transaction_management(self, managed=True):
"""
Enters transaction management for a running thread. It must be balanced with
the appropriate leave_transaction_management call, since the actual state is
managed as a stack.
The state and dirty flag are carried over from the surrounding block or
from the settings, if there is no surrounding block (dirty is always false
when no current block is running).
"""
if self.transaction_state:
self.transaction_state.append(self.transaction_state[-1])
else:
self.transaction_state.append(settings.TRANSACTIONS_MANAGED)
if self._dirty is None:
self._dirty = False
self._enter_transaction_management(managed)
|
[
"def",
"enter_transaction_management",
"(",
"self",
",",
"managed",
"=",
"True",
")",
":",
"if",
"self",
".",
"transaction_state",
":",
"self",
".",
"transaction_state",
".",
"append",
"(",
"self",
".",
"transaction_state",
"[",
"-",
"1",
"]",
")",
"else",
":",
"self",
".",
"transaction_state",
".",
"append",
"(",
"settings",
".",
"TRANSACTIONS_MANAGED",
")",
"if",
"self",
".",
"_dirty",
"is",
"None",
":",
"self",
".",
"_dirty",
"=",
"False",
"self",
".",
"_enter_transaction_management",
"(",
"managed",
")"
] |
https://github.com/learningequality/ka-lite/blob/571918ea668013dcf022286ea85eff1c5333fb8b/kalite/packages/bundled/django/db/backends/__init__.py#L102-L119
|
||
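A hedged sketch of the balanced pairing the docstring requires, assuming the Django 1.5-era API this vendored copy ships; leave_transaction_management is the counterpart named in the docstring:

from django.db import connection

connection.enter_transaction_management(managed=True)
try:
    cursor = connection.cursor()   # queries here run under the pushed transaction state
finally:
    connection.leave_transaction_management()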
oracle/graalpython
|
577e02da9755d916056184ec441c26e00b70145c
|
graalpython/lib-python/3/xmlrpc/client.py
|
python
|
Unmarshaller.end_array
|
(self, data)
|
[] |
def end_array(self, data):
mark = self._marks.pop()
# map arrays to Python lists
self._stack[mark:] = [self._stack[mark:]]
self._value = 0
|
[
"def",
"end_array",
"(",
"self",
",",
"data",
")",
":",
"mark",
"=",
"self",
".",
"_marks",
".",
"pop",
"(",
")",
"# map arrays to Python lists",
"self",
".",
"_stack",
"[",
"mark",
":",
"]",
"=",
"[",
"self",
".",
"_stack",
"[",
"mark",
":",
"]",
"]",
"self",
".",
"_value",
"=",
"0"
] |
https://github.com/oracle/graalpython/blob/577e02da9755d916056184ec441c26e00b70145c/graalpython/lib-python/3/xmlrpc/client.py#L760-L764
|
||||
python-openxml/python-docx
|
36cac78de080d412e9e50d56c2784e33655cad59
|
docx/styles/style.py
|
python
|
StyleFactory
|
(style_elm)
|
return style_cls(style_elm)
|
Return a style object of the appropriate |BaseStyle| subclass, according
to the type of *style_elm*.
|
Return a style object of the appropriate |BaseStyle| subclass, according
to the type of *style_elm*.
|
[
"Return",
"a",
"style",
"object",
"of",
"the",
"appropriate",
"|BaseStyle|",
"subclass",
"according",
"to",
"the",
"type",
"of",
"*",
"style_elm",
"*",
"."
] |
def StyleFactory(style_elm):
"""
Return a style object of the appropriate |BaseStyle| subclass, according
to the type of *style_elm*.
"""
style_cls = {
WD_STYLE_TYPE.PARAGRAPH: _ParagraphStyle,
WD_STYLE_TYPE.CHARACTER: _CharacterStyle,
WD_STYLE_TYPE.TABLE: _TableStyle,
WD_STYLE_TYPE.LIST: _NumberingStyle
}[style_elm.type]
return style_cls(style_elm)
|
[
"def",
"StyleFactory",
"(",
"style_elm",
")",
":",
"style_cls",
"=",
"{",
"WD_STYLE_TYPE",
".",
"PARAGRAPH",
":",
"_ParagraphStyle",
",",
"WD_STYLE_TYPE",
".",
"CHARACTER",
":",
"_CharacterStyle",
",",
"WD_STYLE_TYPE",
".",
"TABLE",
":",
"_TableStyle",
",",
"WD_STYLE_TYPE",
".",
"LIST",
":",
"_NumberingStyle",
"}",
"[",
"style_elm",
".",
"type",
"]",
"return",
"style_cls",
"(",
"style_elm",
")"
] |
https://github.com/python-openxml/python-docx/blob/36cac78de080d412e9e50d56c2784e33655cad59/docx/styles/style.py#L18-L30
|
|
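StyleFactory is normally reached indirectly when styles are read from a document; a minimal sketch against the public python-docx API ('Normal' is a style present in the bundled default template):

from docx import Document
from docx.enum.style import WD_STYLE_TYPE

doc = Document()                     # new document from the default template
style = doc.styles['Normal']         # built by StyleFactory from a paragraph style element
print(style.type == WD_STYLE_TYPE.PARAGRAPH)   # True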
bwohlberg/sporco
|
df67462abcf83af6ab1961bcb0d51b87a66483fa
|
sporco/admm/cbpdntv.py
|
python
|
ConvBPDNVectorTV.__init__
|
(self, D, S, lmbda, mu=0.0, opt=None, dimK=None, dimN=2)
|
|
**Call graph**
.. image:: ../_static/jonga/cbpdnvtv_init.svg
:width: 20%
:target: ../_static/jonga/cbpdnvtv_init.svg
|
Parameters
----------
D : array_like
Dictionary matrix
S : array_like
Signal vector or matrix
lmbda : float
Regularisation parameter (l1)
mu : float
Regularisation parameter (gradient)
opt : :class:`ConvBPDNScalarTV.Options` object
Algorithm options
dimK : 0, 1, or None, optional (default None)
Number of dimensions in input signal corresponding to multiple
independent signals
dimN : int, optional (default 2)
Number of spatial dimensions
|
[] |
def __init__(self, D, S, lmbda, mu=0.0, opt=None, dimK=None, dimN=2):
"""
|
**Call graph**
.. image:: ../_static/jonga/cbpdnvtv_init.svg
:width: 20%
:target: ../_static/jonga/cbpdnvtv_init.svg
|
Parameters
----------
D : array_like
Dictionary matrix
S : array_like
Signal vector or matrix
lmbda : float
Regularisation parameter (l1)
mu : float
Regularisation parameter (gradient)
opt : :class:`ConvBPDNScalarTV.Options` object
Algorithm options
dimK : 0, 1, or None, optional (default None)
Number of dimensions in input signal corresponding to multiple
independent signals
dimN : int, optional (default 2)
Number of spatial dimensions
"""
super(ConvBPDNVectorTV, self).__init__(D, S, lmbda, mu, opt,
dimK, dimN)
|
[
"def",
"__init__",
"(",
"self",
",",
"D",
",",
"S",
",",
"lmbda",
",",
"mu",
"=",
"0.0",
",",
"opt",
"=",
"None",
",",
"dimK",
"=",
"None",
",",
"dimN",
"=",
"2",
")",
":",
"super",
"(",
"ConvBPDNVectorTV",
",",
"self",
")",
".",
"__init__",
"(",
"D",
",",
"S",
",",
"lmbda",
",",
"mu",
",",
"opt",
",",
"dimK",
",",
"dimN",
")"
] |
https://github.com/bwohlberg/sporco/blob/df67462abcf83af6ab1961bcb0d51b87a66483fa/sporco/admm/cbpdntv.py#L669-L703
|
|||
arrow-py/arrow
|
e43524088f78efacb425524445a886600660d854
|
arrow/arrow.py
|
python
|
Arrow.timetuple
|
(self)
|
return self._datetime.timetuple()
|
Returns a ``time.struct_time``, in the current timezone.
Usage::
>>> arrow.utcnow().timetuple()
time.struct_time(tm_year=2019, tm_mon=1, tm_mday=20, tm_hour=15, tm_min=17, tm_sec=8, tm_wday=6, tm_yday=20, tm_isdst=0)
|
Returns a ``time.struct_time``, in the current timezone.
|
[
"Returns",
"a",
"time",
".",
"struct_time",
"in",
"the",
"current",
"timezone",
"."
] |
def timetuple(self) -> struct_time:
"""Returns a ``time.struct_time``, in the current timezone.
Usage::
>>> arrow.utcnow().timetuple()
time.struct_time(tm_year=2019, tm_mon=1, tm_mday=20, tm_hour=15, tm_min=17, tm_sec=8, tm_wday=6, tm_yday=20, tm_isdst=0)
"""
return self._datetime.timetuple()
|
[
"def",
"timetuple",
"(",
"self",
")",
"->",
"struct_time",
":",
"return",
"self",
".",
"_datetime",
".",
"timetuple",
"(",
")"
] |
https://github.com/arrow-py/arrow/blob/e43524088f78efacb425524445a886600660d854/arrow/arrow.py#L1596-L1606
|
|
mozilla/telemetry-airflow
|
8162470e6eaad5688715ee53f32336ebc00bf352
|
dags/utils/patched/dataproc_operator.py
|
python
|
DataprocSubmitSparkSqlJobOperator.generate_job
|
(self)
|
return self._generate_job_template()
|
Helper method for easier migration to `DataprocSubmitJobOperator`.
:return: Dict representing Dataproc job
|
Helper method for easier migration to `DataprocSubmitJobOperator`.
:return: Dict representing Dataproc job
|
[
"Helper",
"method",
"for",
"easier",
"migration",
"to",
"DataprocSubmitJobOperator",
".",
":",
"return",
":",
"Dict",
"representing",
"Dataproc",
"job"
] |
def generate_job(self):
"""
Helper method for easier migration to `DataprocSubmitJobOperator`.
:return: Dict representing Dataproc job
"""
self.create_job_template()
if self.query is None:
self.job_template.add_query_uri(self.query_uri)
else:
self.job_template.add_query(self.query)
self.job_template.add_variables(self.variables)
return self._generate_job_template()
|
[
"def",
"generate_job",
"(",
"self",
")",
":",
"self",
".",
"create_job_template",
"(",
")",
"if",
"self",
".",
"query",
"is",
"None",
":",
"self",
".",
"job_template",
".",
"add_query_uri",
"(",
"self",
".",
"query_uri",
")",
"else",
":",
"self",
".",
"job_template",
".",
"add_query",
"(",
"self",
".",
"query",
")",
"self",
".",
"job_template",
".",
"add_variables",
"(",
"self",
".",
"variables",
")",
"return",
"self",
".",
"_generate_job_template",
"(",
")"
] |
https://github.com/mozilla/telemetry-airflow/blob/8162470e6eaad5688715ee53f32336ebc00bf352/dags/utils/patched/dataproc_operator.py#L1355-L1366
|
|
Tautulli/Tautulli
|
2410eb33805aaac4bd1c5dad0f71e4f15afaf742
|
lib/future/types/newbytes.py
|
python
|
newbytes.decode
|
(self, encoding='utf-8', errors='strict')
|
return newstr(super(newbytes, self).decode(encoding, errors))
|
Returns a newstr (i.e. unicode subclass)
Decode B using the codec registered for encoding. Default encoding
is 'utf-8'. errors may be given to set a different error
handling scheme. Default is 'strict' meaning that encoding errors raise
a UnicodeDecodeError. Other possible values are 'ignore' and 'replace'
as well as any other name registered with codecs.register_error that is
able to handle UnicodeDecodeErrors.
|
Returns a newstr (i.e. unicode subclass)
|
[
"Returns",
"a",
"newstr",
"(",
"i",
".",
"e",
".",
"unicode",
"subclass",
")"
] |
def decode(self, encoding='utf-8', errors='strict'):
"""
Returns a newstr (i.e. unicode subclass)
Decode B using the codec registered for encoding. Default encoding
is 'utf-8'. errors may be given to set a different error
handling scheme. Default is 'strict' meaning that encoding errors raise
a UnicodeDecodeError. Other possible values are 'ignore' and 'replace'
as well as any other name registered with codecs.register_error that is
able to handle UnicodeDecodeErrors.
"""
# Py2 str.encode() takes encoding and errors as optional parameter,
# not keyword arguments as in Python 3 str.
from future.types.newstr import newstr
if errors == 'surrogateescape':
from future.utils.surrogateescape import register_surrogateescape
register_surrogateescape()
return newstr(super(newbytes, self).decode(encoding, errors))
|
[
"def",
"decode",
"(",
"self",
",",
"encoding",
"=",
"'utf-8'",
",",
"errors",
"=",
"'strict'",
")",
":",
"# Py2 str.encode() takes encoding and errors as optional parameter,",
"# not keyword arguments as in Python 3 str.",
"from",
"future",
".",
"types",
".",
"newstr",
"import",
"newstr",
"if",
"errors",
"==",
"'surrogateescape'",
":",
"from",
"future",
".",
"utils",
".",
"surrogateescape",
"import",
"register_surrogateescape",
"register_surrogateescape",
"(",
")",
"return",
"newstr",
"(",
"super",
"(",
"newbytes",
",",
"self",
")",
".",
"decode",
"(",
"encoding",
",",
"errors",
")",
")"
] |
https://github.com/Tautulli/Tautulli/blob/2410eb33805aaac4bd1c5dad0f71e4f15afaf742/lib/future/types/newbytes.py#L233-L253
|
|
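A minimal sketch, assuming the python-future package is installed; under Python 2 the builtins import yields newbytes, under Python 3 it is simply the built-in bytes:

from builtins import bytes   # python-future's backported bytes type on Python 2

b = bytes(b'caf\xc3\xa9')
print(b.decode('utf-8'))              # 'café', returned as a unicode (newstr) instance
print(b.decode('ascii', 'replace'))   # undecodable bytes handled per the chosen scheme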
krintoxi/NoobSec-Toolkit
|
38738541cbc03cedb9a3b3ed13b629f781ad64f6
|
NoobSecToolkit /tools/inject/thirdparty/odict/odict.py
|
python
|
Keys.__add__
|
(self, other)
|
return self._main._sequence + other
|
[] |
def __add__(self, other): return self._main._sequence + other
|
[
"def",
"__add__",
"(",
"self",
",",
"other",
")",
":",
"return",
"self",
".",
"_main",
".",
"_sequence",
"+",
"other"
] |
https://github.com/krintoxi/NoobSec-Toolkit/blob/38738541cbc03cedb9a3b3ed13b629f781ad64f6/NoobSecToolkit /tools/inject/thirdparty/odict/odict.py#L956-L956
|
|||
brettviren/python-keepass
|
e814a5b60922387c5303d9ee28dc2ed62724c082
|
python/keepass/hier.py
|
python
|
Node.node_with_group
|
(self,group)
|
return None
|
Return the child node holding the given group
|
Return the child node holding the given group
|
[
"Return",
"the",
"child",
"node",
"holding",
"the",
"given",
"group"
] |
def node_with_group(self,group):
'Return the child node holding the given group'
if self.group == group:
return self
for child in self.nodes:
ret = child.node_with_group(group)
if ret: return ret
continue
return None
|
[
"def",
"node_with_group",
"(",
"self",
",",
"group",
")",
":",
"if",
"self",
".",
"group",
"==",
"group",
":",
"return",
"self",
"for",
"child",
"in",
"self",
".",
"nodes",
":",
"ret",
"=",
"child",
".",
"node_with_group",
"(",
"group",
")",
"if",
"ret",
":",
"return",
"ret",
"continue",
"return",
"None"
] |
https://github.com/brettviren/python-keepass/blob/e814a5b60922387c5303d9ee28dc2ed62724c082/python/keepass/hier.py#L253-L261
|
|
saltstack/salt
|
fae5bc757ad0f1716483ce7ae180b451545c2058
|
salt/modules/schedule.py
|
python
|
copy
|
(name, target, **kwargs)
|
return ret
|
Copy scheduled job to another minion or minions.
CLI Example:
.. code-block:: bash
salt '*' schedule.copy jobname target
|
Copy scheduled job to another minion or minions.
|
[
"Copy",
"scheduled",
"job",
"to",
"another",
"minion",
"or",
"minions",
"."
] |
def copy(name, target, **kwargs):
"""
Copy scheduled job to another minion or minions.
CLI Example:
.. code-block:: bash
salt '*' schedule.copy jobname target
"""
ret = {"comment": [], "result": True}
if not name:
ret["comment"] = "Job name is required."
ret["result"] = False
if "test" in kwargs and kwargs["test"]:
ret["comment"] = "Job: {} would be copied from schedule.".format(name)
else:
opts_schedule = list_(show_all=True, where="opts", return_yaml=False)
pillar_schedule = list_(show_all=True, where="pillar", return_yaml=False)
if name in opts_schedule:
schedule_data = opts_schedule[name]
elif name in pillar_schedule:
schedule_data = pillar_schedule[name]
else:
ret["comment"] = "Job {} does not exist.".format(name)
ret["result"] = False
return ret
schedule_opts = []
for key, value in schedule_data.items():
temp = "{}={}".format(key, value)
schedule_opts.append(temp)
response = __salt__["publish.publish"](target, "schedule.add", schedule_opts)
        # Get errors and list of affected minions
errors = []
minions = []
for minion in response:
minions.append(minion)
if not response[minion]:
errors.append(minion)
# parse response
if not response:
ret["comment"] = "no servers answered the published schedule.add command"
return ret
elif len(errors) > 0:
ret["comment"] = "the following minions return False"
ret["minions"] = errors
return ret
else:
ret["result"] = True
ret["comment"] = "Copied Job {} from schedule to minion(s).".format(name)
ret["minions"] = minions
return ret
return ret
|
[
"def",
"copy",
"(",
"name",
",",
"target",
",",
"*",
"*",
"kwargs",
")",
":",
"ret",
"=",
"{",
"\"comment\"",
":",
"[",
"]",
",",
"\"result\"",
":",
"True",
"}",
"if",
"not",
"name",
":",
"ret",
"[",
"\"comment\"",
"]",
"=",
"\"Job name is required.\"",
"ret",
"[",
"\"result\"",
"]",
"=",
"False",
"if",
"\"test\"",
"in",
"kwargs",
"and",
"kwargs",
"[",
"\"test\"",
"]",
":",
"ret",
"[",
"\"comment\"",
"]",
"=",
"\"Job: {} would be copied from schedule.\"",
".",
"format",
"(",
"name",
")",
"else",
":",
"opts_schedule",
"=",
"list_",
"(",
"show_all",
"=",
"True",
",",
"where",
"=",
"\"opts\"",
",",
"return_yaml",
"=",
"False",
")",
"pillar_schedule",
"=",
"list_",
"(",
"show_all",
"=",
"True",
",",
"where",
"=",
"\"pillar\"",
",",
"return_yaml",
"=",
"False",
")",
"if",
"name",
"in",
"opts_schedule",
":",
"schedule_data",
"=",
"opts_schedule",
"[",
"name",
"]",
"elif",
"name",
"in",
"pillar_schedule",
":",
"schedule_data",
"=",
"pillar_schedule",
"[",
"name",
"]",
"else",
":",
"ret",
"[",
"\"comment\"",
"]",
"=",
"\"Job {} does not exist.\"",
".",
"format",
"(",
"name",
")",
"ret",
"[",
"\"result\"",
"]",
"=",
"False",
"return",
"ret",
"schedule_opts",
"=",
"[",
"]",
"for",
"key",
",",
"value",
"in",
"schedule_data",
".",
"items",
"(",
")",
":",
"temp",
"=",
"\"{}={}\"",
".",
"format",
"(",
"key",
",",
"value",
")",
"schedule_opts",
".",
"append",
"(",
"temp",
")",
"response",
"=",
"__salt__",
"[",
"\"publish.publish\"",
"]",
"(",
"target",
",",
"\"schedule.add\"",
",",
"schedule_opts",
")",
"# Get errors and list of affeced minions",
"errors",
"=",
"[",
"]",
"minions",
"=",
"[",
"]",
"for",
"minion",
"in",
"response",
":",
"minions",
".",
"append",
"(",
"minion",
")",
"if",
"not",
"response",
"[",
"minion",
"]",
":",
"errors",
".",
"append",
"(",
"minion",
")",
"# parse response",
"if",
"not",
"response",
":",
"ret",
"[",
"\"comment\"",
"]",
"=",
"\"no servers answered the published schedule.add command\"",
"return",
"ret",
"elif",
"len",
"(",
"errors",
")",
">",
"0",
":",
"ret",
"[",
"\"comment\"",
"]",
"=",
"\"the following minions return False\"",
"ret",
"[",
"\"minions\"",
"]",
"=",
"errors",
"return",
"ret",
"else",
":",
"ret",
"[",
"\"result\"",
"]",
"=",
"True",
"ret",
"[",
"\"comment\"",
"]",
"=",
"\"Copied Job {} from schedule to minion(s).\"",
".",
"format",
"(",
"name",
")",
"ret",
"[",
"\"minions\"",
"]",
"=",
"minions",
"return",
"ret",
"return",
"ret"
] |
https://github.com/saltstack/salt/blob/fae5bc757ad0f1716483ce7ae180b451545c2058/salt/modules/schedule.py#L1036-L1095
|
|
google/closure-linter
|
c09c885b4e4fec386ff81cebeb8c66c2b0643d49
|
closure_linter/ecmametadatapass.py
|
python
|
EcmaMetaDataPass.Reset
|
(self)
|
Resets the metadata pass to prepare for the next file.
|
Resets the metadata pass to prepare for the next file.
|
[
"Resets",
"the",
"metadata",
"pass",
"to",
"prepare",
"for",
"the",
"next",
"file",
"."
] |
def Reset(self):
"""Resets the metadata pass to prepare for the next file."""
self._token = None
self._context = None
self._AddContext(EcmaContext.ROOT)
self._last_code = None
|
[
"def",
"Reset",
"(",
"self",
")",
":",
"self",
".",
"_token",
"=",
"None",
"self",
".",
"_context",
"=",
"None",
"self",
".",
"_AddContext",
"(",
"EcmaContext",
".",
"ROOT",
")",
"self",
".",
"_last_code",
"=",
"None"
] |
https://github.com/google/closure-linter/blob/c09c885b4e4fec386ff81cebeb8c66c2b0643d49/closure_linter/ecmametadatapass.py#L239-L244
|
||
respeaker/get_started_with_respeaker
|
ec859759fcec7e683a5e09328a8ea307046f353d
|
files/usr/lib/python2.7/site-packages/tornado/ioloop.py
|
python
|
IOLoop.add_handler
|
(self, fd, handler, events)
|
Registers the given handler to receive the given events for fd.
The ``events`` argument is a bitwise or of the constants
``IOLoop.READ``, ``IOLoop.WRITE``, and ``IOLoop.ERROR``.
When an event occurs, ``handler(fd, events)`` will be run.
|
Registers the given handler to receive the given events for fd.
|
[
"Registers",
"the",
"given",
"handler",
"to",
"receive",
"the",
"given",
"events",
"for",
"fd",
"."
] |
def add_handler(self, fd, handler, events):
"""Registers the given handler to receive the given events for fd.
The ``events`` argument is a bitwise or of the constants
``IOLoop.READ``, ``IOLoop.WRITE``, and ``IOLoop.ERROR``.
When an event occurs, ``handler(fd, events)`` will be run.
"""
raise NotImplementedError()
|
[
"def",
"add_handler",
"(",
"self",
",",
"fd",
",",
"handler",
",",
"events",
")",
":",
"raise",
"NotImplementedError",
"(",
")"
] |
https://github.com/respeaker/get_started_with_respeaker/blob/ec859759fcec7e683a5e09328a8ea307046f353d/files/usr/lib/python2.7/site-packages/tornado/ioloop.py#L238-L246
|
||
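A hedged sketch of the documented contract, assuming the Tornado 3.x-era callback API this vendored copy corresponds to; the port and payload are arbitrary. A listening socket is registered for IOLoop.READ and each ready event accepts one connection:

import socket
from tornado.ioloop import IOLoop

sock = socket.socket()
sock.setblocking(0)
sock.bind(("127.0.0.1", 8888))
sock.listen(128)

def on_readable(fd, events):
    # events is a bitmask; only IOLoop.READ was registered here.
    conn, addr = sock.accept()
    conn.sendall(b"hello\n")
    conn.close()

io_loop = IOLoop.instance()
io_loop.add_handler(sock.fileno(), on_readable, IOLoop.READ)
io_loop.start()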
edisonlz/fastor
|
342078a18363ac41d3c6b1ab29dbdd44fdb0b7b3
|
base/site-packages/pymongo/mongo_client.py
|
python
|
MongoClient.__try_node
|
(self, node)
|
return node, response['ismaster'], isdbgrid, res_time
|
Try to connect to this node and see if it works for our connection
type. Returns ((host, port), ismaster, isdbgrid, res_time).
:Parameters:
- `node`: The (host, port) pair to try.
|
Try to connect to this node and see if it works for our connection
type. Returns ((host, port), ismaster, isdbgrid, res_time).
|
[
"Try",
"to",
"connect",
"to",
"this",
"node",
"and",
"see",
"if",
"it",
"works",
"for",
"our",
"connection",
"type",
".",
"Returns",
"((",
"host",
"port",
")",
"ismaster",
"isdbgrid",
"res_time",
")",
"."
] |
def __try_node(self, node):
"""Try to connect to this node and see if it works for our connection
type. Returns ((host, port), ismaster, isdbgrid, res_time).
:Parameters:
- `node`: The (host, port) pair to try.
"""
self.disconnect()
self.__host, self.__port = node
# Call 'ismaster' directly so we can get a response time.
sock_info = self.__socket()
response, res_time = self.__simple_command(sock_info,
'admin',
{'ismaster': 1})
self.__pool.maybe_return_socket(sock_info)
# Are we talking to a mongos?
isdbgrid = response.get('msg', '') == 'isdbgrid'
if "maxBsonObjectSize" in response:
self.__max_bson_size = response["maxBsonObjectSize"]
# Replica Set?
if not self.__direct:
# Check that this host is part of the given replica set.
if self.__repl:
set_name = response.get('setName')
# The 'setName' field isn't returned by mongod before 1.6.2
# so we can't assume that if it's missing this host isn't in
# the specified set.
if set_name and set_name != self.__repl:
raise ConfigurationError("%s:%d is not a member of "
"replica set %s"
% (node[0], node[1], self.__repl))
if "hosts" in response:
self.__nodes = set([_partition_node(h)
for h in response["hosts"]])
else:
# The user passed a seed list of standalone or
# mongos instances.
self.__nodes.add(node)
if response["ismaster"]:
return node, True, isdbgrid, res_time
elif "primary" in response:
candidate = _partition_node(response["primary"])
return self.__try_node(candidate)
# Explain why we aren't using this connection.
raise AutoReconnect('%s:%d is not primary or master' % node)
# Direct connection
if response.get("arbiterOnly", False) and not self.__direct:
raise ConfigurationError("%s:%d is an arbiter" % node)
return node, response['ismaster'], isdbgrid, res_time
|
[
"def",
"__try_node",
"(",
"self",
",",
"node",
")",
":",
"self",
".",
"disconnect",
"(",
")",
"self",
".",
"__host",
",",
"self",
".",
"__port",
"=",
"node",
"# Call 'ismaster' directly so we can get a response time.",
"sock_info",
"=",
"self",
".",
"__socket",
"(",
")",
"response",
",",
"res_time",
"=",
"self",
".",
"__simple_command",
"(",
"sock_info",
",",
"'admin'",
",",
"{",
"'ismaster'",
":",
"1",
"}",
")",
"self",
".",
"__pool",
".",
"maybe_return_socket",
"(",
"sock_info",
")",
"# Are we talking to a mongos?",
"isdbgrid",
"=",
"response",
".",
"get",
"(",
"'msg'",
",",
"''",
")",
"==",
"'isdbgrid'",
"if",
"\"maxBsonObjectSize\"",
"in",
"response",
":",
"self",
".",
"__max_bson_size",
"=",
"response",
"[",
"\"maxBsonObjectSize\"",
"]",
"# Replica Set?",
"if",
"not",
"self",
".",
"__direct",
":",
"# Check that this host is part of the given replica set.",
"if",
"self",
".",
"__repl",
":",
"set_name",
"=",
"response",
".",
"get",
"(",
"'setName'",
")",
"# The 'setName' field isn't returned by mongod before 1.6.2",
"# so we can't assume that if it's missing this host isn't in",
"# the specified set.",
"if",
"set_name",
"and",
"set_name",
"!=",
"self",
".",
"__repl",
":",
"raise",
"ConfigurationError",
"(",
"\"%s:%d is not a member of \"",
"\"replica set %s\"",
"%",
"(",
"node",
"[",
"0",
"]",
",",
"node",
"[",
"1",
"]",
",",
"self",
".",
"__repl",
")",
")",
"if",
"\"hosts\"",
"in",
"response",
":",
"self",
".",
"__nodes",
"=",
"set",
"(",
"[",
"_partition_node",
"(",
"h",
")",
"for",
"h",
"in",
"response",
"[",
"\"hosts\"",
"]",
"]",
")",
"else",
":",
"# The user passed a seed list of standalone or",
"# mongos instances.",
"self",
".",
"__nodes",
".",
"add",
"(",
"node",
")",
"if",
"response",
"[",
"\"ismaster\"",
"]",
":",
"return",
"node",
",",
"True",
",",
"isdbgrid",
",",
"res_time",
"elif",
"\"primary\"",
"in",
"response",
":",
"candidate",
"=",
"_partition_node",
"(",
"response",
"[",
"\"primary\"",
"]",
")",
"return",
"self",
".",
"__try_node",
"(",
"candidate",
")",
"# Explain why we aren't using this connection.",
"raise",
"AutoReconnect",
"(",
"'%s:%d is not primary or master'",
"%",
"node",
")",
"# Direct connection",
"if",
"response",
".",
"get",
"(",
"\"arbiterOnly\"",
",",
"False",
")",
"and",
"not",
"self",
".",
"__direct",
":",
"raise",
"ConfigurationError",
"(",
"\"%s:%d is an arbiter\"",
"%",
"node",
")",
"return",
"node",
",",
"response",
"[",
"'ismaster'",
"]",
",",
"isdbgrid",
",",
"res_time"
] |
https://github.com/edisonlz/fastor/blob/342078a18363ac41d3c6b1ab29dbdd44fdb0b7b3/base/site-packages/pymongo/mongo_client.py#L582-L636
|
|
IronLanguages/ironpython3
|
7a7bb2a872eeab0d1009fc8a6e24dca43f65b693
|
Src/StdLib/Lib/ipaddress.py
|
python
|
IPv4Address.is_loopback
|
(self)
|
return self in loopback_network
|
Test if the address is a loopback address.
Returns:
A boolean, True if the address is a loopback per RFC 3330.
|
Test if the address is a loopback address.
|
[
"Test",
"if",
"the",
"address",
"is",
"a",
"loopback",
"address",
"."
] |
def is_loopback(self):
"""Test if the address is a loopback address.
Returns:
A boolean, True if the address is a loopback per RFC 3330.
"""
loopback_network = IPv4Network('127.0.0.0/8')
return self in loopback_network
|
[
"def",
"is_loopback",
"(",
"self",
")",
":",
"loopback_network",
"=",
"IPv4Network",
"(",
"'127.0.0.0/8'",
")",
"return",
"self",
"in",
"loopback_network"
] |
https://github.com/IronLanguages/ironpython3/blob/7a7bb2a872eeab0d1009fc8a6e24dca43f65b693/Src/StdLib/Lib/ipaddress.py#L1308-L1316
|
|
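The same property is available from the standard-library ipaddress module this file was vendored from; a quick check:

import ipaddress

print(ipaddress.IPv4Address('127.0.0.1').is_loopback)   # True, inside 127.0.0.0/8
print(ipaddress.IPv4Address('8.8.8.8').is_loopback)     # False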
Textualize/rich
|
d39626143036188cb2c9e1619e836540f5b627f8
|
rich/progress.py
|
python
|
Progress.get_renderable
|
(self)
|
return renderable
|
Get a renderable for the progress display.
|
Get a renderable for the progress display.
|
[
"Get",
"a",
"renderable",
"for",
"the",
"progress",
"display",
"."
] |
def get_renderable(self) -> RenderableType:
"""Get a renderable for the progress display."""
renderable = Group(*self.get_renderables())
return renderable
|
[
"def",
"get_renderable",
"(",
"self",
")",
"->",
"RenderableType",
":",
"renderable",
"=",
"Group",
"(",
"*",
"self",
".",
"get_renderables",
"(",
")",
")",
"return",
"renderable"
] |
https://github.com/Textualize/rich/blob/d39626143036188cb2c9e1619e836540f5b627f8/rich/progress.py#L868-L871
|
|
mdiazcl/fuzzbunch-debian
|
2b76c2249ade83a389ae3badb12a1bd09901fd2c
|
windows/Resources/Python/Core/Lib/ntpath.py
|
python
|
splitdrive
|
(p)
|
return ('', p)
|
Split a pathname into drive and path specifiers. Returns a 2-tuple
"(drive,path)"; either part may be empty
|
Split a pathname into drive and path specifiers. Returns a 2-tuple
"(drive,path)"; either part may be empty
|
[
"Split",
"a",
"pathname",
"into",
"drive",
"and",
"path",
"specifiers",
".",
"Returns",
"a",
"2",
"-",
"tuple",
"(",
"drive",
"path",
")",
";",
"either",
"part",
"may",
"be",
"empty"
] |
def splitdrive(p):
"""Split a pathname into drive and path specifiers. Returns a 2-tuple
"(drive,path)"; either part may be empty"""
if p[1:2] == ':':
return (p[0:2], p[2:])
return ('', p)
|
[
"def",
"splitdrive",
"(",
"p",
")",
":",
"if",
"p",
"[",
"1",
":",
"2",
"]",
"==",
"':'",
":",
"return",
"(",
"p",
"[",
"0",
":",
"2",
"]",
",",
"p",
"[",
"2",
":",
"]",
")",
"return",
"(",
"''",
",",
"p",
")"
] |
https://github.com/mdiazcl/fuzzbunch-debian/blob/2b76c2249ade83a389ae3badb12a1bd09901fd2c/windows/Resources/Python/Core/Lib/ntpath.py#L84-L89
|
|
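A quick check of both branches above using the standard-library ntpath module; the example paths are arbitrary:

import ntpath

print(ntpath.splitdrive('C:\\Windows\\system32'))   # ('C:', '\\Windows\\system32')
print(ntpath.splitdrive('no\\drive\\here'))         # ('', 'no\\drive\\here')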
analysiscenter/batchflow
|
294747da0bca309785f925be891441fdd824e9fa
|
batchflow/models/torch/base.py
|
python
|
TorchModel._train_sam_update_gradients
|
(self, inputs, targets, sync_frequency, sam_rho, sam_individual_norm)
|
Update gradients to move to the local maxima.
|
Update gradients to move to the local maxima.
|
[
"Update",
"gradients",
"to",
"move",
"to",
"the",
"local",
"maxima",
"."
] |
def _train_sam_update_gradients(self, inputs, targets, sync_frequency, sam_rho, sam_individual_norm):
""" Update gradients to move to the local maxima. """
# Fetch gradients
grads = []
params_with_grads = []
for p in self.model.parameters():
if p.grad is not None:
grads.append(p.grad.clone().detach())
params_with_grads.append(p)
p.grad = None
# Move to the local maxima
if sam_individual_norm:
epsilons = [grad * sam_rho / (grad.detach().norm(2).to(self.device)) for grad in grads]
else:
grad_norm = torch.stack([g.detach().norm(2).to(self.device) for g in grads]).norm(2)
epsilons = [eps * sam_rho / grad_norm for eps in grads]
if self.amp:
scale = self.scaler.get_scale()
epsilons = [eps / scale for eps in epsilons]
params_with_grads = [p + eps for p, eps in zip(params_with_grads, epsilons)]
# Compute new gradients: direction to move to minimize the local maxima
with torch.cuda.amp.autocast(enabled=self.amp):
predictions_inner = self.model(inputs)
loss_inner = self.loss(predictions_inner, targets) / sync_frequency
(self.scaler.scale(loss_inner) if self.amp else loss_inner).backward()
# Cancel the previous update to model parameters, add stored gradients from previous microbatches
params_with_grads = [p - eps for p, eps in zip(params_with_grads, epsilons)]
for p in self.model.parameters():
previous_grad = self.optimizer.state[p].get('previous_grad')
if previous_grad is not None:
p.grad.add_(previous_grad)
|
[
"def",
"_train_sam_update_gradients",
"(",
"self",
",",
"inputs",
",",
"targets",
",",
"sync_frequency",
",",
"sam_rho",
",",
"sam_individual_norm",
")",
":",
"# Fetch gradients",
"grads",
"=",
"[",
"]",
"params_with_grads",
"=",
"[",
"]",
"for",
"p",
"in",
"self",
".",
"model",
".",
"parameters",
"(",
")",
":",
"if",
"p",
".",
"grad",
"is",
"not",
"None",
":",
"grads",
".",
"append",
"(",
"p",
".",
"grad",
".",
"clone",
"(",
")",
".",
"detach",
"(",
")",
")",
"params_with_grads",
".",
"append",
"(",
"p",
")",
"p",
".",
"grad",
"=",
"None",
"# Move to the local maxima",
"if",
"sam_individual_norm",
":",
"epsilons",
"=",
"[",
"grad",
"*",
"sam_rho",
"/",
"(",
"grad",
".",
"detach",
"(",
")",
".",
"norm",
"(",
"2",
")",
".",
"to",
"(",
"self",
".",
"device",
")",
")",
"for",
"grad",
"in",
"grads",
"]",
"else",
":",
"grad_norm",
"=",
"torch",
".",
"stack",
"(",
"[",
"g",
".",
"detach",
"(",
")",
".",
"norm",
"(",
"2",
")",
".",
"to",
"(",
"self",
".",
"device",
")",
"for",
"g",
"in",
"grads",
"]",
")",
".",
"norm",
"(",
"2",
")",
"epsilons",
"=",
"[",
"eps",
"*",
"sam_rho",
"/",
"grad_norm",
"for",
"eps",
"in",
"grads",
"]",
"if",
"self",
".",
"amp",
":",
"scale",
"=",
"self",
".",
"scaler",
".",
"get_scale",
"(",
")",
"epsilons",
"=",
"[",
"eps",
"/",
"scale",
"for",
"eps",
"in",
"epsilons",
"]",
"params_with_grads",
"=",
"[",
"p",
"+",
"eps",
"for",
"p",
",",
"eps",
"in",
"zip",
"(",
"params_with_grads",
",",
"epsilons",
")",
"]",
"# Compute new gradients: direction to move to minimize the local maxima",
"with",
"torch",
".",
"cuda",
".",
"amp",
".",
"autocast",
"(",
"enabled",
"=",
"self",
".",
"amp",
")",
":",
"predictions_inner",
"=",
"self",
".",
"model",
"(",
"inputs",
")",
"loss_inner",
"=",
"self",
".",
"loss",
"(",
"predictions_inner",
",",
"targets",
")",
"/",
"sync_frequency",
"(",
"self",
".",
"scaler",
".",
"scale",
"(",
"loss_inner",
")",
"if",
"self",
".",
"amp",
"else",
"loss_inner",
")",
".",
"backward",
"(",
")",
"# Cancel the previous update to model parameters, add stored gradients from previous microbatches",
"params_with_grads",
"=",
"[",
"p",
"-",
"eps",
"for",
"p",
",",
"eps",
"in",
"zip",
"(",
"params_with_grads",
",",
"epsilons",
")",
"]",
"for",
"p",
"in",
"self",
".",
"model",
".",
"parameters",
"(",
")",
":",
"previous_grad",
"=",
"self",
".",
"optimizer",
".",
"state",
"[",
"p",
"]",
".",
"get",
"(",
"'previous_grad'",
")",
"if",
"previous_grad",
"is",
"not",
"None",
":",
"p",
".",
"grad",
".",
"add_",
"(",
"previous_grad",
")"
] |
https://github.com/analysiscenter/batchflow/blob/294747da0bca309785f925be891441fdd824e9fa/batchflow/models/torch/base.py#L1192-L1227
|
||
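A toy numeric check of the ascent step used in the global-norm branch above, epsilon = sam_rho * g / ||g||_2 taken over all parameter gradients; the tensors and the rho value are arbitrary:

import torch

grads = [torch.tensor([3.0, 4.0]), torch.tensor([0.0, 12.0])]
sam_rho = 0.05
# Global L2 norm over all gradients: sqrt(5**2 + 12**2) = 13.
grad_norm = torch.stack([g.norm(2) for g in grads]).norm(2)
epsilons = [g * sam_rho / grad_norm for g in grads]
print(grad_norm.item())   # 13.0
print(epsilons[0])        # tensor([0.0115, 0.0154]), approximately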
twilio/twilio-python
|
6e1e811ea57a1edfadd5161ace87397c563f6915
|
twilio/rest/chat/v2/service/role.py
|
python
|
RoleInstance.date_created
|
(self)
|
return self._properties['date_created']
|
:returns: The ISO 8601 date and time in GMT when the resource was created
:rtype: datetime
|
:returns: The ISO 8601 date and time in GMT when the resource was created
:rtype: datetime
|
[
":",
"returns",
":",
"The",
"ISO",
"8601",
"date",
"and",
"time",
"in",
"GMT",
"when",
"the",
"resource",
"was",
"created",
":",
"rtype",
":",
"datetime"
] |
def date_created(self):
"""
:returns: The ISO 8601 date and time in GMT when the resource was created
:rtype: datetime
"""
return self._properties['date_created']
|
[
"def",
"date_created",
"(",
"self",
")",
":",
"return",
"self",
".",
"_properties",
"[",
"'date_created'",
"]"
] |
https://github.com/twilio/twilio-python/blob/6e1e811ea57a1edfadd5161ace87397c563f6915/twilio/rest/chat/v2/service/role.py#L378-L383
|
|
raphaelvallat/pingouin
|
dcfdc82bbc7f1ba5991b80717a5ca156617443e8
|
pingouin/circular.py
|
python
|
circ_mean
|
(angles, w=None, axis=0)
|
return np.angle(np.nansum(np.multiply(w, np.exp(1j * angles)), axis=axis))
|
Mean direction for (binned) circular data.
Parameters
----------
angles : array_like
Samples of angles in radians. The range of ``angles`` must be either
:math:`[0, 2\\pi]` or :math:`[-\\pi, \\pi]`. If ``angles`` is not
expressed in radians (e.g. degrees or 24-hours), please use the
:py:func:`pingouin.convert_angles` function prior to using the present
function.
w : array_like
Number of incidences per bins (i.e. "weights"), in case of binned angle
data.
axis : int or None
Compute along this dimension. Default is the first axis (0).
Returns
-------
mu : float
Circular mean, in radians.
See also
--------
scipy.stats.circmean, scipy.stats.circstd, pingouin.circ_r
Notes
-----
From Wikipedia:
*In mathematics, a mean of circular quantities is a mean which is sometimes
better-suited for quantities like angles, daytimes, and fractional parts
of real numbers. This is necessary since most of the usual means may not be
appropriate on circular quantities. For example, the arithmetic mean of 0°
and 360° is 180°, which is misleading because for most purposes 360° is
the same thing as 0°.
As another example, the "average time" between 11 PM and 1 AM is either
midnight or noon, depending on whether the two times are part of a single
night or part of a single calendar day.*
The circular mean of a set of angles :math:`\\alpha` is defined by:
.. math::
\\bar{\\alpha} = \\text{angle} \\left ( \\sum_{j=1}^n \\exp(i \\cdot
\\alpha_j) \\right )
For binned angles with weights :math:`w`, this becomes:
.. math::
\\bar{\\alpha} = \\text{angle} \\left ( \\sum_{j=1}^n w \\cdot
\\exp(i \\cdot \\alpha_j) \\right )
Missing values in ``angles`` are omitted from the calculations.
References
----------
* https://en.wikipedia.org/wiki/Mean_of_circular_quantities
* Berens, P. (2009). CircStat: A MATLAB Toolbox for Circular
Statistics. Journal of Statistical Software, Articles, 31(10),
1–21. https://doi.org/10.18637/jss.v031.i10
Examples
--------
1. Circular mean of a 1-D array of angles, in radians
>>> import pingouin as pg
>>> angles = [0.785, 1.570, 3.141, 0.839, 5.934]
>>> round(pg.circ_mean(angles), 4)
1.013
Compare with SciPy:
>>> from scipy.stats import circmean
>>> import numpy as np
>>> round(circmean(angles, low=0, high=2*np.pi), 4)
1.013
2. Using a 2-D array of angles in degrees
>>> np.random.seed(123)
>>> deg = np.random.randint(low=0, high=360, size=(3, 5))
>>> deg
array([[322, 98, 230, 17, 83],
[106, 123, 57, 214, 225],
[ 96, 113, 126, 47, 73]])
We first need to convert from degrees to radians:
>>> rad = np.round(pg.convert_angles(deg, low=0, high=360), 4)
>>> rad
array([[-0.6632, 1.7104, -2.2689, 0.2967, 1.4486],
[ 1.85 , 2.1468, 0.9948, -2.5482, -2.3562],
[ 1.6755, 1.9722, 2.1991, 0.8203, 1.2741]])
>>> pg.circ_mean(rad) # On the first axis (default)
array([1.27532162, 1.94336576, 2.23195927, 0.52110503, 1.80240563])
>>> pg.circ_mean(rad, axis=-1) # On the last axis (default)
array([0.68920819, 2.49334852, 1.5954149 ])
>>> round(pg.circ_mean(rad, axis=None), 4) # Across the entire array
1.6954
Missing values are omitted from the calculations:
>>> rad[0, 0] = np.nan
>>> pg.circ_mean(rad)
array([1.76275 , 1.94336576, 2.23195927, 0.52110503, 1.80240563])
3. Using binned angles
>>> np.random.seed(123)
>>> nbins = 18 # Number of bins to divide the unit circle
>>> angles_bins = np.linspace(0, 2 * np.pi, nbins)
>>> # w represents the number of incidences per bins, or "weights".
>>> w = np.random.randint(low=0, high=5, size=angles_bins.size)
>>> round(pg.circ_mean(angles_bins, w), 4)
0.606
|
Mean direction for (binned) circular data.
|
[
"Mean",
"direction",
"for",
"(",
"binned",
")",
"circular",
"data",
"."
] |
def circ_mean(angles, w=None, axis=0):
"""Mean direction for (binned) circular data.
Parameters
----------
angles : array_like
Samples of angles in radians. The range of ``angles`` must be either
:math:`[0, 2\\pi]` or :math:`[-\\pi, \\pi]`. If ``angles`` is not
expressed in radians (e.g. degrees or 24-hours), please use the
:py:func:`pingouin.convert_angles` function prior to using the present
function.
w : array_like
Number of incidences per bins (i.e. "weights"), in case of binned angle
data.
axis : int or None
Compute along this dimension. Default is the first axis (0).
Returns
-------
mu : float
Circular mean, in radians.
See also
--------
scipy.stats.circmean, scipy.stats.circstd, pingouin.circ_r
Notes
-----
From Wikipedia:
*In mathematics, a mean of circular quantities is a mean which is sometimes
better-suited for quantities like angles, daytimes, and fractional parts
of real numbers. This is necessary since most of the usual means may not be
appropriate on circular quantities. For example, the arithmetic mean of 0°
and 360° is 180°, which is misleading because for most purposes 360° is
the same thing as 0°.
As another example, the "average time" between 11 PM and 1 AM is either
midnight or noon, depending on whether the two times are part of a single
night or part of a single calendar day.*
The circular mean of a set of angles :math:`\\alpha` is defined by:
.. math::
\\bar{\\alpha} = \\text{angle} \\left ( \\sum_{j=1}^n \\exp(i \\cdot
\\alpha_j) \\right )
For binned angles with weights :math:`w`, this becomes:
.. math::
\\bar{\\alpha} = \\text{angle} \\left ( \\sum_{j=1}^n w \\cdot
\\exp(i \\cdot \\alpha_j) \\right )
Missing values in ``angles`` are omitted from the calculations.
References
----------
* https://en.wikipedia.org/wiki/Mean_of_circular_quantities
* Berens, P. (2009). CircStat: A MATLAB Toolbox for Circular
Statistics. Journal of Statistical Software, Articles, 31(10),
1–21. https://doi.org/10.18637/jss.v031.i10
Examples
--------
1. Circular mean of a 1-D array of angles, in radians
>>> import pingouin as pg
>>> angles = [0.785, 1.570, 3.141, 0.839, 5.934]
>>> round(pg.circ_mean(angles), 4)
1.013
Compare with SciPy:
>>> from scipy.stats import circmean
>>> import numpy as np
>>> round(circmean(angles, low=0, high=2*np.pi), 4)
1.013
2. Using a 2-D array of angles in degrees
>>> np.random.seed(123)
>>> deg = np.random.randint(low=0, high=360, size=(3, 5))
>>> deg
array([[322, 98, 230, 17, 83],
[106, 123, 57, 214, 225],
[ 96, 113, 126, 47, 73]])
We first need to convert from degrees to radians:
>>> rad = np.round(pg.convert_angles(deg, low=0, high=360), 4)
>>> rad
array([[-0.6632, 1.7104, -2.2689, 0.2967, 1.4486],
[ 1.85 , 2.1468, 0.9948, -2.5482, -2.3562],
[ 1.6755, 1.9722, 2.1991, 0.8203, 1.2741]])
>>> pg.circ_mean(rad) # On the first axis (default)
array([1.27532162, 1.94336576, 2.23195927, 0.52110503, 1.80240563])
>>> pg.circ_mean(rad, axis=-1) # On the last axis (default)
array([0.68920819, 2.49334852, 1.5954149 ])
>>> round(pg.circ_mean(rad, axis=None), 4) # Across the entire array
1.6954
Missing values are omitted from the calculations:
>>> rad[0, 0] = np.nan
>>> pg.circ_mean(rad)
array([1.76275 , 1.94336576, 2.23195927, 0.52110503, 1.80240563])
3. Using binned angles
>>> np.random.seed(123)
>>> nbins = 18 # Number of bins to divide the unit circle
>>> angles_bins = np.linspace(0, 2 * np.pi, nbins)
>>> # w represents the number of incidences per bins, or "weights".
>>> w = np.random.randint(low=0, high=5, size=angles_bins.size)
>>> round(pg.circ_mean(angles_bins, w), 4)
0.606
"""
angles = np.asarray(angles)
_checkangles(angles) # Check that angles is in radians
w = np.asarray(w) if w is not None else np.ones(angles.shape)
assert angles.shape == w.shape, "Input dimensions do not match"
return np.angle(np.nansum(np.multiply(w, np.exp(1j * angles)), axis=axis))
|
[
"def",
"circ_mean",
"(",
"angles",
",",
"w",
"=",
"None",
",",
"axis",
"=",
"0",
")",
":",
"angles",
"=",
"np",
".",
"asarray",
"(",
"angles",
")",
"_checkangles",
"(",
"angles",
")",
"# Check that angles is in radians",
"w",
"=",
"np",
".",
"asarray",
"(",
"w",
")",
"if",
"w",
"is",
"not",
"None",
"else",
"np",
".",
"ones",
"(",
"angles",
".",
"shape",
")",
"assert",
"angles",
".",
"shape",
"==",
"w",
".",
"shape",
",",
"\"Input dimensions do not match\"",
"return",
"np",
".",
"angle",
"(",
"np",
".",
"nansum",
"(",
"np",
".",
"multiply",
"(",
"w",
",",
"np",
".",
"exp",
"(",
"1j",
"*",
"angles",
")",
")",
",",
"axis",
"=",
"axis",
")",
")"
] |
https://github.com/raphaelvallat/pingouin/blob/dcfdc82bbc7f1ba5991b80717a5ca156617443e8/pingouin/circular.py#L173-L297
|
|
jgagneastro/coffeegrindsize
|
22661ebd21831dba4cf32bfc6ba59fe3d49f879c
|
App/venv/lib/python3.7/site-packages/pip/_internal/vcs/subversion.py
|
python
|
Subversion.export
|
(self, location)
|
Export the svn repository at the url to the destination location
|
Export the svn repository at the url to the destination location
|
[
"Export",
"the",
"svn",
"repository",
"at",
"the",
"url",
"to",
"the",
"destination",
"location"
] |
def export(self, location):
"""Export the svn repository at the url to the destination location"""
url, rev_options = self.get_url_rev_options(self.url)
logger.info('Exporting svn repository %s to %s', url, location)
with indent_log():
if os.path.exists(location):
# Subversion doesn't like to check out over an existing
# directory --force fixes this, but was only added in svn 1.5
rmtree(location)
cmd_args = ['export'] + rev_options.to_args() + [url, location]
self.run_command(cmd_args, show_stdout=False)
|
[
"def",
"export",
"(",
"self",
",",
"location",
")",
":",
"url",
",",
"rev_options",
"=",
"self",
".",
"get_url_rev_options",
"(",
"self",
".",
"url",
")",
"logger",
".",
"info",
"(",
"'Exporting svn repository %s to %s'",
",",
"url",
",",
"location",
")",
"with",
"indent_log",
"(",
")",
":",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"location",
")",
":",
"# Subversion doesn't like to check out over an existing",
"# directory --force fixes this, but was only added in svn 1.5",
"rmtree",
"(",
"location",
")",
"cmd_args",
"=",
"[",
"'export'",
"]",
"+",
"rev_options",
".",
"to_args",
"(",
")",
"+",
"[",
"url",
",",
"location",
"]",
"self",
".",
"run_command",
"(",
"cmd_args",
",",
"show_stdout",
"=",
"False",
")"
] |
https://github.com/jgagneastro/coffeegrindsize/blob/22661ebd21831dba4cf32bfc6ba59fe3d49f879c/App/venv/lib/python3.7/site-packages/pip/_internal/vcs/subversion.py#L70-L81
|
||
TarrySingh/Artificial-Intelligence-Deep-Learning-Machine-Learning-Tutorials
|
5bb97d7e3ffd913abddb4cfa7d78a1b4c868890e
|
deep-learning/fastai-docs/fastai_docs-master/dev_nb/nb_004.py
|
python
|
OptimWrapper.step
|
(self)
|
Performs a single optimization step
|
Performs a single optimization step
|
[
"Performs",
"a",
"single",
"optimization",
"step"
] |
def step(self)->None:
"Performs a single optimization step "
# weight decay outside of optimizer step (AdamW)
if self.true_wd:
for pg in self.opt.param_groups:
for p in pg['params']: p.data.mul_(1 - self._wd*pg['lr'])
self.set_val('weight_decay', 0)
self.opt.step()
|
[
"def",
"step",
"(",
"self",
")",
"->",
"None",
":",
"# weight decay outside of optimizer step (AdamW)",
"if",
"self",
".",
"true_wd",
":",
"for",
"pg",
"in",
"self",
".",
"opt",
".",
"param_groups",
":",
"for",
"p",
"in",
"pg",
"[",
"'params'",
"]",
":",
"p",
".",
"data",
".",
"mul_",
"(",
"1",
"-",
"self",
".",
"_wd",
"*",
"pg",
"[",
"'lr'",
"]",
")",
"self",
".",
"set_val",
"(",
"'weight_decay'",
",",
"0",
")",
"self",
".",
"opt",
".",
"step",
"(",
")"
] |
https://github.com/TarrySingh/Artificial-Intelligence-Deep-Learning-Machine-Learning-Tutorials/blob/5bb97d7e3ffd913abddb4cfa7d78a1b4c868890e/deep-learning/fastai-docs/fastai_docs-master/dev_nb/nb_004.py#L28-L35
|
||
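A toy illustration of the decoupled ("true") weight decay applied above: each parameter is shrunk in place by (1 - wd * lr) before the wrapped optimizer steps, rather than folding the decay into the gradients; the numbers are arbitrary:

import torch

p = torch.tensor([1.0, -2.0])
lr, wd = 0.1, 0.01
p.mul_(1 - wd * lr)   # the same in-place update the wrapper performs per parameter group
print(p)              # tensor([ 0.9990, -1.9980])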
TeamMsgExtractor/msg-extractor
|
8a3a0255a7306bdb8073bd8f222d3be5c688080a
|
extract_msg/appointment.py
|
python
|
Appointment.startDate
|
(self)
|
return self._ensureSetProperty('_startDate', '00600040')
|
The start date of the appointment.
|
The start date of the appointment.
|
[
"The",
"start",
"date",
"of",
"the",
"appointment",
"."
] |
def startDate(self):
"""
The start date of the appointment.
"""
return self._ensureSetProperty('_startDate', '00600040')
|
[
"def",
"startDate",
"(",
"self",
")",
":",
"return",
"self",
".",
"_ensureSetProperty",
"(",
"'_startDate'",
",",
"'00600040'",
")"
] |
https://github.com/TeamMsgExtractor/msg-extractor/blob/8a3a0255a7306bdb8073bd8f222d3be5c688080a/extract_msg/appointment.py#L62-L66
|
|
pantsbuild/pex
|
473c6ac732ed4bc338b4b20a9ec930d1d722c9b4
|
pex/vendor/_vendored/setuptools/pkg_resources/extern/__init__.py
|
python
|
VendorImporter.__init__
|
(self, root_name, vendored_names=(), vendor_pkg=None)
|
[] |
def __init__(self, root_name, vendored_names=(), vendor_pkg=None):
self.root_name = root_name
self.vendored_names = set(vendored_names)
self.vendor_pkg = vendor_pkg or root_name.replace('extern', '_vendor')
|
[
"def",
"__init__",
"(",
"self",
",",
"root_name",
",",
"vendored_names",
"=",
"(",
")",
",",
"vendor_pkg",
"=",
"None",
")",
":",
"self",
".",
"root_name",
"=",
"root_name",
"self",
".",
"vendored_names",
"=",
"set",
"(",
"vendored_names",
")",
"self",
".",
"vendor_pkg",
"=",
"vendor_pkg",
"or",
"root_name",
".",
"replace",
"(",
"'extern'",
",",
"'_vendor'",
")"
] |
https://github.com/pantsbuild/pex/blob/473c6ac732ed4bc338b4b20a9ec930d1d722c9b4/pex/vendor/_vendored/setuptools/pkg_resources/extern/__init__.py#L10-L13
|
||||
BindsNET/bindsnet
|
f2eabd77793831c1391fccf5b22e2e4e4564ae7c
|
bindsnet/network/monitors.py
|
python
|
NetworkMonitor.get
|
(self)
|
return self.recording
|
Return entire recording to user.
:return: Dictionary of dictionary of all layers' and connections' recorded
state variables.
|
Return entire recording to user.
|
[
"Return",
"entire",
"recording",
"to",
"user",
"."
] |
def get(self) -> Dict[str, Dict[str, Union[Nodes, AbstractConnection]]]:
# language=rst
"""
Return entire recording to user.
:return: Dictionary of dictionary of all layers' and connections' recorded
state variables.
"""
return self.recording
|
[
"def",
"get",
"(",
"self",
")",
"->",
"Dict",
"[",
"str",
",",
"Dict",
"[",
"str",
",",
"Union",
"[",
"Nodes",
",",
"AbstractConnection",
"]",
"]",
"]",
":",
"# language=rst",
"return",
"self",
".",
"recording"
] |
https://github.com/BindsNET/bindsnet/blob/f2eabd77793831c1391fccf5b22e2e4e4564ae7c/bindsnet/network/monitors.py#L175-L183
|
|
sagemath/sage
|
f9b2db94f675ff16963ccdefba4f1a3393b3fe0d
|
src/sage/combinat/set_partition_ordered.py
|
python
|
OrderedSetPartitions_scomp.__iter__
|
(self)
|
TESTS::
sage: [ p for p in OrderedSetPartitions([1,2,3,4], [2,1,1]) ]
[[{1, 2}, {3}, {4}],
[{1, 2}, {4}, {3}],
[{1, 3}, {2}, {4}],
[{1, 4}, {2}, {3}],
[{1, 3}, {4}, {2}],
[{1, 4}, {3}, {2}],
[{2, 3}, {1}, {4}],
[{2, 4}, {1}, {3}],
[{3, 4}, {1}, {2}],
[{2, 3}, {4}, {1}],
[{2, 4}, {3}, {1}],
[{3, 4}, {2}, {1}]]
sage: len(OrderedSetPartitions([1,2,3,4], [1,1,1,1]))
24
sage: [ x for x in OrderedSetPartitions([1,4,7], [3]) ]
[[{1, 4, 7}]]
sage: [ x for x in OrderedSetPartitions([1,4,7], [1,2]) ]
[[{1}, {4, 7}], [{4}, {1, 7}], [{7}, {1, 4}]]
sage: [ p for p in OrderedSetPartitions([], []) ]
[[]]
sage: [ p for p in OrderedSetPartitions([1], [1]) ]
[[{1}]]
Let us check that it works for large size (:trac:`16646`)::
sage: OrderedSetPartitions(42).first()
[{1}, {2}, {3}, {4}, {5}, {6}, {7}, {8}, {9}, {10}, {11}, {12},
{13}, {14}, {15}, {16}, {17}, {18}, {19}, {20}, {21}, {22}, {23},
{24}, {25}, {26}, {27}, {28}, {29}, {30}, {31}, {32}, {33}, {34},
{35}, {36}, {37}, {38}, {39}, {40}, {41}, {42}]
|
TESTS::
|
[
"TESTS",
"::"
] |
def __iter__(self):
"""
TESTS::
sage: [ p for p in OrderedSetPartitions([1,2,3,4], [2,1,1]) ]
[[{1, 2}, {3}, {4}],
[{1, 2}, {4}, {3}],
[{1, 3}, {2}, {4}],
[{1, 4}, {2}, {3}],
[{1, 3}, {4}, {2}],
[{1, 4}, {3}, {2}],
[{2, 3}, {1}, {4}],
[{2, 4}, {1}, {3}],
[{3, 4}, {1}, {2}],
[{2, 3}, {4}, {1}],
[{2, 4}, {3}, {1}],
[{3, 4}, {2}, {1}]]
sage: len(OrderedSetPartitions([1,2,3,4], [1,1,1,1]))
24
sage: [ x for x in OrderedSetPartitions([1,4,7], [3]) ]
[[{1, 4, 7}]]
sage: [ x for x in OrderedSetPartitions([1,4,7], [1,2]) ]
[[{1}, {4, 7}], [{4}, {1, 7}], [{7}, {1, 4}]]
sage: [ p for p in OrderedSetPartitions([], []) ]
[[]]
sage: [ p for p in OrderedSetPartitions([1], [1]) ]
[[{1}]]
Let us check that it works for large size (:trac:`16646`)::
sage: OrderedSetPartitions(42).first()
[{1}, {2}, {3}, {4}, {5}, {6}, {7}, {8}, {9}, {10}, {11}, {12},
{13}, {14}, {15}, {16}, {17}, {18}, {19}, {20}, {21}, {22}, {23},
{24}, {25}, {26}, {27}, {28}, {29}, {30}, {31}, {32}, {33}, {34},
{35}, {36}, {37}, {38}, {39}, {40}, {41}, {42}]
"""
comp = self.c
lset = [x for x in self._set]
l = len(self.c)
dcomp = [-1] + comp.descents(final_descent=True)
p = []
for j in range(l):
p += [j + 1] * comp[j]
for x in permutation.Permutations_mset(p):
res = permutation.to_standard(x).inverse()
res = [lset[x - 1] for x in res]
yield self.element_class(self, [Set(res[dcomp[i]+1:dcomp[i+1]+1])
for i in range(l)])
|
[
"def",
"__iter__",
"(",
"self",
")",
":",
"comp",
"=",
"self",
".",
"c",
"lset",
"=",
"[",
"x",
"for",
"x",
"in",
"self",
".",
"_set",
"]",
"l",
"=",
"len",
"(",
"self",
".",
"c",
")",
"dcomp",
"=",
"[",
"-",
"1",
"]",
"+",
"comp",
".",
"descents",
"(",
"final_descent",
"=",
"True",
")",
"p",
"=",
"[",
"]",
"for",
"j",
"in",
"range",
"(",
"l",
")",
":",
"p",
"+=",
"[",
"j",
"+",
"1",
"]",
"*",
"comp",
"[",
"j",
"]",
"for",
"x",
"in",
"permutation",
".",
"Permutations_mset",
"(",
"p",
")",
":",
"res",
"=",
"permutation",
".",
"to_standard",
"(",
"x",
")",
".",
"inverse",
"(",
")",
"res",
"=",
"[",
"lset",
"[",
"x",
"-",
"1",
"]",
"for",
"x",
"in",
"res",
"]",
"yield",
"self",
".",
"element_class",
"(",
"self",
",",
"[",
"Set",
"(",
"res",
"[",
"dcomp",
"[",
"i",
"]",
"+",
"1",
":",
"dcomp",
"[",
"i",
"+",
"1",
"]",
"+",
"1",
"]",
")",
"for",
"i",
"in",
"range",
"(",
"l",
")",
"]",
")"
] |
https://github.com/sagemath/sage/blob/f9b2db94f675ff16963ccdefba4f1a3393b3fe0d/src/sage/combinat/set_partition_ordered.py#L1225-L1279
|
||
openshift/openshift-tools
|
1188778e728a6e4781acf728123e5b356380fe6f
|
openshift/installer/vendored/openshift-ansible-3.11.28-1/roles/lib_openshift/library/oc_serviceaccount.py
|
python
|
Yedit.get_curr_value
|
(invalue, val_type)
|
return curr_value
|
return the current value
|
return the current value
|
[
"return",
"the",
"current",
"value"
] |
def get_curr_value(invalue, val_type):
'''return the current value'''
if invalue is None:
return None
curr_value = invalue
if val_type == 'yaml':
curr_value = yaml.safe_load(str(invalue))
elif val_type == 'json':
curr_value = json.loads(invalue)
return curr_value
|
[
"def",
"get_curr_value",
"(",
"invalue",
",",
"val_type",
")",
":",
"if",
"invalue",
"is",
"None",
":",
"return",
"None",
"curr_value",
"=",
"invalue",
"if",
"val_type",
"==",
"'yaml'",
":",
"curr_value",
"=",
"yaml",
".",
"safe_load",
"(",
"str",
"(",
"invalue",
")",
")",
"elif",
"val_type",
"==",
"'json'",
":",
"curr_value",
"=",
"json",
".",
"loads",
"(",
"invalue",
")",
"return",
"curr_value"
] |
https://github.com/openshift/openshift-tools/blob/1188778e728a6e4781acf728123e5b356380fe6f/openshift/installer/vendored/openshift-ansible-3.11.28-1/roles/lib_openshift/library/oc_serviceaccount.py#L657-L668
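A short usage sketch for the `get_curr_value` helper above; the decoding depends only on the standard `json` module and PyYAML, so a standalone copy can be exercised directly (the re-definition below is for illustration, not the vendored module itself):

import json
import yaml  # PyYAML

def get_curr_value(invalue, val_type):
    '''Return the current value, decoding YAML or JSON strings on request.'''
    if invalue is None:
        return None
    curr_value = invalue
    if val_type == 'yaml':
        curr_value = yaml.safe_load(str(invalue))
    elif val_type == 'json':
        curr_value = json.loads(invalue)
    return curr_value

# YAML and JSON inputs decode to Python objects; any other val_type passes the value through.
assert get_curr_value('{a: 1}', 'yaml') == {'a': 1}
assert get_curr_value('{"a": 1}', 'json') == {'a': 1}
assert get_curr_value('plain-string', None) == 'plain-string'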
|
|
mgharbi/hdrnet_legacy
|
b06d0119e2fb22c62c757161e6d351a304720544
|
hdrnet/models.py
|
python
|
HDRNetGaussianPyrNN._output
|
(cls, lvls, guide_lvls, coeffs)
|
return current
|
[] |
def _output(cls, lvls, guide_lvls, coeffs):
for il, (lvl, guide_lvl) in enumerate(reversed(zip(lvls, guide_lvls))):
c = coeffs[:, :, :, :, il*3:(il+1)*3, :]
out_lvl = HDRNetPointwiseNNGuide._output(lvl, guide_lvl, c)
if il == 0:
current = out_lvl
else:
sz = tf.shape(out_lvl)[1:3]
current = tf.image.resize_images(current, sz, tf.image.ResizeMethod.BILINEAR, align_corners=True)
current = tf.add(current, out_lvl)
return current
|
[
"def",
"_output",
"(",
"cls",
",",
"lvls",
",",
"guide_lvls",
",",
"coeffs",
")",
":",
"for",
"il",
",",
"(",
"lvl",
",",
"guide_lvl",
")",
"in",
"enumerate",
"(",
"reversed",
"(",
"zip",
"(",
"lvls",
",",
"guide_lvls",
")",
")",
")",
":",
"c",
"=",
"coeffs",
"[",
":",
",",
":",
",",
":",
",",
":",
",",
"il",
"*",
"3",
":",
"(",
"il",
"+",
"1",
")",
"*",
"3",
",",
":",
"]",
"out_lvl",
"=",
"HDRNetPointwiseNNGuide",
".",
"_output",
"(",
"lvl",
",",
"guide_lvl",
",",
"c",
")",
"if",
"il",
"==",
"0",
":",
"current",
"=",
"out_lvl",
"else",
":",
"sz",
"=",
"tf",
".",
"shape",
"(",
"out_lvl",
")",
"[",
"1",
":",
"3",
"]",
"current",
"=",
"tf",
".",
"image",
".",
"resize_images",
"(",
"current",
",",
"sz",
",",
"tf",
".",
"image",
".",
"ResizeMethod",
".",
"BILINEAR",
",",
"align_corners",
"=",
"True",
")",
"current",
"=",
"tf",
".",
"add",
"(",
"current",
",",
"out_lvl",
")",
"return",
"current"
] |
https://github.com/mgharbi/hdrnet_legacy/blob/b06d0119e2fb22c62c757161e6d351a304720544/hdrnet/models.py#L277-L289
|
|||
open-mmlab/mmdetection3d
|
c7272063e818bcf33aebc498a017a95c8d065143
|
mmdet3d/ops/voxel/voxelize.py
|
python
|
Voxelization.forward
|
(self, input)
|
return voxelization(input, self.voxel_size, self.point_cloud_range,
self.max_num_points, max_voxels,
self.deterministic)
|
Args:
input: NC points
|
Args:
input: NC points
|
[
"Args",
":",
"input",
":",
"NC",
"points"
] |
def forward(self, input):
"""
Args:
input: NC points
"""
if self.training:
max_voxels = self.max_voxels[0]
else:
max_voxels = self.max_voxels[1]
return voxelization(input, self.voxel_size, self.point_cloud_range,
self.max_num_points, max_voxels,
self.deterministic)
|
[
"def",
"forward",
"(",
"self",
",",
"input",
")",
":",
"if",
"self",
".",
"training",
":",
"max_voxels",
"=",
"self",
".",
"max_voxels",
"[",
"0",
"]",
"else",
":",
"max_voxels",
"=",
"self",
".",
"max_voxels",
"[",
"1",
"]",
"return",
"voxelization",
"(",
"input",
",",
"self",
".",
"voxel_size",
",",
"self",
".",
"point_cloud_range",
",",
"self",
".",
"max_num_points",
",",
"max_voxels",
",",
"self",
".",
"deterministic",
")"
] |
https://github.com/open-mmlab/mmdetection3d/blob/c7272063e818bcf33aebc498a017a95c8d065143/mmdet3d/ops/voxel/voxelize.py#L126-L138
|
|
vitorfs/parsifal
|
9386a0fb328d4880d052c94e9224ce50a9b2f6a6
|
parsifal/apps/reviews/models.py
|
python
|
DataExtraction._set_boolean_value
|
(self, value)
|
[] |
def _set_boolean_value(self, value):
if value:
if value in ["True", "False"]:
self.value = value
else:
raise ValueError('Expected values: "True" or "False"')
else:
self.value = ""
|
[
"def",
"_set_boolean_value",
"(",
"self",
",",
"value",
")",
":",
"if",
"value",
":",
"if",
"value",
"in",
"[",
"\"True\"",
",",
"\"False\"",
"]",
":",
"self",
".",
"value",
"=",
"value",
"else",
":",
"raise",
"ValueError",
"(",
"'Expected values: \"True\" or \"False\"'",
")",
"else",
":",
"self",
".",
"value",
"=",
"\"\""
] |
https://github.com/vitorfs/parsifal/blob/9386a0fb328d4880d052c94e9224ce50a9b2f6a6/parsifal/apps/reviews/models.py#L466-L473
|
||||
linuxscout/mishkal
|
4f4ae0ebc2d6acbeb3de3f0303151ec7b54d2f76
|
interfaces/web/lib/paste/evalexception/middleware.py
|
python
|
DebugInfo.json
|
(self)
|
return {
'uri': self.view_uri,
'created': time.strftime('%c', time.gmtime(self.created)),
'created_timestamp': self.created,
'exception_type': str(self.exc_type),
'exception': str(self.exc_value),
}
|
Return the JSON-able representation of this object
|
Return the JSON-able representation of this object
|
[
"Return",
"the",
"JSON",
"-",
"able",
"representation",
"of",
"this",
"object"
] |
def json(self):
"""Return the JSON-able representation of this object"""
return {
'uri': self.view_uri,
'created': time.strftime('%c', time.gmtime(self.created)),
'created_timestamp': self.created,
'exception_type': str(self.exc_type),
'exception': str(self.exc_value),
}
|
[
"def",
"json",
"(",
"self",
")",
":",
"return",
"{",
"'uri'",
":",
"self",
".",
"view_uri",
",",
"'created'",
":",
"time",
".",
"strftime",
"(",
"'%c'",
",",
"time",
".",
"gmtime",
"(",
"self",
".",
"created",
")",
")",
",",
"'created_timestamp'",
":",
"self",
".",
"created",
",",
"'exception_type'",
":",
"str",
"(",
"self",
".",
"exc_type",
")",
",",
"'exception'",
":",
"str",
"(",
"self",
".",
"exc_value",
")",
",",
"}"
] |
https://github.com/linuxscout/mishkal/blob/4f4ae0ebc2d6acbeb3de3f0303151ec7b54d2f76/interfaces/web/lib/paste/evalexception/middleware.py#L386-L394
|
|
maxhumber/gazpacho
|
49d8258908729b67e4189a339e1b4c99dd003778
|
gazpacho/soup.py
|
python
|
Parser.handle_start
|
(self, tag, attrs)
|
[] |
def handle_start(self, tag, attrs):
html, attrs_dict = recover_html_and_attrs(tag, attrs)
query_attrs = {} if not self.attrs else self.attrs
matching = match(query_attrs, attrs_dict, partial=self._partial)
if (tag == self.tag) and (matching) and (not self.is_active):
self._groups.append(Soup())
self._groups[-1].tag = tag
self._groups[-1].attrs = attrs_dict
self._groups[-1]._html += html
self._counter[tag] += 1
return
if self.is_active:
self._groups[-1]._html += html
self._counter[tag] += 1
|
[
"def",
"handle_start",
"(",
"self",
",",
"tag",
",",
"attrs",
")",
":",
"html",
",",
"attrs_dict",
"=",
"recover_html_and_attrs",
"(",
"tag",
",",
"attrs",
")",
"query_attrs",
"=",
"{",
"}",
"if",
"not",
"self",
".",
"attrs",
"else",
"self",
".",
"attrs",
"matching",
"=",
"match",
"(",
"query_attrs",
",",
"attrs_dict",
",",
"partial",
"=",
"self",
".",
"_partial",
")",
"if",
"(",
"tag",
"==",
"self",
".",
"tag",
")",
"and",
"(",
"matching",
")",
"and",
"(",
"not",
"self",
".",
"is_active",
")",
":",
"self",
".",
"_groups",
".",
"append",
"(",
"Soup",
"(",
")",
")",
"self",
".",
"_groups",
"[",
"-",
"1",
"]",
".",
"tag",
"=",
"tag",
"self",
".",
"_groups",
"[",
"-",
"1",
"]",
".",
"attrs",
"=",
"attrs_dict",
"self",
".",
"_groups",
"[",
"-",
"1",
"]",
".",
"_html",
"+=",
"html",
"self",
".",
"_counter",
"[",
"tag",
"]",
"+=",
"1",
"return",
"if",
"self",
".",
"is_active",
":",
"self",
".",
"_groups",
"[",
"-",
"1",
"]",
".",
"_html",
"+=",
"html",
"self",
".",
"_counter",
"[",
"tag",
"]",
"+=",
"1"
] |
https://github.com/maxhumber/gazpacho/blob/49d8258908729b67e4189a339e1b4c99dd003778/gazpacho/soup.py#L186-L201
|
||||
pokealarm/pokealarm
|
2edc3a978b7435a453d1917fbf436891fad1e18f
|
PokeAlarm/Alarms/Alarm.py
|
python
|
Alarm.pop_type
|
(data, param_name, kind, default=None)
|
Pops a parameter as a certain type.
|
Pops a parameter as a certain type.
|
[
"Pops",
"a",
"parameter",
"as",
"a",
"certain",
"type",
"."
] |
def pop_type(data, param_name, kind, default=None):
""" Pops a parameter as a certain type. """
try:
value = data.pop(param_name, default)
return kind(value)
except Exception:
raise ValueError(
'Unable to interpret the value "{}" as a valid {} '
'for parameter {}.", '.format(value, kind, param_name))
|
[
"def",
"pop_type",
"(",
"data",
",",
"param_name",
",",
"kind",
",",
"default",
"=",
"None",
")",
":",
"try",
":",
"value",
"=",
"data",
".",
"pop",
"(",
"param_name",
",",
"default",
")",
"return",
"kind",
"(",
"value",
")",
"except",
"Exception",
":",
"raise",
"ValueError",
"(",
"'Unable to interpret the value \"{}\" as a valid {} '",
"'for parameter {}.\", '",
".",
"format",
"(",
"value",
",",
"kind",
",",
"param_name",
")",
")"
] |
https://github.com/pokealarm/pokealarm/blob/2edc3a978b7435a453d1917fbf436891fad1e18f/PokeAlarm/Alarms/Alarm.py#L87-L95
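A hedged sketch of the `pop_type` pattern above: pop a key from a settings dict and coerce it to a type, raising a clear error when coercion fails. The standalone copy below moves the pop() outside the try block purely for illustration; the names mirror this entry.

def pop_type(data, param_name, kind, default=None):
    """Pop `param_name` from `data` and coerce it with `kind`."""
    value = data.pop(param_name, default)
    try:
        return kind(value)
    except Exception:
        raise ValueError(
            'Unable to interpret the value "{}" as a valid {} '
            'for parameter {}.'.format(value, kind, param_name))

settings = {'min_dist': '250', 'max_dist': '1000'}
assert pop_type(settings, 'min_dist', int) == 250        # coerced from str
assert pop_type(settings, 'missing', float, 0.0) == 0.0  # falls back to the default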
|
||
josiah-wolf-oberholtzer/supriya
|
5ca725a6b97edfbe016a75666d420ecfdf49592f
|
dev/etc/pending_ugens/PartConv.py
|
python
|
PartConv.source
|
(self)
|
return self._inputs[index]
|
Gets `source` input of PartConv.
::
>>> source = supriya.ugens.In.ar(bus=0)
>>> part_conv = supriya.ugens.PartConv.ar(
... fftsize=fftsize,
... irbufnum=irbufnum,
... source=source,
... )
>>> part_conv.source
OutputProxy(
source=In(
bus=0.0,
calculation_rate=CalculationRate.AUDIO,
channel_count=1
),
output_index=0
)
Returns ugen input.
|
Gets `source` input of PartConv.
|
[
"Gets",
"source",
"input",
"of",
"PartConv",
"."
] |
def source(self):
"""
Gets `source` input of PartConv.
::
>>> source = supriya.ugens.In.ar(bus=0)
>>> part_conv = supriya.ugens.PartConv.ar(
... fftsize=fftsize,
... irbufnum=irbufnum,
... source=source,
... )
>>> part_conv.source
OutputProxy(
source=In(
bus=0.0,
calculation_rate=CalculationRate.AUDIO,
channel_count=1
),
output_index=0
)
Returns ugen input.
"""
index = self._ordered_input_names.index('source')
return self._inputs[index]
|
[
"def",
"source",
"(",
"self",
")",
":",
"index",
"=",
"self",
".",
"_ordered_input_names",
".",
"index",
"(",
"'source'",
")",
"return",
"self",
".",
"_inputs",
"[",
"index",
"]"
] |
https://github.com/josiah-wolf-oberholtzer/supriya/blob/5ca725a6b97edfbe016a75666d420ecfdf49592f/dev/etc/pending_ugens/PartConv.py#L131-L156
|
|
HymanLiuTS/flaskTs
|
286648286976e85d9b9a5873632331efcafe0b21
|
flasky/lib/python2.7/site-packages/flask/app.py
|
python
|
setupmethod
|
(f)
|
return update_wrapper(wrapper_func, f)
|
Wraps a method so that it performs a check in debug mode if the
first request was already handled.
|
Wraps a method so that it performs a check in debug mode if the
first request was already handled.
|
[
"Wraps",
"a",
"method",
"so",
"that",
"it",
"performs",
"a",
"check",
"in",
"debug",
"mode",
"if",
"the",
"first",
"request",
"was",
"already",
"handled",
"."
] |
def setupmethod(f):
"""Wraps a method so that it performs a check in debug mode if the
first request was already handled.
"""
def wrapper_func(self, *args, **kwargs):
if self.debug and self._got_first_request:
raise AssertionError('A setup function was called after the '
'first request was handled. This usually indicates a bug '
'in the application where a module was not imported '
'and decorators or other functionality was called too late.\n'
'To fix this make sure to import all your view modules, '
'database models and everything related at a central place '
'before the application starts serving requests.')
return f(self, *args, **kwargs)
return update_wrapper(wrapper_func, f)
|
[
"def",
"setupmethod",
"(",
"f",
")",
":",
"def",
"wrapper_func",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"self",
".",
"debug",
"and",
"self",
".",
"_got_first_request",
":",
"raise",
"AssertionError",
"(",
"'A setup function was called after the '",
"'first request was handled. This usually indicates a bug '",
"'in the application where a module was not imported '",
"'and decorators or other functionality was called too late.\\n'",
"'To fix this make sure to import all your view modules, '",
"'database models and everything related at a central place '",
"'before the application starts serving requests.'",
")",
"return",
"f",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"update_wrapper",
"(",
"wrapper_func",
",",
"f",
")"
] |
https://github.com/HymanLiuTS/flaskTs/blob/286648286976e85d9b9a5873632331efcafe0b21/flasky/lib/python2.7/site-packages/flask/app.py#L52-L66
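A minimal sketch of how a guard decorator like `setupmethod` is applied, using a small stand-in object that carries `debug` and `_got_first_request` attributes (the Flask application internals are not reproduced here):

from functools import update_wrapper

def setupmethod(f):
    def wrapper_func(self, *args, **kwargs):
        if self.debug and self._got_first_request:
            raise AssertionError('A setup function was called after the first request was handled.')
        return f(self, *args, **kwargs)
    return update_wrapper(wrapper_func, f)

class FakeApp:
    debug = True
    _got_first_request = False

    @setupmethod
    def add_url_rule(self, rule):
        return rule

app = FakeApp()
print(app.add_url_rule('/ok'))    # allowed: no request handled yet
app._got_first_request = True
# app.add_url_rule('/late')       # would now raise AssertionError in debug mode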
|
|
googleads/google-ads-python
|
2a1d6062221f6aad1992a6bcca0e7e4a93d2db86
|
google/ads/googleads/v7/services/services/ad_parameter_service/client.py
|
python
|
AdParameterServiceClient.parse_common_location_path
|
(path: str)
|
return m.groupdict() if m else {}
|
Parse a location path into its component segments.
|
Parse a location path into its component segments.
|
[
"Parse",
"a",
"location",
"path",
"into",
"its",
"component",
"segments",
"."
] |
def parse_common_location_path(path: str) -> Dict[str, str]:
"""Parse a location path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path
)
return m.groupdict() if m else {}
|
[
"def",
"parse_common_location_path",
"(",
"path",
":",
"str",
")",
"->",
"Dict",
"[",
"str",
",",
"str",
"]",
":",
"m",
"=",
"re",
".",
"match",
"(",
"r\"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$\"",
",",
"path",
")",
"return",
"m",
".",
"groupdict",
"(",
")",
"if",
"m",
"else",
"{",
"}"
] |
https://github.com/googleads/google-ads-python/blob/2a1d6062221f6aad1992a6bcca0e7e4a93d2db86/google/ads/googleads/v7/services/services/ad_parameter_service/client.py#L257-L262
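The regular expression above is self-contained, so its behaviour is easy to check directly; this sketch assumes only the standard `re` module:

import re
from typing import Dict

def parse_common_location_path(path: str) -> Dict[str, str]:
    """Split 'projects/{project}/locations/{location}' into its named segments."""
    m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
    return m.groupdict() if m else {}

assert parse_common_location_path("projects/my-proj/locations/us-central1") == {
    "project": "my-proj", "location": "us-central1"}
assert parse_common_location_path("not/a/location/path") == {}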
|
|
compas-dev/compas
|
0b33f8786481f710115fb1ae5fe79abc2a9a5175
|
src/compas/geometry/primitives/vector.py
|
python
|
Vector.scale
|
(self, n)
|
Scale this vector by a factor n.
Parameters
----------
n : float
The scaling factor.
Returns
-------
None
Examples
--------
>>> u = Vector(1.0, 0.0, 0.0)
>>> u.scale(3.0)
>>> u.length
3.0
|
Scale this vector by a factor n.
|
[
"Scale",
"this",
"vector",
"by",
"a",
"factor",
"n",
"."
] |
def scale(self, n):
"""Scale this vector by a factor n.
Parameters
----------
n : float
The scaling factor.
Returns
-------
None
Examples
--------
>>> u = Vector(1.0, 0.0, 0.0)
>>> u.scale(3.0)
>>> u.length
3.0
"""
self.x *= n
self.y *= n
self.z *= n
|
[
"def",
"scale",
"(",
"self",
",",
"n",
")",
":",
"self",
".",
"x",
"*=",
"n",
"self",
".",
"y",
"*=",
"n",
"self",
".",
"z",
"*=",
"n"
] |
https://github.com/compas-dev/compas/blob/0b33f8786481f710115fb1ae5fe79abc2a9a5175/src/compas/geometry/primitives/vector.py#L796-L818
|
||
bert-nmt/bert-nmt
|
fcb616d28091ac23c9c16f30e6870fe90b8576d6
|
fairseq/checkpoint_utils.py
|
python
|
save_state
|
(
filename, args, model_state_dict, criterion, optimizer, lr_scheduler,
num_updates, optim_history=None, extra_state=None,
)
|
[] |
def save_state(
filename, args, model_state_dict, criterion, optimizer, lr_scheduler,
num_updates, optim_history=None, extra_state=None,
):
if optim_history is None:
optim_history = []
if extra_state is None:
extra_state = {}
state_dict = {
'args': args,
'model': model_state_dict if model_state_dict else {},
'optimizer_history': optim_history + [
{
'criterion_name': criterion.__class__.__name__,
'optimizer_name': optimizer.__class__.__name__,
'lr_scheduler_state': lr_scheduler.state_dict(),
'num_updates': num_updates,
}
],
'last_optimizer_state': convert_state_dict_type(optimizer.state_dict()),
'extra_state': extra_state,
}
torch_persistent_save(state_dict, filename)
|
[
"def",
"save_state",
"(",
"filename",
",",
"args",
",",
"model_state_dict",
",",
"criterion",
",",
"optimizer",
",",
"lr_scheduler",
",",
"num_updates",
",",
"optim_history",
"=",
"None",
",",
"extra_state",
"=",
"None",
",",
")",
":",
"if",
"optim_history",
"is",
"None",
":",
"optim_history",
"=",
"[",
"]",
"if",
"extra_state",
"is",
"None",
":",
"extra_state",
"=",
"{",
"}",
"state_dict",
"=",
"{",
"'args'",
":",
"args",
",",
"'model'",
":",
"model_state_dict",
"if",
"model_state_dict",
"else",
"{",
"}",
",",
"'optimizer_history'",
":",
"optim_history",
"+",
"[",
"{",
"'criterion_name'",
":",
"criterion",
".",
"__class__",
".",
"__name__",
",",
"'optimizer_name'",
":",
"optimizer",
".",
"__class__",
".",
"__name__",
",",
"'lr_scheduler_state'",
":",
"lr_scheduler",
".",
"state_dict",
"(",
")",
",",
"'num_updates'",
":",
"num_updates",
",",
"}",
"]",
",",
"'last_optimizer_state'",
":",
"convert_state_dict_type",
"(",
"optimizer",
".",
"state_dict",
"(",
")",
")",
",",
"'extra_state'",
":",
"extra_state",
",",
"}",
"torch_persistent_save",
"(",
"state_dict",
",",
"filename",
")"
] |
https://github.com/bert-nmt/bert-nmt/blob/fcb616d28091ac23c9c16f30e6870fe90b8576d6/fairseq/checkpoint_utils.py#L228-L250
|
||||
pysmt/pysmt
|
ade4dc2a825727615033a96d31c71e9f53ce4764
|
pysmt/oracles.py
|
python
|
SizeOracle.walk_count_leaves
|
(self, formula, args, measure, **kwargs)
|
return (1 if is_leaf else 0) + sum(args)
|
[] |
def walk_count_leaves(self, formula, args, measure, **kwargs):
#pylint: disable=unused-argument
is_leaf = (len(args) == 0)
return (1 if is_leaf else 0) + sum(args)
|
[
"def",
"walk_count_leaves",
"(",
"self",
",",
"formula",
",",
"args",
",",
"measure",
",",
"*",
"*",
"kwargs",
")",
":",
"#pylint: disable=unused-argument",
"is_leaf",
"=",
"(",
"len",
"(",
"args",
")",
"==",
"0",
")",
"return",
"(",
"1",
"if",
"is_leaf",
"else",
"0",
")",
"+",
"sum",
"(",
"args",
")"
] |
https://github.com/pysmt/pysmt/blob/ade4dc2a825727615033a96d31c71e9f53ce4764/pysmt/oracles.py#L103-L106
|
|||
JulianEberius/SublimePythonIDE
|
d70e40abc0c9f347af3204c7b910e0d6bfd6e459
|
server/lib/python3/rope/base/change.py
|
python
|
count_changes
|
(change)
|
return 1
|
Counts the number of basic changes a `Change` will make
|
Counts the number of basic changes a `Change` will make
|
[
"Counts",
"the",
"number",
"of",
"basic",
"changes",
"a",
"Change",
"will",
"make"
] |
def count_changes(change):
"""Counts the number of basic changes a `Change` will make"""
if isinstance(change, ChangeSet):
result = 0
for child in change.changes:
result += count_changes(child)
return result
return 1
|
[
"def",
"count_changes",
"(",
"change",
")",
":",
"if",
"isinstance",
"(",
"change",
",",
"ChangeSet",
")",
":",
"result",
"=",
"0",
"for",
"child",
"in",
"change",
".",
"changes",
":",
"result",
"+=",
"count_changes",
"(",
"child",
")",
"return",
"result",
"return",
"1"
] |
https://github.com/JulianEberius/SublimePythonIDE/blob/d70e40abc0c9f347af3204c7b910e0d6bfd6e459/server/lib/python3/rope/base/change.py#L303-L310
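A hedged illustration of the recursion above, using minimal stand-ins for `Change` and `ChangeSet` (the real rope classes carry much more state):

class Change:
    """Stand-in for a basic change."""

class ChangeSet(Change):
    """Stand-in for a composite change holding child changes."""
    def __init__(self, *changes):
        self.changes = list(changes)

def count_changes(change):
    if isinstance(change, ChangeSet):
        return sum(count_changes(child) for child in change.changes)
    return 1

nested = ChangeSet(Change(), ChangeSet(Change(), Change()), Change())
assert count_changes(nested) == 4   # only leaf changes are counted, not the ChangeSet nodes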
|
|
scikit-learn-contrib/py-earth
|
b209d1916f051dbea5b142af25425df2de469c5a
|
versioneer.py
|
python
|
render
|
(pieces, style)
|
return {"version": rendered, "full-revisionid": pieces["long"],
"dirty": pieces["dirty"], "error": None,
"date": pieces.get("date")}
|
Render the given version pieces into the requested style.
|
Render the given version pieces into the requested style.
|
[
"Render",
"the",
"given",
"version",
"pieces",
"into",
"the",
"requested",
"style",
"."
] |
def render(pieces, style):
"""Render the given version pieces into the requested style."""
if pieces["error"]:
return {"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"],
"date": None}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%s'" % style)
return {"version": rendered, "full-revisionid": pieces["long"],
"dirty": pieces["dirty"], "error": None,
"date": pieces.get("date")}
|
[
"def",
"render",
"(",
"pieces",
",",
"style",
")",
":",
"if",
"pieces",
"[",
"\"error\"",
"]",
":",
"return",
"{",
"\"version\"",
":",
"\"unknown\"",
",",
"\"full-revisionid\"",
":",
"pieces",
".",
"get",
"(",
"\"long\"",
")",
",",
"\"dirty\"",
":",
"None",
",",
"\"error\"",
":",
"pieces",
"[",
"\"error\"",
"]",
",",
"\"date\"",
":",
"None",
"}",
"if",
"not",
"style",
"or",
"style",
"==",
"\"default\"",
":",
"style",
"=",
"\"pep440\"",
"# the default",
"if",
"style",
"==",
"\"pep440\"",
":",
"rendered",
"=",
"render_pep440",
"(",
"pieces",
")",
"elif",
"style",
"==",
"\"pep440-pre\"",
":",
"rendered",
"=",
"render_pep440_pre",
"(",
"pieces",
")",
"elif",
"style",
"==",
"\"pep440-post\"",
":",
"rendered",
"=",
"render_pep440_post",
"(",
"pieces",
")",
"elif",
"style",
"==",
"\"pep440-old\"",
":",
"rendered",
"=",
"render_pep440_old",
"(",
"pieces",
")",
"elif",
"style",
"==",
"\"git-describe\"",
":",
"rendered",
"=",
"render_git_describe",
"(",
"pieces",
")",
"elif",
"style",
"==",
"\"git-describe-long\"",
":",
"rendered",
"=",
"render_git_describe_long",
"(",
"pieces",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"unknown style '%s'\"",
"%",
"style",
")",
"return",
"{",
"\"version\"",
":",
"rendered",
",",
"\"full-revisionid\"",
":",
"pieces",
"[",
"\"long\"",
"]",
",",
"\"dirty\"",
":",
"pieces",
"[",
"\"dirty\"",
"]",
",",
"\"error\"",
":",
"None",
",",
"\"date\"",
":",
"pieces",
".",
"get",
"(",
"\"date\"",
")",
"}"
] |
https://github.com/scikit-learn-contrib/py-earth/blob/b209d1916f051dbea5b142af25425df2de469c5a/versioneer.py#L1366-L1395
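For orientation, a sketch of the `pieces` dict that `render` consumes and the dict it returns; only the error branch is spelled out because the style-specific helpers (`render_pep440` and friends) are not reproduced in this entry:

pieces = {"error": "unable to find a version", "long": None, "dirty": None}
# With an error present, render(pieces, "pep440") short-circuits to:
# {"version": "unknown", "full-revisionid": None, "dirty": None,
#  "error": "unable to find a version", "date": None}
# Otherwise the style string selects one of the render_* helpers and the result
# carries pieces["long"], pieces["dirty"] and pieces.get("date").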
|
|
pyparallel/pyparallel
|
11e8c6072d48c8f13641925d17b147bf36ee0ba3
|
Lib/site-packages/pandas-0.17.0-py3.3-win-amd64.egg/pandas/tools/merge.py
|
python
|
_get_join_indexers
|
(left_keys, right_keys, sort=False, how='inner')
|
return join_func(lkey, rkey, count, **kwargs)
|
Parameters
----------
Returns
-------
|
[] |
def _get_join_indexers(left_keys, right_keys, sort=False, how='inner'):
"""
Parameters
----------
Returns
-------
"""
from functools import partial
assert len(left_keys) == len(right_keys), \
'left_key and right_keys must be the same length'
# bind `sort` arg. of _factorize_keys
fkeys = partial(_factorize_keys, sort=sort)
# get left & right join labels and num. of levels at each location
llab, rlab, shape = map(list, zip( * map(fkeys, left_keys, right_keys)))
# get flat i8 keys from label lists
lkey, rkey = _get_join_keys(llab, rlab, shape, sort)
# factorize keys to a dense i8 space
# `count` is the num. of unique keys
# set(lkey) | set(rkey) == range(count)
lkey, rkey, count = fkeys(lkey, rkey)
# preserve left frame order if how == 'left' and sort == False
kwargs = {'sort':sort} if how == 'left' else {}
join_func = _join_functions[how]
return join_func(lkey, rkey, count, **kwargs)
|
[
"def",
"_get_join_indexers",
"(",
"left_keys",
",",
"right_keys",
",",
"sort",
"=",
"False",
",",
"how",
"=",
"'inner'",
")",
":",
"from",
"functools",
"import",
"partial",
"assert",
"len",
"(",
"left_keys",
")",
"==",
"len",
"(",
"right_keys",
")",
",",
"'left_key and right_keys must be the same length'",
"# bind `sort` arg. of _factorize_keys",
"fkeys",
"=",
"partial",
"(",
"_factorize_keys",
",",
"sort",
"=",
"sort",
")",
"# get left & right join labels and num. of levels at each location",
"llab",
",",
"rlab",
",",
"shape",
"=",
"map",
"(",
"list",
",",
"zip",
"(",
"*",
"map",
"(",
"fkeys",
",",
"left_keys",
",",
"right_keys",
")",
")",
")",
"# get flat i8 keys from label lists",
"lkey",
",",
"rkey",
"=",
"_get_join_keys",
"(",
"llab",
",",
"rlab",
",",
"shape",
",",
"sort",
")",
"# factorize keys to a dense i8 space",
"# `count` is the num. of unique keys",
"# set(lkey) | set(rkey) == range(count)",
"lkey",
",",
"rkey",
",",
"count",
"=",
"fkeys",
"(",
"lkey",
",",
"rkey",
")",
"# preserve left frame order if how == 'left' and sort == False",
"kwargs",
"=",
"{",
"'sort'",
":",
"sort",
"}",
"if",
"how",
"==",
"'left'",
"else",
"{",
"}",
"join_func",
"=",
"_join_functions",
"[",
"how",
"]",
"return",
"join_func",
"(",
"lkey",
",",
"rkey",
",",
"count",
",",
"*",
"*",
"kwargs",
")"
] |
https://github.com/pyparallel/pyparallel/blob/11e8c6072d48c8f13641925d17b147bf36ee0ba3/Lib/site-packages/pandas-0.17.0-py3.3-win-amd64.egg/pandas/tools/merge.py#L497-L529
|
||
lovelylain/pyctp
|
fd304de4b50c4ddc31a4190b1caaeb5dec66bc5d
|
example/ctp/lts/ApiStruct.py
|
python
|
SuperUserFunction.__init__
|
(self, UserID='', FunctionCode=FC_ForceUserLogout)
|
[] |
def __init__(self, UserID='', FunctionCode=FC_ForceUserLogout):
        self.UserID = '' # user ID, char[16]
self.FunctionCode = ''
|
[
"def",
"__init__",
"(",
"self",
",",
"UserID",
"=",
"''",
",",
"FunctionCode",
"=",
"FC_ForceUserLogout",
")",
":",
"self",
".",
"UserID",
"=",
"''",
"#用户代码, char[16]",
"self",
".",
"FunctionCode",
"=",
"''"
] |
https://github.com/lovelylain/pyctp/blob/fd304de4b50c4ddc31a4190b1caaeb5dec66bc5d/example/ctp/lts/ApiStruct.py#L690-L692
|
||||
QCoDeS/Qcodes
|
3cda2cef44812e2aa4672781f2423bf5f816f9f9
|
qcodes/instrument_drivers/Keysight/keysightb1500/KeysightB1500_module.py
|
python
|
fixed_negative_float
|
(response: str)
|
return float(output)
|
Keysight sometimes responds for ex. '-0.-1' as an output when you input
'-0.1'. This function can convert such strings also to float.
|
Keysight sometimes responds for ex. '-0.-1' as an output when you input
'-0.1'. This function can convert such strings also to float.
|
[
"Keysight",
"sometimes",
"responds",
"for",
"ex",
".",
"-",
"0",
".",
"-",
"1",
"as",
"an",
"output",
"when",
"you",
"input",
"-",
"0",
".",
"1",
".",
"This",
"function",
"can",
"convert",
"such",
"strings",
"also",
"to",
"float",
"."
] |
def fixed_negative_float(response: str) -> float:
"""
Keysight sometimes responds for ex. '-0.-1' as an output when you input
'-0.1'. This function can convert such strings also to float.
"""
if len(response.split('.')) > 2:
raise ValueError('String must of format `a` or `a.b`')
parts = response.split('.')
number = parts[0]
decimal = parts[1] if len(parts) > 1 else '0'
decimal = decimal.replace("-", "")
output = ".".join([number, decimal])
return float(output)
|
[
"def",
"fixed_negative_float",
"(",
"response",
":",
"str",
")",
"->",
"float",
":",
"if",
"len",
"(",
"response",
".",
"split",
"(",
"'.'",
")",
")",
">",
"2",
":",
"raise",
"ValueError",
"(",
"'String must of format `a` or `a.b`'",
")",
"parts",
"=",
"response",
".",
"split",
"(",
"'.'",
")",
"number",
"=",
"parts",
"[",
"0",
"]",
"decimal",
"=",
"parts",
"[",
"1",
"]",
"if",
"len",
"(",
"parts",
")",
">",
"1",
"else",
"'0'",
"decimal",
"=",
"decimal",
".",
"replace",
"(",
"\"-\"",
",",
"\"\"",
")",
"output",
"=",
"\".\"",
".",
"join",
"(",
"[",
"number",
",",
"decimal",
"]",
")",
"return",
"float",
"(",
"output",
")"
] |
https://github.com/QCoDeS/Qcodes/blob/3cda2cef44812e2aa4672781f2423bf5f816f9f9/qcodes/instrument_drivers/Keysight/keysightb1500/KeysightB1500_module.py#L163-L178
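A usage sketch of the normalisation above; the definition is repeated so the checks run standalone:

def fixed_negative_float(response: str) -> float:
    # Same logic as the entry above, repeated here so the assertions below run on their own.
    if len(response.split('.')) > 2:
        raise ValueError('String must of format `a` or `a.b`')
    parts = response.split('.')
    number = parts[0]
    decimal = parts[1] if len(parts) > 1 else '0'
    decimal = decimal.replace("-", "")
    output = ".".join([number, decimal])
    return float(output)

assert fixed_negative_float('-0.-1') == -0.1   # the instrument's quirky spelling of -0.1
assert fixed_negative_float('-0.1') == -0.1    # well-formed input is unchanged
assert fixed_negative_float('2') == 2.0        # integers gain a '.0' decimal part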
|
|
bourdakos1/capsule-networks
|
84eb67a5b56456fc0a24d7fed8b0a53982fbd1c2
|
capsLayer.py
|
python
|
squash
|
(vector)
|
return(vec_squashed)
|
Squashing function corresponding to Eq. 1
Args:
vector: A tensor with shape [batch_size, 1, num_caps, vec_len, 1] or [batch_size, num_caps, vec_len, 1].
Returns:
A tensor with the same shape as vector but squashed in 'vec_len' dimension.
|
Squashing function corresponding to Eq. 1
Args:
vector: A tensor with shape [batch_size, 1, num_caps, vec_len, 1] or [batch_size, num_caps, vec_len, 1].
Returns:
A tensor with the same shape as vector but squashed in 'vec_len' dimension.
|
[
"Squashing",
"function",
"corresponding",
"to",
"Eq",
".",
"1",
"Args",
":",
"vector",
":",
"A",
"tensor",
"with",
"shape",
"[",
"batch_size",
"1",
"num_caps",
"vec_len",
"1",
"]",
"or",
"[",
"batch_size",
"num_caps",
"vec_len",
"1",
"]",
".",
"Returns",
":",
"A",
"tensor",
"with",
"the",
"same",
"shape",
"as",
"vector",
"but",
"squashed",
"in",
"vec_len",
"dimension",
"."
] |
def squash(vector):
'''Squashing function corresponding to Eq. 1
Args:
vector: A tensor with shape [batch_size, 1, num_caps, vec_len, 1] or [batch_size, num_caps, vec_len, 1].
Returns:
A tensor with the same shape as vector but squashed in 'vec_len' dimension.
'''
vec_squared_norm = tf.reduce_sum(tf.square(vector), -2, keep_dims=True)
scalar_factor = vec_squared_norm / (1 + vec_squared_norm) / tf.sqrt(vec_squared_norm + epsilon)
vec_squashed = scalar_factor * vector # element-wise
return(vec_squashed)
|
[
"def",
"squash",
"(",
"vector",
")",
":",
"vec_squared_norm",
"=",
"tf",
".",
"reduce_sum",
"(",
"tf",
".",
"square",
"(",
"vector",
")",
",",
"-",
"2",
",",
"keep_dims",
"=",
"True",
")",
"scalar_factor",
"=",
"vec_squared_norm",
"/",
"(",
"1",
"+",
"vec_squared_norm",
")",
"/",
"tf",
".",
"sqrt",
"(",
"vec_squared_norm",
"+",
"epsilon",
")",
"vec_squashed",
"=",
"scalar_factor",
"*",
"vector",
"# element-wise",
"return",
"(",
"vec_squashed",
")"
] |
https://github.com/bourdakos1/capsule-networks/blob/84eb67a5b56456fc0a24d7fed8b0a53982fbd1c2/capsLayer.py#L176-L186
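For reference, the squashing non-linearity implemented above is the "Eq. 1" named in the docstring; written out in LaTeX:

v_j = \frac{\lVert s_j \rVert^2}{1 + \lVert s_j \rVert^2} \cdot \frac{s_j}{\lVert s_j \rVert}

The `epsilon` added under the square root in the code is a numerical-stability term rather than part of the equation itself.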
|
|
wbond/package_control
|
cfaaeb57612023e3679ecb7f8cd7ceac9f57990d
|
package_control/deps/asn1crypto/core.py
|
python
|
ObjectIdentifier.unmap
|
(cls, value)
|
return value
|
Converts a mapped unicode string value into a dotted unicode string OID
:param value:
A mapped unicode string OR dotted unicode string OID
:raises:
ValueError - when no _map dict has been defined on the class or the value can't be unmapped
TypeError - when value is not a unicode string
:return:
A dotted unicode string OID
|
Converts a mapped unicode string value into a dotted unicode string OID
|
[
"Converts",
"a",
"mapped",
"unicode",
"string",
"value",
"into",
"a",
"dotted",
"unicode",
"string",
"OID"
] |
def unmap(cls, value):
"""
Converts a mapped unicode string value into a dotted unicode string OID
:param value:
A mapped unicode string OR dotted unicode string OID
:raises:
ValueError - when no _map dict has been defined on the class or the value can't be unmapped
TypeError - when value is not a unicode string
:return:
A dotted unicode string OID
"""
if cls not in _SETUP_CLASSES:
cls()._setup()
_SETUP_CLASSES[cls] = True
if cls._map is None:
raise ValueError(unwrap(
'''
%s._map has not been defined
''',
type_name(cls)
))
if not isinstance(value, str_cls):
raise TypeError(unwrap(
'''
value must be a unicode string, not %s
''',
type_name(value)
))
if value in cls._reverse_map:
return cls._reverse_map[value]
if not _OID_RE.match(value):
raise ValueError(unwrap(
'''
%s._map does not contain an entry for "%s"
''',
type_name(cls),
value
))
return value
|
[
"def",
"unmap",
"(",
"cls",
",",
"value",
")",
":",
"if",
"cls",
"not",
"in",
"_SETUP_CLASSES",
":",
"cls",
"(",
")",
".",
"_setup",
"(",
")",
"_SETUP_CLASSES",
"[",
"cls",
"]",
"=",
"True",
"if",
"cls",
".",
"_map",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"unwrap",
"(",
"'''\n %s._map has not been defined\n '''",
",",
"type_name",
"(",
"cls",
")",
")",
")",
"if",
"not",
"isinstance",
"(",
"value",
",",
"str_cls",
")",
":",
"raise",
"TypeError",
"(",
"unwrap",
"(",
"'''\n value must be a unicode string, not %s\n '''",
",",
"type_name",
"(",
"value",
")",
")",
")",
"if",
"value",
"in",
"cls",
".",
"_reverse_map",
":",
"return",
"cls",
".",
"_reverse_map",
"[",
"value",
"]",
"if",
"not",
"_OID_RE",
".",
"match",
"(",
"value",
")",
":",
"raise",
"ValueError",
"(",
"unwrap",
"(",
"'''\n %s._map does not contain an entry for \"%s\"\n '''",
",",
"type_name",
"(",
"cls",
")",
",",
"value",
")",
")",
"return",
"value"
] |
https://github.com/wbond/package_control/blob/cfaaeb57612023e3679ecb7f8cd7ceac9f57990d/package_control/deps/asn1crypto/core.py#L3021-L3068
|
|
sphinx-doc/sphinx
|
e79681c76843c1339863b365747079b2d662d0c1
|
sphinx/application.py
|
python
|
Sphinx._init_i18n
|
(self)
|
Load translated strings from the configured localedirs if enabled in
the configuration.
|
Load translated strings from the configured localedirs if enabled in
the configuration.
|
[
"Load",
"translated",
"strings",
"from",
"the",
"configured",
"localedirs",
"if",
"enabled",
"in",
"the",
"configuration",
"."
] |
def _init_i18n(self) -> None:
"""Load translated strings from the configured localedirs if enabled in
the configuration.
"""
if self.config.language is None:
self.translator, has_translation = locale.init([], None)
else:
logger.info(bold(__('loading translations [%s]... ') % self.config.language),
nonl=True)
# compile mo files if sphinx.po file in user locale directories are updated
repo = CatalogRepository(self.srcdir, self.config.locale_dirs,
self.config.language, self.config.source_encoding)
for catalog in repo.catalogs:
if catalog.domain == 'sphinx' and catalog.is_outdated():
catalog.write_mo(self.config.language,
self.config.gettext_allow_fuzzy_translations)
locale_dirs: List[Optional[str]] = list(repo.locale_dirs)
locale_dirs += [None]
locale_dirs += [path.join(package_dir, 'locale')]
self.translator, has_translation = locale.init(locale_dirs, self.config.language)
if has_translation or self.config.language == 'en':
# "en" never needs to be translated
logger.info(__('done'))
else:
logger.info(__('not available for built-in messages'))
|
[
"def",
"_init_i18n",
"(",
"self",
")",
"->",
"None",
":",
"if",
"self",
".",
"config",
".",
"language",
"is",
"None",
":",
"self",
".",
"translator",
",",
"has_translation",
"=",
"locale",
".",
"init",
"(",
"[",
"]",
",",
"None",
")",
"else",
":",
"logger",
".",
"info",
"(",
"bold",
"(",
"__",
"(",
"'loading translations [%s]... '",
")",
"%",
"self",
".",
"config",
".",
"language",
")",
",",
"nonl",
"=",
"True",
")",
"# compile mo files if sphinx.po file in user locale directories are updated",
"repo",
"=",
"CatalogRepository",
"(",
"self",
".",
"srcdir",
",",
"self",
".",
"config",
".",
"locale_dirs",
",",
"self",
".",
"config",
".",
"language",
",",
"self",
".",
"config",
".",
"source_encoding",
")",
"for",
"catalog",
"in",
"repo",
".",
"catalogs",
":",
"if",
"catalog",
".",
"domain",
"==",
"'sphinx'",
"and",
"catalog",
".",
"is_outdated",
"(",
")",
":",
"catalog",
".",
"write_mo",
"(",
"self",
".",
"config",
".",
"language",
",",
"self",
".",
"config",
".",
"gettext_allow_fuzzy_translations",
")",
"locale_dirs",
":",
"List",
"[",
"Optional",
"[",
"str",
"]",
"]",
"=",
"list",
"(",
"repo",
".",
"locale_dirs",
")",
"locale_dirs",
"+=",
"[",
"None",
"]",
"locale_dirs",
"+=",
"[",
"path",
".",
"join",
"(",
"package_dir",
",",
"'locale'",
")",
"]",
"self",
".",
"translator",
",",
"has_translation",
"=",
"locale",
".",
"init",
"(",
"locale_dirs",
",",
"self",
".",
"config",
".",
"language",
")",
"if",
"has_translation",
"or",
"self",
".",
"config",
".",
"language",
"==",
"'en'",
":",
"# \"en\" never needs to be translated",
"logger",
".",
"info",
"(",
"__",
"(",
"'done'",
")",
")",
"else",
":",
"logger",
".",
"info",
"(",
"__",
"(",
"'not available for built-in messages'",
")",
")"
] |
https://github.com/sphinx-doc/sphinx/blob/e79681c76843c1339863b365747079b2d662d0c1/sphinx/application.py#L265-L292
|
||
jparkhill/TensorMol
|
d52104dc7ee46eec8301d332a95d672270ac0bd1
|
TensorMol/TFDescriptors/RawSymFunc.py
|
python
|
DifferenceVectorsSet
|
(r_,prec = tf.float64)
|
return (ri-rj)
|
Given a nmol X maxnatom X 3 tensor of coordinates this
returns a nmol X maxnatom X maxnatom X 3 tensor of Rij
|
Given a nmol X maxnatom X 3 tensor of coordinates this
returns a nmol X maxnatom X maxnatom X 3 tensor of Rij
|
[
"Given",
"a",
"nmol",
"X",
"maxnatom",
"X",
"3",
"tensor",
"of",
"coordinates",
"this",
"returns",
"a",
"nmol",
"X",
"maxnatom",
"X",
"maxnatom",
"X",
"3",
"tensor",
"of",
"Rij"
] |
def DifferenceVectorsSet(r_,prec = tf.float64):
"""
Given a nmol X maxnatom X 3 tensor of coordinates this
returns a nmol X maxnatom X maxnatom X 3 tensor of Rij
"""
natom = tf.shape(r_)[1]
nmol = tf.shape(r_)[0]
#ri = tf.tile(tf.reshape(r_,[nmol,1,natom,3]),[1,natom,1,1])
ri = tf.tile(tf.reshape(tf.cast(r_,prec),[nmol,1,natom*3]),[1,natom,1])
ri = tf.reshape(ri, [nmol, natom, natom, 3])
rj = tf.transpose(ri,perm=(0,2,1,3))
return (ri-rj)
|
[
"def",
"DifferenceVectorsSet",
"(",
"r_",
",",
"prec",
"=",
"tf",
".",
"float64",
")",
":",
"natom",
"=",
"tf",
".",
"shape",
"(",
"r_",
")",
"[",
"1",
"]",
"nmol",
"=",
"tf",
".",
"shape",
"(",
"r_",
")",
"[",
"0",
"]",
"#ri = tf.tile(tf.reshape(r_,[nmol,1,natom,3]),[1,natom,1,1])",
"ri",
"=",
"tf",
".",
"tile",
"(",
"tf",
".",
"reshape",
"(",
"tf",
".",
"cast",
"(",
"r_",
",",
"prec",
")",
",",
"[",
"nmol",
",",
"1",
",",
"natom",
"*",
"3",
"]",
")",
",",
"[",
"1",
",",
"natom",
",",
"1",
"]",
")",
"ri",
"=",
"tf",
".",
"reshape",
"(",
"ri",
",",
"[",
"nmol",
",",
"natom",
",",
"natom",
",",
"3",
"]",
")",
"rj",
"=",
"tf",
".",
"transpose",
"(",
"ri",
",",
"perm",
"=",
"(",
"0",
",",
"2",
",",
"1",
",",
"3",
")",
")",
"return",
"(",
"ri",
"-",
"rj",
")"
] |
https://github.com/jparkhill/TensorMol/blob/d52104dc7ee46eec8301d332a95d672270ac0bd1/TensorMol/TFDescriptors/RawSymFunc.py#L110-L121
|
|
wzpan/wukong-robot
|
f679798d20ec9b21fc419b4d058f394821b0a56d
|
robot/logging.py
|
python
|
tail
|
(filepath, n=10)
|
return res
|
Implements tail -n
|
Implements tail -n
|
[
"实现",
"tail",
"-",
"n"
] |
def tail(filepath, n=10):
    """
    Implements tail -n
    """
    res = ""
    with open(filepath, "rb") as f:
        f_len = f.seek(0, 2)
        rem = f_len % PAGE
        page_n = f_len // PAGE
        r_len = rem if rem else PAGE
        while True:
            # if the size to read >= the file size, just read the whole file
            if r_len >= f_len:
                f.seek(0)
                lines = f.readlines()[::-1]
                break
            f.seek(-r_len, 2)
            # print('f_len: {}, rem: {}, page_n: {}, r_len: {}'.format(f_len, rem, page_n, r_len))
            lines = f.readlines()[::-1]
            count = len(lines) - 1  # the last line may be incomplete, so drop it and read more
            if count >= n:  # enough lines read, stop looping
                break
            else:  # not enough lines yet, load one more page and retry
                r_len += PAGE
                page_n -= 1
        for line in lines[:n][::-1]:
            res += line.decode("utf-8")
    return res
|
[
"def",
"tail",
"(",
"filepath",
",",
"n",
"=",
"10",
")",
":",
"res",
"=",
"\"\"",
"with",
"open",
"(",
"filepath",
",",
"\"rb\"",
")",
"as",
"f",
":",
"f_len",
"=",
"f",
".",
"seek",
"(",
"0",
",",
"2",
")",
"rem",
"=",
"f_len",
"%",
"PAGE",
"page_n",
"=",
"f_len",
"//",
"PAGE",
"r_len",
"=",
"rem",
"if",
"rem",
"else",
"PAGE",
"while",
"True",
":",
"# 如果读取的页大小>=文件大小,直接读取数据输出",
"if",
"r_len",
">=",
"f_len",
":",
"f",
".",
"seek",
"(",
"0",
")",
"lines",
"=",
"f",
".",
"readlines",
"(",
")",
"[",
":",
":",
"-",
"1",
"]",
"break",
"f",
".",
"seek",
"(",
"-",
"r_len",
",",
"2",
")",
"# print('f_len: {}, rem: {}, page_n: {}, r_len: {}'.format(f_len, rem, page_n, r_len))",
"lines",
"=",
"f",
".",
"readlines",
"(",
")",
"[",
":",
":",
"-",
"1",
"]",
"count",
"=",
"len",
"(",
"lines",
")",
"-",
"1",
"# 末行可能不完整,减一行,加大读取量",
"if",
"count",
">=",
"n",
":",
"# 如果读取到的行数>=指定行数,则退出循环读取数据",
"break",
"else",
":",
"# 如果读取行数不够,载入更多的页大小读取数据",
"r_len",
"+=",
"PAGE",
"page_n",
"-=",
"1",
"for",
"line",
"in",
"lines",
"[",
":",
"n",
"]",
"[",
":",
":",
"-",
"1",
"]",
":",
"res",
"+=",
"line",
".",
"decode",
"(",
"\"utf-8\"",
")",
"return",
"res"
] |
https://github.com/wzpan/wukong-robot/blob/f679798d20ec9b21fc419b4d058f394821b0a56d/robot/logging.py#L14-L44
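A hypothetical usage sketch, assuming `tail` above is importable from `robot.logging` (the module path is taken from this entry; adjust to your checkout):

import tempfile
from robot.logging import tail

with tempfile.NamedTemporaryFile('w', delete=False, encoding='utf-8') as f:
    f.write(''.join('line {}\n'.format(i) for i in range(100)))

print(tail(f.name, n=3))   # -> 'line 97\nline 98\nline 99\n', like `tail -n 3`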
|
|
donnemartin/gitsome
|
d7c57abc7cb66e9c910a844f15d4536866da3310
|
xonsh/pygments_cache.py
|
python
|
get_lexer_for_filename
|
(filename, text="", **options)
|
return lexer
|
Gets a lexer from a filename (usually via the filename extension).
This mimics the behavior of ``pygments.lexers.get_lexer_for_filename()``
and ``pygments.lexers.guess_lexer_for_filename()``.
|
Gets a lexer from a filename (usually via the filename extension).
This mimics the behavior of ``pygments.lexers.get_lexer_for_filename()``
and ``pygments.lexers.guess_lexer_for_filename()``.
|
[
"Gets",
"a",
"lexer",
"from",
"a",
"filename",
"(",
"usually",
"via",
"the",
"filename",
"extension",
")",
".",
"This",
"mimics",
"the",
"behavior",
"of",
"pygments",
".",
"lexers",
".",
"get_lexer_for_filename",
"()",
"and",
"pygments",
".",
"lexers",
".",
"guess_lexer_for_filename",
"()",
"."
] |
def get_lexer_for_filename(filename, text="", **options):
"""Gets a lexer from a filename (usually via the filename extension).
This mimics the behavior of ``pygments.lexers.get_lexer_for_filename()``
and ``pygments.lexers.guess_lexer_for_filename()``.
"""
if CACHE is None:
load_or_build()
exts = CACHE["lexers"]["exts"]
fname = os.path.basename(filename)
key = fname if fname in exts else os.path.splitext(fname)[1]
if key in exts:
modname, clsname = exts[key]
mod = importlib.import_module(modname)
cls = getattr(mod, clsname)
lexer = cls(**options)
else:
# couldn't find lexer in cache, fallback to the hard way
import inspect
from pygments.lexers import guess_lexer_for_filename
lexer = guess_lexer_for_filename(filename, text, **options)
# add this filename to the cache for future use
cls = type(lexer)
mod = inspect.getmodule(cls)
exts[fname] = (mod.__name__, cls.__name__)
write_cache(cache_filename())
return lexer
|
[
"def",
"get_lexer_for_filename",
"(",
"filename",
",",
"text",
"=",
"\"\"",
",",
"*",
"*",
"options",
")",
":",
"if",
"CACHE",
"is",
"None",
":",
"load_or_build",
"(",
")",
"exts",
"=",
"CACHE",
"[",
"\"lexers\"",
"]",
"[",
"\"exts\"",
"]",
"fname",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"filename",
")",
"key",
"=",
"fname",
"if",
"fname",
"in",
"exts",
"else",
"os",
".",
"path",
".",
"splitext",
"(",
"fname",
")",
"[",
"1",
"]",
"if",
"key",
"in",
"exts",
":",
"modname",
",",
"clsname",
"=",
"exts",
"[",
"key",
"]",
"mod",
"=",
"importlib",
".",
"import_module",
"(",
"modname",
")",
"cls",
"=",
"getattr",
"(",
"mod",
",",
"clsname",
")",
"lexer",
"=",
"cls",
"(",
"*",
"*",
"options",
")",
"else",
":",
"# couldn't find lexer in cache, fallback to the hard way",
"import",
"inspect",
"from",
"pygments",
".",
"lexers",
"import",
"guess_lexer_for_filename",
"lexer",
"=",
"guess_lexer_for_filename",
"(",
"filename",
",",
"text",
",",
"*",
"*",
"options",
")",
"# add this filename to the cache for future use",
"cls",
"=",
"type",
"(",
"lexer",
")",
"mod",
"=",
"inspect",
".",
"getmodule",
"(",
"cls",
")",
"exts",
"[",
"fname",
"]",
"=",
"(",
"mod",
".",
"__name__",
",",
"cls",
".",
"__name__",
")",
"write_cache",
"(",
"cache_filename",
"(",
")",
")",
"return",
"lexer"
] |
https://github.com/donnemartin/gitsome/blob/d7c57abc7cb66e9c910a844f15d4536866da3310/xonsh/pygments_cache.py#L312-L338
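A hypothetical usage sketch; the import path is assumed from this entry's location inside xonsh, and Pygments must be installed for the fallback path:

from xonsh.pygments_cache import get_lexer_for_filename  # import path is an assumption

lexer = get_lexer_for_filename('setup.py')
print(type(lexer).__name__)   # e.g. 'PythonLexer', resolved via the cached extension map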
|
|
magenta/magenta
|
be6558f1a06984faff6d6949234f5fe9ad0ffdb5
|
magenta/models/piano_genie/util.py
|
python
|
demidify
|
(pitches)
|
Transforms MIDI pitches [21,108] to [0, 88).
|
Transforms MIDI pitches [21,108] to [0, 88).
|
[
"Transforms",
"MIDI",
"pitches",
"[",
"21",
"108",
"]",
"to",
"[",
"0",
"88",
")",
"."
] |
def demidify(pitches):
"""Transforms MIDI pitches [21,108] to [0, 88)."""
assertions = [
tf.assert_greater_equal(pitches, 21),
tf.assert_less_equal(pitches, 108)
]
with tf.control_dependencies(assertions):
return pitches - 21
|
[
"def",
"demidify",
"(",
"pitches",
")",
":",
"assertions",
"=",
"[",
"tf",
".",
"assert_greater_equal",
"(",
"pitches",
",",
"21",
")",
",",
"tf",
".",
"assert_less_equal",
"(",
"pitches",
",",
"108",
")",
"]",
"with",
"tf",
".",
"control_dependencies",
"(",
"assertions",
")",
":",
"return",
"pitches",
"-",
"21"
] |
https://github.com/magenta/magenta/blob/be6558f1a06984faff6d6949234f5fe9ad0ffdb5/magenta/models/piano_genie/util.py#L25-L32
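A small sketch of the pitch remapping with plain integers; the TensorFlow version above additionally guards the [21, 108] range with assert ops:

# MIDI pitch 21 (A0) maps to key 0 and MIDI pitch 108 (C8) maps to key 87.
midi_pitches = [21, 60, 108]
keys = [p - 21 for p in midi_pitches]
assert keys == [0, 39, 87]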
|
||
TencentCloud/tencentcloud-sdk-python
|
3677fd1cdc8c5fd626ce001c13fd3b59d1f279d2
|
tencentcloud/tiems/v20190416/models.py
|
python
|
DeleteInstanceResponse.__init__
|
(self)
|
r"""
:param RequestId: Unique request ID, returned with every request. Provide this RequestId when troubleshooting an issue.
:type RequestId: str
|
r"""
:param RequestId: Unique request ID, returned with every request. Provide this RequestId when troubleshooting an issue.
:type RequestId: str
|
[
"r",
":",
"param",
"RequestId",
":",
"唯一请求",
"ID,每次请求都会返回。定位问题时需要提供该次请求的",
"RequestId。",
":",
"type",
"RequestId",
":",
"str"
] |
def __init__(self):
        r"""
        :param RequestId: Unique request ID, returned with every request. Provide this RequestId when troubleshooting an issue.
        :type RequestId: str
        """
        self.RequestId = None
|
[
"def",
"__init__",
"(",
"self",
")",
":",
"self",
".",
"RequestId",
"=",
"None"
] |
https://github.com/TencentCloud/tencentcloud-sdk-python/blob/3677fd1cdc8c5fd626ce001c13fd3b59d1f279d2/tencentcloud/tiems/v20190416/models.py#L528-L533
|
||
dipu-bd/lightnovel-crawler
|
eca7a71f217ce7a6b0a54d2e2afb349571871880
|
sources/en/m/machinetransorg.py
|
python
|
MachineTransOrg.read_novel_info
|
(self)
|
Get novel title, author, cover etc
|
Get novel title, author, cover etc
|
[
"Get",
"novel",
"title",
"autor",
"cover",
"etc"
] |
def read_novel_info(self):
        '''Get novel title, author, cover etc'''
logger.debug('Visiting %s', self.novel_url)
soup = self.get_soup(self.novel_url)
self.novel_title = soup.select_one('div.title h3 b').text
logger.info('Novel title: %s', self.novel_title)
self.novel_author = soup.select_one('div.title h3 span').text
logger.info('Novel author: %s', self.novel_author)
self.novel_cover = self.absolute_url(
soup.select_one('.book-img img')['src'])
logger.info('Novel cover: %s', self.novel_cover)
for a in reversed(soup.select('div.slide-item a')):
ch_title = a.text.strip()
ch_id = len(self.chapters) + 1
if len(self.chapters) % 100 == 0:
vol_id = ch_id//100 + 1
vol_title = 'Volume ' + str(vol_id)
self.volumes.append({
'id': vol_id,
'title': vol_title,
})
# end if
self.chapters.append({
'id': ch_id,
'volume': vol_id,
'title': ch_title,
'url': self.absolute_url(a['href']),
})
# end for
logger.debug('%d chapters and %d volumes found',
len(self.chapters), len(self.volumes))
|
[
"def",
"read_novel_info",
"(",
"self",
")",
":",
"logger",
".",
"debug",
"(",
"'Visiting %s'",
",",
"self",
".",
"novel_url",
")",
"soup",
"=",
"self",
".",
"get_soup",
"(",
"self",
".",
"novel_url",
")",
"self",
".",
"novel_title",
"=",
"soup",
".",
"select_one",
"(",
"'div.title h3 b'",
")",
".",
"text",
"logger",
".",
"info",
"(",
"'Novel title: %s'",
",",
"self",
".",
"novel_title",
")",
"self",
".",
"novel_author",
"=",
"soup",
".",
"select_one",
"(",
"'div.title h3 span'",
")",
".",
"text",
"logger",
".",
"info",
"(",
"'Novel author: %s'",
",",
"self",
".",
"novel_author",
")",
"self",
".",
"novel_cover",
"=",
"self",
".",
"absolute_url",
"(",
"soup",
".",
"select_one",
"(",
"'.book-img img'",
")",
"[",
"'src'",
"]",
")",
"logger",
".",
"info",
"(",
"'Novel cover: %s'",
",",
"self",
".",
"novel_cover",
")",
"for",
"a",
"in",
"reversed",
"(",
"soup",
".",
"select",
"(",
"'div.slide-item a'",
")",
")",
":",
"ch_title",
"=",
"a",
".",
"text",
".",
"strip",
"(",
")",
"ch_id",
"=",
"len",
"(",
"self",
".",
"chapters",
")",
"+",
"1",
"if",
"len",
"(",
"self",
".",
"chapters",
")",
"%",
"100",
"==",
"0",
":",
"vol_id",
"=",
"ch_id",
"//",
"100",
"+",
"1",
"vol_title",
"=",
"'Volume '",
"+",
"str",
"(",
"vol_id",
")",
"self",
".",
"volumes",
".",
"append",
"(",
"{",
"'id'",
":",
"vol_id",
",",
"'title'",
":",
"vol_title",
",",
"}",
")",
"# end if",
"self",
".",
"chapters",
".",
"append",
"(",
"{",
"'id'",
":",
"ch_id",
",",
"'volume'",
":",
"vol_id",
",",
"'title'",
":",
"ch_title",
",",
"'url'",
":",
"self",
".",
"absolute_url",
"(",
"a",
"[",
"'href'",
"]",
")",
",",
"}",
")",
"# end for",
"logger",
".",
"debug",
"(",
"'%d chapters and %d volumes found'",
",",
"len",
"(",
"self",
".",
"chapters",
")",
",",
"len",
"(",
"self",
".",
"volumes",
")",
")"
] |
https://github.com/dipu-bd/lightnovel-crawler/blob/eca7a71f217ce7a6b0a54d2e2afb349571871880/sources/en/m/machinetransorg.py#L32-L67
|
||
Chaffelson/nipyapi
|
d3b186fd701ce308c2812746d98af9120955e810
|
nipyapi/canvas.py
|
python
|
get_variable_registry
|
(process_group, ancestors=True)
|
Gets the contents of the variable registry attached to a Process Group
Args:
process_group (ProcessGroupEntity): The Process Group to retrieve the
Variable Registry from
ancestors (bool): Whether to include the Variable Registries from child
Process Groups
Returns:
(VariableRegistryEntity): The Variable Registry
|
Gets the contents of the variable registry attached to a Process Group
|
[
"Gets",
"the",
"contents",
"of",
"the",
"variable",
"registry",
"attached",
"to",
"a",
"Process",
"Group"
] |
def get_variable_registry(process_group, ancestors=True):
"""
Gets the contents of the variable registry attached to a Process Group
Args:
process_group (ProcessGroupEntity): The Process Group to retrieve the
Variable Registry from
ancestors (bool): Whether to include the Variable Registries from child
Process Groups
Returns:
(VariableRegistryEntity): The Variable Registry
"""
with nipyapi.utils.rest_exceptions():
return nipyapi.nifi.ProcessGroupsApi().get_variable_registry(
process_group.id,
include_ancestor_groups=ancestors
)
|
[
"def",
"get_variable_registry",
"(",
"process_group",
",",
"ancestors",
"=",
"True",
")",
":",
"with",
"nipyapi",
".",
"utils",
".",
"rest_exceptions",
"(",
")",
":",
"return",
"nipyapi",
".",
"nifi",
".",
"ProcessGroupsApi",
"(",
")",
".",
"get_variable_registry",
"(",
"process_group",
".",
"id",
",",
"include_ancestor_groups",
"=",
"ancestors",
")"
] |
https://github.com/Chaffelson/nipyapi/blob/d3b186fd701ce308c2812746d98af9120955e810/nipyapi/canvas.py#L790-L808
|
||
djsutherland/opt-mmd
|
5c02a92972df099628a4bc8351980ad9f317b6d0
|
gan/model_tmmd.py
|
python
|
DCGAN.__init__
|
(self, sess, config, is_crop=True,
batch_size=64, output_size=64,
z_dim=100, gf_dim=64, df_dim=64,
gfc_dim=1024, dfc_dim=1024, c_dim=3, dataset_name='default',
checkpoint_dir=None, sample_dir=None, log_dir=None)
|
Args:
sess: TensorFlow session
batch_size: The size of batch. Should be specified before training.
output_size: (optional) The resolution in pixels of the images. [64]
z_dim: (optional) Dimension of dim for Z. [100]
gf_dim: (optional) Dimension of gen filters in first conv layer. [64]
df_dim: (optional) Dimension of discrim filters in first conv layer. [64]
gfc_dim: (optional) Dimension of gen units for fully connected layer. [1024]
dfc_dim: (optional) Dimension of discrim units for fully connected layer. [1024]
c_dim: (optional) Dimension of image color. For grayscale input, set to 1. [3]
|
Args:
sess: TensorFlow session
batch_size: The size of batch. Should be specified before training.
output_size: (optional) The resolution in pixels of the images. [64]
z_dim: (optional) Dimension of dim for Z. [100]
gf_dim: (optional) Dimension of gen filters in first conv layer. [64]
df_dim: (optional) Dimension of discrim filters in first conv layer. [64]
gfc_dim: (optional) Dimension of gen units for fully connected layer. [1024]
dfc_dim: (optional) Dimension of discrim units for fully connected layer. [1024]
c_dim: (optional) Dimension of image color. For grayscale input, set to 1. [3]
|
[
"Args",
":",
"sess",
":",
"TensorFlow",
"session",
"batch_size",
":",
"The",
"size",
"of",
"batch",
".",
"Should",
"be",
"specified",
"before",
"training",
".",
"output_size",
":",
"(",
"optional",
")",
"The",
"resolution",
"in",
"pixels",
"of",
"the",
"images",
".",
"[",
"64",
"]",
"z_dim",
":",
"(",
"optional",
")",
"Dimension",
"of",
"dim",
"for",
"Z",
".",
"[",
"100",
"]",
"gf_dim",
":",
"(",
"optional",
")",
"Dimension",
"of",
"gen",
"filters",
"in",
"first",
"conv",
"layer",
".",
"[",
"64",
"]",
"df_dim",
":",
"(",
"optional",
")",
"Dimension",
"of",
"discrim",
"filters",
"in",
"first",
"conv",
"layer",
".",
"[",
"64",
"]",
"gfc_dim",
":",
"(",
"optional",
")",
"Dimension",
"of",
"gen",
"units",
"for",
"for",
"fully",
"connected",
"layer",
".",
"[",
"1024",
"]",
"dfc_dim",
":",
"(",
"optional",
")",
"Dimension",
"of",
"discrim",
"units",
"for",
"fully",
"connected",
"layer",
".",
"[",
"1024",
"]",
"c_dim",
":",
"(",
"optional",
")",
"Dimension",
"of",
"image",
"color",
".",
"For",
"grayscale",
"input",
"set",
"to",
"1",
".",
"[",
"3",
"]"
] |
def __init__(self, sess, config, is_crop=True,
batch_size=64, output_size=64,
z_dim=100, gf_dim=64, df_dim=64,
gfc_dim=1024, dfc_dim=1024, c_dim=3, dataset_name='default',
checkpoint_dir=None, sample_dir=None, log_dir=None):
"""
Args:
sess: TensorFlow session
batch_size: The size of batch. Should be specified before training.
output_size: (optional) The resolution in pixels of the images. [64]
z_dim: (optional) Dimension of dim for Z. [100]
gf_dim: (optional) Dimension of gen filters in first conv layer. [64]
df_dim: (optional) Dimension of discrim filters in first conv layer. [64]
gfc_dim: (optional) Dimension of gen units for for fully connected layer. [1024]
dfc_dim: (optional) Dimension of discrim units for fully connected layer. [1024]
c_dim: (optional) Dimension of image color. For grayscale input, set to 1. [3]
"""
self.sess = sess
self.config = config
self.is_crop = is_crop
self.is_grayscale = (c_dim == 1)
self.batch_size = batch_size
self.sample_size = batch_size
self.output_size = output_size
self.sample_dir = sample_dir
self.log_dir=log_dir
self.checkpoint_dir = checkpoint_dir
self.z_dim = z_dim
self.gf_dim = gf_dim
self.df_dim = df_dim
self.gfc_dim = gfc_dim
self.dfc_dim = dfc_dim
self.c_dim = c_dim
# batch normalization : deals with poor initialization helps gradient flow
self.d_bn1 = batch_norm(name='d_bn1')
self.d_bn2 = batch_norm(name='d_bn2')
self.d_bn3 = batch_norm(name='d_bn3')
self.g_bn0 = batch_norm(name='g_bn0')
self.g_bn1 = batch_norm(name='g_bn1')
self.g_bn2 = batch_norm(name='g_bn2')
self.g_bn3 = batch_norm(name='g_bn3')
self.dataset_name = dataset_name
self.build_model()
|
[
"def",
"__init__",
"(",
"self",
",",
"sess",
",",
"config",
",",
"is_crop",
"=",
"True",
",",
"batch_size",
"=",
"64",
",",
"output_size",
"=",
"64",
",",
"z_dim",
"=",
"100",
",",
"gf_dim",
"=",
"64",
",",
"df_dim",
"=",
"64",
",",
"gfc_dim",
"=",
"1024",
",",
"dfc_dim",
"=",
"1024",
",",
"c_dim",
"=",
"3",
",",
"dataset_name",
"=",
"'default'",
",",
"checkpoint_dir",
"=",
"None",
",",
"sample_dir",
"=",
"None",
",",
"log_dir",
"=",
"None",
")",
":",
"self",
".",
"sess",
"=",
"sess",
"self",
".",
"config",
"=",
"config",
"self",
".",
"is_crop",
"=",
"is_crop",
"self",
".",
"is_grayscale",
"=",
"(",
"c_dim",
"==",
"1",
")",
"self",
".",
"batch_size",
"=",
"batch_size",
"self",
".",
"sample_size",
"=",
"batch_size",
"self",
".",
"output_size",
"=",
"output_size",
"self",
".",
"sample_dir",
"=",
"sample_dir",
"self",
".",
"log_dir",
"=",
"log_dir",
"self",
".",
"checkpoint_dir",
"=",
"checkpoint_dir",
"self",
".",
"z_dim",
"=",
"z_dim",
"self",
".",
"gf_dim",
"=",
"gf_dim",
"self",
".",
"df_dim",
"=",
"df_dim",
"self",
".",
"gfc_dim",
"=",
"gfc_dim",
"self",
".",
"dfc_dim",
"=",
"dfc_dim",
"self",
".",
"c_dim",
"=",
"c_dim",
"# batch normalization : deals with poor initialization helps gradient flow",
"self",
".",
"d_bn1",
"=",
"batch_norm",
"(",
"name",
"=",
"'d_bn1'",
")",
"self",
".",
"d_bn2",
"=",
"batch_norm",
"(",
"name",
"=",
"'d_bn2'",
")",
"self",
".",
"d_bn3",
"=",
"batch_norm",
"(",
"name",
"=",
"'d_bn3'",
")",
"self",
".",
"g_bn0",
"=",
"batch_norm",
"(",
"name",
"=",
"'g_bn0'",
")",
"self",
".",
"g_bn1",
"=",
"batch_norm",
"(",
"name",
"=",
"'g_bn1'",
")",
"self",
".",
"g_bn2",
"=",
"batch_norm",
"(",
"name",
"=",
"'g_bn2'",
")",
"self",
".",
"g_bn3",
"=",
"batch_norm",
"(",
"name",
"=",
"'g_bn3'",
")",
"self",
".",
"dataset_name",
"=",
"dataset_name",
"self",
".",
"build_model",
"(",
")"
] |
https://github.com/djsutherland/opt-mmd/blob/5c02a92972df099628a4bc8351980ad9f317b6d0/gan/model_tmmd.py#L17-L65
|
||
CarlosGS/Cyclone-PCB-Factory
|
2d3136de424a94ea3579a24caf167e540daf0cad
|
Software/PythonScripts/Replath/pyRepRap/reprap/shapeplotter.py
|
python
|
point
|
(point)
|
return poly
|
Returns polygon for point (x, y) as a Polygon Object
|
Returns polygon for point (x, y) as a Polygon Object
|
[
"Returns",
"polygon",
"for",
"point",
"(",
"x",
"y",
")",
"as",
"a",
"Polygon",
"Object"
] |
def point(point):
"""Returns polygon for point (x, y) as a Polygon Object"""
poly = toolpath.Polygon()
x, y = point
poly.addPoint( toolpath.Point(x, y) )
return poly
|
[
"def",
"point",
"(",
"point",
")",
":",
"poly",
"=",
"toolpath",
".",
"Polygon",
"(",
")",
"x",
",",
"y",
"=",
"point",
"poly",
".",
"addPoint",
"(",
"toolpath",
".",
"Point",
"(",
"x",
",",
"y",
")",
")",
"return",
"poly"
] |
https://github.com/CarlosGS/Cyclone-PCB-Factory/blob/2d3136de424a94ea3579a24caf167e540daf0cad/Software/PythonScripts/Replath/pyRepRap/reprap/shapeplotter.py#L42-L47
|
|
Thriftpy/thriftpy2
|
8755065bdd3a51b55cbab488fe628027f2c060db
|
thriftpy2/parser/parser.py
|
python
|
p_definition_type
|
(p)
|
definition_type : base_type
| container_type
|
definition_type : base_type
| container_type
|
[
"definition_type",
":",
"base_type",
"|",
"container_type"
] |
def p_definition_type(p):
'''definition_type : base_type
| container_type'''
p[0] = p[1]
|
[
"def",
"p_definition_type",
"(",
"p",
")",
":",
"p",
"[",
"0",
"]",
"=",
"p",
"[",
"1",
"]"
] |
https://github.com/Thriftpy/thriftpy2/blob/8755065bdd3a51b55cbab488fe628027f2c060db/thriftpy2/parser/parser.py#L481-L484
|
||
danielfrg/copper
|
956e9ae607aec461d4fe4f6e7b0ccd9ed556fc79
|
copper/ml/gdbn/npmat.py
|
python
|
CUDAMatrix.subtract_mult
|
(self, mat2, alpha = 1.)
|
return self
|
Subtract a multiple of mat2 from the matrix.
|
Subtract a multiple of mat2 from the matrix.
|
[
"Subtract",
"a",
"multiple",
"of",
"mat2",
"from",
"the",
"matrix",
"."
] |
def subtract_mult(self, mat2, alpha = 1.):
"""
Subtract a multiple of mat2 from the matrix.
"""
if mat2.shape != self.shape:
raise IncompatibleDimensionsException
self.numpy_array -= mat2.numpy_array * alpha
return self
|
[
"def",
"subtract_mult",
"(",
"self",
",",
"mat2",
",",
"alpha",
"=",
"1.",
")",
":",
"if",
"mat2",
".",
"shape",
"!=",
"self",
".",
"shape",
":",
"raise",
"IncompatibleDimensionsException",
"self",
".",
"numpy_array",
"-=",
"mat2",
".",
"numpy_array",
"*",
"alpha",
"return",
"self"
] |
https://github.com/danielfrg/copper/blob/956e9ae607aec461d4fe4f6e7b0ccd9ed556fc79/copper/ml/gdbn/npmat.py#L845-L855
|
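The npmat row above is a NumPy-backed stand-in for a CUDA matrix class, so the update it performs can be reproduced with plain NumPy. A minimal sketch of the equivalent arithmetic (plain NumPy only; the CUDAMatrix wrapper itself is not instantiated here):

# Hedged sketch: the in-place update subtract_mult performs, in plain NumPy.
import numpy as np

a = np.array([[1.0, 2.0], [3.0, 4.0]])
b = np.array([[0.5, 1.0], [1.5, 2.0]])
alpha = 2.0

# Mirrors self.numpy_array -= mat2.numpy_array * alpha from the row above.
a -= b * alpha
print(a)  # [[0. 0.] [0. 0.]]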
|
stopstalk/stopstalk-deployment
|
10c3ab44c4ece33ae515f6888c15033db2004bb1
|
aws_lambda/spoj_aws_lambda_function/lambda_code/pip/_vendor/distlib/database.py
|
python
|
DependencyGraph.add_distribution
|
(self, distribution)
|
Add the *distribution* to the graph.
:type distribution: :class:`distutils2.database.InstalledDistribution`
or :class:`distutils2.database.EggInfoDistribution`
|
Add the *distribution* to the graph.
|
[
"Add",
"the",
"*",
"distribution",
"*",
"to",
"the",
"graph",
"."
] |
def add_distribution(self, distribution):
"""Add the *distribution* to the graph.
:type distribution: :class:`distutils2.database.InstalledDistribution`
or :class:`distutils2.database.EggInfoDistribution`
"""
self.adjacency_list[distribution] = []
self.reverse_list[distribution] = []
|
[
"def",
"add_distribution",
"(",
"self",
",",
"distribution",
")",
":",
"self",
".",
"adjacency_list",
"[",
"distribution",
"]",
"=",
"[",
"]",
"self",
".",
"reverse_list",
"[",
"distribution",
"]",
"=",
"[",
"]"
] |
https://github.com/stopstalk/stopstalk-deployment/blob/10c3ab44c4ece33ae515f6888c15033db2004bb1/aws_lambda/spoj_aws_lambda_function/lambda_code/pip/_vendor/distlib/database.py#L1099-L1106
|
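As the row above shows, add_distribution only initialises two bookkeeping dicts keyed by the distribution object, so any hashable placeholder is enough to exercise it. A minimal sketch, assuming the standalone distlib package is importable (the row itself uses pip's vendored copy):

# Hedged sketch: DependencyGraph bookkeeping with a plain stand-in object.
from distlib.database import DependencyGraph

graph = DependencyGraph()
dist = "example-dist 1.0"  # stand-in for an InstalledDistribution
graph.add_distribution(dist)

print(graph.adjacency_list[dist])  # []
print(graph.reverse_list[dist])    # []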
||
clinton-hall/nzbToMedia
|
27669389216902d1085660167e7bda0bd8527ecf
|
libs/common/pbr/version.py
|
python
|
SemanticVersion.decrement
|
(self)
|
return SemanticVersion(
new_major, new_minor, new_patch)
|
Return a decremented SemanticVersion.
Decrementing versions doesn't make a lot of sense - this method only
exists to support rendering of pre-release versions strings into
serialisations (such as rpm) with no sort-before operator.
The 9999 magic version component is from the spec on this - pbr-semver.
:return: A new SemanticVersion object.
|
Return a decremented SemanticVersion.
|
[
"Return",
"a",
"decremented",
"SemanticVersion",
"."
] |
def decrement(self):
"""Return a decremented SemanticVersion.
Decrementing versions doesn't make a lot of sense - this method only
exists to support rendering of pre-release versions strings into
serialisations (such as rpm) with no sort-before operator.
The 9999 magic version component is from the spec on this - pbr-semver.
:return: A new SemanticVersion object.
"""
if self._patch:
new_patch = self._patch - 1
new_minor = self._minor
new_major = self._major
else:
new_patch = 9999
if self._minor:
new_minor = self._minor - 1
new_major = self._major
else:
new_minor = 9999
if self._major:
new_major = self._major - 1
else:
new_major = 0
return SemanticVersion(
new_major, new_minor, new_patch)
|
[
"def",
"decrement",
"(",
"self",
")",
":",
"if",
"self",
".",
"_patch",
":",
"new_patch",
"=",
"self",
".",
"_patch",
"-",
"1",
"new_minor",
"=",
"self",
".",
"_minor",
"new_major",
"=",
"self",
".",
"_major",
"else",
":",
"new_patch",
"=",
"9999",
"if",
"self",
".",
"_minor",
":",
"new_minor",
"=",
"self",
".",
"_minor",
"-",
"1",
"new_major",
"=",
"self",
".",
"_major",
"else",
":",
"new_minor",
"=",
"9999",
"if",
"self",
".",
"_major",
":",
"new_major",
"=",
"self",
".",
"_major",
"-",
"1",
"else",
":",
"new_major",
"=",
"0",
"return",
"SemanticVersion",
"(",
"new_major",
",",
"new_minor",
",",
"new_patch",
")"
] |
https://github.com/clinton-hall/nzbToMedia/blob/27669389216902d1085660167e7bda0bd8527ecf/libs/common/pbr/version.py#L247-L274
|
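Two concrete decrements that follow the branch logic in the row above. A minimal sketch assuming pbr is installed; using brief_string() as the rendering method is an assumption about pbr's API, not something stated in this row:

# Hedged sketch of SemanticVersion.decrement (assumes pbr is installed).
from pbr.version import SemanticVersion

# Patch is non-zero, so only the patch component drops: 1.2.3 -> 1.2.2.
print(SemanticVersion(1, 2, 3).decrement().brief_string())

# Zero minor/patch roll over to the 9999 magic value: 2.0.0 -> 1.9999.9999.
print(SemanticVersion(2, 0, 0).decrement().brief_string())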
|
larryhastings/gilectomy
|
4315ec3f1d6d4f813cc82ce27a24e7f784dbfc1a
|
Lib/importlib/_bootstrap.py
|
python
|
BuiltinImporter.exec_module
|
(self, module)
|
Exec a built-in module
|
Exec a built-in module
|
[
"Exec",
"a",
"built",
"-",
"in",
"module"
] |
def exec_module(self, module):
"""Exec a built-in module"""
_call_with_frames_removed(_imp.exec_builtin, module)
|
[
"def",
"exec_module",
"(",
"self",
",",
"module",
")",
":",
"_call_with_frames_removed",
"(",
"_imp",
".",
"exec_builtin",
",",
"module",
")"
] |
https://github.com/larryhastings/gilectomy/blob/4315ec3f1d6d4f813cc82ce27a24e7f784dbfc1a/Lib/importlib/_bootstrap.py#L746-L748
|
||
aiortc/aiortc
|
a4acc4c656c12ce1bc23edc2c365167edd6a9237
|
src/aiortc/rtcrtpreceiver.py
|
python
|
RTCRtpReceiver.getCapabilities
|
(self, kind)
|
return get_capabilities(kind)
|
Returns the most optimistic view of the system's capabilities for
receiving media of the given `kind`.
:rtype: :class:`RTCRtpCapabilities`
|
Returns the most optimistic view of the system's capabilities for
receiving media of the given `kind`.
|
[
"Returns",
"the",
"most",
"optimistic",
"view",
"of",
"the",
"system",
"s",
"capabilities",
"for",
"receiving",
"media",
"of",
"the",
"given",
"kind",
"."
] |
def getCapabilities(self, kind) -> Optional[RTCRtpCapabilities]:
"""
Returns the most optimistic view of the system's capabilities for
receiving media of the given `kind`.
:rtype: :class:`RTCRtpCapabilities`
"""
return get_capabilities(kind)
|
[
"def",
"getCapabilities",
"(",
"self",
",",
"kind",
")",
"->",
"Optional",
"[",
"RTCRtpCapabilities",
"]",
":",
"return",
"get_capabilities",
"(",
"kind",
")"
] |
https://github.com/aiortc/aiortc/blob/a4acc4c656c12ce1bc23edc2c365167edd6a9237/src/aiortc/rtcrtpreceiver.py#L291-L298
|
|
CCExtractor/vardbg
|
8baabb93d2e8afccc5ee837bd8301a5f765635c2
|
vardbg/data.py
|
python
|
FrameInfo.__lt__
|
(self, other)
|
return self.line < other.line
|
[] |
def __lt__(self, other):
return self.line < other.line
|
[
"def",
"__lt__",
"(",
"self",
",",
"other",
")",
":",
"return",
"self",
".",
"line",
"<",
"other",
".",
"line"
] |
https://github.com/CCExtractor/vardbg/blob/8baabb93d2e8afccc5ee837bd8301a5f765635c2/vardbg/data.py#L119-L120
|
|||
bendmorris/static-python
|
2e0f8c4d7ed5b359dc7d8a75b6fb37e6b6c5c473
|
Lib/sysconfig.py
|
python
|
get_path
|
(name, scheme=_get_default_scheme(), vars=None, expand=True)
|
return get_paths(scheme, vars, expand)[name]
|
Return a path corresponding to the scheme.
``scheme`` is the install scheme name.
|
Return a path corresponding to the scheme.
|
[
"Return",
"a",
"path",
"corresponding",
"to",
"the",
"scheme",
"."
] |
def get_path(name, scheme=_get_default_scheme(), vars=None, expand=True):
"""Return a path corresponding to the scheme.
``scheme`` is the install scheme name.
"""
return get_paths(scheme, vars, expand)[name]
|
[
"def",
"get_path",
"(",
"name",
",",
"scheme",
"=",
"_get_default_scheme",
"(",
")",
",",
"vars",
"=",
"None",
",",
"expand",
"=",
"True",
")",
":",
"return",
"get_paths",
"(",
"scheme",
",",
"vars",
",",
"expand",
")",
"[",
"name",
"]"
] |
https://github.com/bendmorris/static-python/blob/2e0f8c4d7ed5b359dc7d8a75b6fb37e6b6c5c473/Lib/sysconfig.py#L494-L499
|
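The sysconfig row above documents a standard-library API, so a usage sketch needs no third-party assumptions; the two path names queried below are standard install-scheme keys:

# Hedged usage sketch of sysconfig.get_path from the standard library.
import sysconfig

# Directory for pure-Python packages under the default install scheme.
print(sysconfig.get_path("purelib"))

# Directory for installed scripts; vars and expand keep their defaults.
print(sysconfig.get_path("scripts"))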
|
nextstrain/ncov
|
71e7d593e5c97b67ad657bca41fb8e61b50c2803
|
workflow/lib/persistent_dict.py
|
python
|
PersistentDict.store
|
(self, key, value, _skip_if_present=False, _stacklevel=0)
|
[] |
def store(self, key, value, _skip_if_present=False, _stacklevel=0):
hexdigest_key = self.key_builder(key)
cleanup_m = CleanupManager()
try:
try:
LockManager(cleanup_m, self._lock_file(hexdigest_key),
1 + _stacklevel)
item_dir_m = ItemDirManager(
cleanup_m, self._item_dir(hexdigest_key),
delete_on_error=True)
if item_dir_m.existed:
if _skip_if_present:
return
item_dir_m.reset()
item_dir_m.mkdir()
key_path = self._key_file(hexdigest_key)
value_path = self._contents_file(hexdigest_key)
self._write(value_path, value)
self._write(key_path, key)
logger.debug("%s: cache store [key=%s]",
self.identifier, hexdigest_key)
except Exception:
cleanup_m.error_clean_up()
raise
finally:
cleanup_m.clean_up()
|
[
"def",
"store",
"(",
"self",
",",
"key",
",",
"value",
",",
"_skip_if_present",
"=",
"False",
",",
"_stacklevel",
"=",
"0",
")",
":",
"hexdigest_key",
"=",
"self",
".",
"key_builder",
"(",
"key",
")",
"cleanup_m",
"=",
"CleanupManager",
"(",
")",
"try",
":",
"try",
":",
"LockManager",
"(",
"cleanup_m",
",",
"self",
".",
"_lock_file",
"(",
"hexdigest_key",
")",
",",
"1",
"+",
"_stacklevel",
")",
"item_dir_m",
"=",
"ItemDirManager",
"(",
"cleanup_m",
",",
"self",
".",
"_item_dir",
"(",
"hexdigest_key",
")",
",",
"delete_on_error",
"=",
"True",
")",
"if",
"item_dir_m",
".",
"existed",
":",
"if",
"_skip_if_present",
":",
"return",
"item_dir_m",
".",
"reset",
"(",
")",
"item_dir_m",
".",
"mkdir",
"(",
")",
"key_path",
"=",
"self",
".",
"_key_file",
"(",
"hexdigest_key",
")",
"value_path",
"=",
"self",
".",
"_contents_file",
"(",
"hexdigest_key",
")",
"self",
".",
"_write",
"(",
"value_path",
",",
"value",
")",
"self",
".",
"_write",
"(",
"key_path",
",",
"key",
")",
"logger",
".",
"debug",
"(",
"\"%s: cache store [key=%s]\"",
",",
"self",
".",
"identifier",
",",
"hexdigest_key",
")",
"except",
"Exception",
":",
"cleanup_m",
".",
"error_clean_up",
"(",
")",
"raise",
"finally",
":",
"cleanup_m",
".",
"clean_up",
"(",
")"
] |
https://github.com/nextstrain/ncov/blob/71e7d593e5c97b67ad657bca41fb8e61b50c2803/workflow/lib/persistent_dict.py#L750-L781
|
||||
facebookresearch/Large-Scale-VRD
|
7ababfe1023941c3653d7aebe9f835a47f5e8277
|
lib/utils/c2.py
|
python
|
CudaDevice
|
(gpu_id)
|
return core.DeviceOption(caffe2_pb2.CUDA, gpu_id)
|
Create a Cuda device.
|
Create a Cuda device.
|
[
"Create",
"a",
"Cuda",
"device",
"."
] |
def CudaDevice(gpu_id):
"""Create a Cuda device."""
return core.DeviceOption(caffe2_pb2.CUDA, gpu_id)
|
[
"def",
"CudaDevice",
"(",
"gpu_id",
")",
":",
"return",
"core",
".",
"DeviceOption",
"(",
"caffe2_pb2",
".",
"CUDA",
",",
"gpu_id",
")"
] |
https://github.com/facebookresearch/Large-Scale-VRD/blob/7ababfe1023941c3653d7aebe9f835a47f5e8277/lib/utils/c2.py#L126-L128
|
|
FederatedAI/FATE
|
32540492623568ecd1afcb367360133616e02fa3
|
python/fate_client/flow_client/flow_cli/commands/job.py
|
python
|
log
|
(ctx, **kwargs)
|
\b
- DESCRIPTION:
Download Log Files of A Specified Job.
\b
- USAGE:
flow job log -j JOB_ID --output-path ./examples/
|
\b
- DESCRIPTION:
Download Log Files of A Specified Job.
|
[
"\\",
"b",
"-",
"DESCRIPTION",
":",
"Download",
"Log",
"Files",
"of",
"A",
"Specified",
"Job",
"."
] |
def log(ctx, **kwargs):
"""
\b
- DESCRIPTION:
Download Log Files of A Specified Job.
\b
- USAGE:
flow job log -j JOB_ID --output-path ./examples/
"""
config_data, dsl_data = preprocess(**kwargs)
job_id = config_data['job_id']
tar_file_name = 'job_{}_log.tar.gz'.format(job_id)
extract_dir = os.path.join(config_data['output_path'], 'job_{}_log'.format(job_id))
with closing(access_server('post', ctx, 'job/log/download', config_data, False, stream=True)) as response:
if response.status_code == 200:
download_from_request(http_response=response, tar_file_name=tar_file_name, extract_dir=extract_dir)
res = {'retcode': 0,
'directory': extract_dir,
'retmsg': 'download successfully, please check {} directory'.format(extract_dir)}
else:
res = response.json() if isinstance(response, requests.models.Response) else response
prettify(res)
|
[
"def",
"log",
"(",
"ctx",
",",
"*",
"*",
"kwargs",
")",
":",
"config_data",
",",
"dsl_data",
"=",
"preprocess",
"(",
"*",
"*",
"kwargs",
")",
"job_id",
"=",
"config_data",
"[",
"'job_id'",
"]",
"tar_file_name",
"=",
"'job_{}_log.tar.gz'",
".",
"format",
"(",
"job_id",
")",
"extract_dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"config_data",
"[",
"'output_path'",
"]",
",",
"'job_{}_log'",
".",
"format",
"(",
"job_id",
")",
")",
"with",
"closing",
"(",
"access_server",
"(",
"'post'",
",",
"ctx",
",",
"'job/log/download'",
",",
"config_data",
",",
"False",
",",
"stream",
"=",
"True",
")",
")",
"as",
"response",
":",
"if",
"response",
".",
"status_code",
"==",
"200",
":",
"download_from_request",
"(",
"http_response",
"=",
"response",
",",
"tar_file_name",
"=",
"tar_file_name",
",",
"extract_dir",
"=",
"extract_dir",
")",
"res",
"=",
"{",
"'retcode'",
":",
"0",
",",
"'directory'",
":",
"extract_dir",
",",
"'retmsg'",
":",
"'download successfully, please check {} directory'",
".",
"format",
"(",
"extract_dir",
")",
"}",
"else",
":",
"res",
"=",
"response",
".",
"json",
"(",
")",
"if",
"isinstance",
"(",
"response",
",",
"requests",
".",
"models",
".",
"Response",
")",
"else",
"response",
"prettify",
"(",
"res",
")"
] |
https://github.com/FederatedAI/FATE/blob/32540492623568ecd1afcb367360133616e02fa3/python/fate_client/flow_client/flow_cli/commands/job.py#L204-L226
|
||
motion-planning/rrt-algorithms
|
22aad606516eef751160433329ea2b1f70ef27d0
|
src/rrt/rrt_star_bid.py
|
python
|
RRTStarBidirectional.swap_trees
|
(self)
|
Swap trees and start/goal
|
Swap trees and start/goal
|
[
"Swap",
"trees",
"and",
"start",
"/",
"goal"
] |
def swap_trees(self):
"""
Swap trees and start/goal
"""
# swap trees
self.trees[0], self.trees[1] = self.trees[1], self.trees[0]
# swap start/goal
self.x_init, self.x_goal = self.x_goal, self.x_init
self.swapped = not self.swapped
|
[
"def",
"swap_trees",
"(",
"self",
")",
":",
"# swap trees",
"self",
".",
"trees",
"[",
"0",
"]",
",",
"self",
".",
"trees",
"[",
"1",
"]",
"=",
"self",
".",
"trees",
"[",
"1",
"]",
",",
"self",
".",
"trees",
"[",
"0",
"]",
"# swap start/goal",
"self",
".",
"x_init",
",",
"self",
".",
"x_goal",
"=",
"self",
".",
"x_goal",
",",
"self",
".",
"x_init",
"self",
".",
"swapped",
"=",
"not",
"self",
".",
"swapped"
] |
https://github.com/motion-planning/rrt-algorithms/blob/22aad606516eef751160433329ea2b1f70ef27d0/src/rrt/rrt_star_bid.py#L50-L58
|